func readEncDNA() []byte {
	in, startTok := bufio.NewReader(os.Stdin), []byte(">THREE ")
	for line, err := in.ReadSlice('\n'); !bytes.HasPrefix(line, startTok); line, err = in.ReadSlice('\n') {
		if err != nil {
			log.Panicf("Error: Could not read input from stdin; Details: %s", err)
		}
	}
	ascii, err := ioutil.ReadAll(in)
	if err != nil {
		log.Panicf("Error: Could not read input from stdin; Details: %s", err)
	}
	// Encode in place: j counts the nucleotides written so far.
	j := 0
	for i, c, asciic := 0, byte(0), len(ascii); i < asciic; i++ {
		c = ascii[i]
		switch c {
		case 'a', 'A':
			c = 0
		case 't', 'T':
			c = 1
		case 'g', 'G':
			c = 2
		case 'c', 'C':
			c = 3
		case '\n':
			continue
		default:
			log.Fatalf("Error: Invalid nucleotide value: '%c'", ascii[i])
		}
		ascii[j] = c
		j++
	}
	return ascii[:j]
}
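// Hedged companion sketch (not in the original source): readEncDNA maps each
// nucleotide to a 2-bit code (A=0, T=1, G=2, C=3). A hypothetical helper to
// reverse that encoding, useful when inspecting the packed buffer, could look
// like this:
func decodeDNA(enc []byte) []byte {
	letters := [4]byte{'A', 'T', 'G', 'C'}
	out := make([]byte, len(enc))
	for i, c := range enc {
		out[i] = letters[c]
	}
	return out
}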
func filterStateChanges(in <-chan *dbus.Signal, out chan<- bool) {
	for signal := range in {
		if len(signal.Body) != 3 {
			log.Panicf("protocol error: NetworkManager sent a StateChanged signal with %d members, expected 3\n", len(signal.Body))
		}
		newState, ok := signal.Body[0].(uint32)
		if !ok {
			log.Panicf("protocol error: NetworkManager sent a StateChanged signal where members are not uint32\n")
		}
		if newState != NM_DEVICE_STATE_ACTIVATED {
			continue
		}

		// We don’t use NetworkManager’s CheckConnectivity method because it
		// has many false-positives (at least with NetworkManager 1.0.6), i.e.
		// it will say you have full connectivity, even though you are
		// connected to an Android tethering hotspot without upstream
		// connectivity. So, we save the code complexity of dealing with an API
		// that doesn’t provide us useful data.

		// Trigger a check if we can. If we can’t, the implication is that a
		// check is currently running, which is fine as well.
		select {
		case out <- true:
		default:
		}
	}
}
// TODO: non-blocking snapshot
func (s *EtcdServer) snapshot(snapi uint64, confState *raftpb.ConfState) {
	d, err := s.store.Save()
	// TODO: current store will never fail to do a snapshot
	// what should we do if the store might fail?
	if err != nil {
		log.Panicf("etcdserver: store save should never fail: %v", err)
	}
	err = s.raftStorage.Compact(snapi, confState, d)
	if err != nil {
		// the snapshot was done asynchronously with the progress of raft.
		// raft might have already got a newer snapshot and called compact.
		if err == raft.ErrCompacted {
			return
		}
		log.Panicf("etcdserver: unexpected compaction error %v", err)
	}
	log.Printf("etcdserver: compacted log at index %d", snapi)

	if err := s.storage.Cut(); err != nil {
		log.Panicf("etcdserver: rotate wal file should never fail: %v", err)
	}
	snap, err := s.raftStorage.Snapshot()
	if err != nil {
		log.Panicf("etcdserver: snapshot error: %v", err)
	}
	if err := s.storage.SaveSnap(snap); err != nil {
		log.Fatalf("etcdserver: save snapshot error: %v", err)
	}
	log.Printf("etcdserver: saved snapshot at index %d", snap.Metadata.Index)
}
func (r *Router) route(parts []string) *Router {
	if len(parts) == 0 {
		return r
	}
	part := parts[0]
	if len(part) > 0 && part[0] == ':' {
		part = part[1:]
		if r.varName != "" && part != r.varName {
			log.Panicf("overlapping vars: %q / %q", r.varName, part)
		}
		if r.varRouter == nil {
			r.varName = part
			r.varRouter = &Router{}
		}
		r = r.varRouter
	} else if part == "*" {
		if r.fallbackRouter != nil {
			log.Panicf("overlapping fallback routes")
		}
		r.fallbackRouter = &Router{}
		return r.fallbackRouter
	} else {
		if r.matchers == nil {
			r.matchers = make(map[string]*Router)
		}
		if r.matchers[part] == nil {
			r.matchers[part] = &Router{}
		}
		r = r.matchers[part]
	}
	return r.route(parts[1:])
}
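// Hedged usage sketch (hypothetical helper, not in the original source): the
// route tree is typically built by splitting a pattern on "/" and letting
// route() create static, ":var" and "*" nodes on demand, e.g. for the pattern
// "users/:id/posts".
func addRoute(root *Router, pattern string) *Router {
	return root.route(strings.Split(strings.Trim(pattern, "/"), "/"))
}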
func (m *Map) InitFromString(s string, hasBorder bool) {
	m.SetNullDeadline()
	lines := strings.Fields(s)
	rows := len(lines)
	var cols int
	for row, line := range lines {
		if row == 0 {
			cols = len(line)
			m.Init(rows, cols, 0, hasBorder)
		} else {
			if cols != len(line) {
				log.Panicf("different-length lines in %v", lines)
			}
		}
		for col, letter := range line {
			p := Point{row, col}
			switch letter {
			case '.':
				// Unknown territory
			case '%':
				m.MarkWater(p)
			case '*':
				m.MarkFood(p)
			case 'a':
				m.AddAnt(p, 0)
			case 'b':
				m.AddAnt(p, 1)
			default:
				log.Panicf("unknown letter: %v", letter)
			}
		}
	}
	SetAttackRadius2(5) // default for testing
}
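// Hedged usage note (an assumption about the text format only, read off the
// switch above): strings.Fields splits the map on whitespace, so each
// whitespace-separated token is one row and all rows must have the same
// length. For example, "a.% .*. %.b" describes a 3x3 map with an ant of
// player 0 at (row 0, col 0), water at (0, 2) and (2, 0), food at (1, 1), and
// an ant of player 1 at (2, 2).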
func (c *RestClient) Do_returns_json_array(req *http.Request, opName string) (mresp []interface{}) {
	start := time.Now()
	resp, serialNumber := c.DoRaw(req, opName)
	if resp == nil {
		return
	}
	if !strings.HasPrefix(resp.Header.Get("Content-Type"), "application/json") {
		log.Panicf("Non-JSON response for %v", req)
		return
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Panicf("Can't read HTTP response: %v", err)
		return
	}
	if err = json.Unmarshal(body, &mresp); err != nil {
		log.Panicf("Can't parse response JSON: %v\nrequest = %v\n%s", err, req, body)
	}
	if c.Verbose {
		log.Printf("#%05d: finished in %v", serialNumber, time.Since(start))
	}
	return
}
func membersFromStore(st store.Store) (map[types.ID]*Member, map[types.ID]bool) {
	members := make(map[types.ID]*Member)
	removed := make(map[types.ID]bool)
	e, err := st.Get(storeMembersPrefix, true, true)
	if err != nil {
		if isKeyNotFound(err) {
			return members, removed
		}
		log.Panicf("get storeMembers should never fail: %v", err)
	}
	for _, n := range e.Node.Nodes {
		m, err := nodeToMember(n)
		if err != nil {
			log.Panicf("nodeToMember should never fail: %v", err)
		}
		members[m.ID] = m
	}

	e, err = st.Get(storeRemovedMembersPrefix, true, true)
	if err != nil {
		if isKeyNotFound(err) {
			return members, removed
		}
		log.Panicf("get storeRemovedMembers should never fail: %v", err)
	}
	for _, n := range e.Node.Nodes {
		removed[mustParseMemberIDFromKey(n.Key)] = true
	}
	return members, removed
}
// addType makes a type known to the schema.
func (sch *Schema) addType(def ast.TypeDefinition) {
	if !sch.mutable {
		panic("Attempted to mutate schema after it has been finalized")
	}
	var name string
	// Do extra type validation for Objects, Interfaces and Unions
	switch t := def.(type) {
	case *ast.ScalarDefinition:
		name = t.Name
	case *ast.EnumDefinition:
		name = t.Name
	case *ast.ObjectDefinition:
		name = t.Name
		assertFieldsUnique(t.Fields, t.Name)
	case *ast.InterfaceDefinition:
		name = t.Name
		assertFieldsUnique(t.Fields, t.Name)
	case *ast.UnionDefinition:
		name = t.Name
		if len(t.Members) == 0 {
			log.Panicf("Union '%s' must have one or more member types", name)
		}
	}
	if _, exists := sch.types[name]; exists {
		log.Panicf("Type '%s' already exists in schema", name)
	}
	sch.types[name] = def
}
func (t *Tiler) setupDrawer() {
	t.bondFudgeX = int(Floor(float64(t.TileWidth) / 80))
	t.bondFudgeY = int(Floor(float64(t.TileHeight) / 80))
	t.fontSize = int(Sqrt(float64(t.TileHeight*t.TileWidth)) / 4)
	t.tileHorizShift = int(float64(t.TileWidth) / 10)
	t.tileVertShift = int(float64(t.TileHeight) / 10)
	t.tileHorizMargin = int(float64(t.TileWidth) / 20)
	t.tileVertMargin = int(float64(t.TileHeight) / 20)

	bytes, err := ioutil.ReadFile(t.FontPath)
	if err != nil {
		log.Panicf("Couldn't read font: %s", err)
	}
	t.font, err = freetype.ParseFont(bytes)
	if err != nil {
		log.Panicf("Couldn't parse font: %s", err)
	}

	// figure out how big a representative character is for approximate layout purposes
	ex := '5'
	fupe := t.font.FUnitsPerEm()
	horiz := t.font.HMetric(fupe, t.font.Index(ex))
	vert := t.font.VMetric(fupe, t.font.Index(ex))
	t.fontX = int(horiz.LeftSideBearing)
	t.fontWidth = int(horiz.AdvanceWidth)
	t.fontY = int(vert.TopSideBearing)
	t.fontHeight = int(vert.AdvanceHeight)
	log.Printf("%#v", t)
}
func (m *Map) Update(words []string) {
	if words[0] == "turn" {
		turn := Turn(atoi(words[1]))
		if turn != TURN+1 {
			log.Panicf("Turn number out of sync, expected %v got %v", TURN+1, turn)
		}
		TURN = turn
		log.SetPrefix(fmt.Sprintf("%s %d ", *logPrefix, turn))
		return
	}
	p := Point{atoi(words[1]), atoi(words[2])}
	var ant Item
	if len(words) == 4 {
		ant = Item(atoi(words[3]))
	}
	switch words[0] {
	case "w":
		m.MarkWater(p)
	case "f":
		m.MarkFood(p)
	case "h":
		m.MarkHill(p, ant)
	case "a":
		m.AddAnt(p, ant)
	case "d":
		m.DeadAnt(p, ant)
	default:
		log.Panicf("unknown command updating map: %v\n", words)
	}
}
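// Hedged usage note (assumption: words come from splitting one line of the
// game protocol on whitespace): m.Update(strings.Fields("a 10 12 0")) adds an
// ant owned by player 0 at row 10, column 12, while
// m.Update(strings.Fields("turn 3")) only advances the turn counter and the
// log prefix, panicking if the turn number is out of sequence.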
func setupSocket(pluginDir string, driverName string) string {
	exists, err := dirExists(pluginDir)
	if err != nil {
		log.Panicf("Stat Plugin Directory error: '%s'", err)
	}
	if !exists {
		err = createDir(pluginDir)
		if err != nil {
			log.Panicf("Create Plugin Directory error: '%s'", err)
		}
		log.Printf("Created Plugin Directory: '%s'", pluginDir)
	}

	socketFile := pluginDir + "/" + driverName + ".sock"
	exists, err = fileExists(socketFile)
	if err != nil {
		log.Panicf("Stat Socket File error: '%s'", err)
	}
	if exists {
		err = deleteFile(socketFile)
		if err != nil {
			log.Panicf("Delete Socket File error: '%s'", err)
		}
		log.Printf("Deleted Old Socket File: '%s'", socketFile)
	}
	return socketFile
}
func main() {
	log.SetFlags(log.Ldate | log.Ltime | log.Lshortfile)

	// the server needs a private key
	// generate one with `ssh-keygen -t rsa`
	pemBytes, err := ioutil.ReadFile("id_rsa")
	if err != nil {
		log.Panicf("Failed to load private key: %#v\n", err)
	}

	// pass the private key and an authentication function
	server, err := sshgate.NewServer(pemBytes, Authenticate)
	if err != nil {
		log.Panicf("NewServer error: %#v\n", err)
	}

	// listen on specific port and address
	// "" is equivalent to "0.0.0.0" or “all interfaces”
	port, err := strconv.Atoi(os.Getenv("PORT"))
	if err != nil {
		log.Panicf("Can't parse port: %#v\n", err)
	}
	if err := server.Listen("", port); err != nil {
		log.Panicf("Listen error: %#v\n", err)
	}
}
func inboundConnectionCandidates(neuron *ng.Neuron) []*ng.NodeId {
	if neuron == nil {
		log.Panicf("neuron is nil")
	}
	cortex := neuron.Cortex
	if cortex == nil {
		log.Panicf("neuron has no cortex associated: %v", neuron)
	}
	neuronNodeIds := cortex.NeuronNodeIds()
	sensorNodeIds := cortex.SensorNodeIds()
	availableNodeIds := append(neuronNodeIds, sensorNodeIds...)

	// hackish way to delete a few elements from this slice.
	// put in a map and delete from map, then back to slice.  TODO: fixme
	availableNodeIdMap := make(map[string]*ng.NodeId)
	for _, nodeId := range availableNodeIds {
		availableNodeIdMap[nodeId.UUID] = nodeId
	}

	// remove things we already have inbound connections from
	for _, inboundConnection := range neuron.Inbound {
		nodeId := inboundConnection.NodeId
		delete(availableNodeIdMap, nodeId.UUID)
	}

	availableNodeIds = make([]*ng.NodeId, 0)
	for _, nodeId := range availableNodeIdMap {
		availableNodeIds = append(availableNodeIds, nodeId)
	}
	return availableNodeIds
}
func (server *Server) handleFreezeRequest(freq *freezeRequest, fs *frozenServer) {
	pr, pw := io.Pipe()
	freq.readCloser = pr
	freq.done <- true

	zw, err := gzip.NewWriterLevel(pw, gzip.BestCompression)
	if err != nil {
		if err = pw.CloseWithError(err); err != nil {
			log.Panicf("Unable to close PipeWriter: %v", err)
		}
		return
	}

	enc := gob.NewEncoder(zw)
	err = enc.Encode(fs)
	if err != nil {
		if err = pw.CloseWithError(err); err != nil {
			log.Panicf("Unable to close PipeWriter: %v", err)
		}
	}

	if err = pw.CloseWithError(zw.Close()); err != nil {
		log.Panicf("Unable to close PipeWriter: %v", err)
	}
}
func (s *SimplePoint) Send(r *Request) {
	if s.b.ch == nil {
		panic("EndPoint is not running")
	}
	if !s.standalone {
		log.Panicf("you should not call Send on child endpoint %+v", s)
	}
	if !r.SetPending() {
		/* this could happen if SetDeadline already responded with a timeout */
		if r.Performed() {
			return
		}
		log.Panicf("Request already sent somewhere %+v", r)
	}
	r.SetTimeout(s.Timeout)
	/* this could happen if SetDeadline already responded with a timeout */
	if r.Performed() {
		return
	}
	s.b.push(r)
}
// Client intended only for testing: it does not actually push through Apple,
// it just measures TPS.
func NewMockApnsClient(cert tls.Certificate, pushGateway string, feedbackChan chan<- *entry.Feedback, feedbackGateWay string, storage entry.IMessageStorage) *ApnsClient {
	// channel for responses to failed sends
	respChan := make(chan *entry.Response, 1000)
	deadline := 10 * time.Second
	err, factory := NewConnPool(10, 30, 50, 60*time.Minute, func(id int32) (error, IConn) {
		err, apnsconn := NewApnsConnectionMock(respChan, cert, pushGateway, deadline, id)
		return err, apnsconn
	})
	if nil != err {
		log.Panicf("APN SERVICE|CREATE MOCK CONNECTION POOL|FAIL|%s", err)
		return nil
	}

	err, feedbackFactory := NewConnPool(1, 2, 5, 10*time.Minute, func(id int32) (error, IConn) {
		err, conn := NewFeedbackConn(feedbackChan, cert, feedbackGateWay, deadline, id)
		return err, conn
	})
	if nil != err {
		log.Panicf("APN SERVICE|CREATE FEEDBACK CONNECTION POOL|FAIL|%s", err)
		return nil
	}

	return newApnsClient(factory, feedbackFactory, storage, respChan)
}
func get_carrera(url_tmp string, c chan Carrera) {
	doc, err := Get_Doc(url_tmp)
	if err != nil {
		log.Panic(err)
	}
	materias_tmp := make(map[int]*Materia)
	carrera := Carrera{Doc: doc, Materias: &materias_tmp}

	h1_tag := doc.Find("#info-contenido h1:first-of-type")
	if len(h1_tag.Nodes) != 1 {
		log.Panicf("Se encontraron %v h1 posibles nombres de Carrera en la url %v\n", len(h1_tag.Nodes), doc.Url.String())
	}
	carrera_str := strings.TrimSpace(h1_tag.Text())
	carrera_tokens := strings.Split(carrera_str, ")")
	if len(carrera_tokens) != 2 {
		log.Panicf("Se encontraron %v tokens de la carrera en la url '%v' al partir con ')'\n", len(carrera_tokens), doc.Url.String())
	}
	carrera_tokens = strings.Split(carrera_tokens[0], "(")
	if len(carrera_tokens) != 2 {
		log.Panicf("Se encontraron %v tokens de la carrera en la url '%v' al partir con '('\n", len(carrera_tokens), doc.Url.String())
	}
	idCarrera, err := Get_Id_url(doc.Url.Path)
	if err != nil {
		log.Panic(err)
	}
	carrera.IdCiencias = idCarrera
	carrera.Nombre = strings.TrimSpace(carrera_tokens[0])
	carrera.Plan = strings.TrimPrefix(strings.TrimSpace(carrera_tokens[1]), "plan ")
	c <- carrera
}
// Creates a transaction spending amt with additional fee. Fee is in addition
// to the base required fee given amt.Hours.
// TODO
// - pull in outputs from blockchain from wallet
// - create transaction here
// - sign transaction and return
func Spend2(self *visor.Visor, wrpc *WalletRPC, walletID wallet.WalletID, amt wallet.Balance, fee uint64, dest cipher.Address) (coin.Transaction, error) {
	wallet := wrpc.Wallets.Get(walletID)
	if wallet == nil {
		return coin.Transaction{}, fmt.Errorf("Unknown wallet %v", walletID)
	}
	// pull in outputs and do this here
	// FIX
	tx, err := visor.CreateSpendingTransaction(*wallet, self.Unconfirmed, &self.Blockchain.Unspent, self.Blockchain.Time(), amt, dest)
	if err != nil {
		return tx, err
	}
	if err := tx.Verify(); err != nil {
		log.Panicf("Invalid transaction, %v", err)
	}
	if err := visor.VerifyTransactionFee(self.Blockchain, &tx); err != nil {
		log.Panicf("Created invalid spending txn: visor fail, %v", err)
	}
	if err := self.Blockchain.VerifyTransaction(tx); err != nil {
		log.Panicf("Created invalid spending txn: blockchain fail, %v", err)
	}
	return tx, err
}
func init() {
	if err != nil {
		log.Panicf("game over: %v\n", err)
	}
	err = os.MkdirAll(testArtifactPathDir, 0755)
	if err != nil {
		log.Panicf("game over: %v\n", err)
	}
	for _, p := range testArtifactPaths {
		if filepath.Base(p.Path) == "nonexistent" {
			continue
		}
		fd, err := os.Create(p.Path)
		if err != nil {
			log.Panicf("game over: %v\n", err)
		}
		defer fd.Close()
		for i := 0; i < 512; i++ {
			fmt.Fprintf(fd, "something\n")
		}
		if filepath.Base(p.Path) == "unreadable" {
			fd.Chmod(0000)
		}
	}
}
func NewWalletRPC() *WalletRPC {
	rpc := WalletRPC{}

	//wallet directory
	//cleanup, pass as parameter during init
	DataDirectory := util.InitDataDir("")
	rpc.WalletDirectory = filepath.Join(DataDirectory, "wallets/")
	logger.Debug("Wallet Directory= %v", rpc.WalletDirectory)
	util.InitDataDir(rpc.WalletDirectory)

	rpc.Wallets = wallet.Wallets{}

	//util.InitDataDir(".skycoin")
	//util.InitDataDir(".skycoin/wallets")
	//if rpc.WalletDirectory != "" {
	w, err := wallet.LoadWallets(rpc.WalletDirectory)
	if err != nil {
		log.Panicf("Failed to load all wallets: %v", err)
	}
	rpc.Wallets = w
	//}
	if len(rpc.Wallets) == 0 {
		rpc.Wallets.Add(wallet.NewWallet("")) //deterministic
		if rpc.WalletDirectory != "" {
			errs := rpc.Wallets.Save(rpc.WalletDirectory)
			if len(errs) != 0 {
				log.Panicf("Failed to save wallets: %v", errs)
			}
		}
	}
	return &rpc
}
func (self *Config) preprocess() Config {
	config := *self
	if config.Daemon.LocalhostOnly {
		if config.Daemon.Address == "" {
			local, err := LocalhostIP()
			if err != nil {
				log.Panicf("Failed to obtain localhost IP: %v", err)
			}
			config.Daemon.Address = local
		} else {
			if !IsLocalhost(config.Daemon.Address) {
				log.Panicf("Invalid address for localhost-only: %s", config.Daemon.Address)
			}
		}
		config.Peers.AllowLocalhost = true
	}
	config.Pool.port = config.Daemon.Port
	config.Pool.address = config.Daemon.Address

	if config.Daemon.DisableNetworking {
		config.Peers.Disabled = true
		config.Daemon.DisableIncomingConnections = true
		config.Daemon.DisableOutgoingConnections = true
	} else {
		if config.Daemon.DisableIncomingConnections {
			logger.Info("Incoming connections are disabled.")
		}
		if config.Daemon.DisableOutgoingConnections {
			logger.Info("Outgoing connections are disabled.")
		}
	}

	return config
}
// formatInfo returns the 15-bit Format Information value for a QR
// code.
func (v qrCodeVersion) formatInfo(maskPattern int) *bitset.Bitset {
	formatID := 0

	switch v.level {
	case Low:
		formatID = 0x08 // 0b01000
	case Medium:
		formatID = 0x00 // 0b00000
	case High:
		formatID = 0x18 // 0b11000
	case Highest:
		formatID = 0x10 // 0b10000
	default:
		log.Panicf("Invalid level %d", v.level)
	}

	if maskPattern < 0 || maskPattern > 7 {
		log.Panicf("Invalid maskPattern %d", maskPattern)
	}

	formatID |= maskPattern & 0x7

	result := bitset.New()
	result.AppendUint32(formatBitSequence[formatID].regular, formatInfoLengthBits)

	return result
}
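// Hedged sketch (assumption: formatBitSequence caches precomputed 15-bit
// words). For reference, the QR format information can also be derived
// directly from the 5-bit formatID (2 error-correction bits + 3 mask bits)
// via a BCH(15,5) remainder over the generator polynomial 0x537, XORed with
// the fixed mask 0x5412:
func formatInfoBits(formatID uint32) uint32 {
	rem := formatID << 10
	// Polynomial long division: clear bits 14..10 of rem with the generator.
	for i := uint(14); i >= 10; i-- {
		if rem&(1<<i) != 0 {
			rem ^= 0x537 << (i - 10)
		}
	}
	return (formatID<<10 | rem) ^ 0x5412
}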
// PermutedInts returns all the permutations of the original array.
// The length of the array must be in the range [0..6].
func PermutedInts(a Ints) []Ints {
	ln := len(a)
	if ln <= 1 {
		return []Ints{a}
	}
	if ln == 2 {
		return []Ints{{a[0], a[1]}, {a[1], a[0]}}
	}
	if ln > 6 {
		log.Panicf("permute array length > 6 (test phase only)\n")
	}
	expLen := Factorial(len(a))
	//fmt.Printf("PermutedInts should return %d elements each with %d elements\n", expLen, len(a))
	// make the empty return value array with zero length but full capacity
	var rv = make([]Ints, 0, Factorial(len(a)))
	for i := 0; i < ln; i++ {
		var x = a
		y := PermutedInts(x[1:]) // permute the tail
		for _, p := range y {
			x = append(Ints{a[0]}, p...)
			rv = append(rv, x)
		}
		a = a.RotH2T()
		//fmt.Printf("rotated a = %v\n", a)
	}
	if len(rv) != expLen {
		log.Panicf("return array has wrong length\n")
	}
	return rv
}
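// Hedged usage note (assumption: Ints is a slice-of-int type and RotH2T
// rotates the head element to the tail): the function recurses on the tail
// and rotates each element into the head position once, so
// PermutedInts(Ints{1, 2, 3}) yields all 3! = 6 orderings; the final length
// check against Factorial(len(a)) is a sanity guard for the test phase.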
func (r *_ShuffledRDD) compute(split Split) Yielder {
	parklog("Computing <%s> on Split[%d]", r, split.getIndex())
	r.shuffleJob.Do(func() {
		r.runShuffleJob()
	})
	yield := make(chan interface{}, 1)
	go func() {
		outputId := split.getIndex()
		combinePath := env.getLocalShufflePath(r.shuffleId, SHUFFLE_MAGIC_ID, outputId)
		input, err := os.Open(combinePath)
		if err != nil {
			log.Panicf("Error when open/decode shuffle-%d split[%d] from file %s, %v", r.shuffleId, outputId, combinePath, err)
		}
		defer input.Close()

		parklog("Decoding shuffle-%d[GOB] from local file %s", r.shuffleId, combinePath)
		var buffer []interface{}
		encoder := NewBufferEncoder(ENCODE_BUFFER_SIZE)
		for err == nil {
			buffer, err = encoder.Decode(input)
			if err != nil {
				break
			}
			for _, value := range buffer {
				yield <- value
			}
		}
		if err != nil && err != io.EOF {
			log.Panicf("Error when open/decode shuffle split[%d] from file %s, %v", outputId, combinePath, err)
		}
		close(yield)
	}()
	return yield
}
// Delete deletes all IRC output messages that were generated in reply to the
// input message with inputID.
func (os *OutputStream) Delete(inputID types.RobustId) error {
	var key [8]byte
	os.messagesMu.Lock()
	defer os.messagesMu.Unlock()
	if inputID.Id == os.lastseen.Messages[0].Id.Id {
		// When deleting the last message, lastseen needs to be set to the
		// previous message to avoid blocking in GetNext() forever.
		i := os.db.NewIterator(nil, nil)
		defer i.Release()
		if !i.Last() {
			log.Panicf("outputstream LevelDB is empty, which is a BUG\n")
		}
		if !i.Prev() {
			// We should always keep the first message (RobustId{Id: 0}).
			log.Panicf("Delete() called on _all_ messages\n")
		}
		mb := unmarshalMessageBatch(i.Value())
		os.lastseen = messageBatch{
			Messages: mb.Messages,
			NextID:   math.MaxUint64,
		}
		binary.BigEndian.PutUint64(key[:], uint64(os.lastseen.Messages[0].Id.Id))
		if err := os.db.Put(key[:], os.lastseen.marshal(), nil); err != nil {
			return err
		}
	}
	os.cacheMu.Lock()
	delete(os.messagesCache, uint64(inputID.Id))
	os.cacheMu.Unlock()
	binary.BigEndian.PutUint64(key[:], uint64(inputID.Id))
	return os.db.Delete(key[:], nil)
}
func main() {
	flag.Parse()

	zone := os.Getenv("GODYN_ZONE")
	fqdn := os.Getenv("GODYN_FQDN")
	if fqdn == "" {
		hostname, err := os.Hostname()
		if err != nil {
			log.Panicf("Can't determine hostname and GODYN_FQDN not specified: %v", err)
		}
		fqdn = fmt.Sprintf("%s.%s", hostname, zone)
	}

	publicIp, err := getPublicIpFromHosts()
	if err != nil {
		log.Panicf("Can't determine public IP for this container: %v", err)
	}
	log.Printf("Trying to update A record for %s to current container IP %s", fqdn, publicIp)

	dnsProvider, err = dynectProvider.NewProvider()
	if err != nil {
		log.Panicf("Can't create DNS provider: %v", err)
	}
	_, err = dnsProvider.UpdateARecord(zone, fqdn, publicIp, false)
	if err != nil {
		log.Fatalf("Failed to update A record: %v", err)
	} else {
		log.Printf("Successfully updated FQDN %s with A record for %s", fqdn, publicIp)
	}
}
func CreateConverter(i interface{}, wrappedInResponse bool, convertTypeDecoderConfig *DecoderConfig, convertTypeTagName string) ConverterFunction {
	typ := reflect.TypeOf(i)
	if typ.Kind() != reflect.Ptr {
		log.Panicf("Only pointers to structs may be registered as a response type: %#v", typ)
	}
	customUnpack := typ.Implements(reflect.TypeOf((*Unpacker)(nil)).Elem())
	typ = typ.Elem()
	if typ.Kind() != reflect.Struct {
		log.Panicf("Only pointers to structs may be registered as a response type: %#v", typ)
	}

	converter := func(input interface{}) (output interface{}, err error) {
		if wrappedInResponse {
			responseValue, err := ConvertValue(responseType, input, false, convertTypeDecoderConfig, convertTypeTagName)
			if err != nil {
				return nil, err
			}
			parsedResponse := responseValue.Interface().(*Response)
			if !parsedResponse.Success {
				return nil, &ResponseError{text: parsedResponse.Error, code: parsedResponse.ErrorCode}
			}
			input = parsedResponse.Result
		}
		inputValue, err := ConvertValue(typ, input, customUnpack, convertTypeDecoderConfig, convertTypeTagName)
		if err != nil {
			return
		}
		output = inputValue.Interface()
		return
	}
	return converter
}
func (t *TWriter) FillStruct() {
	t.Write = t.structWriter
	for i := range t.Writer.Flds {
		fld := &t.Writer.Flds[i]
		ipro := fld.Tag.Get("sbox")
		for _, m := range strings.Split(ipro, ",") {
			if t.Tail != NoTail {
				log.Panicf("Sbox Tail could be only last field in a struct %+v", t.Writer.Type)
			}
			if m == "tail" {
				if fld.Type.Kind() != reflect.Slice {
					log.Panicf("Could apply sbox:tail only for slices")
				}
				t.Tail = Tail
			} else if m == "tailsplit" {
				if fld.Type.Kind() != reflect.Slice {
					log.Panicf("Could apply sbox:tailsplit only for slices")
				}
				if fld.Type.Elem().Kind() != reflect.Struct {
					log.Panicf("Could apply sbox:tailsplit only for slices of struct")
				}
				t.Tail = TailSplit
			}
		}
	}
}
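// Hedged illustration (hypothetical structs, not from the original source):
// the checks above imply struct tags along these lines, where the tail-tagged
// slice must be the last field of the struct:
//
//	type Frame struct {
//		Kind    uint8
//		Payload []byte `sbox:"tail"`
//	}
//
//	type Batch struct {
//		Count   uint16
//		Records []Record `sbox:"tailsplit"` // slice of struct required
//	}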
func (r Renderer) RenderPost(pc *PostContext, templates *template.Template) error {
	err := os.MkdirAll(path.Join(r.OutputPath, pc.Slug), 0755)
	if err != nil {
		log.Panicf("%v", err)
	}

	outfile, err := os.Create(path.Join(r.OutputPath, pc.Slug, "index.html"))
	if err != nil {
		log.Panicf("%v", err)
	}

	err = templates.ExecuteTemplate(outfile, "post.html", pc)
	if err != nil {
		log.Panicf("%v", err)
	}

	// copy images
	log.Printf("\"%s\": Using %d %s", pc.Slug, len(pc.Images), pluralize("image", len(pc.Images)))
	for _, image := range pc.Images {
		err = cp(image.SrcAbsPath, path.Join(r.OutputPath, pc.Slug, image.Filename))
		if err != nil {
			log.Panicf("%v", err)
		}
	}
	log.Printf("\"%s\": Done rendering", pc.Slug)

	return nil
}
func initialize() {
	privkey_fname := util.AppBaseFileName() + ".privkey"
	privkey_bytes, err := ioutil.ReadFile(privkey_fname)
	if err != nil {
		log.Panicf("privkey load error: %s", err)
	}
	signer, err := ssh.ParsePrivateKey(privkey_bytes)
	if err != nil {
		log.Panicf("privkey parse error: %s", err)
	}
	clientConfig = &ssh.ClientConfig{
		User: username,
		Auth: []ssh.AuthMethod{
			ssh.PublicKeys(signer),
		},
	}

	hostlist_fname := util.AppBaseFileName() + ".hostlist"
	f, err := os.Open(hostlist_fname)
	if err != nil {
		log.Panicf("hostlist open error: %s", err)
	}
	defer f.Close()
	scanner := bufio.NewScanner(f)
	scanner.Split(bufio.ScanLines)
	for scanner.Scan() {
		hostlist = append(hostlist, scanner.Text())
	}
}