func (s *Server) DeleteMessageHandler(w http.ResponseWriter, r *http.Request) {
	login, err := getYandexLogin(r)
	if login == "" || err != nil {
		writeError(w, 403, "forbidden")
		return
	}

	subject := r.FormValue("subject")
	if subject != "" {
		log.Printf("DELETE login=%s subject=%s", login, subject)
		err = s.deleteMessagesBySubject(login, subject)
		if err != nil {
			writeError(w, 500, "failed to delete message: "+err.Error())
			return
		}
	}

	id := r.FormValue("id")
	if id != "" {
		log.Printf("DELETE login=%s id=%s", login, id)
		err = s.deleteMessagesByID(login, id)
		if err != nil {
			writeError(w, 500, "failed to delete message: "+err.Error())
			return
		}
	}

	writeResponse(w, 200, map[string]interface{}{
		"status": "success",
	})
}
// processTask processes new tasks
func (m *etcdMinion) processTask(t *task.Task) error {
	var buf bytes.Buffer

	// Update state of task to indicate that we are now processing it
	t.State = task.TaskStateProcessing
	m.SaveTaskResult(t)

	cmd := exec.Command(t.Command, t.Args...)
	cmd.Stdout = &buf
	cmd.Stderr = &buf

	log.Printf("Processing task %s\n", t.TaskID)
	cmdError := cmd.Run()
	t.TimeProcessed = time.Now().Unix()
	t.Result = buf.String()

	if cmdError != nil {
		log.Printf("Failed to process task %s\n", t.TaskID)
		t.Error = cmdError.Error()
		t.State = task.TaskStateFailed
	} else {
		log.Printf("Finished processing task %s\n", t.TaskID)
		t.State = task.TaskStateSuccess
	}
	m.SaveTaskResult(t)

	return cmdError
}
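// A minimal standalone sketch (not from the original source) of the capture
// pattern used above: pointing both cmd.Stdout and cmd.Stderr at one
// bytes.Buffer interleaves the two streams, like CombinedOutput but with an
// explicit buffer. Assumes a Unix-like system with /bin/sh available.
package main

import (
	"bytes"
	"log"
	"os/exec"
)

func main() {
	var buf bytes.Buffer
	cmd := exec.Command("/bin/sh", "-c", "echo out; echo err >&2")
	cmd.Stdout = &buf
	cmd.Stderr = &buf // same writer, so os/exec reuses one pipe and keeps order
	if err := cmd.Run(); err != nil {
		log.Printf("command failed: %v", err)
	}
	log.Printf("combined output: %q", buf.String()) // "out\nerr\n"
}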
// RootObject returns the root permanode for this importer account.
func (h *Host) RootObject() (*Object, error) {
	res, err := h.search.GetPermanodesWithAttr(&search.WithAttrRequest{
		N:     2, // only expect 1
		Attr:  "camliImportRoot",
		Value: h.imp.Prefix(),
	})
	if err != nil {
		log.Printf("RootObject: GetPermanodesWithAttr search failed: %v", err)
		return nil, err
	}
	if len(res.WithAttr) == 0 {
		obj, err := h.NewObject()
		if err != nil {
			return nil, err
		}
		log.Printf("No root object found. Created %v", obj.pn)
		if err := obj.SetAttr("camliImportRoot", h.imp.Prefix()); err != nil {
			return nil, err
		}
		return obj, nil
	}
	if len(res.WithAttr) > 1 {
		return nil, fmt.Errorf("found %d import roots for %q; want 1", len(res.WithAttr), h.imp.Prefix())
	}
	pn := res.WithAttr[0].Permanode
	return h.ObjectFromRef(pn)
}
func (s *Session) handleBind(stream conn.Conn, bind *proto.Bind) (err error) {
	// stream.Debug("Binding new tunnel: %v", bind)
	log.Printf("Binding new tunnel: %v\n", bind)

	respond := func(resp *proto.BindResp) {
		// Assign to the named return value; the original `err :=` shadowed
		// it, so the wrapped error never reached the caller.
		if err = proto.WriteMsg(stream, resp); err != nil {
			err = fmt.Errorf("failed to send bind response: %v", err)
			log.Println(err)
			// err = stream.Error("Failed to send bind response: %v", err)
		}
	}

	if err = s.hooks.OnBind(s, bind); err != nil {
		return
	}

	t, err := newTunnel(bind, s, s.binders, s.tunnelHooks)
	if err != nil {
		respond(&proto.BindResp{Error: err.Error()})
		return
	}

	// t.Info("Registered new tunnel on session %s", s.id)
	log.Printf("[INFO] Registered new tunnel on session %s", s.id)

	// add it to the list of tunnels
	s.addTunnel(t)

	// acknowledge success
	respond(&proto.BindResp{Url: t.url})
	return
}
func handleHook(h *hook.Hook, headers, query, payload *map[string]interface{}, body *[]byte) string {
	cmd := exec.Command(h.ExecuteCommand)
	cmd.Args = h.ExtractCommandArguments(headers, query, payload)
	cmd.Dir = h.CommandWorkingDirectory

	log.Printf("executing %s (%s) with arguments %s using %s as cwd\n", h.ExecuteCommand, cmd.Path, cmd.Args, cmd.Dir)

	out, err := cmd.CombinedOutput()
	log.Printf("command output: %s\n", out)

	var errorResponse string
	if err != nil {
		log.Printf("error occurred: %+v\n", err)
		errorResponse = fmt.Sprintf("%+v", err)
	}

	log.Printf("finished handling %s\n", h.ID)

	var response []byte
	response, err = json.Marshal(&hook.CommandStatusResponse{ResponseMessage: h.ResponseMessage, Output: string(out), Error: errorResponse})
	if err != nil {
		log.Printf("error marshalling response: %+v", err)
		return h.ResponseMessage
	}

	return string(response)
}
func resourceAwsS3BucketObjectRead(d *schema.ResourceData, meta interface{}) error {
	s3conn := meta.(*AWSClient).s3conn

	bucket := d.Get("bucket").(string)
	key := d.Get("key").(string)
	etag := d.Get("etag").(string)

	resp, err := s3conn.HeadObject(
		&s3.HeadObjectInput{
			Bucket:  aws.String(bucket),
			Key:     aws.String(key),
			IfMatch: aws.String(etag),
		})
	if err != nil {
		// If S3 returns a 404 Request Failure, mark the object as destroyed
		if awsErr, ok := err.(awserr.RequestFailure); ok && awsErr.StatusCode() == 404 {
			d.SetId("")
			log.Printf("[WARN] Error Reading Object (%s), object not found (HTTP status 404)", key)
			return nil
		}
		return err
	}

	d.Set("cache_control", resp.CacheControl)
	d.Set("content_disposition", resp.ContentDisposition)
	d.Set("content_encoding", resp.ContentEncoding)
	d.Set("content_language", resp.ContentLanguage)
	d.Set("content_type", resp.ContentType)
	d.Set("version_id", resp.VersionId)

	log.Printf("[DEBUG] Reading S3 Bucket Object meta: %s", resp)
	return nil
}
// SetClassifier serializes the given classifier and stores it in etcd.
func (m *etcdMinion) SetClassifier(c *classifier.Classifier) error {
	// Classifiers in etcd expire after an hour
	opts := &etcdclient.SetOptions{
		PrevExist: etcdclient.PrevIgnore,
		TTL:       time.Hour,
	}

	// Serialize classifier to JSON and save it in etcd
	data, err := json.Marshal(c)
	if err != nil {
		log.Printf("Failed to serialize classifier %s: %s\n", c.Key, err)
		return err
	}

	// Classifier key in etcd
	klassifierKey := filepath.Join(m.classifierDir, c.Key)
	_, err = m.kapi.Set(context.Background(), klassifierKey, string(data), opts)
	if err != nil {
		log.Printf("Failed to set classifier %s: %s\n", c.Key, err)
		return err
	}

	return nil
}
// realMain is the actual entry point, but we wrap it to set
// a proper exit code on return.
func realMain() int {
	if len(os.Args) == 1 {
		usage()
		return 1
	}

	// Read the configuration
	conf, err := getConfig()
	if err != nil {
		log.Printf("[ERR] %v", err)
		return 1
	}

	// Sanity check the configuration
	if errs := validateConfig(conf); len(errs) != 0 {
		for _, err := range errs {
			log.Printf("[ERR] %v", err)
		}
		return 1
	}

	// Start watching for changes
	stopCh, finishCh := watch(conf)

	// Wait for termination
	return waitForTerm(conf, stopCh, finishCh)
}
// RefreshCache updates the list of metric names in the cache from the local
// file store. Blocks until completion. Does not check cache freshness,
// so use with care.
func (m *MetricsCacheType) RefreshCache() error {
	m.lock.Lock()
	m.updating = true

	examine := func(path string, info os.FileInfo, err error) error {
		ok, err := checkWalk(path, info, err)
		if err != nil {
			return err
		}
		if ok {
			//log.Printf("Found %s or %s", path, PathToMetric(path))
			m.metrics = append(m.metrics, PathToMetric(path))
		}
		return nil
	}

	log.Printf("Scanning %s for metrics...", Prefix)
	// Create a new empty slice
	m.metrics = make([]string, 0)
	err := filepath.Walk(Prefix, examine)
	log.Printf("Scan complete.")
	if err != nil {
		log.Printf("Scan returned an error: %s", err)
	}

	m.timestamp = time.Now().Unix()
	m.updating = false
	m.lock.Unlock()
	return nil
}
func populateServer(serv *server) []sfs.ReplicateChunkArgs {
	str := fmt.Sprintf("%s:%d", serv.addr.IP.String(), serv.addr.Port)
	log.Printf("master: PopulateServer: populating %s\n", str)
	log.Printf("master: PopulateServer: server heap state:\n%s\n", sHeap.printPresent())

	if len(chunks) == 0 {
		return nil
	}

	thisVec := new(vector.Vector)
	for _, chunk := range chunks {
		//log.Printf("master: PopulateServer: examining chunk %+v, nservers %d\n", *chunk, chunk.servers.Len())
		if chunk.servers.Len() < sfs.NREPLICAS {
			// populate chunk location list
			chunklist := make([]net.TCPAddr, chunk.servers.Len())
			for cnt1 := 0; cnt1 < chunk.servers.Len(); cnt1++ {
				chunklist[cnt1] = chunk.servers.At(cnt1).(*server).addr
			}
			// queue up the RPC call
			thisVec.Push(sfs.ReplicateChunkArgs{chunk.chunkID, chunklist})
		}
	}

	cnt := thisVec.Len()
	thisSlice := make([]sfs.ReplicateChunkArgs, cnt)
	for i := 0; i < cnt; i++ {
		thisSlice[i] = thisVec.Pop().(sfs.ReplicateChunkArgs) // horribly inefficient, but what can you do...
	}
	return thisSlice
}
func (self *Task) Signal(sig os.Signal) {
	log.Printf("Signaling: %s", self.Config.ShortName)
	for _, proc := range self.Running {
		log.Printf("Signaling %d with %s", proc.Process.Pid, sig)
		if err := proc.Process.Signal(sig); err != nil {
			log.Printf("Failed to signal %d: %v", proc.Process.Pid, err)
		}
	}
}
func GetMember(email, password string) (Member, error) {
	// Never log the plaintext password (or its hash).
	log.Printf("Get member '%s'", email)

	db, err := GetDBConnection()
	if err != nil {
		return Member{}, errors.New("unable to get database connection")
	}
	defer db.Close()

	// Hash (not encrypt) the password before comparing against the stored digest.
	pwd := sha256.Sum256([]byte(password))

	row := db.QueryRow(`SELECT id, email, first_name FROM Member WHERE email = $1 AND password = $2`,
		email, hex.EncodeToString(pwd[:]),
	)
	result := Member{}
	err = row.Scan(&result.id, &result.email, &result.firstName)
	if err != nil {
		log.Printf("Err: %v", err)
		return result, errors.New("unable to find Member with email: " + email)
	}
	return result, nil
}
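// A minimal standalone sketch (not from the original source) of the
// password-hashing step above: sha256.Sum256 returns a [32]byte array,
// which is sliced with pwd[:] before hex encoding. Note this is a plain
// hash, not encryption; a real system would use a salted, slow KDF
// such as bcrypt.
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

func main() {
	pwd := sha256.Sum256([]byte("s3cret")) // [32]byte digest
	fmt.Println(hex.EncodeToString(pwd[:])) // 64 lowercase hex characters
}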
// parser reads all incoming messages from the consumer, and parses them into
// influxdb metric points.
func (k *Kafka) parser() {
	for {
		select {
		case <-k.done:
			return
		case err := <-k.errs:
			log.Printf("Kafka Consumer Error: %s\n", err.Error())
		case msg := <-k.in:
			points, err := jsonToPoints(msg.Value)
			if err != nil {
				log.Printf("Could not parse kafka message: %s, error: %s",
					string(msg.Value), err.Error())
			}

			for _, point := range points {
				select {
				case k.pointChan <- point:
					continue
				default:
					log.Printf("Kafka Consumer buffer is full, dropping a point." +
						" You may want to increase the point_buffer setting")
				}
			}

			if !k.doNotCommitMsgs {
				// TODO(cam) this locking can be removed if this PR gets merged:
				// https://github.com/wvanbergen/kafka/pull/84
				k.Lock()
				k.Consumer.CommitUpto(msg)
				k.Unlock()
			}
		}
	}
}
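// A minimal standalone sketch (not from the original source) of the
// select/default idiom used above: a send on a full buffered channel would
// block, so the default case turns it into "drop and log" instead.
package main

import "log"

func main() {
	ch := make(chan int, 1) // buffer of one point
	for i := 0; i < 3; i++ {
		select {
		case ch <- i:
			log.Printf("buffered %d", i)
		default:
			log.Printf("buffer full, dropping %d", i)
		}
	}
}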
//
// Make sure everything is ok to proceed.
// 1. Ensure that the data directory is present and writable.
//
func ensureEnvironment(conf *Config) {
	log.Printf("Checking Environment...\n")
	ensureDataDir(conf)
	log.Printf("done.\n")
}
func (self *FlumeClientPool) Destroy() {
	self.mutex.Lock()
	self.running = false
	self.mutex.Unlock()

	// Wait up to three rounds for checked-out clients to drain.
	for i := 0; i < 3; {
		time.Sleep(5 * time.Second)
		if self.ActivePoolSize() <= 0 {
			break
		}
		log.Printf("flume client pool closing : activepool:%d\n", self.ActivePoolSize())
		i++
	}

	self.mutex.Lock()
	defer self.mutex.Unlock()

	// Close the idle clients. Capture Next before Remove: Remove clears the
	// element's links, so e.Next() afterwards would always be nil.
	for e := self.idlePool.Front(); e != nil; {
		next := e.Next()
		fclient := e.Value.(*IdleClient)
		fclient.flumeclient.Destroy()
		self.idlePool.Remove(e)
		e = next
	}

	// Close the clients that are still checked out
	for e := self.checkOutPool.Front(); e != nil; {
		next := e.Next()
		fclient := e.Value.(*client.FlumeClient)
		fclient.Destroy()
		self.checkOutPool.Remove(e)
		e = next
	}

	log.Printf("FLUME_POOL|DESTROY|%s", self.GetHostPort())
}
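// A minimal standalone sketch (not from the original source) of safely
// removing every element from a container/list while iterating, the pattern
// the loops above rely on: Remove clears the element's next pointer, so the
// next element must be captured first.
package main

import (
	"container/list"
	"fmt"
)

func main() {
	l := list.New()
	for i := 1; i <= 3; i++ {
		l.PushBack(i)
	}
	for e := l.Front(); e != nil; {
		next := e.Next() // capture before Remove invalidates e
		fmt.Println("removing", e.Value)
		l.Remove(e)
		e = next
	}
	fmt.Println("remaining:", l.Len()) // 0
}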
func init() {
	// find the sqlite3 command
	sqlite3, err := exec.LookPath("sqlite3")
	if err != nil {
		log.Fatalf("error finding sqlite3 in system: %#v", err.Error())
	}
	log.Printf("sqlite3 path: %#v", sqlite3)

	// open the schema file
	file, err := os.Open("_test/schema.sqlite3.sql")
	if err != nil {
		log.Fatalf("error opening test schema: %#v", err.Error())
	}

	// initialize the test database with the sql file
	cmd := exec.Command(sqlite3, dbpath)
	var outstd bytes.Buffer
	var outerr bytes.Buffer
	cmd.Stdin = file
	cmd.Stdout = &outstd
	cmd.Stderr = &outerr
	if err := cmd.Run(); err != nil {
		log.Printf("output: %#v", outstd.String())
		log.Printf("error: %#v", outerr.String())
		log.Fatalf("Failed to run sqlite command")
	}
}
func (m *Master) MapChunkToFile(args *sfs.MapChunkToFileArgs, ret *sfs.MapChunkToFileReturn) os.Error {
	file, ok, error := QueryFile(args.Name)
	if !ok {
		log.Printf("master: MapChunkToFile: File %s does not exist\n", args.Name)
		return error
	}

	log.Printf("master: MapChunkToFile: ChunkID: %d Offset: %d nservers: %d Hash: %x\n",
		args.Chunk.ChunkID, args.Offset, len(args.Chunk.Servers), args.Chunk.Hash)

	thisChunk, ok := chunks[args.Chunk.ChunkID]
	if !ok {
		thisChunk = new(chunk)
		thisChunk.chunkID = args.Chunk.ChunkID
		thisChunk.size = args.Chunk.Size
		thisChunk.servers = new(vector.Vector)
		for i := 0; i < len(args.Chunk.Servers); i++ {
			thisChunk.AssociateServer(addrToServerMap[args.Chunk.Servers[i].String()])
		}
		thisChunk.hash = args.Chunk.Hash
	}

	_, err := file.MapChunk(args.Offset, thisChunk)
	if err != nil {
		return os.NewError("Could not add chunk! Ruh roh")
	}
	return nil
}
func (ec2 *EC2) query(params map[string]string, resp interface{}) error {
	params["Version"] = "2013-02-01"
	params["Timestamp"] = timeNow().In(time.UTC).Format(time.RFC3339)
	endpoint, err := url.Parse(ec2.Region.EC2Endpoint)
	if err != nil {
		return err
	}
	if endpoint.Path == "" {
		endpoint.Path = "/"
	}
	sign(ec2.Auth, "GET", endpoint.Path, params, endpoint.Host)
	endpoint.RawQuery = multimap(params).Encode()

	if debug {
		log.Printf("get { %v } -> {\n", endpoint.String())
	}

	r, err := http.Get(endpoint.String())
	if err != nil {
		return err
	}
	defer r.Body.Close()

	if debug {
		dump, _ := httputil.DumpResponse(r, true)
		log.Printf("response:\n")
		log.Printf("%v\n}\n", string(dump))
	}

	if r.StatusCode != 200 {
		return buildError(r)
	}
	err = xml.NewDecoder(r.Body).Decode(resp)
	return err
}
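// A minimal standalone sketch (not from the original source) of the debug
// dump above: httputil.DumpResponse renders the full wire form of a response,
// headers and (optionally) body. Uses an httptest server so it runs without
// network access.
package main

import (
	"log"
	"net/http"
	"net/http/httptest"
	"net/http/httputil"
)

func main() {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	}))
	defer ts.Close()

	resp, err := http.Get(ts.URL)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	dump, _ := httputil.DumpResponse(resp, true) // true: include the body
	log.Printf("response:\n%s", dump)
}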
func (f *face) subface(r rune) (*subface, rune) {
	// Fall back on U+FFFD if we can't find r.
	for _, rr := range [2]rune{r, '\ufffd'} {
		// We have to do linear, not binary search. plan9port's
		// lucsans/unicode.8.font says:
		//	0x2591  0x2593  ../luc/Altshades.7.0
		//	0x2500  0x25ee  ../luc/FormBlock.7.0
		// and the rune ranges overlap.
		for i := range f.runeRanges {
			x := &f.runeRanges[i]
			if rr < x.lo || x.hi < rr || x.bad {
				continue
			}
			if x.subface == nil {
				data, err := f.readFile(x.relFilename)
				if err != nil {
					log.Printf("plan9font: couldn't read subfont %q: %v", x.relFilename, err)
					x.bad = true
					continue
				}
				sub, err := ParseSubfont(data, x.lo-x.offset)
				if err != nil {
					log.Printf("plan9font: couldn't parse subfont %q: %v", x.relFilename, err)
					x.bad = true
					continue
				}
				x.subface = sub.(*subface)
			}
			return x.subface, rr
		}
	}
	return nil, 0
}
// walker visits every file inside path, recursing into subdirectories
// and sending all filenames it encounters on "in".
func walker(path string, in chan string) {
	if *debug {
		log.Printf("examining %s", path)
	}
	// When encountering a symlink to a directory, Lstat will return false
	// for IsDir, but Stat will return true.
	lfi, err := os.Lstat(path)
	if err != nil {
		log.Printf("%v", err)
		return
	}
	if lfi.IsDir() {
		dir, err := ioutil.ReadDir(path)
		if err != nil {
			log.Printf("%v", err)
			return
		}
		for _, v := range dir {
			walker(path+"/"+v.Name(), in)
		}
		return
	}
	fi, err := os.Stat(path)
	if err != nil {
		log.Printf("%v", err)
		return
	}
	if fi.IsDir() {
		return
	}
	in <- path
}
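// A minimal standalone sketch (not from the original source) of the
// Lstat-vs-Stat distinction the comment above relies on. The paths
// "realdir" and "link-to-dir" are hypothetical and created in the current
// directory; assumes a filesystem that supports symlinks.
package main

import (
	"fmt"
	"log"
	"os"
)

func main() {
	if err := os.Mkdir("realdir", 0755); err != nil && !os.IsExist(err) {
		log.Fatal(err)
	}
	if err := os.Symlink("realdir", "link-to-dir"); err != nil && !os.IsExist(err) {
		log.Fatal(err)
	}
	lfi, _ := os.Lstat("link-to-dir") // does not follow the symlink
	fi, _ := os.Stat("link-to-dir")   // follows the symlink
	fmt.Println(lfi.IsDir(), fi.IsDir()) // false true
}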
func (bs *BusServer) OnRequestServiceByType(serviceType string, adviceServer *ServerConfig) (err error) {
	log.Printf("OnRequestServiceByType:serviceType(%+v)\n", serviceType)
	s, ok := bs.priorServiceByType[serviceType]
	if s == nil || !ok {
		return errorUnsupportServiceType
	}
	for s != nil && ok {
		if checkIfServiceOK(s) {
			break
		}
		err = bs.deleteServer(s)
		if err != nil {
			return
		}
		s, ok = bs.priorServiceByType[serviceType]
	}
	if s == nil || !ok {
		return errorUnsupportServiceType
	}
	*adviceServer = s.ServerConfig
	log.Printf("OnRequestServiceByType:adviceServer(%+v)\n", *adviceServer)
	return nil
}
// validateToken returns true if token is valid.
func validateToken(p Provider, access_token string, header http.Header) bool {
	if access_token == "" || p.Data().ValidateUrl == nil {
		return false
	}
	endpoint := p.Data().ValidateUrl.String()
	if len(header) == 0 {
		params := url.Values{"access_token": {access_token}}
		endpoint = endpoint + "?" + params.Encode()
	}
	resp, err := api.RequestUnparsedResponse(endpoint, header)
	if err != nil {
		log.Printf("GET %s", endpoint)
		log.Printf("token validation request failed: %s", err)
		return false
	}

	body, _ := ioutil.ReadAll(resp.Body)
	resp.Body.Close()
	log.Printf("%d GET %s %s", resp.StatusCode, endpoint, body)

	if resp.StatusCode == 200 {
		return true
	}
	log.Printf("token validation request failed: status %d - %s", resp.StatusCode, body)
	return false
}
func (self *Visor) CreateGenesisBlockInit() (SignedBlock, error) {
	self.GenesisPreconditions()
	if len(self.Blockchain.Blocks) != 0 || len(self.blockSigs.Sigs) != 0 {
		log.Panic("Blockchain already has genesis")
	}
	if self.Config.BlockchainPubkey != cipher.PubKeyFromSecKey(self.Config.BlockchainSeckey) {
		log.Panicf("Cannot create genesis block. Invalid secret key for pubkey")
	}

	gb := self.Blockchain.CreateGenesisBlock(self.Config.GenesisAddress,
		self.Config.GenesisTimestamp, self.Config.GenesisCoinVolume)
	sb := self.SignBlock(gb)
	if err := self.verifySignedBlock(&sb); err != nil {
		// log.Panicf, not log.Panic: the message contains a format verb.
		log.Panicf("Signed a fresh genesis block, but it's invalid: %v", err)
	}
	self.blockSigs.record(&sb)

	log.Printf("New Genesis:")
	log.Printf("genesis_time= %v", sb.Block.Head.Time)
	log.Printf("genesis_address= %v", self.Config.GenesisAddress.String())
	log.Printf("genesis_signature= %v", sb.Sig.Hex())

	return sb, nil
}
func (s *serfDiscovery) Start() error {
	conn, err := net.ListenPacket("udp4", "0.0.0.0:1024")
	if err != nil {
		return err
	}
	s.pconn = ipv4.NewPacketConn(conn)
	if err := s.pconn.JoinGroup(s.iface, &net.UDPAddr{IP: s.group}); err != nil {
		conn.Close()
		return err
	}
	if err := s.pconn.SetControlMessage(ipv4.FlagDst, true); err != nil {
		conn.Close()
		return err
	}

	go func() {
		<-s.stop
		conn.Close()
	}()

	go func() {
		b := make([]byte, 1500)
		for {
			_, cm, src, err := s.pconn.ReadFrom(b)
			if err != nil {
				if strings.Contains(err.Error(), "closed network connection") {
					log.Printf("Closed connection, stopping discovery listener...")
					return
				}
				log.Printf("Failed to read packet: %s", err)
				continue
			}
			if cm.Dst.IsMulticast() {
				if !cm.Dst.Equal(s.group) {
					continue
				}
				sip, _, err := net.SplitHostPort(src.String())
				if err != nil {
					log.Printf("Multicast src '%s' has unexpected format: %s", src, err)
				}
				if sip == s.self.String() {
					continue
				}
				if err := s.serf.Join(sip); err != nil {
					log.Printf("Failed to join serf gossip at '%s': %s", sip, err)
				}
			}
		}
	}()

	return nil
}
// submitCachedState periodically samples the cached state and sends it to Flapjack.
func submitCachedState(states map[string]State, config Config) {
	transport, err := flapjack.Dial(config.Server, config.Database)
	if err != nil {
		fmt.Printf("Error: %s\n", err)
		os.Exit(1)
	}

	for {
		log.Printf("Number of cached states: %d\n", len(states))
		for id, state := range states {
			now := time.Now().Unix()
			event := flapjack.Event{
				Entity:  state.Entity,
				Check:   state.Check,
				Type:    state.Type,
				State:   state.State,
				Summary: state.Summary,
				Time:    now,
			}

			// A stale state sends UNKNOWNs
			elapsed := now - state.Time
			if state.TTL >= 0 && elapsed > state.TTL {
				log.Printf("State for %s is stale. Sending UNKNOWN.\n", id)
				event.State = "UNKNOWN"
				event.Summary = fmt.Sprintf("httpbroker: Cached state is stale (%ds old, should be < %ds)",
					elapsed, state.TTL)
			}

			if config.Debug {
				log.Printf("Sending event data for %s\n", id)
			}
			transport.Send(event)
		}
		time.Sleep(config.Interval)
	}
}
// addCommit adds the commit with the named hash to the dashboard.
// key is the secret key for authentication to the dashboard.
// It avoids duplicate effort.
func addCommit(hash, key string) bool {
	l := logByHash[hash]
	if l == nil {
		return false
	}
	if l.added {
		return true
	}

	// Check for already added, perhaps in an earlier run.
	if dashboardCommit(hash) {
		log.Printf("%s already on dashboard\n", hash)
		// Record that this hash is on the dashboard,
		// as must be all its parents.
		for l != nil {
			l.added = true
			l = logByHash[l.Parent]
		}
		return true
	}

	// Create parent first, to maintain some semblance of order.
	if !addCommit(l.Parent, key) {
		return false
	}

	// Create commit.
	if err := postCommit(key, l); err != nil {
		// Log the commit hash, not the secret key.
		log.Printf("failed to add %s to dashboard: %v", hash, err)
		return false
	}
	return true
}
func main() {
	var filename string
	var v bool
	flag.StringVar(&filename, "cfg", "", "path to config file")
	flag.BoolVar(&v, "v", false, "show version")
	flag.Parse()

	if v {
		fmt.Println(version)
		return
	}

	cfg, err := config.Load(filename)
	if err != nil {
		log.Fatalf("[FATAL] %s. %s", version, err)
	}

	log.Printf("[INFO] Runtime config\n" + toJSON(cfg))
	log.Printf("[INFO] Version %s starting", version)
	log.Printf("[INFO] Go runtime is %s", runtime.Version())

	initRuntime(cfg)
	initMetrics(cfg)
	initBackend(cfg)
	go watchBackend()
	startAdmin(cfg)
	startListeners(cfg.Listen, cfg.Proxy.ShutdownWait, newProxy(cfg))
	registry.Default.Deregister()
}
func resourceComputeProjectMetadataDelete(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*Config)

	// Load project service
	log.Printf("[DEBUG] Loading project service: %s", config.Project)
	project, err := config.clientCompute.Projects.Get(config.Project).Do()
	if err != nil {
		return fmt.Errorf("Error loading project '%s': %s", config.Project, err)
	}

	md := project.CommonInstanceMetadata

	// Remove all items
	md.Items = nil

	op, err := config.clientCompute.Projects.SetCommonInstanceMetadata(config.Project, md).Do()
	if err != nil {
		// Check the error before touching op; the original dereferenced
		// op.Id without this check, which would panic on a nil op.
		return err
	}

	log.Printf("[DEBUG] SetCommonMetadata: %d (%s)", op.Id, op.SelfLink)

	err = resourceOperationWaitGlobal(config, op, "SetCommonMetadata")
	if err != nil {
		return err
	}

	return resourceComputeProjectMetadataRead(d, meta)
}
func init() {
	hooks = hook.Hooks{}

	flag.Parse()

	log.SetPrefix("[webhook] ")
	log.SetFlags(log.Ldate | log.Ltime)

	if !*verbose {
		log.SetOutput(ioutil.Discard)
	}

	log.Println("version " + version + " starting")

	// load and parse hooks
	log.Printf("attempting to load hooks from %s\n", *hooksFilePath)

	err := hooks.LoadFromFile(*hooksFilePath)
	if err != nil {
		if !*verbose && !*noPanic {
			log.SetOutput(os.Stdout)
			log.Fatalf("couldn't load any hooks from file! %+v\naborting webhook execution since the -verbose flag is set to false.\nIf, for some reason, you want webhook to start without the hooks, either use -verbose flag, or -nopanic", err)
		}
		log.Printf("couldn't load hooks from file! %+v\n", err)
	} else {
		log.Printf("loaded %d hook(s) from file\n", len(hooks))
		for _, hook := range hooks {
			log.Printf("\t> %s\n", hook.ID)
		}
	}
}
// guessgoarch guesses GOARCH when it is not set. It's annoying asking
// users to set lots of things, so let's try to figure it out.
func guessgoarch() {
	config.Arch = os.Getenv("GOARCH")
	if config.Arch != "" {
		config.Arch = path.Clean(config.Arch)
		return
	}
	log.Printf("GOARCH is not set, trying to guess")
	u, err := uroot.Uname()
	if err != nil {
		log.Printf("uname failed, using default amd64")
		config.Arch = "amd64"
		return
	}
	switch {
	case u.Machine == "i686" || u.Machine == "i386" || u.Machine == "x86":
		config.Arch = "386"
	case u.Machine == "x86_64" || u.Machine == "amd64":
		config.Arch = "amd64"
	case u.Machine == "armv7l" || u.Machine == "armv6l":
		config.Arch = "arm"
	case u.Machine == "ppc" || u.Machine == "ppc64":
		config.Arch = "ppc64"
	default:
		log.Printf("Unrecognized arch")
		config.Fail = true
	}
}