func (hp *HttpProtocol) transData(server string, transData *TransData) (err error) { /* compose req */ uri := hp.regTopic.ReplaceAllString(hp.uri, transData.topic) uri = hp.regMethod.ReplaceAllString(uri, transData.method) uri = hp.regPartition.ReplaceAllString(uri, strconv.FormatInt(int64(transData.partition), 10)) uri = hp.regTransid.ReplaceAllString(uri, strconv.FormatInt(transData.transid, 10)) url := fmt.Sprintf("http://%s%s", server, uri) req, err := http.NewRequest("POST", url, bytes.NewReader(transData.data)) if err != nil { logger.Warning("module [%s]: fail to transData: url=%s, topic=%s, partition=%d, transid=%d, method=%s, err=%s", hp.moduleName, url, transData.topic, transData.partition, transData.transid, transData.method, err.Error()) return err } /* add header */ req.Header = hp.header req.Host = hp.reqHeaderHost /* Post */ res, err := hp.client.Do(req) if err != nil { logger.Warning("module [%s]: fail to transData: url=%s, topic=%s, partition=%d, transid=%d, method=%s, err=%s", hp.moduleName, url, transData.topic, transData.partition, transData.transid, transData.method, err.Error()) return err } defer res.Body.Close() /* check res: 200 不重试;其他,重试 */ if res.StatusCode == http.StatusOK { logger.Notice("module [%s]: success transData: url=%s, topic=%s, partition=%d, transid=%d, method=%s, datalen=%d, http_status_code=%d", hp.moduleName, url, transData.topic, transData.partition, transData.transid, transData.method, len(transData.data), res.StatusCode) return nil } else { logger.Warning("module [%s]: fail to transData: url=%s, topic=%s, partition=%d, transid=%d, method=%s, http_status_code=%d", hp.moduleName, url, transData.topic, transData.partition, transData.transid, transData.method, res.StatusCode) return errors.New("fail to trans") } return nil }
func (z *Zookeeper) updateBrokerListAndWatch() { for { children, _, eventChan, err := z.zkConn.ChildrenW(fmt.Sprintf("%s/brokers/ids", z.cc.chroot)) if err != nil { err = errors.New(fmt.Sprintf("fail to zk.ChildrenW: %s", err.Error())) logger.Fatal(err.Error()) z.fatalErrorChan <- err return } if len(children) == 0 { logger.Warning("no broker found") } else { logger.Debug("using child: %s", children[0]) val, _, err := z.zkConn.Get(fmt.Sprintf("%s/brokers/ids/%s", z.cc.chroot, children[0])) var brokerIdVal BrokerIdVal err = json.Unmarshal(val, &brokerIdVal) if err != nil { logger.Warning("fail to json.Unmarshal for broker id %s: %s", children[0], err.Error()) } logger.Debug("broker id %s: host [%s] port [%d]", children[0], brokerIdVal.Host, brokerIdVal.Port) z.brokerIdValChan <- &brokerIdVal } <-eventChan } }
func (h *GetHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { startTime := time.Now() qv := r.URL.Query() id, partition, offset, err := h.bs.km.parseKey(qv.Get("key")) if err != nil { logger.Warning("fail to parseKey: %s, %s", r.URL.String(), err.Error()) w.WriteHeader(http.StatusBadRequest) return } val, err := h.getData(id, partition, offset) if err != nil { logger.Warning("fail to getData: %s, %s", r.URL.String(), err.Error()) w.WriteHeader(http.StatusInternalServerError) return } w.WriteHeader(http.StatusOK) _, err = w.Write(val) if err != nil { logger.Warning("fail to write response: %s, %s", r.URL.String(), err.Error()) w.WriteHeader(http.StatusInternalServerError) return } endTime := time.Now() costTimeUS := endTime.Sub(startTime) / time.Microsecond logger.Notice("success process get: %s, cost_us=%d, datalen=%d", r.URL.String(), costTimeUS, len(val)) return }
func (h *StoreHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { startTime := time.Now() /* check query */ if r.ContentLength <= 0 { logger.Warning("invalid query, need post data: %s", r.URL.String()) w.WriteHeader(http.StatusBadRequest) return } // qv := r.URL.Query() sr := h.getStoreReq() defer h.putStoreReq(sr) nr, err := sr.reqBuffer.ReadFrom(r.Body) if int64(nr) != r.ContentLength || err != nil { logger.Warning("fail to read body: %s, %s", r.URL.String(), err.Error()) w.WriteHeader(http.StatusInternalServerError) return } err = h.parseReq(sr) if err != nil { logger.Warning("invalid query, parseReq failed : %s, %s", r.URL.String(), err.Error()) w.WriteHeader(http.StatusBadRequest) return } err = h.bs.store.addNewData(sr) if err != nil { logger.Warning("fail to write response: %s, %s", r.URL.String(), err.Error()) w.WriteHeader(http.StatusInternalServerError) return } w.WriteHeader(http.StatusOK) endTime := time.Now() costTimeUS := endTime.Sub(startTime) / time.Microsecond logger.Notice("success process store: %s, id=%d, cost_us=%d", r.URL.String(), sr.id, costTimeUS) return }
func (b *Broker) updateBrokerOffset() { if b.client == nil || b.client.Closed() { logger.Notice("broker.client not ready yet") return } topics, err := b.client.Topics() if err != nil { logger.Warning("fail to b.client.Topics(): %s", err.Error()) return } for _, topic := range topics { partitions, err := b.client.Partitions(topic) if err != nil { logger.Warning("fail to b.client.Partitions(): %s", err.Error()) continue } for _, partition := range partitions { offset, err := b.client.GetOffset(topic, partition, sarama.OffsetNewest) if err != nil { logger.Warning("fail to b.client.GetOffset topic=%s, partition=%d, err=%s", topic, partition, err.Error()) continue } // TODO: use pool b.brokerOffsetChan <- &BrokerOffset{ topic: topic, partition: partition, offset: offset, } logger.Debug("offset: topic=%s, partition=%d, offset=%d", topic, partition, offset) } } }
// ServeHTTP commits one posted message to Kafka synchronously.
//
// Query parameters: topic, key, and acks ("0"/"1"/"-1" mapping to
// sarama NoResponse/WaitForLocal/WaitForAll; anything else defaults to
// WaitForLocal). The request body is the message payload. The handler
// blocks on cmData.cmDoneChan until the producer reports the result, so
// the pooled CmData must not be returned to the pool before that
// handshake completes (the deferred put runs only at function exit).
func (ch *CmHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	startTime := time.Now()
	/* check query */
	if r.ContentLength <= 0 {
		logger.Warning("invalid query, need post data: %s", r.URL.String())
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	qv := r.URL.Query()
	post := make([]byte, r.ContentLength)
	nr, err := io.ReadFull(r.Body, post)
	// io.ReadFull guarantees err != nil whenever nr < len(post), so the
	// err.Error() below is safe despite err not being tested directly.
	if int64(nr) != r.ContentLength {
		logger.Warning("fail to read body: %s, %s", r.URL.String(), err.Error())
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	/* compose CmData */
	cmData := ch.cdp.fetch()
	defer ch.cdp.put(cmData)
	// map the acks query parameter onto sarama's RequiredAcks values
	switch qv.Get("acks") {
	case "0":
		cmData.requiredAcks = sarama.NoResponse
	case "1":
		cmData.requiredAcks = sarama.WaitForLocal
	case "-1":
		cmData.requiredAcks = sarama.WaitForAll
	default:
		cmData.requiredAcks = sarama.WaitForLocal
	}
	cmData.topic = qv.Get("topic")
	cmData.key = qv.Get("key")
	cmData.data = post
	/* commit */
	ch.kp.producer.produce(cmData)
	/* wait res: producer signals completion (and fills offset/partition/err) */
	<-cmData.cmDoneChan
	if cmData.err != nil {
		logger.Warning("fail to commit req: %s, error: %s", r.URL.String(), (*(cmData.err)).Error())
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	endTime := time.Now()
	costTimeUS := endTime.Sub(startTime) / time.Microsecond
	// TODO
	logger.Notice("success process commit: %s, cost_us=%d, datalen=%d, offset=%d, partition=%d", r.URL.String(), costTimeUS, nr, cmData.offset, cmData.partition)
	w.WriteHeader(http.StatusOK)
	return
}
func (c *Cluster) computeTransDelay(transDelayChan chan TransDelay) { resMap := make(map[string]map[string]int64) for cg, tm := range c.cs.transfer.transferOffset { for topic, pm := range tm { dpm, ok := resMap[cg] if !ok { dpm = make(map[string]int64) resMap[cg] = dpm } for partition, tor := range pm { bor, ok := c.cs.broker.brokerOffset[topic][partition] if !ok { logger.Warning("no broker offset record found: topic=%s, partition=%d", topic, partition) dpm[strconv.FormatInt(int64(partition), 10)] = 0 continue } bo := c.computeBrokerOffsetAtTime(tor.last.timestamp, bor) delay := bo - tor.last.offset if delay < 0 { delay = 0 } dpm[strconv.FormatInt(int64(partition), 10)] = delay } } } transDelayChan <- TransDelay(resMap) }
/* do not change state in this func */
// processAckData handles one worker acknowledgement: it narrows the
// in-flight window, advances the consumer-group commit point when the
// acked message moved the low-water mark, and frees the worker so it can
// pick up its next queued item.
func (td *TransDi) processAckData(ackData *AckData) {
	transid := ackData.transid
	workerId := ackData.workerId
	logger.Debug("process ack: transid=%d, workerId=%d", transid, workerId)
	/* narrow window */
	// ackOne returns the message whose offset is now safe to commit, or
	// nil when the window's lower edge did not move.
	msg := td.transWindow.ackOne(transid)
	// NOTE(review): despite its name, ackedMinTransid tracks the highest
	// offset committed so far — TODO confirm and consider renaming.
	if msg != nil && msg.Offset > td.ackedMinTransid {
		td.ackedMinTransid = msg.Offset
		err := td.cg.CommitUpto(msg)
		if err != nil {
			// TODO: optimized
			// a failed commit is treated as fatal for the whole dispatcher
			logger.Warning("fail to consumergroup.CommitUpto(): %s", err.Error())
			td.fatalErrorChan <- err
			return
		}
		logger.Debug("consumergroup.CommitUpTo %d", msg.Offset)
	}
	logger.Debug("transWindow size: %d", td.transWindow.window.Len())
	/* move worker */
	// mark the worker idle before asking it to pull the next trans
	worker := td.transWorkers[workerId]
	worker.inWork = false
	err := worker.workIfNeed()
	if err != nil {
		logger.Fatal("fail to let worker to work: %s", err.Error())
		td.fatalErrorChan <- err
		return
	}
}
// updateCgAndWatch watches <chroot>/consumers/<cg>/offsets for topics
// consumed by group cg. While the node does not exist it falls back to an
// existence watch and waits for creation. For every topic child it spawns
// an updateTopicAndWatch goroutine, then blocks until the child list
// changes. An EventNotWatching event means the session-level watcher was
// invalidated, so this loop returns and lets the root watcher re-init.
func (z *Zookeeper) updateCgAndWatch(cg string) {
	path := fmt.Sprintf("%s/consumers/%s/offsets", z.cc.chroot, cg)
	for {
		children, _, eventChan, err := z.zkConn.ChildrenW(path)
		if err == zk.ErrNoNode {
			// node missing: wait for it to be created, then retry
			_, _, eventChan, err = z.zkConn.ExistsW(path)
			if err != nil {
				err = errors.New(fmt.Sprintf("fail to zk.ExistsW: path=%s, %s", path, err.Error()))
				logger.Fatal(err.Error())
				z.fatalErrorChan <- err
				return
			}
			<-eventChan
			continue
		}
		if err != nil {
			err = errors.New(fmt.Sprintf("fail to zk.ChildrenW: path=%s, %s", path, err.Error()))
			logger.Fatal(err.Error())
			z.fatalErrorChan <- err
			return
		}
		if len(children) == 0 {
			logger.Warning("no transfer topic found: path=%s", path)
		} else {
			// NOTE(review): a new goroutine is started per topic on every
			// wakeup of this loop; duplicates presumably exit via the
			// EventNotWatching path below — TODO confirm no watcher leak.
			for _, topic := range children {
				go z.updateTopicAndWatch(cg, topic)
			}
		}
		event := <-eventChan
		// let root watcher to re-init
		if event.Type == zk.EventNotWatching {
			return
		}
	}
}
func (d *Deleter) Run() (err error) { err = d.httpServer.ListenAndServe() if err != nil { logger.Warning("fail to httpServer.ListenAndServe: %s", err.Error()) return } logger.Notice("httpServer.ListenAndServe end") return nil }
func (pmp *PMsgPool) fetch() *PMsgPoolEle { if pmp.poolFreeSize <= 0 { logger.Warning("Producer Message Pool is too small") return pmp.newPMsgPoolEle() } r := pmp.root.r.Unlink(1) pmp.poolFreeSize-- return r.Value.(*PMsgPoolEle) }
func (cdp *CmDataPool) fetch() *CmData { var cd *CmData select { case cd = <-cdp.pool: default: logger.Warning("cm_data_pool is not big enough") cd = cdp.newCmData() } return cd }
func (zk *ZK) run() { for { err := zk.updateOffsets() if err != nil { logger.Warning("fail to zk.updateOffsets: %s", err.Error()) time.Sleep(zk.config.zkFailRetryInterval) } time.Sleep(zk.config.zkUpdateInterval) } }
func (worker *TransWorker) initProtocol() (err error) { name, ok := worker.protocolConfig["name"] if !ok { err = errors.New(fmt.Sprintf("protocol.name not found in module conf file, module [%s]", worker.moduleName)) logger.Warning("%s", err.Error()) return } worker.protocolName = name.(string) switch worker.protocolName { case "http": err = worker.initHttpProtocol() default: err = errors.New(fmt.Sprintf("unknown protocol: %s", worker.protocolName)) } if err != nil { logger.Warning("fail to init protocol for module [%s]: %s", worker.moduleName, err.Error()) return } return nil }
// handleCm commits one posted message synchronously.
//
// Query parameters: topic and key; the request body is the payload. The
// pooled CmData is handed to the producer via cmDataChan and the handler
// blocks on cmData.cmDoneChan until the producer signals completion, so
// the deferred pool put cannot run before the handshake finishes.
func (httpServer *HttpServer) handleCm(w http.ResponseWriter, r *http.Request) {
	startTime := time.Now()
	/* check query */
	if r.ContentLength <= 0 {
		logger.Warning("invalid query, need post data: %s", r.URL.String())
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	qv := r.URL.Query()
	post := make([]byte, r.ContentLength)
	nr, err := io.ReadFull(r.Body, post)
	// io.ReadFull guarantees err != nil whenever nr < len(post), so the
	// err.Error() below is safe despite err not being tested directly.
	if int64(nr) != r.ContentLength {
		logger.Warning("fail to read body: %s, %s", r.URL.String(), err.Error())
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	/* compose CmData */
	cmData := httpServer.cdp.fetch()
	defer httpServer.cdp.put(cmData)
	cmData.topic = qv.Get("topic")
	cmData.key = qv.Get("key")
	cmData.data = post
	/* commit */
	httpServer.cmDataChan <- cmData
	/* wait res: producer fills offset/partition/err then signals done */
	<-cmData.cmDoneChan
	if cmData.err != nil {
		logger.Warning("fail to commit req: %s, error: %s", r.URL.String(), (*(cmData.err)).Error())
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	endTime := time.Now()
	costTimeUS := endTime.Sub(startTime) / time.Microsecond
	// TODO
	logger.Notice("success process commit: %s, cost_us=%d, datalen=%d, offset=%d, partition=%d", r.URL.String(), costTimeUS, nr, cmData.offset, cmData.partition)
	w.WriteHeader(http.StatusOK)
	return
}
func (hp *HttpProtocol) initRegs() (err error) { hp.regTopic, err = regexp.Compile("{#TOPIC}") if err != nil { logger.Warning("fail to regexp.Compile: %s", err.Error()) return } hp.regMethod, err = regexp.Compile("{#METHOD}") if err != nil { logger.Warning("fail to regexp.Compile: %s", err.Error()) return } hp.regPartition, err = regexp.Compile("{#PARTITION}") if err != nil { logger.Warning("fail to regexp.Compile: %s", err.Error()) return } hp.regTransid, err = regexp.Compile("{#TRANSID}") if err != nil { logger.Warning("fail to regexp.Compile: %s", err.Error()) return } return nil }
func (d *Deleter) response(w http.ResponseWriter, statusCode int, errMsg string) { var buf bytes.Buffer buf.Reset() res := map[string]interface{}{ "error": map[string]interface{}{ "errno": statusCode, /* some msgpack lib cannot support new encoding-spec, so let len(string) < 31 */ // "errmsg": "success process: /del?topic=ap_", "errmsg": "see errno please", }, } wr := msgp.NewWriter(&buf) err := wr.WriteIntf(res) if err != nil { logger.Warning("fail to msgp.wr.WriteIntf: %s", err.Error()) w.WriteHeader(http.StatusInternalServerError) return } wr.Flush() w.WriteHeader(statusCode) _, err = w.Write(buf.Bytes()) if err != nil { logger.Warning("fail to http.ResponseWriter.Write(): %s", err.Error()) return } // /* unpack test */ // b := bytes.NewReader(buf.Bytes()) // b.Seek(0, 0) // msgr := msgp.NewReader(b) // reqi, err := msgr.ReadIntf() // if err != nil { // msg := fmt.Sprintf("fail to decode post data: %s", err.Error()) // logger.Warning("%s", msg) // return // } // logger.Warning("unpack: %v", reqi) }
// updatePartitionAndWatch watches one consumer-group offset node,
// <chroot>/consumers/<cg>/offsets/<topic>/<partition>, and publishes
// every offset change on z.transferOffsetChan. While the node does not
// exist it waits on an existence watch. A non-parseable offset value is
// fatal. An EventNotWatching event returns so the root watcher re-inits.
func (z *Zookeeper) updatePartitionAndWatch(cg, topic string, partition int32) {
	path := fmt.Sprintf("%s/consumers/%s/offsets/%s/%d", z.cc.chroot, cg, topic, partition)
	for {
		val, _, eventChan, err := z.zkConn.GetW(path)
		if err == zk.ErrNoNode {
			// node missing: wait for creation, then retry the GetW
			_, _, eventChan, err = z.zkConn.ExistsW(path)
			if err != nil {
				err = errors.New(fmt.Sprintf("fail to zk.ExistsW: path=%s, %s", path, err.Error()))
				logger.Fatal(err.Error())
				z.fatalErrorChan <- err
				return
			}
			<-eventChan
			continue
		}
		if err != nil {
			err = errors.New(fmt.Sprintf("fail to zk.GetW: path=%s, %s", path, err.Error()))
			logger.Fatal(err.Error())
			z.fatalErrorChan <- err
			return
		}
		if len(val) == 0 {
			logger.Warning("transfer partition val is empty: path=%s", path)
		} else {
			// the node stores the committed offset as a decimal string
			offset, err := strconv.ParseInt(string(val), 10, 64)
			if err != nil {
				err = errors.New(fmt.Sprintf("fail to transfer: strconv.ParseInt: %s", err.Error()))
				logger.Fatal(err.Error())
				z.fatalErrorChan <- err
				return
			}
			z.transferOffsetChan <- &TransferOffset{
				cg:        cg,
				topic:     topic,
				partition: partition,
				offset:    offset,
			}
			logger.Debug("see transfer: cg=%s, topic=%s, partition=%d, offset=%d", cg, topic, partition, offset)
		}
		event := <-eventChan
		// let root watcher to re-init
		if event.Type == zk.EventNotWatching {
			return
		}
	}
}
// updateTransferAndWatch is the root watcher: it watches
// <chroot>/consumers for consumer groups and spawns an updateCgAndWatch
// goroutine per group, then blocks until the child list changes.
// Watch failures are fatal and are reported on z.fatalErrorChan.
func (z *Zookeeper) updateTransferAndWatch() {
	path := fmt.Sprintf("%s/consumers", z.cc.chroot)
	for {
		children, _, eventChan, err := z.zkConn.ChildrenW(path)
		if err != nil {
			err = errors.New(fmt.Sprintf("fail to zk.ChildrenW: path=%s, %s", path, err.Error()))
			logger.Fatal(err.Error())
			z.fatalErrorChan <- err
			return
		}
		if len(children) == 0 {
			logger.Warning("no transfer cg found: path=%s", path)
		} else {
			// NOTE(review): one goroutine per cg is started on every
			// wakeup; previously started watchers presumably exit via
			// EventNotWatching in updateCgAndWatch — TODO confirm the
			// spawn/exit pairing to rule out a goroutine leak.
			for _, cg := range children {
				go z.updateCgAndWatch(cg)
			}
		}
		// block until the set of consumer groups changes
		<-eventChan
	}
}
func (m *Module) initConfig() (err error) { content, err := ioutil.ReadFile(fmt.Sprintf("%s/%s.yaml", m.moduleConfDir, m.name)) if err != nil { logger.Warning("fail to read module config: %s", err.Error()) return } mc := make(map[interface{}]interface{}) err = yaml.Unmarshal(content, &mc) if err != nil { return } m.moduleConfig = mc /* topic conf */ topic, ok := mc["topic"] if !ok { err = errors.New(fmt.Sprintf("topic not found in module conf file, module [%s]", m.name)) logger.Warning("%s", err.Error()) return } m.topic = topic.(string) /* method conf */ m.methodEnabled = make(map[string]bool) methods, ok := mc["methods"] if ok { for _, method := range methods.([]interface{}) { m.methodEnabled[method.(string)] = true } } /* protocol conf */ protocol, ok := mc["protocol"] if !ok { err = errors.New(fmt.Sprintf("protocol not found in module conf file, module [%s]", m.name)) logger.Warning("%s", err.Error()) return } m.protocolConfig = protocol.(map[interface{}]interface{}) /* worker conf */ workerNum, ok := mc["worker_num"] if !ok { err = errors.New(fmt.Sprintf("worker_num not found in module conf file, module [%s]", m.name)) logger.Warning("%s", err.Error()) return } m.workerNum = uint32(workerNum.(int)) maxRetryTimes, ok := mc["max_retry_times"] if !ok { err = errors.New(fmt.Sprintf("max_retry_times not found in module conf file, module [%s]", m.name)) logger.Warning("%s", err.Error()) return } m.maxRetryTimes = maxRetryTimes.(int) failRetryInterval, ok := mc["fail_retry_interval_ms"] if !ok { err = errors.New(fmt.Sprintf("fail_retry_interval_ms not in module conf file, module [%s]", m.name)) logger.Warning("%s", err.Error()) return } m.failRetryInterval = time.Duration(failRetryInterval.(int)) * time.Millisecond sbykey, ok := mc["serialize_by_key"] if ok { m.serializeByKey = sbykey.(bool) } else { m.serializeByKey = true } /* window size */ windowSize, ok := mc["window_size"] if !ok { err = errors.New(fmt.Sprintf("window_size not found in module conf file, module 
[%s]", m.name)) logger.Warning("%s", err.Error()) return } m.windowSize = windowSize.(int) /* backend servers */ backendServers, ok := mc["backend_servers"] if !ok { err = errors.New(fmt.Sprintf("backend_servers not found in module conf file, module [%s]", m.name)) logger.Warning("%s", err.Error()) return } for _, server := range backendServers.([]interface{}) { m.backendServers = append(m.backendServers, server.(string)) } /* consumer config */ ti, ok := mc["expected_processing_time_ms"] if ok { m.expectedProcessingTime = time.Duration(ti.(int)) * time.Millisecond } ti, ok = mc["zk_offset_update_interval_sec"] if ok { m.zkOffsetUpdateInterval = time.Duration(ti.(int)) * time.Second } qs, ok := mc["waiting_queue_size"] if ok { m.waitingQueueSize = qs.(int) } return nil }
// addKeys re-enqueues a batch of cache keys onto the delay message queue.
//
// The batch is msgpack-encoded with its deletion metadata (delete time,
// next delay, done-servers set) and POSTed to one of the MQ servers,
// rotating through the list on connection failure until either a server
// accepts it, every server has been tried, or checkMqTimeout fires.
// The first delete of a batch prefers the idc MQ servers when configured.
func (mc *MqClient) addKeys(keys []interface{}, doneServers map[string]interface{}, delTime, nextDelay int64, firstDel bool) (err error) {
	/* pack data */
	data := map[string]interface{}{
		gDelKeys:     keys,
		gDelDelTime:  delTime,
		gDelCurDelay: nextDelay,
		gDelFromMq:   true,
		gFirstDel:    firstDel,
		"method":     fmt.Sprintf("%s%d", gDelMethodPrefix, nextDelay), // ktransfer need it
	}
	if len(doneServers) > 0 {
		data[gDoneServers] = doneServers
	}
	var buf bytes.Buffer
	wr := msgp.NewWriter(&buf)
	err = wr.WriteIntf(data)
	if err != nil {
		logger.Warning("fail to msgp.WriteIntf: %s", err.Error())
		return
	}
	// NOTE(review): the Flush error is ignored here — a failed flush
	// would send a truncated payload; TODO confirm this is acceptable.
	wr.Flush()
	r := bytes.NewReader(buf.Bytes())
	/* write mq: first delete prefers the idc servers when available */
	var mqServerAddrs []string
	if firstDel && len(mc.idcMqServerAddrs) > 0 {
		mqServerAddrs = mc.idcMqServerAddrs
	} else {
		mqServerAddrs = mc.delMqServerAddrs
	}
	// checkMqTimeout signals on toc when the overall retry budget is spent
	toc := make(chan bool, 1)
	go mc.checkMqTimeout(toc)
	ns := len(mqServerAddrs)
	// round-robin start position persists across calls via lastServerId
	mc.lastServerId = (mc.lastServerId + 1) % ns
	i := 0
	for {
		// rewind the shared reader before each retry POST
		r.Seek(0, 0)
		url := fmt.Sprintf("http://%s?topic=%s&method=%s%d", mqServerAddrs[mc.lastServerId], mc.clusterName, gDelMethodPrefix, nextDelay)
		req, err1 := http.NewRequest("POST", url, r)
		if err1 != nil {
			logger.Warning("fail to http.NewRequest: %s, %s", url, err1.Error())
			err = err1
			return
		}
		rsp, err1 := mc.client.Do(req)
		if err1 != nil {
			logger.Warning("fail to http.Client.Do: %s, %s", url, err1.Error())
			// stop retrying once the timeout goroutine has fired
			select {
			case <-toc:
				err = errors.New("add back to mq timeout")
				return
			default:
				/* do nothing */
			}
			i++
			if i == ns {
				err = errors.New("fail to add back to mq, all servers have been retried")
				return
			}
			mc.lastServerId = (mc.lastServerId + 1) % ns
			continue
		} else {
			// NOTE(review): the response body is closed without being
			// drained, which can prevent connection reuse — TODO confirm.
			rsp.Body.Close()
			logger.Notice("success add back to mq: %s", url)
			return nil
		}
	}
}
// Printf adapts the project logger to an interface that expects a
// Printf-style logger, forwarding everything at Warning level.
// BUG FIX: the format parameter was previously named "fmt", shadowing
// the fmt package inside this method.
func (l FakeLogger) Printf(format string, args ...interface{}) {
	logger.Warning(format, args...)
}
func (d *Deleter) initCacheClusters() (err error) { content, err := ioutil.ReadFile(d.clusterConfFile) if err != nil { return } m := make(map[interface{}]interface{}) err = yaml.Unmarshal(content, &m) if err != nil { return } d.cacheClusters = make(map[string]*CacheCluster) for name, ccc := range d.cacheClusterConfs { clusterConf, ok := m[name] if !ok { err = errors.New(fmt.Sprintf("cluster [%s] not found in %s", name, d.clusterConfFile)) logger.Warning("%s", err.Error()) return } cluster := clusterConf.(map[interface{}]interface{}) redis, ok := cluster["redis"] isRedis := false if ok && redis.(bool) { isRedis = true } servers, ok := cluster["servers"] if !ok { err = errors.New(fmt.Sprintf("servers not found in cluster [%s], in %s", name, d.clusterConfFile)) logger.Warning("%s", err.Error()) return } if len(servers.([]interface{})) == 0 { err = errors.New(fmt.Sprintf("num of servers is zero in cluster [%s], in %s", name, d.clusterConfFile)) logger.Warning("%s", err.Error()) return } var addrs []string for _, server := range servers.([]interface{}) { sa := strings.Split(server.(string), ":") if sa[0][0] == 'm' || sa[0][0] == 's' { err = errors.New(fmt.Sprintf("servers is not in cache mode, cluster [%s]", name)) logger.Warning("%s", err.Error()) return } addrs = append(addrs, fmt.Sprintf("%s:%s", sa[0], sa[1])) } ccc.isRedis = isRedis ccc.serverAddrs = addrs cc := &CacheCluster{ clusterName: name, ccc: ccc, } err = cc.init() if err != nil { logger.Warning("fail to init CacheCluster for [%s]", name) return } d.cacheClusters[name] = cc } return nil }
func (d *Deleter) doDel(w http.ResponseWriter, r *http.Request) { /* get topic and method */ logger.Debug("get one req: %s", r.URL.String()) qv := r.URL.Query() topic := qv.Get("topic") if len(topic) == 0 { msg := fmt.Sprintf("invalid query, topic cannot be empty: %s", r.URL.String()) logger.Warning("%s", msg) d.response(w, http.StatusBadRequest, msg) return } cc, ok := d.cacheClusters[topic] if !ok { msg := fmt.Sprintf("invalid query, cache cluster is not exist: cluster [%s], %s", topic, r.URL.String()) logger.Warning("%s", msg) d.response(w, http.StatusBadRequest, msg) return } /* get post data */ if r.ContentLength == 0 { logger.Warning("post data cannot be empty") d.response(w, http.StatusBadRequest, "post data cannot be empty") return } data := make([]byte, r.ContentLength) _, err := io.ReadFull(r.Body, data) if err != nil { msg := fmt.Sprintf("fail to read post data: %s", err.Error()) logger.Warning("%s", msg) d.response(w, http.StatusBadRequest, msg) return } /* unpack */ buf := bytes.NewReader(data) buf.Seek(0, 0) msgr := msgp.NewReader(buf) reqi, err := msgr.ReadIntf() if err != nil { msg := fmt.Sprintf("fail to decode post data: %s", err.Error()) logger.Warning("%s", msg) d.response(w, http.StatusBadRequest, msg) return } req, ok := reqi.(map[string]interface{}) if !ok { msg := fmt.Sprintf("invalid post data: data need map", r.URL.String()) logger.Warning("%s", msg) d.response(w, http.StatusBadRequest, msg) return } keysi, ok := req[gDelKeys] if !ok { msg := fmt.Sprintf("invalid post data: %s not exist: %s", gDelKeys, r.URL.String()) logger.Warning("%s", msg) d.response(w, http.StatusBadRequest, msg) return } keys, ok := keysi.([]interface{}) if !ok { msg := fmt.Sprintf("invalid post data: %s not array: %s", gDelKeys, r.URL.String()) logger.Warning("%s", msg) d.response(w, http.StatusBadRequest, msg) return } if len(keys) == 0 { msg := fmt.Sprintf("invalid post data: len of %s array is 0: %s", gDelKeys, r.URL.String()) logger.Warning("%s", msg) 
d.response(w, http.StatusBadRequest, msg) return } delTimei, ok := req[gDelDelTime] if !ok { msg := fmt.Sprintf("invalid post data: %s not exist: %s", gDelDelTime, r.URL.String()) logger.Warning("%s", msg) d.response(w, http.StatusBadRequest, msg) return } delTime, ok := delTimei.(int64) if !ok { delTimet, ok := delTimei.(int) if !ok { delTimetu, ok := delTimei.(uint64) if !ok { msg := fmt.Sprintf("invalid post data: %s not int64 or uint64 or int: %s", gDelDelTime, r.URL.String()) logger.Warning("%s", msg) d.response(w, http.StatusBadRequest, msg) return } else { delTime = int64(delTimetu) } } else { delTime = int64(delTimet) } } var curDelay int64 curDelayi, ok := req[gDelCurDelay] if !ok { curDelay = 0 } else if curDelay, ok = curDelayi.(int64); !ok { curDelayt, ok := curDelayi.(int) if !ok { curDelaytu, ok := curDelayi.(uint64) if !ok { msg := fmt.Sprintf("invalid post data: %s not int64 or int: %s", gDelCurDelay, r.URL.String()) logger.Warning("%s", msg) d.response(w, http.StatusBadRequest, msg) return } else { curDelay = int64(curDelaytu) } } else { curDelay = int64(curDelayt) } } var isFromMq bool isFromMqi, ok := req[gDelFromMq] if !ok { isFromMq = false } else if isFromMq, ok = isFromMqi.(bool); !ok { msg := fmt.Sprintf("invalid post data: %s not bool: %s", gDelFromMq, r.URL.String()) logger.Warning("%s", msg) d.response(w, http.StatusBadRequest, msg) return } var doneServers map[string]interface{} doneServersi, ok := req[gDoneServers] if !ok { doneServers = make(map[string]interface{}) } else if doneServers, ok = doneServersi.(map[string]interface{}); !ok { msg := fmt.Sprintf("invalid post data: %s not map[string]interface{}: %s", gDoneServers, r.URL.String()) logger.Warning("%s", msg) d.response(w, http.StatusBadRequest, msg) return } var firstDel bool firstDeli, ok := req[gFirstDel] if !ok { firstDel = true } else if firstDel, ok = firstDeli.(bool); !ok { msg := fmt.Sprintf("invalid post data: %s not bool: %s", gFirstDel, r.URL.String()) 
logger.Warning("%s", msg) d.response(w, http.StatusBadRequest, msg) return } /* do del */ err = cc.doDel(keys, delTime, curDelay, doneServers, isFromMq, firstDel) if err != nil { msg := fmt.Sprintf("fail to del: %s", err.Error()) logger.Warning("%s", msg) d.response(w, http.StatusInternalServerError, msg) return } msg := fmt.Sprintf("success process: %s", r.URL.String()) logger.Notice("%s", msg) d.response(w, http.StatusOK, msg) return }
func (hp *HttpProtocol) initConfig() (err error) { /* get uri */ uri, ok := hp.config["uri"] if !ok { err = errors.New(fmt.Sprintf("fail to init HttpProtocol for module [%s]: uri not found", hp.moduleName)) logger.Warning("%s", err.Error()) return } hp.uri = uri.(string) /* get headers */ /* a little trick, see http.Header */ hp.header = http.Header(make(map[string][]string)) headers, ok := hp.config["headers"] if ok { for _, header := range headers.([]interface{}) { for key, val := range header.(map[interface{}]interface{}) { hp.header.Add(key.(string), val.(string)) } } } hh := hp.header.Get("Host") if len(hh) > 0 { hp.reqHeaderHost = hh } /* get timeout, 0 means no timeout */ readTimeO, ok := hp.config["read_timeout_ms"] if !ok { err = errors.New(fmt.Sprintf("fail to init HttpProtocol for module [%s]: read_timeout_ms not found", hp.moduleName)) logger.Warning("%s", err.Error()) return } writeTimeO, ok := hp.config["write_timeout_ms"] if !ok { err = errors.New(fmt.Sprintf("fail to init HttpProtocol for module [%s]: write_timeout_ms not found", hp.moduleName)) logger.Warning("%s", err.Error()) return } connTimeO, ok := hp.config["conn_timeout_ms"] if !ok { err = errors.New(fmt.Sprintf("fail to init HttpProtocol for module [%s]: conn_timeout_ms not found", hp.moduleName)) logger.Warning("%s", err.Error()) return } if readTimeO == 0 || writeTimeO == 0 { hp.processTimeout = 0 } else { hp.processTimeout = time.Duration(readTimeO.(int)+writeTimeO.(int)) * time.Millisecond } /* http client */ hp.client = &http.Client{ Transport: &http.Transport{ Proxy: http.ProxyFromEnvironment, Dial: (&net.Dialer{ Timeout: time.Duration(connTimeO.(int)) * time.Millisecond, KeepAlive: 30 * time.Second, }).Dial, TLSHandshakeTimeout: 10 * time.Second, //ExpectContinueTimeout: 1 * time.Second, }, Timeout: hp.processTimeout, } return nil }
// processConsumerMessage dispatches one consumed Kafka message to a
// transfer worker.
//
// When a method whitelist is configured, the payload is msgpack-decoded
// to read its "method" field; messages with a non-whitelisted method are
// dropped silently. The worker is chosen by FNV-style hash of the key
// (stable routing) or randomly when there is no key / serializeByKey is
// off. Any failure sleeps 100ms and retries the whole step forever —
// a permanently malformed message blocks this partition.
func (td *TransDi) processConsumerMessage(msg *sarama.ConsumerMessage) {
	/* unpack */
	/*
	   pack
	   {
	       'topic' => string,
	       'method' => string,
	       'key' => string,
	       'data' => []byte,
	   }
	*/
	// FIXME: need be optimized here
	sleep := 100 * time.Millisecond
	for {
		var err error
		method := gDefaultMethod
		if len(td.methodEnabled) > 0 {
			// if methods is configured, we should check if this method needed be send
			buf := bytes.NewReader(msg.Value)
			buf.Seek(0, 0)
			msgr := msgp.NewReader(buf)
			// note: err here shadows the outer err for this if-block
			dii, err := msgr.ReadIntf()
			if err != nil {
				err = errors.New(fmt.Sprintf("fail to de-msgpack: %s", err.Error()))
				logger.Warning("fail to processConsumerMessage: topic=%s, partition=%d, transid=%d, error: %s", msg.Topic, msg.Partition, msg.Offset, err.Error())
				time.Sleep(sleep)
				continue
			}
			di, ok := dii.(map[string]interface{})
			if !ok {
				err = errors.New("invalid di: should be map")
				logger.Warning("fail to processConsumerMessage: topic=%s, partition=%d, transid=%d, error: %s", msg.Topic, msg.Partition, msg.Offset, err.Error())
				time.Sleep(sleep)
				continue
			}
			methodi, ok := di["method"]
			if !ok {
				err = errors.New(fmt.Sprintf("invalid di: method not exists"))
				logger.Warning("fail to processConsumerMessage: topic=%s, partition=%d, transid=%d, error: %s", msg.Topic, msg.Partition, msg.Offset, err.Error())
				time.Sleep(sleep)
				continue
			}
			method, ok = methodi.(string)
			if !ok {
				err = errors.New(fmt.Sprintf("invlid di: method is not string"))
				logger.Warning("fail to processConsumerMessage: topic=%s, partition=%d, transid=%d, error: %s", msg.Topic, msg.Partition, msg.Offset, err.Error())
				time.Sleep(sleep)
				continue
			}
			if _, ok = td.methodEnabled[method]; !ok {
				// not ours
				return
			}
		}
		// dispatch
		// NOTE(review): addSlot runs again on every retry iteration —
		// TODO confirm the window tolerates duplicate slots for one offset.
		td.transWindow.addSlot(msg)
		var workerId uint32
		if msg.Key == nil || len(msg.Key) == 0 || !td.serializeByKey {
			// no key (or key-serialization disabled): spread randomly
			workerId = uint32(rand.Int31n(int32(td.workerNum)))
		} else {
			// stable routing: same key always lands on the same worker
			td.dispatchHasher.Reset()
			_, err = td.dispatchHasher.Write(msg.Key)
			if err != nil {
				err = errors.New(fmt.Sprintf("fail to compute key-hash: %s", err.Error()))
				logger.Warning("fail to processConsumerMessage: topic=%s, partition=%d, transid=%d, error: %s", msg.Topic, msg.Partition, msg.Offset, err.Error())
				time.Sleep(sleep)
				continue
			}
			hash := td.dispatchHasher.Sum32()
			workerId = hash % td.workerNum
		}
		worker := td.transWorkers[workerId]
		transData := &TransData{
			transid:   msg.Offset,
			topic:     msg.Topic,
			method:    method,
			partition: msg.Partition,
			data:      msg.Value,
		}
		err = worker.addTrans(transData)
		if err != nil {
			logger.Warning("fail to processConsumerMessage: topic=%s, partition=%d, transid=%d, error: %s", msg.Topic, msg.Partition, msg.Offset, err.Error())
			time.Sleep(sleep)
			continue
		}
		td.state = WAIT_ACK_NONBLOCK
		return
	}
}
// Println adapts sarama's stdlib-style logger interface to the project
// logger, tagging every line with a "SARAMA:" prefix at Warning level.
func (sl *SaramaLogger) Println(v ...interface{}) {
	line := fmt.Sprintln(v...)
	logger.Warning("SARAMA: %s", line)
}
func (rcp *RedisCachePool) doDel(keys []interface{}, doneServers map[string]interface{}) (err error) { toc := make(chan bool, 1) go rcp.delAllTimeout(toc) hasError := false needReceiveConns := make(map[string]redis.Conn, len(rcp.redisServerPool)) var firstErr error for addr, pool := range rcp.redisServerPool { if _, ok := doneServers[addr]; ok { continue } conn := pool.Get() if err = conn.Err(); err != nil { hasError = true if firstErr == nil { firstErr = err } logger.Warning("fail to get conn for server: %s, %s", addr, err.Error()) continue } defer conn.Close() err = conn.Send("del", keys...) if err != nil { hasError = true if firstErr == nil { firstErr = err } logger.Warning("fail to conn.Send for server: %s, %s", addr, err.Error()) continue } err = conn.Flush() if err != nil { hasError = true if firstErr == nil { firstErr = err } logger.Warning("fail to conn.Flush for server: %s, %s", addr, err.Error()) continue } needReceiveConns[addr] = conn } select { case <-toc: hasError = true if firstErr == nil { firstErr = err } err = errors.New(fmt.Sprintf("del All timeout for cluster: %s", rcp.clusterName)) goto errout default: /* do nothine */ } // logger.Debug("need receive servers: %d", len(needReceiveConns)) for addr, conn := range needReceiveConns { _, err = conn.Receive() if err != nil { hasError = true if firstErr == nil { firstErr = err } logger.Warning("fail to conn.Receive: %s", err.Error()) continue } else { doneServers[addr] = true } select { case <-toc: hasError = true if firstErr == nil { firstErr = err } err = errors.New(fmt.Sprintf("del All timeout for cluster: %s", rcp.clusterName)) goto errout default: /* do nothine */ } } if hasError { goto errout } return nil errout: err = firstErr logger.Warning("%s", err.Error()) return }
func (h *AddHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { startTime := time.Now() /* check query */ if r.ContentLength <= 0 { logger.Warning("invalid query, need post data: %s", r.URL.String()) w.WriteHeader(http.StatusBadRequest) return } // qv := r.URL.Query() bs := h.bs ad := bs.adp.fetch() defer bs.adp.put(ad) ad.buffer.Reset() nr, err := ad.buffer.ReadFrom(r.Body) if int64(nr) != r.ContentLength || err != nil { logger.Warning("fail to read body: %s, %s", r.URL.String(), err.Error()) w.WriteHeader(http.StatusInternalServerError) return } ad.checksum() ad.Key = "" err = bs.dd.checkDup(ad) id := uint64(0) if err != nil { logger.Warning("fail to dd.checkDup: %s, %s", r.URL.String(), err.Error()) // TODO: continue ? } if len(ad.Key) > 0 { // duplication // done } else { // new data id, err = bs.km.getNewId() if err != nil { logger.Warning("fail to getNewId: %s", err.Error()) w.WriteHeader(http.StatusInternalServerError) return } p, o, err := bs.broker.addNewData(id, ad) if err != nil { logger.Warning("fail to addNewData: %s", err.Error()) w.WriteHeader(http.StatusInternalServerError) return } key, err := bs.km.generateKey(id, p, o) if err != nil { logger.Warning("fail to generateKey: %s", err.Error()) w.WriteHeader(http.StatusInternalServerError) return } ad.Key = key err = bs.dd.insertNew(ad) if err != nil { logger.Warning("fail to dd.insertNew: %s", err.Error()) // only warning here } } // response ok w.WriteHeader(http.StatusOK) _, err = w.Write([]byte(ad.Key)) if err != nil { logger.Warning("fail to write response: %s, %s", r.URL.String(), err.Error()) w.WriteHeader(http.StatusInternalServerError) return } endTime := time.Now() costTimeUS := endTime.Sub(startTime) / time.Microsecond logger.Notice("success process add: %s, cost_us=%d, datalen=%d, id=%d, md5a=%d, md5b=%d, fnv1a32=%d, key=%s", r.URL.String(), costTimeUS, nr, id, ad.md5a, ad.md5b, ad.fnv1a32, ad.Key) return }
// Printf adapts sarama's stdlib-style logger interface to the project
// logger, tagging every message with a "SARAMA:" prefix at Warning level.
func (sl *SaramaLogger) Printf(format string, v ...interface{}) {
	msg := fmt.Sprintf(format, v...)
	logger.Warning("SARAMA: %s", msg)
}