// decodes an nntp message and writes it to a section of the file.
// The connection is returned to the appropriate pool (healthy or broken)
// depending on how each step fails; f.Done is always signalled.
func decodeMsg(c *nntp.Conn, f *file, groups []string, MsgID string) {
	defer f.Done()
	g, err := findGroup(c, groups)
	if err != nil {
		// Connection state is unknown after a failed group switch, so
		// hand it to the broken pool rather than reusing it.
		putBroken(c)
		glog.Errorf("switching to group %s: %s", g, err)
		return
	}
	rc, err := c.GetMessage(MsgID)
	if err != nil {
		glog.Errorf("getting message %s from group %s: %s", MsgID, g, err)
		// A *textproto.Error is a clean protocol-level reply (e.g. article
		// not found); the connection itself is still usable.
		if _, ok := err.(*textproto.Error); ok {
			putConn(c)
		} else {
			putBroken(c)
		}
		return
	}
	// Message fully received; the connection can be reused immediately.
	putConn(c)
	yread, err := yenc.NewPart(bytes.NewReader(rc))
	if err != nil {
		glog.Errorln(err)
		return
	}
	// The yEnc part header carries the byte offset of this part within the
	// target file; write the decoded payload at that offset.
	wr := f.WriterAt(yread.Begin)
	_, err = io.Copy(wr, yread)
	if err != nil {
		glog.Errorln(err)
	}
}
func (i *instance) connectionHandler(c *net.UnixConn) { defer c.Close() for { req, err := readRequest(c) if err == io.EOF { return // Client closed the connection. } if err != nil { glog.Errorln("Failed to read a message from socket:", err) } f, t := funcMap[req.Type] if t != true { sendError(c, fmt.Errorf("unknown request type")) continue } resp, err := f(i, req) if err != nil { sendError(c, err) continue } err = writeResponse(c, resp) if err != nil { glog.Errorln("Failed to write a message to socket:", err) return } } }
// createNewLevelDB creates and initializes a fresh LevelDB-backed quad store
// at path, writing the data-version metadata. Returns
// graph.ErrDatabaseExists if the store already contains a horizon key.
func createNewLevelDB(path string, _ graph.Options) error {
	opts := &opt.Options{}
	db, err := leveldb.OpenFile(path, opts)
	if err != nil {
		glog.Errorf("Error: could not create database: %v", err)
		return err
	}
	defer db.Close()
	qs := &QuadStore{}
	qs.db = db
	// Sync writes so the version metadata is durable before returning.
	qs.writeopts = &opt.WriteOptions{
		Sync: true,
	}
	qs.readopts = &opt.ReadOptions{}
	// Probe for the horizon key: its presence means the store was already
	// initialized by a previous run.
	_, err = qs.db.Get([]byte(horizonKey), qs.readopts)
	if err != nil && err != leveldb.ErrNotFound {
		glog.Errorln("couldn't read from leveldb during init")
		return err
	}
	if err != leveldb.ErrNotFound {
		return graph.ErrDatabaseExists
	}
	// Write some metadata
	if err = setVersion(qs.db, latestDataVersion, qs.writeopts); err != nil {
		glog.Errorln("couldn't write leveldb version during init")
		return err
	}
	// NOTE(review): qs.Close() presumably closes qs.db, after which the
	// deferred db.Close() runs again on the same handle — confirm this
	// double close is harmless for this leveldb client.
	qs.Close()
	return nil
}
// Main loop for dispatching SyncQueue // TODO exit func Dispatch() { var err error for { select { case si := <-SyncQueue: if si.FullSync { syncAlbum(si) } else if si, err = NewSyncItemPhoto(si.Filename); err != nil { glog.Errorln(err) } else { syncAlbum(si) } case ai := <-AlbumQueue: switch { case ai.MetaUpdate: glog.Infoln("Updating Album meta") if err = updateMeta(ai.AlbumId); err != nil { glog.Errorln(err) } else { glog.Infof("Metainfo was updated for albumId", ai.AlbumId) } case ai.StatusUpdate: glog.Infoln("Reloading albums") if err = updateAlbums(); err != nil { glog.Errorln(err) } else { glog.Infoln("Albums were reloaded") } } } } }
func (p *pusher) Push(entry *protolog.Entry) error { if entry.Level == protolog.Level_LEVEL_DEBUG && !p.logDebug { return nil } dataBytes, err := p.marshaller.Marshal(entry) if err != nil { return err } data := string(dataBytes) switch entry.Level { case protolog.Level_LEVEL_DEBUG, protolog.Level_LEVEL_INFO: glog.Infoln(data) case protolog.Level_LEVEL_WARN: glog.Warningln(data) case protolog.Level_LEVEL_ERROR: glog.Errorln(data) case protolog.Level_LEVEL_FATAL: // cannot use fatal since this will exit before logging completes, // which is particularly important for a multi-pusher glog.Errorln(data) case protolog.Level_LEVEL_PANIC: // cannot use panic since this will panic before logging completes, // which is particularly important for a multi-pusher glog.Errorln(data) } return nil }
func (this *TaskType) realUpTask() { for { one := this.buff[1].Front() if one == nil { time.Sleep(100 * time.Millisecond) continue } t := one.Value.(*TaskInfo2DB) if t.sign == 'A' { _, err := newTask2DB(this.Name, t.taskInfo.Tid, t.taskInfo.Rid, t.taskInfo.Info, t.stat) if err != nil { glog.Errorln("NEWTASK2DB:", err.Error()) } else if t.stat == 2 { this.lock[0].Lock() this.buff[0].PushBack(t.taskInfo) // 直接入分发队列 this.lock[0].Unlock() } } else if t.sign == 'U' { _, err := upTask2DB(this.Name, t.taskInfo.Tid, t.rapper, t.msg, t.stat) if err != nil { glog.Errorln("UPTASK2DB:", err.Error()) } } this.lock[1].Lock() this.buff[1].Remove(one) this.lock[1].Unlock() } }
func init() { if client, err := db.Client(); err != nil { glog.Errorln(err) } else { defer db.Release(client) { if len(config.Cfg.Metrics.AddScript) > 0 { if addSha, err = client.Cmd("SCRIPT", "LOAD", config.Cfg.Metrics.AddScript).Str(); err != nil { glog.Errorln(err) } else { glog.Infoln("ADD SHA", addSha) } } if len(config.Cfg.Metrics.GetScript) > 0 { if getSha, err = client.Cmd("SCRIPT", "LOAD", config.Cfg.Metrics.GetScript).Str(); err != nil { glog.Errorln(err) } else { glog.Infoln("GET SHA", getSha) } } if len(config.Cfg.Metrics.TtlScript) > 0 { if ttlSha, err = client.Cmd("SCRIPT", "LOAD", config.Cfg.Metrics.TtlScript).Str(); err != nil { glog.Errorln(err) } else { glog.Infoln("TTL SHA", ttlSha) } } } } }
func (r *Remote) writePump(outbound chan interface{}) { ticker := time.NewTicker(pingPeriod) defer func() { ticker.Stop() }() for { select { case message, ok := <-outbound: if !ok { r.ws.WriteMessage(websocket.CloseMessage, []byte{}) return } b, err := json.Marshal(message) if err != nil { glog.Errorln(err) return } glog.V(2).Infoln(string(b)) if err := r.ws.WriteMessage(websocket.TextMessage, b); err != nil { glog.Errorln(err) return } case <-ticker.C: if err := r.ws.WriteMessage(websocket.PingMessage, []byte{}); err != nil { glog.Errorln(err) return } } } }
// GetSession returns a ServerSession from the database.
// It checks memcached/redis first, and then falls back on the
// database.
func GetSession(id string) (*ServerSession, error) {
	conn := database.Pool.Get()
	defer conn.Close()
	var ss ServerSession
	reply, err := conn.Do("GET", id)
	if reply != nil && err == nil {
		// Cache hit: the cached value is the JSON-serialized session.
		b, ok := reply.([]byte)
		if ok {
			err = ss.UnmarshalJSON(b)
			if err != nil {
				glog.Errorln(err)
			}
		}
		// NOTE(review): if the cached reply is not []byte, an empty session
		// is returned with a nil error — confirm this is intended.
	} else {
		// Cache miss (or a redis error): fall back to PostgreSQL.
		err = database.DB.QueryRow(`SELECT * FROM sessions WHERE session_id=$1`, id).
			Scan(&ss.ID, &ss.AuthToken, &ss.CSRFToken, &ss.Email, &ss.School, &ss.Date)
	}
	if err != nil {
		if err == sql.ErrNoRows {
			// Map the SQL sentinel to the package-level "no session" error.
			err = ErrNoSession
		} else {
			glog.Errorln(err)
		}
	}
	return &ss, err
}
func Main() { var ( SQS *sqs.SQS getUserQueueUrlOutput *sqs.GetQueueUrlOutput getContainerQueueUrlOutput *sqs.GetQueueUrlOutput UserQueueUrl *string ContainerQueueUrl *string Dynamo *dynamodb.DynamoDB socialWorker *workers.SocialWorker ) SQS = sqs.New(&aws.Config{Region: aws.String("cn-north-1")}) getUserQueueUrlOutput, err := SQS.GetQueueUrl(&sqs.GetQueueUrlInput{QueueName: aws.String(USER_QUEUE_NAME)}) if err != nil { glog.Errorln("Error on connect user queue url:", err.Error()) return } UserQueueUrl = getUserQueueUrlOutput.QueueUrl getContainerQueueUrlOutput, err = SQS.GetQueueUrl(&sqs.GetQueueUrlInput{QueueName: aws.String(CONTAINER_QUEUE_NAME)}) if err != nil { glog.Errorln("Error on connect container queue url:", err.Error()) return } ContainerQueueUrl = getContainerQueueUrlOutput.QueueUrl Dynamo = dynamodb.New(&aws.Config{Region: aws.String("cn-north-1")}) socialWorker = workers.NewSocialWorker(SQS, UserQueueUrl, ContainerQueueUrl, Dynamo) socialWorker.Start() }
func (m *Manager) Start() { m.started = time.Now() tick := time.NewTicker(time.Minute) for { select { case <-tick.C: glog.Infoln("Manager:", m.String()) case in := <-m.Incoming: switch v := in.(type) { case *data.Ledger: m.stats["ledgers"]++ wait := m.ledgers.Set(v.LedgerSequence) glog.V(2).Infof("Manager: Received: %d %0.04f/secs ", v.LedgerSequence, wait.Seconds()) if err := m.db.Insert(v); err != nil { glog.Errorln("Manager: Ledger Insert:", err.Error()) } case data.Transaction: m.stats["transactions"]++ if err := m.db.Insert(v); err != nil { glog.Errorln("Manager: Transaction Insert:", err.Error()) } } case missing := <-m.Missing: m.ledgers.Extend(missing.Request.End) missing.Response <- m.ledgers.TakeMiddle(missing.Request) } } }
func handleGetPagination(w http.ResponseWriter, r *http.Request) { var err error switch r.Method { case "GET": engine, statuses := getJobsQueryParams(r) var minID int64 if s := r.URL.Query().Get("min-id"); s != "" { if minID, err = strconv.ParseInt(s, 0, 64); err != nil { glog.Errorf("Failed to parse min-id (%s): %s", s, err) http.Error(w, "Invalid Param(s)", http.StatusBadRequest) return } } pagination, err := db.GetPagination(engine, statuses, minID) if err != nil || len(pagination) != 1 { glog.Errorln("Error getting pagination info from DB", err) error500(w) return } if res, err := json.Marshal(pagination[0]); err != nil { glog.Errorln("Error Marshalling pagination JSON:", err) error500(w) return } else { w.Write(res) } } }
func (context *clientContext) processSend() { if err := context.sendInitialize(); err != nil { glog.Errorln(err.Error()) return } buf := make([]byte, 1024) for { size, err := context.pty.Read(buf) if err != nil { glog.Errorf("Command exited for: %s", context.request.RemoteAddr) return } context.record(append([]byte{rec.Output}, buf[:size]...)) safeMessage := base64.StdEncoding.EncodeToString([]byte(buf[:size])) if errs := context.write(append([]byte{rec.Output}, []byte(safeMessage)...)); len(errs) > 0 { for _, e := range errs { glog.Errorln(e.err.Error()) context.close(e.key) } if len(*context.connections) == 0 { return } } } }
func upgradeLevelDB(path string, opts graph.Options) error { db, err := leveldb.OpenFile(path, &opt.Options{}) defer db.Close() if err != nil { glog.Errorln("Error, couldn't open! ", err) return err } version, err := getVersion(db) if err != nil { glog.Errorln("error:", err) return err } if version == latestDataVersion { fmt.Printf("Already at latest version: %d\n", latestDataVersion) return nil } if version > latestDataVersion { err := fmt.Errorf("Unknown data version: %d -- upgrade this tool", version) glog.Errorln("error:", err) return err } for i := version; i < latestDataVersion; i++ { err := migrateFunctions[i](db) if err != nil { return err } setVersion(db, i+1, nil) } return nil }
func (s *queueServer) handleUpload(w http.ResponseWriter, r *http.Request) { defer logPanic() file, header, err := r.FormFile("upload") if err != nil { glog.Errorln("Couldn't create job", err) error500(w) return } jid, err := db.JobCreate(s.Name, header.Filename) if err != nil { glog.Errorln("Couldn't create job", err) error500(w) return } if err = s.doUpload(w, file, jid, header.Filename); err != nil { if err = db.JobSetError(jid, "Error starting job"); err != nil { glog.Errorf("Error setting job %d error: %s", jid, err) } } else { if err = db.JobSetStarted(jid); err != nil { glog.Errorf("Error setting job %d to running: %s", jid, err) } } glog.Infof("Job %d (%s) created successfully", jid, s.Name) }
func (it *AllIterator) makeCursor() { var cursor *sql.Rows var err error if it.cursor != nil { it.cursor.Close() } if it.table == "quads" { cursor, err = it.qs.db.Query(`SELECT subject_hash, predicate_hash, object_hash, label_hash FROM quads;`) if err != nil { glog.Errorln("Couldn't get cursor from SQL database: %v", err) cursor = nil } } else { glog.V(4).Infoln("sql: getting node query") cursor, err = it.qs.db.Query(`SELECT hash FROM nodes;`) if err != nil { glog.Errorln("Couldn't get cursor from SQL database: %v", err) cursor = nil } glog.V(4).Infoln("sql: got node query") } it.cursor = cursor }
func Listen(m *Manager, port string) { addr, err := net.ResolveTCPAddr("tcp", fmt.Sprintf(":%s", port)) if err != nil { glog.Fatalln("HandleIncoming:", err) } l, err := net.ListenTCP("tcp", addr) if err != nil { glog.Fatalln("HandleIncoming:", err) } defer l.Close() for { conn, err := l.AcceptTCP() if err != nil { glog.Errorln("HandleIncoming:", err) continue } host, port, err := net.SplitHostPort(conn.RemoteAddr().String()) if err != nil { glog.Errorln("HandleIncoming:", err) continue } glog.Infoln("Incoming Host: %s Port: %s", host, port) m.AddPeer(host, port, false, conn) } }
// tryFlushResponse flushes the response buffer (if not empty) to the
// underlying writer. On a write timeout the unwritten bytes remain in out so
// a later call can retry; other errors are logged and the data is dropped.
func (d *httpDecoder) tryFlushResponse(out *bytes.Buffer) {
	log.V(2).Infof(d.idtag+"try-flush-responses: %d bytes to flush", out.Len())
	// set a write deadline here so that we don't block for very long.
	err := d.setWriteTimeout()
	if err != nil {
		// this is a problem because if we can't set the timeout then we can't guarantee
		// how long a write op might block for. Log the error and skip this response.
		log.Errorln("failed to set write deadline, aborting response:", err.Error())
	} else {
		_, err = out.WriteTo(d.rw.Writer)
		if err != nil {
			if neterr, ok := err.(net.Error); ok && neterr.Timeout() && out.Len() > 0 {
				// we couldn't fully write before timing out; the unwritten
				// bytes stay in out, and hope that we have better luck next time.
				return
			}
			// we don't really know how to deal with other kinds of errors, so
			// log it and skip the rest of the response.
			log.Errorln("failed to write response buffer:", err.Error())
		}
		err = d.rw.Flush()
		if err != nil {
			if neterr, ok := err.(net.Error); ok && neterr.Timeout() && out.Len() > 0 {
				// flush timed out with data remaining; retry on a later call.
				return
			}
			log.Errorln("failed to flush response buffer:", err.Error())
		}
	}
}
func (this *SayhiHandler) doGet(w http.ResponseWriter, r *http.Request) { const USAGE = "GET /sayhi?type=typename&name=rappername" r.ParseForm() ttype, name := r.FormValue("type"), r.FormValue("name") if ttype == "" || name == "" { glog.Errorln("sayhi ERR:", ttype, name) w.WriteHeader(http.StatusBadRequest) w.Write([]byte(USAGE)) return } taskTypeOne, rapperOne := GetRapper(ttype, name) if taskTypeOne == nil { glog.Errorln("sayhi to nil type:", ttype) w.WriteHeader(http.StatusBadRequest) w.Write([]byte("no such task type.")) return } if rapperOne == nil { taskTypeOne.AddRapper(name, NewRapper(name)) } else { taskTypeOne.ResetRapper(rapperOne) rapperOne.Beat(true) } w.Write([]byte("OK")) return }
func (r *Remote) streamLedgerData(ledger interface{}, c chan data.LedgerEntrySlice) { defer close(c) cmd := newBinaryLedgerDataCommand(ledger, nil) for ; ; cmd = newBinaryLedgerDataCommand(ledger, cmd.Result.Marker) { r.outgoing <- cmd <-cmd.Ready if cmd.CommandError != nil { glog.Errorln(cmd.Error()) return } les := make(data.LedgerEntrySlice, len(cmd.Result.State)) for i, state := range cmd.Result.State { b, err := hex.DecodeString(state.Data + state.Index) if err != nil { glog.Errorln(cmd.Error()) return } les[i], err = data.ReadLedgerEntry(bytes.NewReader(b), data.Hash256{}) if err != nil { glog.Errorln(err.Error()) return } } c <- les if cmd.Result.Marker == nil { return } } }
func (c *ModifyUserAuthController) DoModify() { // 参数获取 glog.Infoln("--modify request start--") user := c.GetString("user") glog.Infof("accout is %s \n", user) name := c.GetString("name") glog.Infof("image name is %s \n", name) mtype, err := c.GetInt("mtype") if err != nil { ret := map[string]interface{}{"success": false, "msg": "mtype参数错误"} c.Data["json"] = ret glog.Errorln("mtype 错误!!") c.ServeJson() return } glog.Infof("mtype is %s", mtype) ispull, err := c.GetBool("ispull") if err != nil { ret := map[string]interface{}{"success": false, "msg": "ispull参数错误"} c.Data["json"] = ret glog.Errorln("ispull 错误!!") c.ServeJson() return } glog.Infof("ispull is %s \n", ispull) success := models.ACManager.Update(user, &name, mtype, ispull) ret := map[string]interface{}{"success": success} c.Data["json"] = ret c.ServeJson() // 直接返回json数据 glog.Flush() }
func handleGetJobs(w http.ResponseWriter, r *http.Request) { switch r.Method { case "GET": var err error maxIdx, limit, page := int64(0), int64(0), int64(0) if r.URL.Path != "" { if maxIdx, limit, page, err = parseGetJobsParamsOrBadRequest(w, r.URL.Path); err != nil { return } } engine, statuses := getJobsQueryParams(r) jobs, err := db.GetJobs(maxIdx, limit, page, engine, statuses) if err != nil { glog.Errorln("Error getting jobs from DB:", err) error500(w) return } if res, err := json.Marshal(map[string]interface{}{"jobs": jobs}); err != nil { glog.Errorln("Error Marshalling jobs JSON:", err) error500(w) return } else { w.Write(res) } } }
// updateTask consumes finished video tasks from toUpdate: it invokes each
// task's callback URL, reports the final status to the task server, and
// removes the task's temporary files. Runs forever.
func (this *rapperVideo) updateTask() {
	for {
		// Take one finished task.
		oneVideoTask := <-this.toUpdate
		// Invoke the task's callback with its outcome.
		para := map[string]string{"type": confJson["tasktype"].(string), "tid": oneVideoTask.Tid, "rid": oneVideoTask.Rid}
		if oneVideoTask.err != nil {
			// Error text is base64-encoded to survive URL transport.
			para["msg"] = base64.StdEncoding.EncodeToString([]byte(oneVideoTask.err.Error()))
		}
		if oneVideoTask.nfid != "" {
			para["nfid"] = oneVideoTask.nfid
		}
		if oneVideoTask.nimg != "" {
			para["img"] = oneVideoTask.nimg
		}
		olderr := ""
		if oneVideoTask.err != nil {
			olderr = oneVideoTask.err.Error()
		}
		glog.Warningln("callbackTask: ", this.no, oneVideoTask.toString(), para)
		body, err := getRequest(oneVideoTask.Callback, &para)
		if err != nil {
			// Callback failed: fold the callback error into the task error.
			oneVideoTask.err = fmt.Errorf("%s\ncallbackERR: %s", olderr, err.Error())
			glog.Errorln("updateTask callbackERR:", oneVideoTask.toString())
		} else if string(body) != "true" {
			// Callback answered but did not acknowledge with "true".
			oneVideoTask.err = fmt.Errorf("%s\ncallbackERR: %s", olderr, body)
			glog.Errorln("updateTask callbackERR:", oneVideoTask.toString(), body)
		}
		// Report the task's final status to the task server: stat "1" on
		// success, "-1" (with the encoded error) on failure.
		delete(para, "rid")
		delete(para, "nfid")
		delete(para, "img")
		para["stat"] = "1"
		para["name"] = confJson["rappername"].(string)
		if oneVideoTask.err != nil {
			para["stat"] = "-1"
			para["msg"] = base64.StdEncoding.EncodeToString([]byte(oneVideoTask.err.Error()))
		}
		glog.Infoln("updateTask: ", this.no, oneVideoTask.toString(), para)
		_, err = getRequest(confJson["taskServ"].(string)+"/uptask", &para)
		if err == nil {
			glog.Warningln("updateTaskOK: ", this.no, oneVideoTask.toString())
		} else {
			glog.Errorln("updateTaskERR:", this.no, oneVideoTask.toString(), para, err.Error())
		}
		glog.Flush()
		// Remove the task's temporary files (best effort; errors ignored).
		fn := confJson["tmpdir"].(string) + oneVideoTask.Tid
		os.Remove(fn)
		os.Remove(fn + ".mp4")
		os.Remove(fn + ".jpg")
	}
}
// connect to the server. Here we keep trying, with a linearly increasing
// delay between attempts, until we manage to Dial to the server.
// TLS is tried first, then TLS with certificate verification disabled for
// known certificate problems, and finally plain text.
func (bot *ircBot) connect() (conn io.ReadWriteCloser) {
	var (
		err     error
		counter int
	)
	// Fires immediately so the first attempt starts at once.
	connectTimeout := time.After(0)
	bot.Lock()
	bot.isConnecting = true
	bot.isAuthenticating = false
	bot.Unlock()
	for {
		select {
		case <-connectTimeout:
			counter++
			// A receive from a nil channel blocks forever; this disables
			// the case until a new timer is set after a failed attempt.
			connectTimeout = nil
			glog.Infoln("[Info] Connecting to IRC server: ", bot.address)
			conn, err = tls.Dial("tcp", bot.address, nil) // Always try TLS first
			if err == nil {
				glog.Infoln("Connected: TLS secure")
				return conn
			} else if _, ok := err.(x509.HostnameError); ok {
				glog.Errorln("Could not connect using TLS because: ", err)
				// Certificate might not match. This happens on irc.cloudfront.net
				insecure := &tls.Config{InsecureSkipVerify: true}
				conn, err = tls.Dial("tcp", bot.address, insecure)
				// Skip built-in verification but still apply our own check.
				if err == nil && isCertValid(conn.(*tls.Conn)) {
					glog.Errorln("Connected: TLS with awkward certificate")
					return conn
				}
			} else if _, ok := err.(x509.UnknownAuthorityError); ok {
				glog.Errorln("x509.UnknownAuthorityError : ", err)
				insecure := &tls.Config{InsecureSkipVerify: true}
				conn, err = tls.Dial("tcp", bot.address, insecure)
				if err == nil {
					glog.Infoln("Connected: TLS with an x509.UnknownAuthorityError", err)
					return conn
				}
			} else {
				glog.Errorln("Could not establish a tls connection", err)
			}
			// All TLS variants failed: fall back to plain text.
			conn, err = net.Dial("tcp", bot.address)
			if err == nil {
				glog.Infoln("Connected: Plain text insecure")
				return conn
			}
			// TODO (yml) At some point we might want to panic
			// Back off linearly: 5s, 10s, 15s, ...
			delay := 5 * counter
			glog.Infoln("IRC Connect error. Will attempt to re-connect. ", err, "in", delay, "seconds")
			connectTimeout = time.After(time.Duration(delay) * time.Second)
		}
	}
}
// listFiles lists the image files for the month encoded in
// list.CurrentDirPath (layout "2006年1月") from the bucket named in data,
// populating list with the file entries, total count and ordering.
func listFiles(data *ListReqData, list *KindList) error {
	t, err := time.Parse("2006年1月", list.CurrentDirPath)
	if err != nil {
		glog.Errorln("解析时间错误:", err)
		return err
	}
	// Look up the bucket record.
	bucket, err := bucketdb.FindByName(data.Bucket)
	if err != nil {
		return err
	}
	list.CurrentUrl = bucket.ImgBaseUrl()
	// "Up one level" navigates to the year view.
	list.MoveupDirPath = t.Format("2006年")
	prefix := t.Format(IMG_PRE_FMT)
	// Fetch the image listing for this month's prefix.
	list.FileList = make([]*KindFile, 0, 10)
	client := rsf.New(&digest.Mac{bucket.Ak, []byte(bucket.Sk)})
	marker := ""
	limit := 1000
	// Example entry shape:
	// "is_dir": false,
	// "has_file": false,
	// "filesize": 208736,
	// "is_photo": true,
	// "filetype": "jpg",
	// "filename": "1241601537255682809.jpg",
	// "datetime": "2011-08-02 15:32:43"
	var es []rsf.ListItem
	// Page through the listing; ListPrefix ends the loop by returning a
	// non-nil error (io.EOF when the listing is exhausted).
	for err == nil {
		es, marker, err = client.ListPrefix(nil, bucket.Name, prefix, marker, limit)
		for _, item := range es {
			f := &KindFile{
				IsDir:    false,
				HasFile:  false,
				IsPhoto:  true,
				Filesize: item.Fsize,
				Filetype: item.MimeType,
				Filename: item.Key,
				Datetime: time.Unix(item.PutTime, 0).Format("2006-01-02 15:04:05"),
			}
			list.FileList = append(list.FileList, f)
		}
	}
	if err != io.EOF {
		// Unexpected error (anything other than end-of-listing).
		glog.Errorln("listAll failed:", err)
		return err
	}
	list.TotalCount = len(list.FileList)
	list.Order = data.Order
	sort.Sort(list)
	return nil
}
func Write(data [][]interface{}, dataType string) bool { endpoint := fmt.Sprintf("%s:%d", config.Influxdb.Host, config.Influxdb.Port) c, err := client.NewClient(&client.ClientConfig{ Host: endpoint, Username: config.Influxdb.Username, Password: config.Influxdb.Password, Database: config.Influxdb.Database, }) if err != nil { panic(err) } if argDbCreated == false { argDbCreated = true if err := c.CreateDatabase(config.Influxdb.Database); err != nil { glog.Errorf("Database creation failed with: %s", err) } else { glog.Info("Creating Database...") } } c.DisableCompression() var column []string switch dataType { case "machine": column = Machine case "stats": column = Stats case "network": column = Network default: glog.Error("Unrecognized database") return false } series := &client.Series{ Name: dataType, Columns: column, Points: data, } glog.Infoln(series) if err := c.WriteSeriesWithTimePrecision([]*client.Series{series}, client.Second); err != nil { glog.Errorln("Failed to write", dataType, "to influxDb.", err) glog.Errorln("Data:", series) if strings.Contains(err.Error(), "400") { argDbCreated = false } return false } return true }
func jobError(job string, msg string) { glog.Errorln("job", job, "terminated with error:", msg) body := strings.NewReader(msg) put, _ := http.NewRequest("PUT", fmt.Sprintf("http://%s/result/%s?status=error", cfg.Frontend, job), body) _, err := http.DefaultClient.Do(put) if err != nil { glog.Errorln("Error sending results", err) return } }
// Implementing pwshandler.ConnManager interface func (m *ConnManager) HandleError(ws *websocket.Conn, err error) { if err == nil { err = errors.New("Passed nil errer to reporting") } // Log error message if ws != nil { glog.Errorln("IP:", ws.Request().RemoteAddr, ", Error:", err) } else { glog.Errorln("Error:", err) } }
// Currently one client acceping will block all other clients' request func (ln *TCPListener) AcceptTCP() (conn *TCPConn, err error) { var c net.Conn for { c, err = ln.Listener.Accept() if err != nil { glog.Errorln("fail to accept from listener", err) return conn, err } // in case idle connection session being cleaned by NAT server if err = c.(*net.TCPConn).SetKeepAlive(true); err != nil { glog.Errorln("fail to set keep alive", err) return } req := request{} if err := recieve(ln.cipher, c, &req); err != nil { glog.Errorln("fail to recieve request", err) continue } if req.Secret != ln.secret { send(ln.cipher, c, response{Status: StatusInvalidSecret}) continue } ip, err := ln.ipAddrPool.Get() if err != nil { send(ln.cipher, c, response{Status: StatusNoIPAddrAvaliable}) continue } conn = &TCPConn{ remoteAddr: ip.IP.To4(), listener: ln, } conn.cipher, err = cipher.NewCipher(req.Key[:]) if err != nil { return conn, err } res := response{ Status: StatusOK, } copy(res.IP[:], ip.IP.To4()) copy(res.IPMask[:], ip.Mask) send(ln.cipher, c, &res) conn.Conn = c return conn, err } }
// StoreSession stores a ServerSession in both memcached/redis and the
// backing PostgreSQL database.
func (s *ServerSession) StoreSession(id string) error {
	conn := database.Pool.Get()
	defer conn.Close()
	obj, err := s.MarshalJSON()
	if err != nil {
		glog.Errorln(err)
		return err
	}
	// Cache the serialized session first.
	reply, err := conn.Do("SET", id, obj)
	if err != nil {
		glog.Errorln(err, reply)
		return err
	}
	// Upsert into PostgreSQL: update the existing row for this session_id,
	// or insert a fresh one if none exists.
	_, err = database.DB.Exec(`WITH new_values ( session_id, auth_token, csrf_token, email, school, date) as ( values ($1::text, $2::bytea, $3::bytea, $4::text, $5::text, $6::bigint ) ), upsert as ( UPDATE sessions m SET auth_token = nv.auth_token, csrf_token = nv.csrf_token, email = nv.email, school = nv.school, date = nv.date FROM new_values nv WHERE m.session_id = nv.session_id RETURNING m.* ) INSERT INTO sessions (session_id, auth_token, csrf_token, email, school, date) SELECT session_id, auth_token, csrf_token, email, school, date FROM new_values WHERE NOT EXISTS (SELECT 1 FROM upsert up WHERE up.session_id = new_values.session_id)`, id, s.AuthToken, s.CSRFToken, s.Email, s.School, s.Date)
	if err != nil {
		// NOTE(review): glog.Fatal exits the process, making the final
		// "return err" unreachable on this path — confirm a hard exit on a
		// DB write failure is intended.
		glog.Fatal(err)
	}
	return err
}