func getSessionSecrets(filename string) ([]string, error) { // Build secrets list secrets := []string{} if len(filename) != 0 { sessionSecrets, err := latest.ReadSessionSecrets(filename) if err != nil { return nil, fmt.Errorf("error reading sessionSecretsFile %s: %v", filename, err) } if len(sessionSecrets.Secrets) == 0 { return nil, fmt.Errorf("sessionSecretsFile %s contained no secrets", filename) } for _, s := range sessionSecrets.Secrets { secrets = append(secrets, s.Authentication) secrets = append(secrets, s.Encryption) } } else { // Generate random signing and encryption secrets if none are specified in config secrets = append(secrets, fmt.Sprintf("%x", md5.Sum([]byte(uuid.NewRandom().String())))) secrets = append(secrets, fmt.Sprintf("%x", md5.Sum([]byte(uuid.NewRandom().String())))) } return secrets, nil }
// DecodeClientData attempts to decode the data received from the client. Gob is tried first followed // by JSON then XML. Once the data is decoded, the resulting Wrapper is placed on the client receive // channel. func DecodeClientData(c *data.Client) error { var wrap data.Wrapper rdr := bytes.NewReader(c.DataIn) // Attempt decoding from Gob first err := gob.NewDecoder(rdr).Decode(&wrap) if err != nil { rdr.Seek(0, 0) // Attempt JSON next err := json.NewDecoder(rdr).Decode(&wrap) if err != nil { rdr.Seek(0, 0) switch err.(type) { case *json.SyntaxError: // JSON decoding failed, try XML err := xml.NewDecoder(rdr).Decode(&wrap) if err != nil { return err } default: return err } } } // If the message doesn't have an id, generate one. if wrap.Message.Uuid == "" { wrap.Message.Uuid = uuid.NewRandom().String() } c.Receive <- &wrap return nil }
func newNotice(config *Configuration, err Error, extra ...interface{}) *Notice { notice := Notice{ APIKey: config.APIKey, Error: err, Token: uuid.NewRandom().String(), ErrorMessage: err.Message, ErrorClass: err.Class, Env: config.Env, Hostname: config.Hostname, Backtrace: composeStack(err.Stack, config.Root), ProjectRoot: config.Root, Context: Context{}, } for _, thing := range extra { switch t := thing.(type) { case Context: notice.setContext(t) case Params: notice.Params = t case CGIData: notice.CGIData = t case url.URL: notice.URL = t.String() } } return ¬ice }
func makeFixedMessage(encoder client.Encoder, size uint64) [][]byte { ma := make([][]byte, 1) hostname, _ := os.Hostname() pid := int32(os.Getpid()) msg := &message.Message{} msg.SetType("hekabench") msg.SetTimestamp(time.Now().UnixNano()) msg.SetUuid(uuid.NewRandom()) msg.SetSeverity(int32(6)) msg.SetEnvVersion("0.8") msg.SetPid(pid) msg.SetHostname(hostname) rdm := &randomDataMaker{ src: rand.NewSource(time.Now().UnixNano()), } buf := make([]byte, size) payloadSuffix := bytes.NewBuffer(buf) _, err := io.CopyN(payloadSuffix, rdm, int64(size)) payload := fmt.Sprintf("hekabench: %s", hostname) if err == nil { payload = fmt.Sprintf("%s - %s", payload, payloadSuffix.String()) } else { log.Println("Error getting random string: ", err) } msg.SetPayload(payload) var stream []byte if err := encoder.EncodeMessageStream(msg, &stream); err != nil { log.Println(err) } ma[0] = stream return ma }
// Random creates a random ID (uses uuid.NewRandom() which uses crypto.Random() // under the covers) func Random() ID { id, err := Read(uuid.NewRandom()) if err != nil { panic(fmt.Sprintf("Unable to generate random peer id: %s", err)) } return id }
func getTestMessage() *message.Message { hostname, _ := os.Hostname() field, _ := message.NewField("foo", "bar", message.Field_RAW) msg := &message.Message{} msg.SetType("TEST") msg.SetTimestamp(5123456789) msg.SetPid(9283) msg.SetUuid(uuid.NewRandom()) msg.SetLogger("GoSpec") msg.SetSeverity(int32(6)) msg.SetEnvVersion("0.8") msg.SetPid(int32(os.Getpid())) msg.SetHostname(hostname) msg.AddField(field) data := []byte("data") field1, _ := message.NewField("bytes", data, message.Field_RAW) field2, _ := message.NewField("int", int64(999), message.Field_RAW) field2.AddValue(int64(1024)) field3, _ := message.NewField("double", float64(99.9), message.Field_RAW) field4, _ := message.NewField("bool", true, message.Field_RAW) field5, _ := message.NewField("foo", "alternate", message.Field_RAW) msg.AddField(field1) msg.AddField(field2) msg.AddField(field3) msg.AddField(field4) msg.AddField(field5) return msg }
// DeliverRecord wraps a raw record in a PipelinePack and hands it to the
// given deliverer, or to the input runner's default delivery when del is nil.
// If an unframer is configured it runs first; a nil unframed result means the
// record should be dropped, and the pack is recycled.
func (sr *sRunner) DeliverRecord(record []byte, del Deliverer) {
	unframed := record
	pack := <-sr.ir.InChan()
	if sr.unframer != nil {
		unframed = sr.unframer.UnframeRecord(record, pack)
		if unframed == nil {
			// Nothing usable in this record; return the pack to the pool.
			pack.Recycle()
			return
		}
	}
	if sr.useMsgBytes {
		// Put the blob in the pack and let the decoder sort it out.
		messageLen := len(unframed)
		if messageLen > cap(pack.MsgBytes) {
			// Grow the buffer only when the record doesn't fit.
			pack.MsgBytes = make([]byte, messageLen)
		}
		pack.MsgBytes = pack.MsgBytes[:messageLen]
		copy(pack.MsgBytes, unframed)
	} else {
		// Put the record data in the payload.
		pack.Message.SetUuid(uuid.NewRandom())
		pack.Message.SetTimestamp(time.Now().UnixNano())
		pack.Message.SetLogger(sr.ir.Name())
		pack.Message.SetPayload(string(unframed))
	}
	// Give the input one last chance to mutate the pack.
	if sr.packDecorator != nil {
		sr.packDecorator(pack)
	}
	if del == nil {
		sr.ir.Deliver(pack)
	} else {
		del.Deliver(pack)
	}
}
func (c *Client) Call(method string, args ...interface{}) (interface{}, error) { message := &Request{ ID: uuid.NewRandom().String(), Method: method, Args: args, } c.messages <- message // put our listener on the queue and listen for it c.lock.Lock() results := make(chan *Response, 1) c.waiting[message.ID] = results c.lock.Unlock() select { case result := <-results: var err error if result.Error != "" { err = errors.New(result.Error) } return result.Payload, err case <-c.context.Done(): return nil, ErrStopped } }
func CreateMessage(userId string, receiverId string, mr types.MessageRequest) error { var err error err = nil if err = utilsservice.CheckIfMatchExists(userId, receiverId); err == nil { var m types.Message m.SenderId = uuid.Parse(userId) m.ReceiverId = uuid.Parse(receiverId) m.MessageId = uuid.NewRandom() m.IsRead = false m.Text = strings.Replace(mr.Text, "'", "''", -1) if mr.Timestamp != 0 { m.Timestamp = int64(time.Unix(mr.Timestamp, 0).UTC().Unix()) } else { m.Timestamp = int64(time.Now().UTC().Unix()) } if err = messageRepo.CreateMessage(m); err == nil { err = chatservice.UpdateLastMessageChat(userId, receiverId, m.Text) if lastActivity, errA := activityservice.GetUserActivity(receiverId); errA == nil { senderUsername, _ := utilsservice.GetUserUsername(userId) pushMessage := fmt.Sprintf("%s: %s", senderUsername, m.Text) notificationsservice.SendPushNotification(lastActivity.DeviceType, lastActivity.PushToken, pushMessage) } } } return err }
func cli(cn int, wg *sync.WaitGroup) { defer wg.Done() //time.Sleep(time.Millisecond * time.Duration(rand.Intn(1000))) conn, err := net.Dial("tcp", "127.0.0.1:1314") if err != nil { fmt.Println("Error!") return } defer conn.Close() clientConn := new(ClientConn) clientConn.Client = kamaji.NewClient(conn) clientConn.ID = uuid.NewRandom() clientConn.Name = clientConn.ID.String() clientConn.Name = fmt.Sprintf("node%03d.test.now", cn) clientConn.sender = make(chan *proto_msg.KamajiMessage) go clientConn.messageSender() //go reportStats(clientConn) for { tmp, err := clientConn.ReadMessage() if err != nil { break } message := &proto_msg.KamajiMessage{} err = proto.Unmarshal(tmp, message) if err != nil { fmt.Println(err) break } handleMessage(clientConn, message) } fmt.Println("Exiting Client Loop.") }
func (w *Worker) Run() error { uid := uuid.NewRandom() copy(w.Id[:], uid) w.lastjob = time.Now() w.FileCache = map[string][]byte{} wd, err := os.Getwd() if err != nil { return err } os.Setenv("PATH", os.Getenv("PATH")+":"+wd) if w.Wait == 0 { w.Wait = 10 * time.Second } for { wait, err := w.dojob() if err != nil { log.Print(err) } if w.MaxIdle > 0 && time.Now().Sub(w.lastjob) > w.MaxIdle { log.Printf("no jobs received for %v, shutting down", w.MaxIdle) return nil } if wait { <-time.After(w.Wait) } } }
// serveDocumentsCreate handles the "create document" API endpoint. It
// requires an authenticated session (an email in the session cookie), decodes
// the document from the request body, assigns it a server-generated UUID, and
// stores it in the datastore as a child of the owning User entity. The
// serveAPI wrapper is responsible for rendering the returned value or error.
func serveDocumentsCreate(res http.ResponseWriter, req *http.Request, params httprouter.Params) {
	serveAPI(res, req, func() interface{} {
		ctx := appengine.NewContext(req)
		session, _ := sessionStore.Get(req, "session")
		email, ok := session.Values["email"].(string)
		if !ok {
			return HTTPError{403, "access denied"}
		}
		var document Document
		err := json.NewDecoder(req.Body).Decode(&document)
		if err != nil {
			return err
		}
		// Clients must not choose their own IDs; the server assigns one.
		if document.ID != "" {
			return fmt.Errorf("invalid document: id must not be set")
		}
		document.ID = uuid.NewRandom().String()
		// Parent the Document entity under the User entity keyed by email.
		userKey := datastore.NewKey(ctx, "User", email, 0, nil)
		docKey := datastore.NewKey(ctx, "Document", document.ID, 0, userKey)
		docKey, err = datastore.Put(ctx, docKey, &document)
		if err != nil {
			return err
		}
		return document
	})
}
// Standard text log file parser. payloadParser repeatedly pulls records from
// the monitor's parser, wraps each in a "logfile" message pack, and forwards
// it on the monitor's output channel. It returns the number of bytes consumed
// and the error (e.g. io.EOF) that terminated the loop.
func payloadParser(fm *FileMonitor, isRotated bool) (bytesRead int64, err error) {
	var (
		n      int
		pack   *PipelinePack
		record []byte
	)
	for err == nil {
		n, record, err = fm.parser.Parse(fm.fd)
		if err != nil {
			if err == io.EOF && isRotated {
				// File was rotated out from under us: flush any partial data.
				record = fm.parser.GetRemainingData()
			} else if err == io.ErrShortBuffer {
				fm.ir.LogError(fmt.Errorf("record exceeded MAX_RECORD_SIZE %d", message.MAX_RECORD_SIZE))
				err = nil // non-fatal, keep going
			}
		}
		if len(record) > 0 {
			payload := string(record)
			pack = <-fm.ir.InChan()
			pack.Message.SetUuid(uuid.NewRandom())
			pack.Message.SetTimestamp(time.Now().UnixNano())
			pack.Message.SetType("logfile")
			pack.Message.SetHostname(fm.hostname)
			pack.Message.SetLogger(fm.logger_ident)
			pack.Message.SetPayload(payload)
			fm.outChan <- pack
			// Remember where this line began; bytesRead has not yet been
			// advanced past the current record at this point.
			fm.last_logline_start = fm.seek + bytesRead
			fm.last_logline = payload
		}
		bytesRead += int64(n)
	}
	return
}
// buildPodManifest creates a pod manifest that runs the given exec command as
// a "jetpack/build" app inside the image. The app's working directory is a
// path guaranteed not to already exist in the image's rootfs. Panics on
// unexpected stat errors (anything other than "does not exist").
func (img *Image) buildPodManifest(exec []string) *schema.PodManifest {
	bpm := schema.BlankPodManifest()

	// Figure out working path that doesn't exist in the image's rootfs
	workDir := ".jetpack.build."
	for {
		if _, err := os.Stat(img.getRootfs().Path(workDir)); err != nil {
			if os.IsNotExist(err) {
				break
			}
			panic(err)
		}
		// Name collided; retry with a random suffix until one is free.
		workDir = fmt.Sprintf(".jetpack.build.%v", uuid.NewRandom())
	}

	bprta := img.RuntimeApp()
	bprta.Name.Set("jetpack/build")
	// NOTE(review): the "******" user looks like a redacted/scrubbed value —
	// confirm the intended user (likely "0"/root, matching Group) upstream.
	bprta.App = &types.App{
		Exec:             exec,
		WorkingDirectory: "/" + workDir,
		User:             "******",
		Group:            "0",
	}
	bpm.Apps = append(bpm.Apps, bprta)

	// This is needed by freebsd-update at least, should be okay to
	// allow this in builders.
	bpm.Annotations.Set("jetpack/jail.conf/allow.chflags", "true")
	bpm.Annotations.Set("jetpack/jail.conf/securelevel", "0")

	return bpm
}
func (j *Job) setup() error { var err error if j.wd == "" { j.wd, err = os.Getwd() if err != nil { return err } } j.dir = uuid.NewRandom().String() err = os.MkdirAll(j.dir, 0755) if err != nil { return err } if err := os.Chdir(j.dir); err != nil { return err } for _, f := range j.Infiles { err := ioutil.WriteFile(f.Name, f.Data, 0755) if err != nil { return err } } return nil }
func (c *PasswordGeneratorController) processRequest(r *http.Request) string { r.ParseForm() r.Form.Add("password", uuid.NewRandom().String()) return "password.html" }
func NewToken() string { uuid := uuid.NewRandom() mac := hmac.New(sha256.New, nil) mac.Write([]byte(uuid.String())) tokenBytes := mac.Sum(nil) return hex.EncodeToString(tokenBytes) }
// Dial creates a Conn, opens a connection to the proxy and starts processing
// writes and reads on the Conn.
//
// addr: the host:port of the destination server that we're trying to reach
//
// config: configuration for this Conn
func Dial(addr string, config *Config) (net.Conn, error) {
	c := &conn{
		id:     uuid.NewRandom().String(),
		addr:   addr,
		config: config,
	}
	c.initDefaults()
	c.makeChannels()
	c.initRequestStrategy()

	// Dial proxy
	proxyConn, err := c.dialProxy()
	if err != nil {
		return nil, fmt.Errorf("Unable to dial proxy to %s: %s", addr, err)
	}

	// Background goroutines own the connection's traffic from here on.
	go c.processWrites()
	go c.processReads()
	go c.processRequests(proxyConn)
	increment(&open)

	// Wrap in an idle-timing conn so a connection idle for IdleTimeout is
	// closed automatically, along with the underlying proxy connection.
	return idletiming.Conn(c, c.config.IdleTimeout, func() {
		log.Debugf("Proxy connection to %s via %s idle for %v, closing", addr, proxyConn.conn.RemoteAddr(), c.config.IdleTimeout)
		if err := c.Close(); err != nil {
			log.Debugf("Unable to close connection: %v", err)
		}
		// Close the initial proxyConn just in case
		if err := proxyConn.conn.Close(); err != nil {
			log.Debugf("Unable to close proxy connection: %v", err)
		}
	}), nil
}
func (e *EventsHandler) Create(w http.ResponseWriter, r *http.Request) { blob, err := ioutil.ReadAll(r.Body) if err != nil { HandleError(err, w) return } defer r.Body.Close() event := new(state.Event) err = json.Unmarshal(blob, event) if err != nil { HandleError(err, w) return } event.ID = uuid.NewRandom().String() err = e.events.Update(event) if err != nil { HandleError(err, w) return } e.NewEvents <- event headers := w.Header() headers.Add("Location", "/1/events/"+event.ID) w.WriteHeader(http.StatusCreated) }
// SaveEntry is part of the DB interface. func (d *db) SaveEntry(e *Entry) error { e.Start = e.Start.Truncate(time.Second) e.End = e.End.Truncate(time.Second) err := e.Valid() if err != nil { return err } var insert bool if insert = e.ID == ""; insert { e.ID = uuid.NewRandom().String() } var start, end interface{} start = e.Start.Format(datetimeLayout) if !e.End.IsZero() { end = e.End.Format(datetimeLayout) } categoryID := sql.NullString{String: e.CategoryID, Valid: e.CategoryID != ""} q := "INSERT INTO entries (id, start, end, note, category_id) VALUES (?, ?, ?, ?, ?)" args := []interface{}{e.ID, start, end, e.Note, categoryID} if !insert { q = "UPDATE entries SET id=?, start=?, end=?, note=?, category_id=? WHERE id=?" args = append(args, e.ID) } _, err = d.Exec(q, args...) return err }
// Standard text log file parser. payloadParser loops until a stop is
// requested or a fatal parse error occurs, wrapping each parsed record in a
// "logfile" message pack and handing it to the deliver callback. Oversized
// records are logged and skipped rather than ending the loop.
func (lsi *LogstreamInput) payloadParser(ir p.InputRunner, deliver Deliver, stop chan chan bool) (err error) {
	var (
		pack   *p.PipelinePack
		record []byte
		n      int
	)
	for err == nil {
		// Bail out promptly if a stop has been requested.
		select {
		case lsi.stopped = <-stop:
			return
		default:
		}
		n, record, err = lsi.parser.Parse(lsi.stream)
		if err == io.ErrShortBuffer {
			ir.LogError(fmt.Errorf("record exceeded MAX_RECORD_SIZE %d", message.MAX_RECORD_SIZE))
			err = nil // non-fatal, keep going
		}
		if n > 0 {
			// Release the consumed bytes back to the stream buffer.
			lsi.stream.FlushBuffer(n)
		}
		if len(record) > 0 {
			payload := string(record)
			pack = <-ir.InChan()
			pack.Message.SetUuid(uuid.NewRandom())
			pack.Message.SetTimestamp(time.Now().UnixNano())
			pack.Message.SetType("logfile")
			pack.Message.SetHostname(lsi.hostName)
			pack.Message.SetLogger(lsi.loggerIdent)
			pack.Message.SetPayload(payload)
			deliver(pack)
			lsi.countRecord()
		}
	}
	return
}
// Creates and returns a new (but not yet started) DecoderRunner for the // provided Decoder plugin. func NewDecoderRunner(name string, decoder Decoder) DecoderRunner { return &dRunner{ pRunnerBase: pRunnerBase{name: name, plugin: decoder.(Plugin)}, uuid: uuid.NewRandom().String(), inChan: make(chan *PipelinePack, Globals().PluginChanSize), } }
// networkPayloadParser parses a single record from the network connection
// and, when one is available, wraps it in a "NetworkInput" message pack that
// is either injected directly or handed to the decoder runner. (The original
// "Standard text log file parser" comment was a copy-paste error — this
// handles network input, one record per call.)
func networkPayloadParser(conn net.Conn, parser StreamParser, ir InputRunner, config *NetworkInputConfig, dr DecoderRunner) (err error) {
	var (
		pack   *PipelinePack
		record []byte
	)
	_, record, err = parser.Parse(conn)
	if len(record) > 0 {
		pack = <-ir.InChan()
		pack.Message.SetUuid(uuid.NewRandom())
		pack.Message.SetTimestamp(time.Now().UnixNano())
		pack.Message.SetType("NetworkInput")
		pack.Message.SetSeverity(int32(0))
		pack.Message.SetEnvVersion("0.8")
		pack.Message.SetPid(0)
		// Only TCP packets have a remote address.
		if remoteAddr := conn.RemoteAddr(); remoteAddr != nil {
			pack.Message.SetHostname(remoteAddr.String())
		}
		pack.Message.SetLogger(ir.Name())
		pack.Message.SetPayload(string(record))
		if dr == nil {
			ir.Inject(pack)
		} else {
			dr.InChan() <- pack
		}
	}
	return
}
// NewService creates a service that can be registered with etcd to handle // requests from an exchange. func NewService(namespace string, client *etcd.Client, address string, routes Routes) *Service { return &Service{ id: uuid.NewRandom().String(), namespace: namespace, client: client, address: address, routes: routes} }
func (repo *MockedAccelerationRepo) CreateAcceleration(a types.Acceleration) error { var err error err = nil a.UserId = uuid.NewRandom() accelerations = append(accelerations, a) return err }
// NewId is a globally unique identifier. It is a [A-Z0-9] string 26 // characters long. It is a UUID version 4 Guid that is zbased32 encoded // with the padding stripped off. func NewId() string { var b bytes.Buffer encoder := base32.NewEncoder(encoding, &b) encoder.Write(uuid.NewRandom()) encoder.Close() b.Truncate(26) // removes the '==' padding return b.String() }
// Run is the FilePollingInput main loop. On every ticker interval it reads
// the configured file in full and emits its contents as a "heka.file.polling"
// message annotated with TickerInterval and FilePath fields. It returns when
// the input is stopped, or immediately with an error if the configured
// decoder cannot be found.
func (input *FilePollingInput) Run(runner pipeline.InputRunner, helper pipeline.PluginHelper) error {
	var (
		data    []byte
		pack    *pipeline.PipelinePack
		dRunner pipeline.DecoderRunner
		ok      bool
		err     error
	)

	if input.DecoderName != "" {
		if dRunner, ok = helper.DecoderRunner(input.DecoderName, fmt.Sprintf("%s-%s", runner.Name(), input.DecoderName)); !ok {
			return fmt.Errorf("Decoder not found: %s", input.DecoderName)
		}
		input.decoderChan = dRunner.InChan()
	}
	input.runner = runner

	hostname := helper.PipelineConfig().Hostname()
	packSupply := runner.InChan()
	tickChan := runner.Ticker()

	for {
		// Wait for the next tick, or exit when the input is stopped.
		select {
		case <-input.stop:
			return nil
		case <-tickChan:
		}

		data, err = ioutil.ReadFile(input.FilePath)
		if err != nil {
			// Read failures are logged and retried on the next tick.
			runner.LogError(fmt.Errorf("Error reading file: %s", err))
			continue
		}

		pack = <-packSupply
		pack.Message.SetUuid(uuid.NewRandom())
		pack.Message.SetTimestamp(time.Now().UnixNano())
		pack.Message.SetType("heka.file.polling")
		pack.Message.SetHostname(hostname)
		pack.Message.SetPayload(string(data))
		if field, err := message.NewField("TickerInterval", int(input.TickerInterval), ""); err != nil {
			runner.LogError(err)
		} else {
			pack.Message.AddField(field)
		}
		if field, err := message.NewField("FilePath", input.FilePath, ""); err != nil {
			runner.LogError(err)
		} else {
			pack.Message.AddField(field)
		}
		input.sendPack(pack)
	}

	// Unreachable: the loop above only exits via the returns inside it.
	return nil
}
// makeVariableMessage builds `items` encoded "hekabench" message streams with
// randomized payload sizes and randomized counts of string/bytes/int/double/
// bool fields, plus a random-size "filler" bytes field, for benchmarking.
func makeVariableMessage(encoder client.StreamEncoder, items int, rdm *randomDataMaker) [][]byte {
	ma := make([][]byte, items)
	hostname, _ := os.Hostname()
	pid := int32(os.Getpid())
	var cnt int

	for x := 0; x < items; x++ {
		msg := &message.Message{}
		msg.SetUuid(uuid.NewRandom())
		msg.SetTimestamp(time.Now().UnixNano())
		msg.SetType("hekabench")
		msg.SetLogger("flood")
		msg.SetEnvVersion("0.2")
		msg.SetPid(pid)
		msg.SetHostname(hostname)

		// Payload: 0, 1, or 2 KiB of random data.
		cnt = (rand.Int() % 3) * 1024
		msg.SetPayload(makePayload(uint64(cnt), rdm))

		// Up to 4 string fields.
		cnt = rand.Int() % 5
		for c := 0; c < cnt; c++ {
			field, _ := message.NewField(fmt.Sprintf("string%d", c), fmt.Sprintf("value%d", c), "")
			msg.AddField(field)
		}
		// Up to 4 byte-slice fields.
		cnt = rand.Int() % 5
		for c := 0; c < cnt; c++ {
			b := byte(c)
			field, _ := message.NewField(fmt.Sprintf("bytes%d", c), []byte{b, b, b, b, b, b, b, b}, "")
			msg.AddField(field)
		}
		// Up to 4 int fields.
		cnt = rand.Int() % 5
		for c := 0; c < cnt; c++ {
			field, _ := message.NewField(fmt.Sprintf("int%d", c), c, "")
			msg.AddField(field)
		}
		// Up to 4 double fields.
		cnt = rand.Int() % 5
		for c := 0; c < cnt; c++ {
			field, _ := message.NewField(fmt.Sprintf("double%d", c), float64(c), "")
			msg.AddField(field)
		}
		// Up to 4 bool fields.
		cnt = rand.Int() % 5
		for c := 0; c < cnt; c++ {
			field, _ := message.NewField(fmt.Sprintf("bool%d", c), true, "")
			msg.AddField(field)
		}
		// Filler: up to 59 KiB of zero bytes to vary overall message size.
		cnt = (rand.Int() % 60) * 1024
		buf := make([]byte, cnt)
		field, _ := message.NewField("filler", buf, "")
		msg.AddField(field)

		var stream []byte
		if err := encoder.EncodeMessageStream(msg, &stream); err != nil {
			log.Println(err)
		}
		ma[x] = stream
	}
	return ma
}
// generateSecret generates a random secret string func generateSecret(n int) string { n = n * 3 / 4 b := make([]byte, n) read, _ := rand.Read(b) if read != n { return uuid.NewRandom().String() } return base64.URLEncoding.EncodeToString(b) }
// Run is the LogfileInput main loop. It first flushes any messages/errors
// queued on the monitor before the runner existed, resolves the configured
// decoder chain, and then wraps every new log line from the file monitor in a
// "logfile" message pack. Decoders are tried in order; the first success
// wins. If all decoders fail the line is logged as an error and the pack is
// recycled.
func (lw *LogfileInput) Run(ir InputRunner, h PluginHelper) (err error) {
	var (
		pack    *PipelinePack
		dRunner DecoderRunner
		e       error
		ok      bool
	)
	packSupply := ir.InChan()

	lw.Monitor.ir = ir
	// Replay anything that accumulated before the runner was available.
	for _, msg := range lw.Monitor.pendingMessages {
		lw.Monitor.LogMessage(msg)
	}
	for _, msg := range lw.Monitor.pendingErrors {
		lw.Monitor.LogError(msg)
	}
	// Clear out all the errors
	lw.Monitor.pendingMessages = make([]string, 0)
	lw.Monitor.pendingErrors = make([]string, 0)

	dSet := h.DecoderSet()
	decoders := make([]Decoder, len(lw.decoderNames))
	for i, name := range lw.decoderNames {
		if dRunner, ok = dSet.ByName(name); !ok {
			return fmt.Errorf("Decoder not found: %s", name)
		}
		decoders[i] = dRunner.Decoder()
	}

	for logline := range lw.Monitor.NewLines {
		pack = <-packSupply
		pack.Message.SetUuid(uuid.NewRandom())
		pack.Message.SetTimestamp(time.Now().UnixNano())
		pack.Message.SetType("logfile")
		pack.Message.SetLogger(logline.Logger)
		pack.Message.SetSeverity(int32(0))
		pack.Message.SetEnvVersion("0.8")
		pack.Message.SetPid(0)
		pack.Message.SetPayload(logline.Line)
		pack.Message.SetHostname(lw.hostname)
		// First decoder that succeeds handles the pack.
		for _, decoder := range decoders {
			if e = decoder.Decode(pack); e == nil {
				break
			}
		}
		if e == nil {
			ir.Inject(pack)
		} else {
			ir.LogError(fmt.Errorf("Couldn't parse log line: %s", logline.Line))
			pack.Recycle()
		}
	}
	return
}