func TestAMQPLogWriterWrite(t *testing.T) {
    amqpConn, amqpChan := setupConn(t)
    defer amqpConn.Close()
    defer amqpChan.Close()

    uuid := uuid.NewRandom()
    ctx := workerctx.FromUUID(context.TODO(), uuid.String())

    logWriter, err := newAMQPLogWriter(ctx, amqpConn, 4)
    if err != nil {
        t.Fatal(err)
    }
    logWriter.SetMaxLogLength(1000)
    logWriter.SetTimeout(time.Second)

    _, err = fmt.Fprintf(logWriter, "Hello, ")
    if err != nil {
        t.Error(err)
    }
    _, err = fmt.Fprintf(logWriter, "world!")
    if err != nil {
        t.Error(err)
    }

    // Close the log writer to force it to flush out the buffer
    err = logWriter.Close()
    if err != nil {
        t.Error(err)
    }

    delivery, ok, err := amqpChan.Get("reporting.jobs.logs", true)
    if err != nil {
        t.Error(err)
    }
    if !ok {
        t.Error("expected log message, but there was none")
    }

    var lp amqpLogPart
    err = json.Unmarshal(delivery.Body, &lp)
    if err != nil {
        t.Error(err)
    }

    expected := amqpLogPart{
        JobID:   4,
        Content: "Hello, world!",
        Number:  0,
        UUID:    uuid.String(),
        Final:   false,
    }

    if expected != lp {
        t.Errorf("log part is %#v, expected %#v", lp, expected)
    }
}
func (agent *agent_t) handle_beacon() (err error) {
    msg, err := agent.udp.RecvMessage(0)
    if err != nil {
        return err
    }
    if len(msg[0]) != 16 {
        return errors.New("Not a uuid")
    }

    // If we got a UUID and it's not our own beacon, we have a peer
    uuid := uuid.UUID(msg[0])
    if !bytes.Equal(uuid, agent.uuid_bytes) {
        // Find or create peer via its UUID string
        uuid_string := uuid.String()
        peer, ok := agent.peers[uuid_string]
        if !ok {
            peer = new_peer(uuid)
            agent.peers[uuid_string] = peer

            // Report peer joined the network
            agent.pipe.SendMessage("JOINED", uuid_string)
        }
        // Any activity from the peer means it's alive
        peer.is_alive()
    }
    return
}
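// A minimal sketch of the peer bookkeeping handle_beacon relies on,
// reconstructed from its call sites. The field layout and the PEER_EXPIRY
// constant are assumptions for illustration, not definitions taken from
// this excerpt.
type peer_t struct {
    uuid       uuid.UUID
    expires_at time.Time
}

func new_peer(uuid uuid.UUID) (peer *peer_t) {
    peer = &peer_t{uuid: uuid}
    peer.is_alive()
    return
}

// is_alive resets the peer's expiry timer; any traffic from a peer keeps it alive.
func (peer *peer_t) is_alive() {
    peer.expires_at = time.Now().Add(PEER_EXPIRY)
}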
func newTestAMQPCanceller(t *testing.T) *AMQPCanceller {
    amqpConn, _ := setupConn(t)

    uuid := uuid.NewRandom()
    ctx := context.FromUUID(gocontext.TODO(), uuid.String())

    return NewAMQPCanceller(ctx, amqpConn)
}
func (p *DataStoreHandler) CreateCustomer(customer *messages.Customer) (r *messages.Result, err error) {
    uuid := uuid.NewUUID()
    logger.Printf("CreateCustomer: %s -> %s", uuid, customer)

    p.makeConnection() // reuses a cached open connection

    path := customerRoot + uuid.String() + "/" + "details"
    bytes, err := json.Marshal(customer)
    if err != nil {
        return &messages.Result{false, uuid.String(), "Failed to create customer"},
            errors.New("failure during customer creation")
    }

    _, err = p.connection.SetValue(path, string(bytes))
    if err != nil {
        logger.Printf("Failed to store in etcd[%s]: %s", path, err)
        return &messages.Result{false, uuid.String(), "Failed to create customer"},
            errors.New("failure during customer creation")
    }

    return &messages.Result{true, uuid.String(), ""}, nil
}
// NewBlankNode creates a new blank node. The blank node ID is guaranteed to
// be unique in BadWolf.
func NewBlankNode() *Node {
    uuid := <-nextVal
    id := ID(uuid.String())
    return &Node{
        t:  &tBlank,
        id: &id,
    }
}
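// A minimal usage sketch for NewBlankNode, assuming it is called from within
// the same package (Node's fields are unexported in this excerpt); each call
// should yield a node with a distinct ID.
func exampleBlankNodes() {
    a := NewBlankNode()
    b := NewBlankNode()
    fmt.Println(*a.id != *b.id) // true: every blank node gets a fresh UUID-based ID
}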
// Handle requests for saving secrets
func saveHandler(response http.ResponseWriter, request *http.Request, db Database) {
    response.Header().Set("Content-type", "application/json")

    if request.Method != "POST" {
        http.Error(response, `{"message": "Bad Request, see https://github.com/jhaals/yopass for more info"}`, http.StatusBadRequest)
        return
    }

    decoder := json.NewDecoder(request.Body)
    var secret struct {
        Message    string `json:"secret"`
        Expiration int32  `json:"expiration"`
    }
    err := decoder.Decode(&secret)
    if err != nil {
        http.Error(response, `{"message": "Unable to parse json"}`, http.StatusBadRequest)
        return
    }

    if !validExpiration(secret.Expiration) {
        http.Error(response, `{"message": "Invalid expiration specified"}`, http.StatusBadRequest)
        return
    }

    if len(secret.Message) > 10000 {
        http.Error(response, `{"message": "Message is too long"}`, http.StatusBadRequest)
        return
    }

    // Generate new UUID and store secret in memcache with specified expiration
    uuid := uuid.NewUUID()
    err = db.Set(uuid.String(), secret.Message, secret.Expiration)
    if err != nil {
        http.Error(response, `{"message": "Failed to store secret in database"}`, http.StatusInternalServerError)
        return
    }

    resp := map[string]string{"key": uuid.String(), "message": "secret stored"}
    jsonData, _ := json.Marshal(resp)
    response.Write(jsonData)
}
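// A minimal client sketch for saveHandler. The server address and the
// /secret route are assumptions for illustration; only the request and
// response JSON shapes come from the handler above.
func exampleSaveSecret() error {
    body := strings.NewReader(`{"secret": "hello", "expiration": 3600}`)
    resp, err := http.Post("http://localhost:1337/secret", "application/json", body)
    if err != nil {
        return err
    }
    defer resp.Body.Close()

    // On success the handler responds with {"key": "<uuid>", "message": "secret stored"}
    var result map[string]string
    return json.NewDecoder(resp.Body).Decode(&result)
}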
func (driver *MesosExecutorDriver) statusUpdateAcknowledgement(from *upid.UPID, pbMsg proto.Message) {
    log.Infoln("Executor statusUpdateAcknowledgement")
    msg := pbMsg.(*mesosproto.StatusUpdateAcknowledgementMessage)
    log.Infof("Receiving status update acknowledgement %v", msg)

    frameworkID := msg.GetFrameworkId()
    taskID := msg.GetTaskId()
    uuid := uuid.UUID(msg.GetUuid())

    if driver.stopped() {
        log.Infof("Ignoring status update acknowledgement %v for task %v of framework %v because the driver is stopped!\n", uuid, taskID, frameworkID)
        return // actually ignore the acknowledgement, as the log line states
    }

    // Remove the corresponding update.
    delete(driver.updates, uuid.String())
    // Remove the corresponding task.
    delete(driver.tasks, taskID.String())
}
func new_agent() (agent *agent_t) {
    // Push output from the UDP socket into a zmq PAIR socket
    bcast := &net.UDPAddr{Port: PING_PORT_NUMBER, IP: net.IPv4bcast}
    conn, e := net.ListenUDP("udp", bcast)
    if e != nil {
        panic(e)
    }
    go func() {
        buffer := make([]byte, 1024)
        udp, _ := zmq.NewSocket(zmq.PAIR)
        udp.Bind("inproc://udp")
        for {
            if n, _, err := conn.ReadFrom(buffer); err == nil {
                udp.SendBytes(buffer[:n], 0)
            }
        }
    }()
    time.Sleep(100 * time.Millisecond)

    pipe, _ := zmq.NewSocket(zmq.PAIR)
    pipe.Connect("inproc://iface")
    udp, _ := zmq.NewSocket(zmq.PAIR)
    udp.Connect("inproc://udp")

    uuid := uuid.NewRandom()
    agent = &agent_t{
        pipe:        pipe,
        udp:         udp,
        conn:        conn,
        uuid_bytes:  []byte(uuid),
        uuid_string: uuid.String(),
        peers:       make(map[string]*peer_t),
    }
    return
}
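// The agent_t fields as implied by new_agent's composite literal; this is a
// reconstruction for readability, not the definition from the original file.
type agent_t struct {
    pipe        *zmq.Socket        // PAIR socket back to the application interface
    udp         *zmq.Socket        // PAIR socket carrying raw UDP beacons
    conn        *net.UDPConn       // underlying UDP broadcast socket
    uuid_bytes  []byte             // this agent's UUID, raw 16 bytes
    uuid_string string             // this agent's UUID, canonical string form
    peers       map[string]*peer_t // known peers, keyed by UUID string
}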
func TestAMQPMaxLogLength(t *testing.T) {
    amqpConn, amqpChan := setupConn(t)
    defer amqpConn.Close()
    defer amqpChan.Close()

    uuid := uuid.NewRandom()
    ctx := workerctx.FromUUID(context.TODO(), uuid.String())

    logWriter, err := newAMQPLogWriter(ctx, amqpConn, 4)
    if err != nil {
        t.Fatal(err)
    }
    logWriter.SetMaxLogLength(4)
    logWriter.SetTimeout(time.Second)

    _, err = fmt.Fprintf(logWriter, "1234")
    if err != nil {
        t.Error(err)
    }

    _, err = fmt.Fprintf(logWriter, "5")
    if err == nil {
        t.Error("expected error, but got nil")
    }
}
func TestProcessor(t *testing.T) {
    uuid := uuid.NewRandom()
    ctx := workerctx.FromProcessor(context.TODO(), uuid.String())

    provider, err := backend.NewBackendProvider("fake", config.ProviderConfigFromMap(map[string]string{
        "LOG_OUTPUT": "hello, world",
    }))
    if err != nil {
        t.Error(err)
    }

    generator := buildScriptGeneratorFunction(func(ctx context.Context, json *simplejson.Json) ([]byte, error) {
        return []byte("hello, world"), nil
    })

    jobChan := make(chan Job)
    canceller := &fakeCanceller{}

    processor, err := NewProcessor(ctx, "test-hostname", jobChan, provider, generator, canceller,
        2*time.Second, time.Second, 3*time.Second, 4*time.Second)
    if err != nil {
        t.Error(err)
    }

    doneChan := make(chan struct{})
    go func() {
        processor.Run()
        doneChan <- struct{}{}
    }()

    job := &fakeJob{
        payload: &JobPayload{
            Type: "job:test",
            Job: JobJobPayload{
                ID:     2,
                Number: "3.1",
            },
            Build: BuildPayload{
                ID:     1,
                Number: "3",
            },
            Repository: RepositoryPayload{
                ID:   4,
                Slug: "green-eggs/ham",
            },
            UUID:     "foo-bar",
            Config:   map[string]interface{}{},
            Timeouts: TimeoutsPayload{},
        },
        startAttributes: &backend.StartAttributes{},
    }
    jobChan <- job

    processor.GracefulShutdown()
    <-doneChan

    if processor.ProcessedCount != 1 {
        t.Errorf("processor.ProcessedCount = %d, expected %d", processor.ProcessedCount, 1)
    }

    expectedEvents := []string{"received", "started", string(FinishStatePassed)}
    if !reflect.DeepEqual(expectedEvents, job.events) {
        t.Errorf("job.events = %#v, expected %#v", job.events, expectedEvents)
    }

    if canceller.subscribedIDs[0] != 2 {
        t.Errorf("canceller.subscribedIDs[0] = %d, expected 2", canceller.subscribedIDs[0])
    }
    if canceller.unsubscribedIDs[0] != 2 {
        t.Errorf("canceller.unsubscribedIDs[0] = %d, expected 2", canceller.unsubscribedIDs[0])
    }
}