Example #1
func (pc *KafkaPartitionConsumer) collectorFunc(messages *[]*MessageAndMetadata) func(topic string, partition int32, offset int64, key []byte, value []byte) error {
	return func(topic string, partition int32, offset int64, key []byte, value []byte) error {
		decodedKey, err := pc.config.KeyDecoder.Decode(key)
		if err != nil {
			log.Warning(err.Error())
			return err
		}
		decodedValue, err := pc.config.ValueDecoder.Decode(value)
		if err != nil {
			log.Warning(err.Error())
			return err
		}

		*messages = append(*messages, &MessageAndMetadata{
			Key:          key,
			Value:        value,
			Topic:        topic,
			Partition:    partition,
			Offset:       offset,
			DecodedKey:   decodedKey,
			DecodedValue: decodedValue,
		})
		return nil
	}
}
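The function above returns a closure over a pointer to the caller's slice, so every invocation of the callback appends into storage the caller still owns. A minimal, self-contained sketch of the same technique, with hypothetical topic/offset parameters standing in for the Kafka types:

package main

import "fmt"

// collector returns a callback that appends each record it receives to the
// slice the caller owns; the pointer is what lets the closure mutate it.
func collector(records *[]string) func(topic string, offset int64) error {
	return func(topic string, offset int64) error {
		*records = append(*records, fmt.Sprintf("%s@%d", topic, offset))
		return nil
	}
}

func main() {
	var records []string
	cb := collector(&records)
	cb("orders", 42)
	cb("orders", 43)
	fmt.Println(records) // [orders@42 orders@43]
}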
Example #2
// handle a TLS request
func (s *TlsServer) HandlerTls(conn net.Conn) {
	c, err := vhost.TLS(conn)

	if err != nil || c.Host() != s.httpsDomain {
		// no match: forward the connection unchanged
		defer c.Close()
		c.Free()

		remoteConn, err := net.Dial("tcp", s.forwardAddr)
		if err != nil {
			glog.Warningf("[ERR] dial(\"tcp\",%v):%v", s.forwardAddr, err)
			return
		}
		defer remoteConn.Close()

		go io.Copy(c, remoteConn)
		io.Copy(remoteConn, c)
	} else {
		c.Free()
		tlsConn := tls.Server(c, s.tlsConfig)

		err := tlsConn.Handshake()
		if err != nil {
			glog.Warning(err)
			return
		}

		s.http.HandlerHttp(tlsConn)
	}
}
Example #3
func validateVolumeMounts(mounts []VolumeMount, volumes util.StringSet) errorList {
	allErrs := errorList{}

	for i := range mounts {
		mnt := &mounts[i] // so we can set default values
		if len(mnt.Name) == 0 {
			allErrs.Append(makeInvalidError("VolumeMount.Name", mnt.Name))
		} else if !volumes.Has(mnt.Name) {
			allErrs.Append(makeNotFoundError("VolumeMount.Name", mnt.Name))
		}
		if len(mnt.MountPath) == 0 {
			// Backwards compat.
			if len(mnt.Path) == 0 {
				allErrs.Append(makeInvalidError("VolumeMount.MountPath", mnt.MountPath))
			} else {
				glog.Warning("DEPRECATED: VolumeMount.Path has been replaced by VolumeMount.MountPath")
				mnt.MountPath = mnt.Path
				mnt.Path = ""
			}
		}
		if len(mnt.MountType) != 0 {
			glog.Warning("DEPRECATED: VolumeMount.MountType will be removed. The Volume struct will handle types")
		}
	}
	return allErrs
}
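Note how the loop indexes the slice and takes &mounts[i] so the back-compat defaulting writes into the element itself; ranging by value would mutate a copy. A minimal sketch of that idiom, with a hypothetical volumeMount type:

package main

import "fmt"

type volumeMount struct {
	Path      string // deprecated field
	MountPath string
}

func main() {
	mounts := []volumeMount{{Path: "/data"}}

	// Take a pointer into the slice so the default is written back into
	// the element; "for _, m := range mounts" would only edit a copy.
	for i := range mounts {
		mnt := &mounts[i]
		if mnt.MountPath == "" && mnt.Path != "" {
			mnt.MountPath = mnt.Path // backwards-compat defaulting
			mnt.Path = ""
		}
	}
	fmt.Printf("%+v\n", mounts) // [{Path: MountPath:/data}]
}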
Example #4
func (storage *Storage) ReadMessage(file *os.File) *Message {
	// verify the magic value at the start of the message
	var magic int32
	err := binary.Read(file, binary.BigEndian, &magic)
	if err != nil {
		log.Info("read file err:", err)
		return nil
	}

	if magic != MAGIC {
		log.Warning("magic err:", magic)
		return nil
	}
	msg := ReceiveMessage(file)
	if msg == nil {
		return msg
	}

	err = binary.Read(file, binary.BigEndian, &magic)
	if err != nil {
		log.Info("read file err:", err)
		return nil
	}

	if magic != MAGIC {
		log.Warning("magic err:", magic)
		return nil
	}
	return msg
}
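ReadMessage brackets each message with a big-endian magic value and rejects the frame if either check fails. A standalone sketch of the same check using encoding/binary, with a hypothetical MAGIC constant in place of the real one:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

const MAGIC int32 = 0x494D4D53 // hypothetical sentinel, not the real value

func main() {
	// Write a big-endian magic header, then read it back and verify it,
	// mirroring the framing check in ReadMessage.
	var buf bytes.Buffer
	if err := binary.Write(&buf, binary.BigEndian, MAGIC); err != nil {
		fmt.Println("write err:", err)
		return
	}
	var magic int32
	if err := binary.Read(&buf, binary.BigEndian, &magic); err != nil {
		fmt.Println("read err:", err)
		return
	}
	fmt.Println("magic ok:", magic == MAGIC) // magic ok: true
}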
Example #5
// InstanceID returns the cloud provider ID of the specified instance.
func (i *Instances) InstanceID(name string) (string, error) {
	// Create context
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Create vSphere client
	c, err := vsphereLogin(i.cfg, ctx)
	if err != nil {
		return "", err
	}
	defer c.Logout(ctx)

	vm, err := getVirtualMachineByName(i.cfg, ctx, c, name)
	if err != nil {
		return "", err
	}

	var mvm mo.VirtualMachine
	err = getVirtualMachineManagedObjectReference(ctx, c, vm, "summary", &mvm)
	if err != nil {
		return "", err
	}

	if mvm.Summary.Runtime.PowerState == ActivePowerState {
		return "/" + vm.InventoryPath, nil
	}

	if !mvm.Summary.Config.Template {
		glog.Warningf("VM %s is not in %s state", name, ActivePowerState)
	} else {
		glog.Warningf("VM %s is a template", name)
	}

	return "", cloudprovider.InstanceNotFound
}
Example #6
func (pc *KafkaPartitionConsumer) initOffset() bool {
	log.Infof("Initializing offset for topic %s, partition %d", pc.topic, pc.partition)
	for {
		offset, err := pc.client.GetOffset(pc.config.Group, pc.topic, pc.partition)
		if err != nil {
			if err == siesta.ErrUnknownTopicOrPartition {
				return pc.resetOffset()
			}
			log.Warning("Cannot get offset for group %s, topic %s, partition %d: %s\n", pc.config.Group, pc.topic, pc.partition, err)
			select {
			case <-pc.stop:
				{
					log.Warning("PartitionConsumer told to stop trying to get offset, returning")
					return false
				}
			default:
			}
		} else {
			validOffset := offset + 1
			log.Infof("Initialized offset to %d", validOffset)
			atomic.StoreInt64(&pc.offset, validOffset)
			atomic.StoreInt64(&pc.highwaterMarkOffset, validOffset)
			return true
		}
		time.Sleep(pc.config.InitOffsetBackoff)
	}
}
Example #7
func (client *Client) HandleGroupIMMessage(msg *IMMessage, seq int) {
	if client.uid == 0 {
		log.Warning("client has't been authenticated")
		return
	}

	msg.timestamp = int32(time.Now().Unix())
	m := &Message{cmd: MSG_GROUP_IM, body: msg}

	group := group_manager.FindGroup(msg.receiver)
	if group == nil {
		log.Warning("can't find group:", msg.receiver)
		return
	}
	members := group.Members()
	for member := range members {
		// don't echo the group message back to the sender
		if member == client.uid {
			continue
		}
		msgid, err := SaveMessage(client.appid, member, m)
		if err != nil {
			return
		}

		emsg := &EMessage{msgid: msgid, msg: m}
		SendEMessage(client.appid, member, emsg)
	}

	client.wt <- &Message{cmd: MSG_ACK, body: &MessageACK{int32(seq)}}
	atomic.AddInt64(&server_summary.in_message_count, 1)
	log.Infof("group message sender:%d group id:%d", msg.sender, msg.receiver)
}
Example #8
func (storage *Storage) SaveSyncMessage(emsg *EMessage) error {
	storage.mutex.Lock()
	defer storage.mutex.Unlock()

	filesize, err := storage.file.Seek(0, os.SEEK_END)
	if err != nil {
		log.Fatalln(err)
	}
	if emsg.msgid != filesize {
		log.Warningf("file size:%d, msgid:%d is't equal", filesize, emsg.msgid)
		if emsg.msgid < filesize {
			log.Warning("skip msg:", emsg.msgid)
		} else {
			log.Warning("write padding:", emsg.msgid-filesize)
			padding := make([]byte, emsg.msgid-filesize)
			_, err = storage.file.Write(padding)
			if err != nil {
				log.Fatal("file write:", err)
			}
		}
	}

	storage.WriteMessage(storage.file, emsg.msg)
	storage.ExecMessage(emsg.msg, emsg.msgid)
	log.Info("save sync message:", emsg.msgid)
	return nil
}
Example #9
func Warning(ctx context.Context, args ...interface{}) {
	if ctx == nil || !hasTraceKey(ctx) {
		glog.Warning(args...)
		return
	}
	glog.Warning(prependParam(args, ctx)...)
}
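Both calls above must spread the slice with the trailing ellipsis: passing args bare hands glog a single []interface{} argument, which prints as a slice rather than as separate values. A runnable illustration of the difference:

package main

import "fmt"

// logAll forwards its arguments to fmt.Println. Without the trailing "...",
// args would be passed as one []interface{} value and print as a slice.
func logAll(args ...interface{}) {
	fmt.Println(args...) // spread: each element becomes its own argument
}

func main() {
	args := []interface{}{"status:", 42}
	fmt.Println(args) // [status: 42]  (the unspread mistake)
	logAll(args...)   // status: 42
}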
Example #10
func (self *Router) handleMsgServerClient(msc *link.Session) {
	msc.ReadLoop(func(msg link.InBuffer) {
		glog.Info("msg_server", msc.Conn().RemoteAddr().String(), " say: ", string(msg.Get()))
		var c protocol.CmdInternal
		pp := NewProtoProc(self)
		err := json.Unmarshal(msg.Get(), &c)
		if err != nil {
			glog.Error("error:", err)
			return
		}
		switch c.GetCmdName() {
		case protocol.SEND_MESSAGE_P2P_CMD:
			err := pp.procSendMsgP2P(c, msc)
			if err != nil {
				glog.Warning(err.Error())
			}
		case protocol.CREATE_TOPIC_CMD:
			err := pp.procCreateTopic(c, msc)
			if err != nil {
				glog.Warning(err.Error())
			}
		case protocol.JOIN_TOPIC_CMD:
			err := pp.procJoinTopic(c, msc)
			if err != nil {
				glog.Warning(err.Error())
			}
		case protocol.SEND_MESSAGE_TOPIC_CMD:
			err := pp.procSendMsgTopic(c, msc)
			if err != nil {
				glog.Warning(err.Error())
			}

		}
	})
}
Example #11
func (pk *Peekachu) applyMetricFilters(
	client *Client,
	tableName string,
	row RowMap,
) RowMap {
	for _, filterName := range Filters.FilterNames() {
		if _, ok := pk.config.Influxdb.MetricFilters[filterName]; ok {
			filterer, err := Filters.GetFilter(filterName, client, pk)

			if err != nil {
				msg := "Error retrieving %s filter: %s\n"
				glog.Errorf(msg, filterName, err)
				glog.Warning("Filter will not be applied!")
				break
			}
			filteredRow, err := filterer.Filter(tableName, row)

			if err != nil {
				glog.Errorf("Error applying filter: %s\n", err)
				glog.Warning("Filter will not be applyed!")
				break
			} else {
				row = filteredRow
			}

			if row == nil {
				return nil
			}

		}
	}
	return row
}
Example #12
func (client *Client) HandleGroupIMMessage(msg *IMMessage, seq int) {
	if client.uid == 0 {
		log.Warning("client has't been authenticated")
		return
	}

	msg.timestamp = int32(time.Now().Unix())
	m := &Message{cmd: MSG_GROUP_IM, version: DEFAULT_VERSION, body: msg}

	group := group_manager.FindGroup(msg.receiver)
	if group == nil {
		log.Warning("can't find group:", msg.receiver)
		return
	}
	if group.super {
		_, err := SaveGroupMessage(client.appid, msg.receiver, client.device_ID, m)
		if err != nil {
			return
		}
	} else {
		members := group.Members()
		for member := range members {
			_, err := SaveMessage(client.appid, member, client.device_ID, m)
			if err != nil {
				continue
			}
		}
	}

	client.wt <- &Message{cmd: MSG_ACK, body: &MessageACK{int32(seq)}}
	atomic.AddInt64(&server_summary.in_message_count, 1)
	log.Infof("group message sender:%d group id:%d", msg.sender, msg.receiver)
}
Example #13
func connect(addrs []string, tries int, hint int) (net.Conn, int, error) {
	var conn net.Conn
	var err error

	// first, try to connect to the most likely leader
	glog.Info("Trying to connect to ", addrs[hint])
	conn, err = net.Dial("tcp", addrs[hint])
	// if successful
	if err == nil {
		glog.Infof("Connect established to %s", addrs[hint])
		return conn, hint, err
	}
	//if unsuccessful
	glog.Warning(err)

	// if fails, try everyone else
	for i := range addrs {
		for t := tries; t > 0; t-- {
			glog.Info("Trying to connect to ", addrs[i])
			conn, err = net.Dial("tcp", addrs[i])

			// if successful
			if err == nil {
				glog.Infof("Connect established to %s", addrs[i])
				return conn, i, err
			}

			//if unsuccessful
			glog.Warning(err)
		}
	}

	return conn, hint + 1, err
}
Example #14
func AllMigrateTasks() ([]*MigrateMeta, error) {
	appname := meta.appName
	zconn := meta.zconn
	taskspath := "/r3/app/" + appname + "/migrate"
	exists, _, err := zconn.Exists(taskspath)
	if err != nil {
		return nil, err
	}
	if exists {
		tasks, _, err := zconn.Children(taskspath)
		if err != nil {
			return nil, err
		}
		var alltasks []*MigrateMeta
		for _, t := range tasks {
			glog.Info("Get task from zk: ", t)
			task, _, err := zconn.Get(taskspath + "/" + t)
			if err != nil {
				glog.Warning("Get task failed ", err)
				return nil, err
			}
			var migrateMeta MigrateMeta
			err = json.Unmarshal(task, &migrateMeta)
			if err != nil {
				glog.Warning("Cluster", "Unmarshal failed: ", err)
				return nil, err
			}
			alltasks = append(alltasks, &migrateMeta)
		}
		return alltasks, nil
	} else {
		return nil, nil
	}
}
Example #15
// Interpolate alert information into summary/description templates.
func interpolateMessage(msg string, labels clientmodel.LabelSet, value clientmodel.SampleValue) string {
	t := template.New("message")

	// Inject some convenience variables that are easier to remember for users
	// who are not used to Go's templating system.
	defs :=
		"{{$labels := .Labels}}" +
			"{{$value := .Value}}"

	if _, err := t.Parse(defs + msg); err != nil {
		glog.Warning("Error parsing template: ", err)
		return msg
	}

	l := map[string]string{}
	for k, v := range labels {
		l[string(k)] = string(v)
	}

	tmplData := struct {
		Labels map[string]string
		Value  clientmodel.SampleValue
	}{
		Labels: l,
		Value:  value,
	}

	var buf bytes.Buffer
	if err := t.Execute(&buf, &tmplData); err != nil {
		glog.Warning("Error executing template: ", err)
		return msg
	}
	return buf.String()
}
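The defs prefix simply binds template variables up front so users can write {{$labels.job}} and {{$value}} instead of the longer .Labels / .Value forms. The trick works with plain text/template; a minimal sketch, with hypothetical label data standing in for the clientmodel types:

package main

import (
	"bytes"
	"fmt"
	"text/template"
)

func main() {
	// Bind $labels and $value before the user's message is parsed.
	defs := "{{$labels := .Labels}}{{$value := .Value}}"
	msg := "job {{$labels.job}} is at {{$value}}"
	t := template.Must(template.New("message").Parse(defs + msg))

	data := struct {
		Labels map[string]string
		Value  float64
	}{
		Labels: map[string]string{"job": "api"},
		Value:  0.95,
	}

	var buf bytes.Buffer
	if err := t.Execute(&buf, &data); err != nil {
		fmt.Println("template error:", err)
		return
	}
	fmt.Println(buf.String()) // job api is at 0.95
}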
Example #16
func (storage *Storage) SaveSyncMessage(emsg *EMessage) error {
	storage.mutex.Lock()
	defer storage.mutex.Unlock()

	filesize, err := storage.file.Seek(0, os.SEEK_END)
	if err != nil {
		log.Fatalln(err)
	}
	if emsg.msgid != filesize {
		log.Warningf("file size:%d, msgid:%d is't equal", filesize, emsg.msgid)
		if emsg.msgid < filesize {
			log.Warning("skip msg:", emsg.msgid)
		} else {
			log.Warning("write padding:", emsg.msgid-filesize)
			padding := make([]byte, emsg.msgid-filesize)
			_, err = storage.file.Write(padding)
			if err != nil {
				log.Fatal("file write:", err)
			}
		}
	}

	storage.WriteMessage(storage.file, emsg.msg)

	if emsg.msg.cmd == MSG_OFFLINE {
		off := emsg.msg.body.(*OfflineMessage)
		storage.AddOffline(off.msgid, off.appid, off.receiver)
		storage.SetLastMessageID(off.appid, off.receiver, emsg.msgid)
	} else if emsg.msg.cmd == MSG_ACK_IN {
		off := emsg.msg.body.(*OfflineMessage)
		storage.RemoveOffline(off.msgid, off.appid, off.receiver)
	}
	log.Info("save sync message:", emsg.msgid)
	return nil
}
Example #17
// Update handles PUT
func (ctl *WatcherController) Update(c *models.Context) {
	m := models.WatcherType{}
	err := c.Fill(&m)

	if err != nil {
		glog.Warning(err)
		c.RespondWithErrorMessage(
			fmt.Sprintf("The post data is invalid: %v", err.Error()),
			http.StatusBadRequest,
		)
		return
	}

	itemType := strings.ToLower(m.ItemType)
	if itemType != "" {
		if _, exists := h.ItemTypes[itemType]; !exists {
			glog.Warningf("item type not found: %s", itemType)
			c.RespondWithErrorMessage(
				"Watcher could not be saved: Item type not found",
				http.StatusBadRequest,
			)
			return
		}

		m.ItemTypeID = h.ItemTypes[itemType]
	}

	var status int
	// watcher must exist to be updated
	// Also the returned watcher ID belongs to the authed person by definition
	// - no need to check later
	m.ID, _, _, _, status, err = models.GetWatcherAndIgnoreStatus(
		m.ItemTypeID,
		m.ItemID,
		c.Auth.ProfileID,
	)
	if err != nil {
		glog.Error(err)
		c.RespondWithErrorDetail(err, status)
		return
	}

	// To update we only need id, SendEmail and SendSMS
	status, err = m.Update()
	if err != nil {
		glog.Error(err)
		c.RespondWithErrorMessage(
			fmt.Sprintf("Could not update watcher: %v", err.Error()),
			http.StatusBadRequest,
		)
		return
	}

	// Respond
	c.RespondWithOK()
}
Example #18
func NewOpenStack(config io.Reader) (*OpenStack, error) {
	var cfg Config
	err := gcfg.ReadInto(&cfg, config)
	if err != nil {
		glog.Warning("Failed to parse openstack configure file: %v", err)
		return nil, err
	}

	provider, err := openstack.AuthenticatedClient(cfg.toAuthOptions())
	if err != nil {
		glog.Warning("Failed to auth openstack: %v", err)
		return nil, err
	}

	identity, err := openstack.NewIdentityV2(provider, gophercloud.EndpointOpts{
		Availability: gophercloud.AvailabilityAdmin,
	})
	if err != nil {
		glog.Warning("Failed to find identity endpoint")
		return nil, err
	}

	network, err := openstack.NewNetworkV2(provider, gophercloud.EndpointOpts{
		Region: cfg.Global.Region,
	})
	if err != nil {
		glog.Warning("Failed to find neutron endpoint: %v", err)
		return nil, err
	}

	os := OpenStack{
		identity:   identity,
		network:    network,
		provider:   provider,
		region:     cfg.Global.Region,
		lbOpts:     cfg.LoadBalancer,
		pluginOpts: cfg.Plugin,
		ExtNetID:   cfg.Global.ExtNetID,
	}

	// init plugin
	if cfg.Plugin.PluginName != "" {
		integrationBriage := "br-int"
		if cfg.Plugin.IntegrationBridge != "" {
			integrationBriage = cfg.Plugin.IntegrationBridge
		}

		plugin, _ := plugins.GetNetworkPlugin(cfg.Plugin.PluginName)
		if plugin != nil {
			plugin.Init(integrationBridge)
			os.Plugin = plugin
		}
	}

	return &os, nil
}
Example #19
func CleanupTempFile(f *os.File) {
	err := f.Close()
	if err != nil {
		glog.Warning("error closing temp file: %v", err)
	}
	err = os.Remove(f.Name())
	if err != nil {
		glog.Warning("error deleting temp file: %v", err)
	}
}
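As in several examples above, glog.Warning handles its arguments like fmt.Print, so printf verbs such as %v are emitted literally; glog.Warningf is the formatting variant. A short sketch of the difference, assuming the standard github.com/golang/glog package:

package main

import (
	"errors"
	"flag"

	"github.com/golang/glog"
)

func main() {
	flag.Parse() // glog registers its flags (e.g. -logtostderr)
	err := errors.New("disk full")

	// Like fmt.Print: the %v verb is not interpreted and appears
	// literally in the log line.
	glog.Warning("error closing temp file: %v", err)

	// Like fmt.Printf: logs "error closing temp file: disk full".
	glog.Warningf("error closing temp file: %v", err)

	glog.Flush()
}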
Example #20
// handleClientStateChange makes clean transitions as the connection with
// avahi-daemon changes.
//export handleClientStateChange
func handleClientStateChange(client *C.AvahiClient, newState C.AvahiClientState, userdata unsafe.Pointer) {
	z := instance
	z.spMutex.Lock()
	defer z.spMutex.Unlock()

	// Name conflict.
	if newState == C.AVAHI_CLIENT_S_COLLISION {
		glog.Warning("Avahi reports a host name collision.")
	}

	// Transition from not connecting to connecting. Warn in logs.
	if newState == C.AVAHI_CLIENT_CONNECTING {
		glog.Warning("Cannot find Avahi daemon. Is it running?")
	}

	// Transition from running to not running. Free all groups.
	if newState != C.AVAHI_CLIENT_S_RUNNING {
		glog.Info("Local printing disabled (Avahi client is not running).")
		for name, r := range z.printers {
			if r.group != nil {
				if errstr := C.removeAvahiGroup(z.threadedPoll, r.group); errstr != nil {
					err := errors.New(C.GoString(errstr))
					glog.Errorf("Failed to remove Avahi group: %s", err)
				}
				r.group = nil
				z.printers[name] = r
			}
		}
	}

	// Transition from not running to running. Recreate all groups.
	if newState == C.AVAHI_CLIENT_S_RUNNING {
		glog.Info("Local printing enabled (Avahi client is running).")
		for name, r := range z.printers {
			txt := prepareTXT(r.ty, r.url, r.id, r.online)
			defer C.avahi_string_list_free(txt)

			if errstr := C.addAvahiGroup(z.threadedPoll, z.client, &r.group, r.name, C.ushort(r.port), txt); errstr != nil {
				err := errors.New(C.GoString(errstr))
				glog.Errorf("Failed to add Avahi group: %s", err)
			}

			z.printers[name] = r
		}
	}

	// Transition to failure. Recreate the threaded poll and client.
	if newState == C.AVAHI_CLIENT_FAILURE {
		z.restart <- struct{}{}
	}

	z.state = newState
}
Example #21
func (nt *UDPTransporter) Send(to uint8, msg message.Message) {
	go func() {
		buf := new(bytes.Buffer)
		enc := gob.NewEncoder(buf)
		if err := enc.Encode(&msg); err != nil {
			glog.Warning("Encoding error ", err)
			return
		}
		_, err := nt.Conns[to].Write(buf.Bytes())
		if err != nil {
			glog.Warning("UDP write error ", err)
		}
	}()
}
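Send serializes the message with encoding/gob before writing the bytes to the UDP connection. A self-contained sketch of that encode/decode round trip, using a hypothetical ping type (note the original encodes an interface value, which additionally requires gob.Register on the concrete type):

package main

import (
	"bytes"
	"encoding/gob"
	"fmt"
)

type ping struct{ Seq int }

func main() {
	// Encode into a buffer, as Send does before writing the frame.
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(ping{Seq: 7}); err != nil {
		fmt.Println("encode error:", err)
		return
	}
	// The receiver decodes the same frame back into a value.
	var p ping
	if err := gob.NewDecoder(&buf).Decode(&p); err != nil {
		fmt.Println("decode error:", err)
		return
	}
	fmt.Println("seq:", p.Seq) // seq: 7
}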
Example #22
func (client *CSClient) HandleCustomerService(cs *CustomerServiceMessage, seq int) {
	if cs.sender != client.uid {
		log.Warningf("customer message sender:%d client uid:%d\n",
			cs.sender, client.uid)
		return
	}

	is_question := (client.uid == cs.customer_id)

	group_id, mode := customer_service.GetApplicationConfig(client.appid)

	group := group_manager.FindGroup(group_id)
	if group == nil {
		log.Warning("can't find group:", group_id)
		return
	}

	// a reply from customer-service staff must have a nonzero receiver id
	if !is_question && cs.customer_id == 0 {
		log.Warning("customer service message receiver is 0")
		return
	}

	if !is_question && !group.IsMember(client.uid) {
		log.Warningf("client:%d is't in staff set", client.uid)
		return
	}

	cs.timestamp = int32(time.Now().Unix())

	log.Infof("customer service mode:%d", mode)
	var err error
	if mode == CS_MODE_BROADCAST {
		err = client.Broadcast(cs, group)
	} else if mode == CS_MODE_ONLINE {
		err = client.OnlineSend(cs, is_question)
	} else if mode == CS_MODE_FIX {
		err = client.FixSend(cs, group, is_question)
	} else {
		log.Warning("do not support customer service mode:", mode)
		return
	}

	if err != nil {
		return
	}

	client.wt <- &Message{cmd: MSG_ACK, body: &MessageACK{int32(seq)}}
}
Example #23
func (env *Envelope) ToPublic(us UserService) *PublicEnvelope {
	users, err := us.GetByIDs(env.UserID, env.WithUserID)
	if err != nil {
		glog.Warning("Envelope.ToPublic expects 2 valid users")
		return nil
	}
	var user, withUser *User
	for _, u := range users {
		if u.ID == env.UserID {
			user = u
		}
		if u.ID == env.WithUserID {
			withUser = u
		}
	}
	if user == nil || withUser == nil {
		glog.Warning("Envelope.ToPublic expects 2 valid users")
		return nil
	}
	var author, recipient string
	if env.IsIncoming {
		author = withUser.Username
		recipient = user.Username
	} else {
		author = user.Username
		recipient = withUser.Username
	}
	return &PublicEnvelope{
		Author:      author,
		Recipient:   recipient,
		Message:     env.Message,
		MessageType: env.MessageType,
		CreatedAt:   env.CreatedAt.Time,
	}
}
Example #24
func (z *zeroconf) restartAndQuit() {
	for {
		select {
		case <-z.restart:
			glog.Warning("Avahi client failed. Make sure that avahi-daemon is running while I restart the client.")

			C.stopAvahiClient(z.threadedPoll, z.client)

			var errstr *C.char
			C.startAvahiClient(&z.threadedPoll, &z.client, &errstr)
			if errstr != nil {
				err := errors.New(C.GoString(errstr))
				C.free(unsafe.Pointer(errstr))
				glog.Errorf("Failed to restart Avahi client: %s", err)
			}

		case <-z.q:
			for name := range z.printers {
				z.removePrinter(name)
			}
			C.stopAvahiClient(z.threadedPoll, z.client)
			close(z.q)
			return
		}
	}
}
Example #25
func (client *Client) HandleMessage(msg *Message) {
	log.Info("msg cmd:", Command(msg.cmd))
	switch msg.cmd {
	case MSG_LOAD_OFFLINE:
		client.HandleLoadOffline(msg.body.(*LoadOffline))
	case MSG_SAVE_AND_ENQUEUE:
		client.HandleSaveAndEnqueue(msg.body.(*SAEMessage))
	case MSG_DEQUEUE:
		client.HandleDQMessage(msg.body.(*DQMessage))
	case MSG_LOAD_HISTORY:
		client.HandleLoadHistory(msg.body.(*LoadHistory))
	case MSG_SAVE_AND_ENQUEUE_GROUP:
		client.HandleSaveAndEnqueueGroup(msg.body.(*SAEMessage))
	case MSG_DEQUEUE_GROUP:
		client.HandleDQGroupMessage(msg.body.(*DQGroupMessage))
	case MSG_SUBSCRIBE_GROUP:
		client.HandleSubscribeGroup(msg.body.(*AppGroupMemberID))
	case MSG_UNSUBSCRIBE_GROUP:
		client.HandleUnSubscribeGroup(msg.body.(*AppGroupMemberID))
	case MSG_LOAD_GROUP_OFFLINE:
		client.HandleLoadGroupOffline(msg.body.(*LoadGroupOffline))
	case MSG_SUBSCRIBE:
		client.HandleSubscribe(msg.body.(*AppUserID))
	case MSG_UNSUBSCRIBE:
		client.HandleUnsubscribe(msg.body.(*AppUserID))
	default:
		log.Warning("unknown msg:", msg.cmd)
	}
}
Example #26
// shouldWriteCerts determines if the router should ask the cert manager to write out certificates.
// It returns true if a route is edge or reencrypt and has all the required (host/key) certificates
// defined. If the route does not have the certificates defined, it logs an info message if the
// router is configured with a default certificate (and assumes the route is meant to be a wildcard);
// otherwise it logs a warning. The route will still be written, but users may receive browser errors
// for a host/cert mismatch.
func (r *templateRouter) shouldWriteCerts(cfg *ServiceAliasConfig) bool {
	if cfg.Certificates == nil {
		return false
	}

	if cfg.TLSTermination == routeapi.TLSTerminationEdge || cfg.TLSTermination == routeapi.TLSTerminationReencrypt {
		if hasRequiredEdgeCerts(cfg) {
			return true
		}

		if cfg.TLSTermination == routeapi.TLSTerminationReencrypt && hasReencryptDestinationCACert(cfg) {
			glog.V(4).Info("a reencrypt route with host %s does not have an edge certificate, using default router certificate", cfg.Host)
			return true
		}

		msg := fmt.Sprintf("a %s terminated route with host %s does not have the required certificates.  The route will still be created but no certificates will be written",
			cfg.TLSTermination, cfg.Host)
		// if a default cert is configured we'll assume it is meant to be a wildcard and only log info
		// otherwise we'll consider this a warning
		if len(r.defaultCertificate) > 0 {
			glog.V(4).Info(msg)
		} else {
			glog.Warning(msg)
		}
		return false
	}
	return false
}
Example #27
func loadTestProfiles() error {
	f, err := ioutil.TempFile("/tmp", "apparmor")
	if err != nil {
		return fmt.Errorf("failed to open temp file: %v", err)
	}
	defer os.Remove(f.Name())
	defer f.Close()

	if _, err := f.WriteString(testProfiles); err != nil {
		return fmt.Errorf("failed to write profiles to file: %v", err)
	}

	// TODO(random-liu): The test is run as root now, no need to use sudo here.
	cmd := exec.Command("sudo", "apparmor_parser", "-r", "-W", f.Name())
	stderr := &bytes.Buffer{}
	cmd.Stderr = stderr
	out, err := cmd.Output()
	// apparmor_parser does not always return an error code, so consider any stderr output an error.
	if err != nil || stderr.Len() > 0 {
		if stderr.Len() > 0 {
			glog.Warning(stderr.String())
		}
		if len(out) > 0 {
			glog.Infof("apparmor_parser: %s", out)
		}
		return fmt.Errorf("failed to load profiles: %v", err)
	}
	glog.V(2).Infof("Loaded profiles: %v", out)
	return nil
}
Example #28
// ObsChan implements the ObsChan method for the observer interface.
// Each observation is a sequence of type model.FloatObsSequence.
func (so *SeqObserver) ObsChan() (<-chan Obs, error) {
	obsChan := make(chan Obs, 1000)
	go func() {

		dec := json.NewDecoder(so.reader)
		for {
			var v Seq
			err := dec.Decode(&v)
			if err == io.EOF {
				break
			}
			if err != nil {
				glog.Warning(err)
				break
			}
			fos := NewFloatObsSequence(v.Vectors, SimpleLabel(strings.Join(v.Labels, ",")), v.ID)
			if len(v.Alignments) > 0 {
				x := fos.(FloatObsSequence)
				x.SetAlignment(v.Alignments)
				fos = x
			}
			obsChan <- fos
		}
		close(obsChan)
	}()
	return obsChan, nil
}
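ObsChan follows the standard producer pattern: a goroutine fills a buffered channel and closes it when the input is exhausted, so consumers can simply range over the channel. A minimal sketch with a hypothetical Obs stand-in type:

package main

import "fmt"

// Obs is a hypothetical stand-in for the observation type.
type Obs struct{ ID string }

// produce streams observations on a buffered channel and closes it when the
// source is exhausted, mirroring the ObsChan pattern above.
func produce(items []Obs) <-chan Obs {
	ch := make(chan Obs, 2)
	go func() {
		defer close(ch) // closing lets the consumer's range loop terminate
		for _, it := range items {
			ch <- it
		}
	}()
	return ch
}

func main() {
	for obs := range produce([]Obs{{ID: "a"}, {ID: "b"}}) {
		fmt.Println("got", obs.ID)
	}
}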
Example #29
// Retrieve the Photon VM ID from the Photon Controller endpoint based on the IP address
func getVMIDbyIP(project string, IPAddress string) (string, error) {
	vmList, err := photonClient.Projects.GetVMs(project, nil)
	if err != nil {
		glog.Errorf("Photon Cloud Provider: Failed to GetVMs for project %s. error: [%v]", project, err)
		return "", err
	}

	for _, vm := range vmList.Items {
		task, err := photonClient.VMs.GetNetworks(vm.ID)
		if err != nil {
			glog.Warningf("Photon Cloud Provider: GetNetworks failed for vm.ID %s, error [%v]", vm.ID, err)
		} else {
			task, err = photonClient.Tasks.Wait(task.ID)
			if err != nil {
				glog.Warning("Photon Cloud Provider: Wait task for GetNetworks failed for vm.ID %s, error [%v]", vm.ID, err)
			} else {
				networkConnections := task.ResourceProperties.(map[string]interface{})
				networks := networkConnections["networkConnections"].([]interface{})
				for _, nt := range networks {
					network := nt.(map[string]interface{})
					if val, ok := network["ipAddress"]; ok && val != nil {
						ipAddr := val.(string)
						if ipAddr == IPAddress {
							return vm.ID, nil
						}
					}
				}
			}
		}
	}

	return "", fmt.Errorf("No matching VM is found with IP %s", IPAddress)
}
Example #30
// run is responsible for preparing environment for actual build.
// It accepts factoryFunc and an ordered array of SCMAuths.
func run(builderFactory factoryFunc, scmAuths []scmauth.SCMAuth) {
	client, endpoint, err := dockerutil.NewHelper().GetClient()
	if err != nil {
		glog.Fatalf("Error obtaining docker client: %v", err)
	}
	buildStr := os.Getenv("BUILD")
	build := api.Build{}
	if err := latest.Codec.DecodeInto([]byte(buildStr), &build); err != nil {
		glog.Fatalf("Unable to parse build: %v", err)
	}
	var (
		authcfg     docker.AuthConfiguration
		authPresent bool
	)
	output := build.Spec.Output.To != nil && len(build.Spec.Output.To.Name) != 0
	if output {
		authcfg, authPresent = dockercfg.NewHelper().GetDockerAuth(
			build.Spec.Output.To.Name,
			dockercfg.PullAuthType,
		)
	}
	if build.Spec.Source.SourceSecret != nil {
		if err := setupSourceSecret(build.Spec.Source.SourceSecret.Name, scmAuths); err != nil {
			glog.Fatalf("Cannot setup secret file for accessing private repository: %v", err)
		}
	}
	b := builderFactory(client, endpoint, authcfg, authPresent, &build)
	if err = b.Build(); err != nil {
		glog.Fatalf("Build error: %v", err)
	}
	if !output {
		glog.Warning("Build does not have an Output defined, no output image was pushed to a registry.")
	}
}