func renderHTML(w http.ResponseWriter, r *http.Request, task eremetic.Task, taskID string, conf *config.Config) {
	var templateFile string

	data := make(map[string]interface{})
	funcMap := template.FuncMap{
		"ToLower":    strings.ToLower,
		"FormatTime": FormatTime,
	}

	if reflect.DeepEqual(task, (eremetic.Task{})) {
		notFound(w, r)
		return
	}

	templateFile = "task.html"
	data = makeMap(task)
	data["Version"] = version.Version

	source, _ := assets.Asset(fmt.Sprintf("templates/%s", templateFile))
	tpl, err := template.New(templateFile).Funcs(funcMap).Parse(string(source))
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		logrus.WithError(err).WithField("template", templateFile).Error("Unable to load template")
		return
	}

	err = tpl.Execute(w, data)
	if err != nil {
		logrus.WithError(err).WithField("template", templateFile).Error("Unable to execute template")
	}
}
// Start begins periodic polling of Controller for the latest configuration.
// This is a blocking operation.
func (p *poller) Start() error {
	// Stop existing ticker if necessary
	if p.ticker != nil {
		if err := p.Stop(); err != nil {
			logrus.WithError(err).Error("Could not stop existing periodic poll")
			return err
		}
	}

	// Create new ticker
	p.ticker = time.NewTicker(p.config.Controller.Poll)

	// Do initial poll
	if err := p.poll(); err != nil {
		logrus.WithError(err).Error("Poll failed")
	}

	// Start periodic poll
	for range p.ticker.C {
		if err := p.poll(); err != nil {
			logrus.WithError(err).Error("Poll failed")
		}
	}

	return nil
}
func main() {
	flag.Parse()

	if *NAME == "" {
		log.Fatal("name required")
	}
	if *MEMORY == 0 {
		log.Fatal("memory required")
	}
	if *CPUS == 0 {
		log.Fatal("cpus required")
	}

	db, err := bolt.Open(*DB_PATH, 0600, nil)
	if err != nil {
		log.WithError(err).Fatal("failed to open database")
	}

	planrep := dal.NewBoltPlanrep(db)

	plan := &models.Plan{
		Name:     *NAME,
		Memory:   *MEMORY * 1024 * 1024,
		Cpus:     *CPUS,
		DiskSize: *DISK * 1024 * 1024 * 1024,
	}

	if err := planrep.Add(plan); err != nil {
		log.WithError(err).WithField("plan", plan).Fatal("failed to add plan")
	}
}
func (sv *supervisor) run(name string, args ...string) {
	isRunning, err := sv.dk.IsRunning(name)
	if err != nil {
		log.WithError(err).Warnf("could not check running status of %s.", name)
		return
	}
	if isRunning {
		return
	}

	ro := docker.RunOptions{
		Name:        name,
		Image:       images[name],
		Args:        args,
		NetworkMode: "host",
		VolumesFrom: []string{"minion"},
	}

	if name == Ovsvswitchd {
		ro.Privileged = true
	}

	log.Infof("Start Container: %s", name)
	_, err = sv.dk.Run(ro)
	if err != nil {
		log.WithError(err).Warnf("Failed to run %s.", name)
	}
}
// Synchronize locally running "application" containers with the database.
func (sv *supervisor) runApp() {
	for range sv.conn.TriggerTick(10, db.MinionTable, db.ContainerTable).C {
		minion, err := sv.conn.MinionSelf()
		if err != nil || minion.Role != db.Worker {
			continue
		}

		if err := delStopped(sv.dk); err != nil {
			log.WithError(err).Error("Failed to clean up stopped containers")
		}

		dkcs, err := sv.dk.List(map[string][]string{
			"label": {docker.SchedulerLabelPair},
		})
		if err != nil {
			log.WithError(err).Error("Failed to list local containers.")
			continue
		}

		sv.conn.Transact(func(view db.Database) error {
			sv.runAppTransact(view, dkcs)
			return nil
		})
	}
}
// Run blocks implementing the scheduler module.
func Run(conn db.Conn, dk docker.Client) {
	bootWait(conn)

	subnet := getMinionSubnet(conn)
	err := dk.ConfigureNetwork(plugin.NetworkName, subnet)
	if err != nil {
		log.WithError(err).Fatal("Failed to configure network plugin")
	}

	loopLog := util.NewEventTimer("Scheduler")
	trig := conn.TriggerTick(60, db.MinionTable, db.ContainerTable,
		db.PlacementTable, db.EtcdTable).C

	for range trig {
		loopLog.LogStart()

		minion, err := conn.MinionSelf()
		if err != nil {
			log.WithError(err).Warn("Missing self in the minion table.")
			continue
		}

		if minion.Role == db.Worker {
			subnet = updateNetwork(conn, dk, subnet)
			runWorker(conn, dk, minion.PrivateIP, subnet)
		} else if minion.Role == db.Master {
			runMaster(conn)
		}

		loopLog.LogEnd()
	}
}
func main() {
	containers, err := exec.Command("quilt", "containers").Output()
	if err != nil {
		log.WithError(err).Fatal("Unable to get containers.")
	}
	fmt.Println("`quilt containers` output:")
	fmt.Println(string(containers))

	matches := regexp.MustCompile(`(\d+) .*run master.*`).
		FindStringSubmatch(string(containers))
	if len(matches) != 2 {
		log.Fatal("Unable to find StitchID of Spark master.")
	}
	id := matches[1]

	logs, err := exec.Command("quilt", "logs", id).CombinedOutput()
	if err != nil {
		log.WithError(err).Fatal("Unable to get Spark master logs.")
	}
	fmt.Printf("`quilt logs %s` output:\n", id)
	fmt.Println(string(logs))

	if !strings.Contains(string(logs), "Pi is roughly") {
		fmt.Println("FAILED, sparkPI did not execute correctly.")
	} else {
		fmt.Println("PASSED")
	}
}
func main() {
	flag.Parse()

	if *ADDRESS == "" {
		log.Fatal("address not specified")
	}
	if *MASK == 0 {
		log.Fatal("mask not specified")
	}
	if *GW == "" {
		log.Fatal("gateway not specified")
	}

	db, err := bolt.Open(*DB_PATH, 0600, nil)
	if err != nil {
		log.WithError(err).Fatal("failed to open database")
	}

	pool := dal.NewBoltIPPool(db)

	ip := &models.IP{
		Address: *ADDRESS,
		Gateway: *GW,
		Netmask: *MASK,
		UsedBy:  "",
	}

	if err := pool.Add(ip); err != nil {
		log.WithError(err).WithField("ip", ip).Fatal("failed to add ip address")
	}
}
func updateContainerIP(containers []db.Container, privateIP string, store Store) {
	oldIPMap := map[string]string{}
	selfStore := path.Join(nodeStore, privateIP)
	etcdIPs, err := store.Get(path.Join(selfStore, minionIPStore))
	if err != nil {
		etcdErr, ok := err.(client.Error)
		if !ok || etcdErr.Code != client.ErrorCodeKeyNotFound {
			log.WithError(err).Error("Failed to load current IPs from Etcd")
			return
		}
	}
	json.Unmarshal([]byte(etcdIPs), &oldIPMap)

	newIPMap := map[string]string{}
	for _, c := range containers {
		newIPMap[strconv.Itoa(c.StitchID)] = c.IP
	}

	if util.StrStrMapEqual(oldIPMap, newIPMap) {
		return
	}

	jsonData, err := json.Marshal(newIPMap)
	if err != nil {
		log.WithError(err).Error("Failed to marshal minion container IP map")
		return
	}

	err = store.Set(path.Join(selfStore, minionIPStore), string(jsonData), 0)
	if err != nil {
		log.WithError(err).Error("Failed to update minion container IP map")
	}
}
func main() {
	config, err := OpenConfig("config.yml")
	if err != nil {
		logrus.WithError(err).Fatal("Error loading config file")
	}
	logrus.Info("Loaded config")

	repo, err := rethink.NewRepo(config.RethinkDB)
	if err != nil {
		logrus.WithError(err).Fatal("Error connecting to repository")
	}
	logrus.Info("Connected to repository")

	tokenValidator := controllers.NewTokenValidator(config.Token.ClientID)
	updateController := controllers.NewUpdateController(repo, tokenValidator)
	readController := controllers.NewReadController(repo, tokenValidator)
	readAllController := controllers.NewReadAllController(repo, tokenValidator)
	subscribeController := controllers.NewSubscribeController(repo, tokenValidator)
	unsubscribeController := controllers.NewUnsubscribeController(repo, tokenValidator)

	http.Handle("/updates", updateController)
	http.Handle("/read", readController)
	http.Handle("/readall", readAllController)
	http.Handle("/subscribe", subscribeController)
	http.Handle("/unsubscribe", unsubscribeController)

	logrus.Info("Starting server")
	if err := http.ListenAndServe(config.Server.Listen, nil); err != nil {
		logrus.WithError(err).Fatal("Server stopped")
	}
}
// Run retrieves and prints the requested containers.
func (cCmd *Container) Run() int {
	localClient, err := cCmd.clientGetter.Client(cCmd.host)
	if err != nil {
		log.Error(err)
		return 1
	}
	defer localClient.Close()

	c, err := cCmd.clientGetter.LeaderClient(localClient)
	if err != nil {
		log.WithError(err).Error("Error connecting to leader.")
		return 1
	}
	defer c.Close()

	containers, err := c.QueryContainers()
	if err != nil {
		log.WithError(err).Error("Unable to query containers.")
		return 1
	}

	machines, err := localClient.QueryMachines()
	if err != nil {
		log.WithError(err).Error("Unable to query machines")
		return 1
	}

	writeContainers(os.Stdout, machines, containers)
	return 0
}
func (hc *HyenaClient) write() {
	stop := false
	for f := range hc.send {
		buf := f.Buffer()
		if !stop {
			size := byte(len(buf))
			n, err := hc.conn.Write([]byte{size})
			if err != nil || n != 1 {
				log.WithError(err).Error("Writing to hyenad client connection")
				hc.conn.Close()
				stop = true
			}

			n, err = hc.conn.Write(buf)
			if err != nil || n != len(buf) {
				log.WithError(err).Error("Writing to hyenad client connection")
				hc.conn.Close()
				stop = true
			}

			frameBuffers.Return(buf)
		} else {
			frameBuffers.Return(buf)
			log.WithField("Frame", f).Warn("Dropping frame, connection down")
		}
	}
}
func (d *dialer) readloop(u *upstream) {
	for {
		if atomic.LoadInt32(&u.closed) != 0 {
			logrus.WithField("proto", u.proto).Warnln("dialer upstream is closed")
			return
		}

		p := packet{}
		switch u.proto {
		case tcp:
			err := u.decoder.Decode(&p)
			if err != nil {
				logrus.WithFields(logrus.Fields{
					"error": err,
				}).Warnln("Dialer decode upstream packet")
				return
			}
		case udp:
			udpbuf := make([]byte, buffersize)
			n, err := u.conn.Read(udpbuf)
			if err != nil {
				logrus.WithError(err).Warnln("dialer Read UDP error")
				return
			}
			if err := decodePacket(udpbuf[:n], &p); err != nil {
				logrus.WithError(err).Warnln("dialer gob decode from udp error")
				continue
			}
			p.udp = true
		}
		d.proc(u, &p)
	}
}
func (ab *abManager) ChangeUserTestGroup(userid, appid, testname string) (models.ABUser, error) {
	user, err := ab.User(userid)
	if err != nil {
		logrus.WithError(err).Error("User fetch failed")
		return user, err
	}

	repo := ab.db.GetABRepo()
	app, err := repo.App(appid)
	if err != nil {
		logrus.WithError(err).Error("App fetch failed")
		return user, err
	}

	version := int32(-1)
	if testname != "" {
		for _, t := range app.Tests {
			if t.Name == testname {
				version = app.Version
				break
			}
		}
	} else {
		version = app.Version
	}

	if version <= 0 {
		return user, grpc.Errorf(codes.InvalidArgument, "invalid test group name")
	}

	if err := repo.ChangeTesterInfo(userid, appid, testname, version); err != nil {
		logrus.WithError(err).Errorf("failed to change user testing info appid=%s to %s", appid, testname)
		return user, err
	}

	return ab.User(userid)
}
func (sys *System) Start() {
	backends := make(map[edn.Keyword]*Backend)
	for name, config := range sys.config.Backends {
		backend, err := NewBackend(sys, string(name), config)
		if err != nil {
			log.WithError(err).Fatal("Can't start backend")
		}
		backends[name] = backend
	}

	for _, config := range sys.config.Frontends {
		switch string(config.Type) {
		case "zmq":
			backend, pst := backends[config.Backend]
			if !pst {
				log.Fatalf("There is no backend %s", config.Backend)
			}
			_, err := NewZMQFrontend(sys, config, backend)
			if err != nil {
				log.WithError(err).Fatal("Can't start frontend")
			}
		case "http":
			NewHttpFrontend(sys, config.Endpoint)
		}
	}

	sys.processes.Wait()
}
// New creates a GCE cluster.
//
// Clusters are differentiated (namespace) by setting the description and
// filtering off of that.
func New(namespace string) (*Cluster, error) {
	if err := gceInit(); err != nil {
		log.WithError(err).Debug("failed to start up gce")
		return nil, err
	}

	clst := Cluster{
		projID:    "declarative-infrastructure",
		ns:        namespace,
		ipv4Range: "192.168.0.0/16",
	}
	clst.baseURL = fmt.Sprintf("%s/%s", computeBaseURL, clst.projID)
	clst.intFW = fmt.Sprintf("%s-internal", clst.ns)
	clst.imgURL = fmt.Sprintf("%s/%s", computeBaseURL,
		"ubuntu-os-cloud/global/images/ubuntu-1604-xenial-v20160921")

	if err := clst.netInit(); err != nil {
		log.WithError(err).Debug("failed to start up gce network")
		return nil, err
	}

	if err := clst.fwInit(); err != nil {
		log.WithError(err).Debug("failed to start up gce firewalls")
		return nil, err
	}

	return &clst, nil
}
func main() {
	box, err := rice.FindBox("assets")
	if err != nil {
		logrus.WithError(err).Fatal("Error loading assets")
	}

	str, err := box.String("index.tpl.html")
	if err != nil {
		logrus.WithError(err).Fatal("Error loading index")
	}

	tmpl, err := template.New("name").Parse(str)
	if err != nil {
		logrus.WithError(err).Fatal("Error parsing index")
	}
	index = tmpl

	config, err := OpenConfig("config.yml")
	if err != nil {
		logrus.WithError(err).Fatal("Error loading config file")
	}
	logrus.Info("Loaded config")
	context.Config = config

	logrus.Info("Starting server")
	fileServer = http.FileServer(box.HTTPBox())
	http.HandleFunc("/", indexHandler)
	if err := http.ListenAndServe(config.Server.Listen, nil); err != nil {
		logrus.WithError(err).Fatal("Server stopped")
	}
}
// updateNameservers assigns each container the same nameservers as the host.
func updateNameservers(dk docker.Client, containers []db.Container) {
	hostResolv, err := ioutil.ReadFile("/etc/resolv.conf")
	if err != nil {
		log.WithError(err).Error("failed to read /etc/resolv.conf")
		return
	}

	nsRE := regexp.MustCompile(`nameserver\s([0-9]{1,3}\.){3}[0-9]{1,3}\s+`)
	matches := nsRE.FindAllString(string(hostResolv), -1)
	newNameservers := strings.Join(matches, "\n")

	for _, dbc := range containers {
		id := dbc.DockerID
		currNameservers, err := dk.GetFromContainer(id, "/etc/resolv.conf")
		if err != nil {
			log.WithError(err).Error("failed to get /etc/resolv.conf")
			return
		}

		if newNameservers != currNameservers {
			err = dk.WriteToContainer(id, newNameservers, "/etc", "resolv.conf", 0644)
			if err != nil {
				log.WithError(err).Error(
					"failed to update /etc/resolv.conf")
			}
		}
	}
}
// Connect creates a GCE cluster.
//
// Clusters are differentiated (namespace) by setting the description and
// filtering off of that.
//
// XXX: A lot of the fields are hardcoded.
func (clst *gceCluster) Connect(namespace string) error {
	if err := gceInit(); err != nil {
		log.WithError(err).Debug("failed to start up gce")
		return err
	}

	clst.projID = "declarative-infrastructure"
	clst.ns = namespace
	clst.imgURL = fmt.Sprintf(
		"%s/%s",
		computeBaseURL,
		"ubuntu-os-cloud/global/images/ubuntu-1510-wily-v20160310")
	clst.baseURL = fmt.Sprintf("%s/%s", computeBaseURL, clst.projID)
	clst.ipv4Range = "192.168.0.0/16"
	clst.intFW = fmt.Sprintf("%s-internal", clst.ns)
	clst.extFW = fmt.Sprintf("%s-external", clst.ns)

	if err := clst.netInit(); err != nil {
		log.WithError(err).Debug("failed to start up gce network")
		return err
	}

	if err := clst.fwInit(); err != nil {
		log.WithError(err).Debug("failed to start up gce firewalls")
		return err
	}

	return nil
}
func updateDefaultGw(odb ovsdb.Ovsdb) {
	currMac, err := getMac("", quiltBridge)
	if err != nil {
		log.WithError(err).Errorf("failed to get MAC for %s", quiltBridge)
		return
	}

	if currMac != gatewayMAC {
		if err := odb.SetBridgeMac(quiltBridge, gatewayMAC); err != nil {
			log.WithError(err).Error("failed to set MAC for default gateway")
		}
	}

	if err := upLink("", quiltBridge); err != nil {
		log.WithError(err).Error("failed to up default gateway")
	}

	currIPs, err := listIP("", quiltBridge)
	if err != nil {
		log.WithError(err).Error("failed to list IPs")
		return
	}

	targetIPs := []string{gatewayIP + "/8"}
	if err := updateIPs("", quiltBridge, currIPs, targetIPs); err != nil {
		log.WithError(err).Error("failed to update IPs")
	}
}
func main() {
	configPathFlag := flag.String("config", "resource/config.yml", "Path to the server config file")
	dbIPFlag := flag.String("dbip", "", "IP address of the database")
	flag.Parse()

	app := land.Application{}
	defer core.ErrorGlobalHandler()

	log.Info("Loading logger...")
	core.NewLogger()

	log.Info("Loading config...")
	if err := core.NewConfig(&app.Config, *configPathFlag); err != nil {
		log.WithError(err).Panic("Failed to load config")
	}
	if *dbIPFlag != "" {
		app.Config.Database.IP = *dbIPFlag
	}

	log.Info("Loading database...")
	if err := core.NewDatabase(&app.Database, &app.Config); err != nil {
		log.WithError(err).Panic("Failed to load database")
	}

	log.WithField("address", app.Config.Base.IP+":"+app.Config.Base.Port).Info("Server starting")
	if err := app.Run(); err != nil {
		log.WithError(err).Panic("Failed to start server")
	}
}
// Sets up the OpenFlow tables to get packets from containers into the OVN controlled
// bridge. The OpenFlow tables are organized as follows.
//
// - Table 0 will check for packets destined to an IP address of a label with MAC
// 0A:00:00:00:00:00 (obtained by OVN faking out ARP) and use the OF multipath action
// to load-balance packets across n links where n is the number of containers
// implementing the label. This result is stored in NXM_NX_REG0. This is done using
// a symmetric L3/4 hash, so transport connections should remain intact.
//
// - Table 1 reads NXM_NX_REG0 and changes the destination MAC address to one of the
// MACs of the containers that implement the label.
//
// XXX: The multipath action doesn't perform well. We should migrate away from it,
// choosing datapath recirculation instead.
func updateOpenFlow(dk docker.Client, odb ovsdb.Ovsdb, containers []db.Container,
	labels []db.Label, connections []db.Connection) {
	targetOF, err := generateTargetOpenFlow(dk, odb, containers, labels, connections)
	if err != nil {
		log.WithError(err).Error("failed to get target OpenFlow flows")
		return
	}

	currentOF, err := generateCurrentOpenFlow(dk)
	if err != nil {
		log.WithError(err).Error("failed to get current OpenFlow flows")
		return
	}

	_, flowsToDel, flowsToAdd := join.HashJoin(currentOF, targetOF, nil, nil)

	for _, f := range flowsToDel {
		if err := deleteOFRule(dk, f.(OFRule)); err != nil {
			log.WithError(err).Error("error deleting OpenFlow flow")
		}
	}

	for _, f := range flowsToAdd {
		if err := addOFRule(dk, f.(OFRule)); err != nil {
			log.WithError(err).Error("error adding OpenFlow flow")
		}
	}
}
func (sv *supervisor) run(name string, args ...string) {
	isRunning, err := sv.dk.IsRunning(name)
	if err != nil {
		log.WithError(err).Warnf("could not check running status of %s.", name)
		return
	}
	if isRunning {
		return
	}

	ro := docker.RunOptions{
		Name:        name,
		Image:       images[name],
		Args:        args,
		NetworkMode: "host",
	}

	switch name {
	case Ovsvswitchd:
		ro.Privileged = true
		ro.VolumesFrom = []string{Ovsdb}
	case Ovnnorthd:
		ro.VolumesFrom = []string{Ovsdb}
	case Ovncontroller:
		ro.VolumesFrom = []string{Ovsdb}
	}

	if err := sv.dk.Run(ro); err != nil {
		log.WithError(err).Warnf("Failed to run %s.", name)
	}
}
func (s *serv) udphandler(conn *net.UDPConn) {
	u := newUpstream(s.proto)
	u.udpconn = conn

	// add to pool
	s.pool.append(u, 0)
	defer func() {
		u.close()
		s.pool.remove(u)
	}()

	for {
		udpbuf := make([]byte, buffersize)
		n, addr, err := conn.ReadFromUDP(udpbuf)
		if err != nil {
			logrus.WithError(err).Warnln("ReadFromUDP error")
			break
		}
		if u.udpaddr == nil {
			u.udpaddr = addr
		}

		p := packet{}
		if err := decodePacket(udpbuf[:n], &p); err != nil {
			logrus.WithError(err).Warnln("server gob decode from udp error", n)
			continue
		}
		p.udp = true

		if err := s.proc(u, &p); err != nil {
			logrus.WithError(err).Warn("serve send pong err")
			return
		}
	}
}
func migrateToSuperGroup(db *mgo.Database, fromChatID int64, toChatID int64) {
	var chat chatData
	_, err := db.C("chats").FindId(fromChatID).Apply(mgo.Change{
		Update: bson.M{
			"$set":   bson.M{"migratedtochatid": toChatID},
			"$unset": bson.M{"hooks": "", "membersids": ""},
		},
	}, &chat)
	if err != nil {
		log.WithError(err).Error("migrateToSuperGroup remove")
	}

	if chat.ID != 0 {
		chat.ID = toChatID
		chat.Type = "supergroup"
		err := db.C("chats").Insert(chat)
		if err != nil {
			log.WithError(err).Error("migrateToSuperGroup insert")
		}
	}

	err = db.C("users").Update(bson.M{"hooks.chats": fromChatID},
		bson.M{"$addToSet": bson.M{"hooks.$.chats": toChatID}})
	if err != nil {
		log.WithError(err).Error("migrateToSuperGroup add new hook chats")
	}

	err = db.C("users").Update(bson.M{"hooks.chats": toChatID},
		bson.M{"$pull": bson.M{"hooks.$.chats": fromChatID}})
	if err != nil {
		log.WithError(err).Error("migrateToSuperGroup remove outdated hook chats")
	}
}
func (slack *SlackNotifier) postToSlack() bool {
	data, err := json.Marshal(slack)
	if err != nil {
		logrus.WithError(err).Error("Unable to marshal slack payload")
		return false
	}
	logrus.Debugf("struct = %+v, json = %s", slack, string(data))

	b := bytes.NewBuffer(data)
	res, err := http.Post(slack.Url, "application/json", b)
	if err != nil {
		logrus.WithError(err).Error("Unable to send data to slack")
		return false
	}
	defer res.Body.Close()

	if res.StatusCode != 200 {
		body, _ := ioutil.ReadAll(res.Body)
		logrus.Error("Unable to notify slack:", string(body))
		return false
	}

	logrus.Info("Slack notification sent.")
	return true
}
// poll obtains the latest NGINX config from Controller and updates NGINX to use it.
func (p *poller) poll() error {
	// Get latest config from Controller
	conf, err := p.controller.GetNGINXConfig(p.version)
	if err != nil {
		logrus.WithError(err).Error("Call to Controller failed")
		return err
	}

	if conf == "" {
		// TODO: no new rules to update; do we need to do anything else?
		return nil
	}

	reader := bytes.NewBufferString(conf)

	// Update our existing NGINX config
	if err := p.nginx.Update(reader); err != nil {
		logrus.WithError(err).Error("Could not update NGINX config")
		return err
	}

	t := time.Now()
	p.version = &t

	return nil
}
func init() {
	// Initialize logging
	logrus.SetFormatter(&logrus.JSONFormatter{})
	log.SetOutput(logrus.StandardLogger().Out)

	// Initialize configuration
	Config = loadConfig()

	lv, err := logrus.ParseLevel(Config.LogLevel)
	if err != nil {
		logrus.WithError(err).Warn("Failed to parse log level")
		lv = logrus.DebugLevel
	}
	logrus.SetLevel(lv)

	if Config.ReportOn {
		hook, err := logrus_mail.NewMailAuthHook(Config.AppName, Config.MailerHost,
			Config.MailerPort, Config.MailerUser, Config.MailReceiver,
			Config.MailerUser, Config.MailerPass)
		if err == nil {
			logrus.AddHook(hook)
		}
	}

	exeRoot := filepath.Dir(os.Args[0])
	// O_WRONLY is required alongside O_APPEND|O_CREATE so the log file is writable.
	f, err := os.OpenFile(filepath.Join(exeRoot, "retask.log"),
		os.O_APPEND|os.O_CREATE|os.O_WRONLY, os.ModePerm)
	if err != nil {
		logrus.WithError(err).Fatal("Failed to open log file")
	}
	logrus.SetOutput(f)
}
// ClientLive handles a new client connection accepted from the socket.
func ClientLive(buffers Buffers, conf core.Config, c net.Conn) {
	defer core.ErrorNetworkHandler(c)

	buffer := bytes.NewBuffer([]byte{})
	player := entitie.NewPlayer(c)
	newPill := pill.NewPill()

	for getBytes := range buffers.GetReadChannel() {
		buffer.Reset()
		buffer.Write(getBytes)
		log.WithField("bytes", fmt.Sprintf("% x", buffer.Bytes())).Info("Print message from client")

		if buffer.Len() <= 2 {
			buffers.GetWriteChannel() <- []byte{0x00, 0x02}
			continue
		}

		opcodes, err := newPill.Decrypt(buffer.Bytes(), player)
		if err != nil {
			log.WithError(err).Panic("Error in pill decrypt")
		}

		if opcodes != nil {
			for _, opcode := range opcodes {
				pillEncrypt, err := newPill.Encrypt(newPill.GetPill(opcode), player)
				if err != nil {
					log.WithError(err).Error("Error in pill encrypt")
				}
				buffers.GetWriteChannel() <- pillEncrypt
			}
		}
	}
}
func getCredential(settings *Settings) (*mesosproto.Credential, error) {
	if settings.CredentialFile != "" {
		content, err := ioutil.ReadFile(settings.CredentialFile)
		if err != nil {
			logrus.WithError(err).WithFields(logrus.Fields{
				"credential_file": settings.CredentialFile,
			}).Error("Unable to read credential_file")
			return nil, err
		}

		fields := strings.Fields(string(content))
		if len(fields) != 2 {
			err := errors.New("Unable to parse credentials")
			logrus.WithError(err).WithFields(logrus.Fields{
				"credential_file": settings.CredentialFile,
			}).Error("Should only contain a key and a secret separated by whitespace")
			return nil, err
		}

		logrus.WithField("principal", fields[0]).Info("Successfully loaded principal")
		return &mesosproto.Credential{
			Principal: proto.String(fields[0]),
			Secret:    proto.String(fields[1]),
		}, nil
	}

	logrus.Debug("No credentials specified in configuration")
	return nil, nil
}