// handleMessages takes all messages from the Send channel // and actually sends them over the network. It then waits // for the response and puts it in the Recv channel. func (c *Client) handleMessages(tnl io.ReadWriter) { // We don't need compression for a local socket: protocol := protocol.NewProtocol(tnl, false) for { select { case <-c.quit: return case msg := <-c.Send: if err := protocol.Send(msg); err != nil { log.Warning("client-send: ", err) c.Recv <- nil continue } resp := &wire.Response{} if err := protocol.Recv(resp); err != nil { log.Warning("client-recv: ", err) c.Recv <- nil continue } c.Recv <- resp } } }
// init loads configuration from HELEN_* environment variables into the
// package-level Constants, fills in defaults, validates URL-typed
// settings, and logs notable configuration states.
func init() {
	err := envconfig.Process("HELEN", &Constants)
	if err != nil {
		logrus.Fatal(err)
	}
	if Constants.SteamDevAPIKey == "" {
		// NOTE(review): the message says SteamApiMockUp is being set to
		// true, but no flag is actually assigned here — confirm intent.
		logrus.Warning("Steam api key not provided, setting SteamApiMockUp to true")
	}
	if Constants.PublicAddress == "" {
		// Default the public address to the listen address over plain HTTP.
		Constants.PublicAddress = "http://" + Constants.ListenAddress
	}
	if Constants.MockupAuth {
		logrus.Warning("Mockup authentication enabled.")
	}
	// Parse-validate the URL-shaped settings; parsed values are discarded.
	_, err = url.Parse(Constants.PublicAddress)
	if err != nil {
		logrus.Fatal("Couldn't parse HELEN_PUBLIC_ADDR - ", err)
	}
	_, err = url.Parse(Constants.LoginRedirectPath)
	if err != nil {
		logrus.Fatal("Couldn't parse HELEN_SERVER_REDIRECT_PATH - ", err)
	}
	if Constants.GeoIP {
		logrus.Info("GeoIP support enabled")
	}
}
func (a *App) Serve() { requestHandlers := &handlers.RequestHandler{ Config: &a.config, Horizon: a.horizon, TransactionSubmitter: a.transactionSubmitter, } portString := fmt.Sprintf(":%d", *a.config.Port) flag.Set("bind", portString) goji.Abandon(middleware.Logger) goji.Use(handlers.StripTrailingSlashMiddleware()) goji.Use(handlers.HeadersMiddleware()) if a.config.ApiKey != "" { goji.Use(handlers.ApiKeyMiddleware(a.config.ApiKey)) } if a.config.Accounts.AuthorizingSeed != nil { goji.Post("/authorize", requestHandlers.Authorize) } else { log.Warning("accounts.authorizing_seed not provided. /authorize endpoint will not be available.") } if a.config.Accounts.IssuingSeed != nil { goji.Post("/send", requestHandlers.Send) } else { log.Warning("accounts.issuing_seed not provided. /send endpoint will not be available.") } goji.Post("/payment", requestHandlers.Payment) goji.Serve() }
func (connection *Connection) Write(m interface{}) { defer func() { if err := recover(); err != nil { log.Warning("Recovered write on closed channel") log.Warning(err) } }() connection.InChan <- m }
func PrepareHTTPS(s *http.Server, cert string, key string, skipDev bool) error { // Checking for TLS default keys var certBytes, keyBytes []byte devCert := "devcert/cert.pem" devKey := "devcert/key.pem" // If only a single argument is set if (cert == "" || key == "") && (cert != "" || key != "") { log.Panic("You cannot specify only key or certificate") } // Using default certificate if cert == "" && !skipDev { if _, err := os.Stat(devCert); err == nil { if _, err := os.Stat(devKey); err == nil { log.Warning("===============================================================================") log.Warning("This instance will use development certificates, don't use them in production !") log.Warning("===============================================================================") cert = devCert key = devKey } else { log.Debug("No devcert key found: ", devKey) } } else { log.Debug("No devcert certificate found: ", devCert) } } if _, err := os.Stat(cert); err != nil { if _, err := os.Stat(key); err != nil { certBytes, keyBytes = GenerateDefaultTLS(cert, key) } } else { log.Info("Loading TLS Certificate: ", cert) log.Info("Loading TLS Private key: ", key) certBytes, err = ioutil.ReadFile(cert) keyBytes, err = ioutil.ReadFile(key) } certifs, err := tls.X509KeyPair(certBytes, keyBytes) if err != nil { log.Panic("Cannot parse certificates") } s.TLSConfig = &tls.Config{ Certificates: []tls.Certificate{certifs}, } return nil }
// toDBMachine converts machines specified in the Stitch into db.Machines that can // be compared against what's already in the db. // Specifically, it sets the role of the db.Machine, the size (which may depend // on RAM and CPU constraints), and the provider. // Additionally, it skips machines with invalid roles, sizes or providers. func toDBMachine(machines []stitch.Machine, maxPrice float64) []db.Machine { var hasMaster, hasWorker bool var dbMachines []db.Machine for _, stitchm := range machines { var m db.Machine role, err := db.ParseRole(stitchm.Role) if err != nil { log.WithError(err).Error("Error parsing role.") continue } m.Role = role hasMaster = hasMaster || role == db.Master hasWorker = hasWorker || role == db.Worker p, err := db.ParseProvider(stitchm.Provider) if err != nil { log.WithError(err).Error("Error parsing provider.") continue } m.Provider = p m.Size = stitchm.Size if m.Size == "" { providerInst := provider.New(p) m.Size = providerInst.ChooseSize( stitchm.RAM, stitchm.CPU, maxPrice) if m.Size == "" { log.Errorf("No valid size for %v, skipping.", m) continue } } m.DiskSize = stitchm.DiskSize if m.DiskSize == 0 { m.DiskSize = defaultDiskSize } m.SSHKeys = stitchm.SSHKeys m.Region = stitchm.Region dbMachines = append(dbMachines, provider.DefaultRegion(m)) } if !hasMaster && hasWorker { log.Warning("A Master was specified but no workers.") return nil } else if hasMaster && !hasWorker { log.Warning("A Worker was specified but no masters.") return nil } return dbMachines }
// New creates a Client Object. func New() *Client { config := os.Getenv("KUMORU_CONFIG") if config == "" { usrHome := os.Getenv("HOME") config = usrHome + "/.kumoru/config" } e := LoadEndpoints(config, "endpoints") t, err := LoadTokens(config, "tokens") if err != nil { log.Warning("No tokens found.") } roleUUID, err := LoadRole(config, "auth") if err != nil { log.Warning("No active role found. Generate a new token.") } envDebug := false if strings.ToLower(os.Getenv("KUMORU_SDK_DEBUG")) == "true" { envDebug = true } logger := log.New() return &Client{ BounceToRawString: false, Client: &http.Client{}, Data: make(map[string]interface{}), Debug: envDebug, EndPoint: &e, Errors: nil, FormData: url.Values{}, Header: make(map[string]string), Logger: logger, ProxyRequestData: nil, QueryData: url.Values{}, RawString: "", RoleUUID: roleUUID, Sign: false, SliceData: []interface{}{}, TargetType: "form", Tokens: &t, Transport: &http.Transport{}, URL: "", } }
// syncronize mount info with refcounts - and unmounts if needed func (r refCountsMap) syncMountsWithRefCounters(d *vmdkDriver) { for vol, cnt := range r { f := log.Fields{ "name": vol, "refcnt": cnt.count, "mounted": cnt.mounted, "dev": cnt.dev, } log.WithFields(f).Debug("Refcnt record: ") if cnt.mounted == true { if cnt.count == 0 { // Volume mounted but not used - UNMOUNT and DETACH ! log.WithFields(f).Info("Initiating recovery unmount. ") err := d.unmountVolume(vol) if err != nil { log.Warning("Failed to unmount - manual recovery may be needed") } } // else: all good, nothing to do - volume mounted and used. } else { if cnt.count == 0 { // volume unmounted AND refcount 0. We should NEVER get here // since unmounted and recount==0 volumes should have no record // in the map. Something went seriously wrong in the code. log.WithFields(f).Panic("Internal failure: record should not exist. ") } else { // No mounts, but Docker tells we have refcounts. // It could happen when Docker runs a container with a volume // but not using files on the volumes, and the volume is (manually?) // unmounted. Unlikely but possible. Mount ! log.WithFields(f).Warning("Initiating recovery mount. ") isReadOnly := false status, err := d.ops.Get(vol) if err != nil { log.Warning("Unable to get volume status - mounting as read-only") isReadOnly = true } if status["access"] == "read-only" { isReadOnly = true } _, err = d.mountVolume(vol, status["fstype"].(string), isReadOnly) if err != nil { log.Warning("Failed to mount - manual recovery may be needed") } } } } }
func deploy(cmd *cobra.Command, args []string) error { //TODO this should be a validation method if Project == "" { log.Fatal("--project parameter is required to create a new deployment") } service, err := googlecloud.GetService() Check(err) Name, err = getName(uid) if err != nil { log.Warning(err) return err } log.Infof("Creating new deployment %s", Name) depBuilder := &DeploymentBuilder{ DeploymentName: Name, DeploymentDesc: "", ConfigFilePath: configpath, VarsDotYamlPath: varspath, CLIVars: vars.vars, } d, err := depBuilder.GetDeployment() if err != nil { log.Warning(err) return err } // d.Intent = "UPDATE" call := service.Deployments.Insert(Project, d) _, error := call.Do() Check(error) //TODO only set Vars if the varspath file actually exists dConfig := Deployment{ Id: Name, Project: Project, Config: configpath, Vars: varspath, } _, err = AppendDeployment(dConfig, true) if err != nil { log.Fatal(fmt.Sprintf("Config was deployed but there was an error writing the config file. You will not be able to use other `dm` commands, but the deployment will exist. Error was %s", err)) } fmt.Printf("Created deployment %s.\n", Name) return nil }
func main() { flag.StringVar(&flagDataDir, "datadir", ".lablog", "the path to the datadir to use as the source of files.") flag.StringVar(&flagOutDir, "outdir", "output", "the path to the output directroy in which the converted files will be saved.") flag.Parse() log.Debug("DataDir: ", flagDataDir) log.Debug("OutDir: ", flagOutDir) dbread := dbfiles.New() dbread.Structure = dbfiles.NewFlat() dbread.BaseDir = flagDataDir readKeys, err := dbread.Keys() if err != nil { log.Fatal(errgo.Notef(err, "can not get keys from datadir")) } store, err := store.NewFolderStore(flagOutDir) if err != nil { log.Fatal(errgo.Notef(err, "can not create new store")) } for _, key := range readKeys { log.Info("Converting key '", strings.Join(key, "."), "'") project, err := data.ParseProjectName(strings.Join(key, data.ProjectNameSepperator)) if err != nil { log.Warning(errgo.Notef(err, "can not convert key to project name")) continue } log.Debug("Project: ", project) values, err := dbread.Get(key...) if err != nil { log.Warning(errgo.Notef(err, "can not get values for key '"+strings.Join(key, ".")+"'")) continue } log.Debug("Values: ", values) err = convertValues(store, project, values) if err != nil { log.Warning(errgo.Notef(err, "can no convert values for key '"+strings.Join(key, ".")+"'")) continue } log.Info("Converted key '", strings.Join(key, "."), "'") } }
func New(cfgFile string) CinderDriver { conf, err := processConfig(cfgFile) isV3 := strings.Contains(conf.Endpoint, "v3") if err != nil { log.Fatal("Error processing cinder driver config file: ", err) } _, err = os.Lstat(conf.MountPoint) if os.IsNotExist(err) { if err := os.MkdirAll(conf.MountPoint, 0755); err != nil { log.Fatal("Failed to create Mount directory during driver init: %v", err) } } auth := gophercloud.AuthOptions{ IdentityEndpoint: conf.Endpoint, Username: conf.Username, Password: conf.Password, TenantID: conf.TenantID, AllowReauth: true, } if isV3 == true && conf.DomainName == "" { log.Warning("V3 endpoint specified, but DomainName not set!") log.Warning("Setting to \"Default\", maybe it'll work.") auth.DomainName = "Default" } if conf.DomainName != "" && isV3 == true { log.Info("Authorizing to a V3 Endpoint") auth.DomainName = conf.DomainName } providerClient, err := openstack.AuthenticatedClient(auth) if err != nil { log.Fatal("Error initiating gophercloud provider client: ", err) } client, err := openstack.NewBlockStorageV2(providerClient, gophercloud.EndpointOpts{Region: "RegionOne"}) if err != nil { log.Fatal("Error initiating gophercloud cinder client: ", err) } d := CinderDriver{ Conf: &conf, Mutex: &sync.Mutex{}, Client: client, } return d }
// handleConnection serves a single inbound SOCKS5 client connection:
// it performs the handshake, reads the request, dials the remote server,
// acknowledges the client, and then pipes data in both directions.
func handleConnection(conn net.Conn) {
	// closed guards against double-closing conn/remote: once piping has
	// started, PipeThenClose owns closing both ends.
	closed := false
	defer func() {
		if !closed {
			conn.Close()
		}
	}()

	var err error = nil
	if err = handShake(conn); err != nil {
		log.Warning("socks handshake:", err)
		return
	}
	// log.Debug("socks5 connection handshaked!")
	rawaddr, addr, err := getRequest(conn)
	if err != nil {
		log.Warning("error getting request:", err)
		return
	}
	// log.Debugf("socks5 connection get request: %v", addr)

	remote, err := createServerConn(rawaddr, addr)
	if err != nil {
		log.Debugf("error when create connection to server: %v\n", err)
		return
	}
	defer func() {
		if !closed {
			remote.Close()
		}
	}()

	// Sending connection established message immediately to client.
	// This some round trip time for creating socks connection with the client.
	// But if connection failed, the client will get connection reset error.
	// (Reply bytes: VER=5, REP=0 success, RSV, ATYP=IPv4, BND.ADDR=0.0.0.0,
	//  BND.PORT=0x0843.)
	_, err = conn.Write([]byte{0x05, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x08, 0x43})
	if err != nil {
		log.WithField("error", err).Debug("send connection confirmation error")
		return
	}

	log.WithField("addr", addr).Infof("Proxy connection to %v", addr)
	// log.Debugf("piping %s<->%s", conn.RemoteAddr(), remote.RemoteAddr())
	// Pipe both directions; the second call blocks until done, after
	// which both connections have been closed by PipeThenClose.
	go ss.PipeThenClose(conn, remote)
	ss.PipeThenClose(remote, conn)
	closed = true
	// log.Debug("closed connection to", addr)
}
func Before(c *cli.Context) error { var ( heartbeat time.Duration ttl time.Duration ) if adver := c.String("advertise"); adver == "" { return ErrRequireAdvertise } else { disc.Advertise = adver } if hbStr := c.String("heartbeat"); hbStr == "" { heartbeat = disc.DefaultHeartbeat } else { if hb, err := time.ParseDuration(hbStr); err != nil { log.Warning(err) heartbeat = disc.DefaultHeartbeat } else { heartbeat = hb } } if ttlStr := c.String("ttl"); ttlStr == "" { ttl = disc.DefaultTTL } else { if t, err := time.ParseDuration(ttlStr); err != nil { log.Warning(err) ttl = disc.DefaultTTL } else { ttl = t } } if disc.Discovery == "" { if pos := c.Args(); len(pos) != 1 { return ErrRequireDiscovery } else { disc.Discovery = pos[0] } } // register monitor instance disc.Register(heartbeat, ttl) log.WithFields(log.Fields{"advertise": disc.Advertise, "discovery": disc.Discovery, "heartbeat": heartbeat, "ttl": ttl}).Info("begin advertise") return nil }
// FindAndValidateIdentity ... // IMPORTANT: you have to C.CFRelease the returned items (one-by-one)!! // you can use the ReleaseIdentityWithRefList method to do that func FindAndValidateIdentity(identityLabel string, isFullLabelMatch bool) ([]IdentityWithRefModel, error) { foundIdentityRefs, err := FindIdentity(identityLabel, isFullLabelMatch) if err != nil { return nil, fmt.Errorf("Failed to find Identity, error: %s", err) } if len(foundIdentityRefs) < 1 { return nil, nil } // check validity validIdentityRefs := []IdentityWithRefModel{} for _, aIdentityRef := range foundIdentityRefs { cert, err := GetCertificateDataFromIdentityRef(aIdentityRef.KeychainRef) if err != nil { return validIdentityRefs, fmt.Errorf("Failed to read certificate data, error: %s", err) } if err := certutil.CheckCertificateValidity(cert); err != nil { log.Warning(colorstring.Yellowf("Certificate is not valid, skipping: %s", err)) continue } validIdentityRefs = append(validIdentityRefs, aIdentityRef) } return validIdentityRefs, nil }
func Init(persist bool, pub push.Pusher) { cli := GetDockerClient() // setup service state change publisher if v := reflect.ValueOf(pub); !v.IsValid() || v.IsNil() { log.Warning("not publishing service status") report = &push.NullPusher{} } else { report = pub } // Advertise host URI Advertise, _, _ = net.SplitHostPort(disc.Advertise) if persist { if r, err := libkv.Load(DEFAULT_SYNC_PATH); err != nil { log.WithFields(log.Fields{"err": err}).Warning("load failed") rec = libkv.NewStore() } else { rec = r } } else { rec = libkv.NewStore() } containers, err := cli.ListContainers(docker.ListContainersOptions{All: true}) if err != nil { log.WithFields(log.Fields{"err": err}).Warning(err) return } for _, c := range containers { NewContainerRecord(c.ID) } }
func LoadSettingsFromFile() { viper.SetConfigType(ConfigType) setConfigLocation() // Set defaults viper.SetDefault("Port", Port) viper.SetDefault("EnableAutomerge", EnableAutomerge) viper.SetDefault("Debug", Debug) viper.SetDefault("DefaultEnvironment", DefaultEnvironment) viper.SetDefault("DefaultTask", DefaultTask) viper.SetDefault("BoltdbName", BoltdbName) // Load configuration err := viper.ReadInConfig() if err != nil { log.Warning("Configuration not found, loading defaults") } // set log level if viper.GetBool("Debug") { log.SetLevel(log.DebugLevel) } else { log.SetLevel(log.InfoLevel) } log.Debugf("Loaded setting values: %v", viper.AllSettings()) log.Info("Settings loaded") }
// SetDataWithRetry persists data for this node in ZooKeeper, retrying
// with a fresh connection on failure until currentRetry exceeds retry.
//
// Existing nodes (stat != nil) are updated with a version-checked Set
// (optimistic concurrency via stat.Version); unseen nodes are created
// and then read back so node.data/node.stat reflect the stored state.
// After the retry budget is exhausted the error is escalated via
// log.Panic.
func (node *ZkNode) SetDataWithRetry(data []byte, currentRetry int, retry int) error {
	var err error
	log.Info("Persisting data")
	if node.stat != nil {
		// Refresh the stat first so Set uses the current znode version.
		node.data, node.stat, err = node.mgr.zkConn.Get(node.ns.GetZKPath())
		if err == nil {
			node.stat, err = node.mgr.zkConn.Set(node.ns.GetZKPath(), data, node.stat.Version)
		}
	} else {
		_, err = node.mgr.zkConn.Create(node.ns.GetZKPath(), data, 0, zk.WorldACL(zk.PermAll))
		if err == nil {
			node.data, node.stat, err = node.mgr.zkConn.Get(node.ns.GetZKPath())
		}
	}
	if err != nil && currentRetry >= retry {
		// Out of retries: Panic aborts; the return below only satisfies
		// the compiler and is never reached.
		log.Panic("Error persisting data: ", err)
		return err
	}
	if err != nil {
		// Transient failure: rebuild the connection and try again.
		log.Warning("Error persisting data, retrying: ", err)
		node.mgr.CreateConnection()
		return node.SetDataWithRetry(data, currentRetry+1, retry)
	}
	return nil
}
func checkSchema() { var count int defer writeConstants() db.DB.Model(&Constant{}).Where("schema_version = ?", schemaVersion.String()).Count(&count) if count == 1 { return } currStr := getCurrConstants().SchemaVersion if currStr == "" { db.DB.Save(&Constant{ schemaVersion.String(), }) //Initial database migration whitelist_id_string() //Write current schema version return } if v, _ := semver.Parse(currStr); v.Major < schemaVersion.Major { logrus.Warning("Incompatible schema change detected (", currStr, ") attempting to migrate to (", schemaVersion.String(), ")") for i := v.Major + 1; i <= schemaVersion.Major; i++ { logrus.Debug("Calling migration routine for ", schemaVersion.String()) f := migrationRoutines[i] f() } } }
func SetupStores() { var key []byte if config.Constants.CookieStoreSecret != "" { var err error key, err = base64.StdEncoding.DecodeString(string(config.Constants.CookieStoreSecret)) if err != nil { logrus.Fatal(err) } } else { logrus.Warning("Using an insecure cookie encryption key") } if SessionStore == nil { sessionStoreMutex.Lock() if len(key) == 0 { SessionStore = pgstore.NewPGStore(database.DBUrl.String(), []byte("secret")) } else { SessionStore = pgstore.NewPGStore(database.DBUrl.String(), key) } SessionStore.Options.HttpOnly = true sessionStoreMutex.Unlock() } }
// SaveGameState saves the current gamestate for a user. Does not delete old gamestates. func (db *GameStateDB) SaveGameState( tx *sqlx.Tx, userRow UserRow, gameState libgame.GameState) error { var binarizedState bytes.Buffer encoder := gob.NewEncoder(&binarizedState) encoder.Encode(gameState) dataStruct := GameStateRow{} dataStruct.UserID = userRow.ID dataStruct.BinarizedState = binarizedState.Bytes() dataMap := make(map[string]interface{}) dataMap["user_id"] = dataStruct.UserID dataMap["binarized_state"] = dataStruct.BinarizedState insertResult, err := db.InsertIntoTable(tx, dataMap) if err != nil { logrus.Warning("error saving game state:", err) return err } rowsAffected, err := insertResult.RowsAffected() if err != nil || rowsAffected != 1 { return errors.New( fmt.Sprintf("expected to change 1 row, changed %d", insertResult.RowsAffected)) } id, err := insertResult.LastInsertId() logrus.Infof("Saved new gamestate (id %d) to db", id) return nil }
func (b *BufferWriter) CloseNotify() <-chan bool { if cn, ok := b.W.(http.CloseNotifier); ok { return cn.CloseNotify() } log.Warning("Upstream ResponseWriter does not implement http.CloseNotifier. Returning dummy channel.") return make(<-chan bool) }
func (mgr *MetadataManager) makeNodeWithDataWithRetry(ns Namespace, data []byte, ephemeral bool, currentRetry int, retry int) (*ZkNode, error) { var flags int32 if ephemeral { flags = zk.FlagEphemeral } else { flags = 0 } var err error exists, _, err := mgr.zkConn.Exists(ns.GetZKPath()) if err == nil { log.Info("Making node") if exists { _, stat, err := mgr.zkConn.Get(ns.GetZKPath()) if err == nil { _, err = mgr.zkConn.Set(ns.GetZKPath(), data, stat.Version) } } else { _, err = mgr.zkConn.Create(ns.GetZKPath(), data, flags, zk.WorldACL(zk.PermAll)) } } if err != nil && currentRetry >= retry { log.Panic("Error persisting data: ", err) return nil, err } if err != nil { log.Warning("Error persisting data, retrying: ", err) mgr.CreateConnection() return mgr.makeNodeWithDataWithRetry(ns, data, ephemeral, currentRetry+1, retry) } return mgr.getNode(ns) }
// checkEvalLicense warns when any assigned license is an evaluation
// edition, since functionality may be lost when it expires.
func (v *Validator) checkEvalLicense(licenses []types.LicenseManagerLicenseInfo) {
	for _, license := range licenses {
		if license.EditionKey != "eval" {
			continue
		}
		log.Warning("Evaluation license detected. VIC may not function if evaluation expires or insufficient license is later assigned.")
	}
}
//SocketInit initializes the websocket connection for the provided socket func SocketInit(so *wsevent.Client) error { chelpers.AuthenticateSocket(so.ID, so.Request) loggedIn := chelpers.IsLoggedInSocket(so.ID) if loggedIn { steamid := chelpers.GetSteamId(so.ID) sessions.AddSocket(steamid, so) } if loggedIn { hooks.AfterConnect(socket.AuthServer, so) player, err := models.GetPlayerBySteamID(chelpers.GetSteamId(so.ID)) if err != nil { logrus.Warning( "User has a cookie with but a matching player record doesn't exist: %s", chelpers.GetSteamId(so.ID)) chelpers.DeauthenticateSocket(so.ID) hooks.AfterConnect(socket.UnauthServer, so) return ErrRecordNotFound } hooks.AfterConnectLoggedIn(so, player) } else { hooks.AfterConnect(socket.UnauthServer, so) so.EmitJSON(helpers.NewRequest("playerSettings", "{}")) so.EmitJSON(helpers.NewRequest("playerProfile", "{}")) } so.EmitJSON(helpers.NewRequest("socketInitialized", "{}")) return nil }
func BeforeApp(c *cli.Context) error { if c.GlobalBool("verbose") { logrus.SetLevel(logrus.DebugLevel) } logrus.Warning("Note: This is an experimental alternate implementation of the Compose CLI (https://github.com/docker/compose)") return nil }
func GetNetLoc(iden string) (netloc []docker.APIPort, grr error) { var retry int for { dkv, err := disc.New(Discovery, "instances") if err != nil { log.Fatal("unable to establish connection to discovery -- %v", err) } resp, err := dkv.Get(iden, false, false) if err != nil { if retry < FailedAttempts { log.Warning(err) time.Sleep(1 * time.Second) retry += 1 continue } else { grr = fmt.Errorf("unable to locate user -- %v", iden) break } } err = json.Unmarshal([]byte(resp.Node.Value), &netloc) if err != nil { grr = err return } break } return }
// Handles the actual incoming commands: func (d *Server) handleCommand(ctx context.Context, cmd *wire.Command, p *protocol.Protocol) { ctx, cancel := context.WithCancel(ctx) defer cancel() // Figure out which handler to call: handler, ok := handlerMap[cmd.CommandType] if !ok { log.Warningf("No handler for ID: %v", cmd.CommandType) return } resp, err := handler(d, ctx, cmd) // Empty response or error response: if resp == nil { resp = &wire.Response{} } resp.ResponseType = cmd.CommandType resp.Success = false if err != nil { resp.Error = err.Error() } else { resp.Success = true } // Send the response back to the client: if err := p.Send(resp); err != nil { log.Warning("Unable to send response back to client: ", err) } }
// DeleteChildrenWithRetry recursively walks the subtree under path and
// deletes the leaf znodes it finds, retrying a failed delete (by
// recursing with currentRetry+1) until the retry budget is exhausted,
// at which point it panics.
//
// NOTE(review): nodes that have children at call time are never deleted
// themselves after their children are removed — confirm whether interior
// nodes are meant to survive. The error from Children() is also ignored,
// so a failed listing is treated as a leaf.
func (mgr *MetadataManager) DeleteChildrenWithRetry(path string, currentRetry int, retry int) {
	children, _, _ := mgr.zkConn.Children(path)
	// Leaf
	if len(children) == 0 {
		fmt.Println("Deleting ", path)
		// -1 matches any version of the znode.
		err := mgr.zkConn.Delete(path, -1)
		if err != nil && currentRetry >= retry {
			log.Panic(err)
		}
		if err != nil {
			// Retry deleting this same leaf with an incremented counter.
			log.Warning(err)
			mgr.DeleteChildrenWithRetry(path, currentRetry+1, retry)
		} else {
			return
		}
	}
	// Branches
	for _, name := range children {
		mgr.DeleteChildrenWithRetry(path+"/"+name, currentRetry, retry)
	}
	return
}
func (db *GameStateDB) DeleteLatestGameState( tx *sqlx.Tx, userRow UserRow) error { // Get latest gamestate's ID var gameStateRow GameStateRow query := fmt.Sprintf( "SELECT * FROM %s WHERE user_id=$1 ORDER BY id DESC LIMIT 1", db.table) err := db.db.Get(&gameStateRow, query, userRow.ID) if err != nil { return fmt.Errorf("Error getting latest gamestate: %v", err) } logrus.Infof("Deleting latest gamestate (id %d) from db", gameStateRow.ID) // Delete the gamestate queryWhereStatement := fmt.Sprintf("id=%d", gameStateRow.ID) res, err := db.DeleteFromTable(tx, queryWhereStatement) if err != nil { logrus.Warning("Error deleting last game state: ", err) return err } rowsAffected, err := res.RowsAffected() if err != nil || rowsAffected != 1 { return errors.New( fmt.Sprintf("expected to change 1 row, changed %d", res.RowsAffected)) } return nil }
func ReadConfigfileServe(path string, configCh chan BgpConfigSet, reloadCh chan bool) { cnt := 0 for { <-reloadCh b := Bgp{} p := RoutingPolicy{} md, err := toml.DecodeFile(path, &b) if err == nil { err = SetDefaultConfigValues(md, &b) if err == nil { _, err = toml.DecodeFile(path, &p) } } if err != nil { if cnt == 0 { log.Fatal("can't read config file ", path, ", ", err) } else { log.Warning("can't read config file ", path, ", ", err) continue } } if cnt == 0 { log.Info("finished reading the config file") } cnt++ bgpConfig := BgpConfigSet{Bgp: b, Policy: p} configCh <- bgpConfig } }