// DropletBackups returns a list of backup images for a droplet.
func DropletBackups(c *cli.Context) {
	client := NewClient(c, DefaultConfig)
	id := c.Int(ArgDropletID)
	opts := LoadOpts(c)

	f := func(opt *godo.ListOptions) ([]interface{}, *godo.Response, error) {
		list, resp, err := client.Droplets.Backups(id, opt)
		if err != nil {
			return nil, nil, err
		}

		si := make([]interface{}, len(list))
		for i := range list {
			si[i] = list[i]
		}

		return si, resp, err
	}

	si, err := PaginateResp(f, opts)
	if err != nil {
		logrus.WithField("err", err).Fatal("could not list backups for droplet")
	}

	list := make([]godo.Image, len(si))
	for i := range si {
		list[i] = si[i].(godo.Image)
	}

	err = displayOutput(c, list)
	if err != nil {
		logrus.WithField("err", err).Fatal("could not write output")
	}
}
// NewDriver creates a new MACVLAN Driver
func NewDriver(version string, ctx *cli.Context) (*Driver, error) {
	docker, err := dockerclient.NewDockerClient("unix:///var/run/docker.sock", nil)
	if err != nil {
		return nil, fmt.Errorf("could not connect to docker: %s", err)
	}

	// lower bound of the IPv4 MTU is 68 bytes per RFC 791
	if ctx.Int("mtu") <= 0 {
		cliMTU = defaultMTU
	} else if ctx.Int("mtu") >= minMTU {
		cliMTU = ctx.Int("mtu")
	} else {
		log.Fatalf("The MTU value passed [ %d ] must be at least [ %d ] bytes per RFC 791", ctx.Int("mtu"), minMTU)
	}

	// default to bridge mode if no mode was specified
	if ctx.String("mode") == "" {
		macvlanMode = bridgeMode
	}
	switch ctx.String("mode") {
	case bridgeMode:
		macvlanMode = bridgeMode
		// todo: handle other macvlan modes (private, vepa, passthru) if relevant
	}

	d := &Driver{
		networks: networkTable{},
		dockerer: dockerer{
			client: docker,
		},
	}
	return d, nil
}
// server starts the apidemic HTTP server on the configured port.
func server(ctx *cli.Context) {
	port := ctx.Int("port")
	s := apidemic.NewServer()
	log.Println("starting server on port :", port)
	log.Fatal(http.ListenAndServe(fmt.Sprintf(":%d", port), s))
}
// FromCLIContext creates a Config using a cli.Context by pulling configuration
// from the flags in the context.
func FromCLIContext(c *cli.Context) *Config {
	cfg := &Config{}
	cfgVal := reflect.ValueOf(cfg).Elem()

	for _, def := range defs {
		if !def.HasField {
			continue
		}

		field := cfgVal.FieldByName(def.FieldName)

		switch def.Flag.(type) {
		case *cli.BoolFlag:
			field.SetBool(c.Bool(def.Name))
		case *cli.DurationFlag:
			field.Set(reflect.ValueOf(c.Duration(def.Name)))
		case *cli.IntFlag:
			field.SetInt(int64(c.Int(def.Name)))
		case *cli.StringFlag:
			field.SetString(c.String(def.Name))
		}
	}

	cfg.ProviderConfig = ProviderConfigFromEnviron(cfg.ProviderName)

	return cfg
}
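// The snippet below is a minimal, self-contained sketch (names such as
// demoConfig and setField are hypothetical, not part of the code above) of
// the reflection pattern FromCLIContext relies on: look a field up by name
// on a pointed-to struct and use the kind-specific reflect setter.
package main

import (
	"fmt"
	"reflect"
)

type demoConfig struct {
	Debug bool
	Count int
	Name  string
}

// setField assigns v to the named field; cfg must be a pointer to a struct
// so that Elem() yields an addressable, settable value.
func setField(cfg interface{}, field string, v interface{}) {
	f := reflect.ValueOf(cfg).Elem().FieldByName(field)
	switch val := v.(type) {
	case bool:
		f.SetBool(val)
	case int:
		f.SetInt(int64(val))
	case string:
		f.SetString(val)
	}
}

func main() {
	cfg := &demoConfig{}
	setField(cfg, "Debug", true)
	setField(cfg, "Count", 3)
	setField(cfg, "Name", "worker")
	fmt.Printf("%+v\n", *cfg) // prints {Debug:true Count:3 Name:worker}
}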
// health fires the requested number of concurrent GETs at the /health
// endpoint and counts the 200 responses.
func health(c *cli.Context) {
	endpoint := c.String("e")
	requests := c.Int("r")
	done := make(chan bool)
	var healthy uint64

	for i := 0; i < requests; i++ {
		go func() {
			resp, err := http.Get("http://" + endpoint + "/health")
			if err != nil {
				fmt.Printf("ERROR: %v\n", err)
			} else {
				fmt.Printf("Response: %v\n", resp.Status)
				if resp.StatusCode == 200 {
					atomic.AddUint64(&healthy, 1)
					runtime.Gosched()
				}
			}
			done <- true
		}()
	}

	// Wait for every request goroutine to report back.
	for i := 0; i < requests; i++ {
		<-done
	}

	fmt.Printf("Healthy Requests: %v\n", atomic.LoadUint64(&healthy))
}
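// A hedged alternative sketch to the done-channel pattern in health above:
// sync.WaitGroup expresses "wait for N goroutines" directly, while the
// atomic counter still aggregates results safely. The endpoint and request
// count are illustrative values, not taken from the original.
package main

import (
	"fmt"
	"net/http"
	"sync"
	"sync/atomic"
)

func main() {
	const requests = 4
	endpoint := "localhost:8080" // hypothetical endpoint

	var healthy uint64
	var wg sync.WaitGroup
	wg.Add(requests)
	for i := 0; i < requests; i++ {
		go func() {
			defer wg.Done()
			resp, err := http.Get("http://" + endpoint + "/health")
			if err != nil {
				fmt.Printf("ERROR: %v\n", err)
				return
			}
			defer resp.Body.Close()
			if resp.StatusCode == http.StatusOK {
				atomic.AddUint64(&healthy, 1)
			}
		}()
	}
	wg.Wait() // blocks until every goroutine has called Done
	fmt.Printf("Healthy Requests: %v\n", atomic.LoadUint64(&healthy))
}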
func createNetwork(ctx *cli.Context) {
	argCheck(1, ctx)

	subnet := ctx.String("subnet")
	gateway := ctx.String("gateway")
	if subnet == "" || gateway == "" {
		errExit(ctx, exitHelp, "Invalid Arguments", true)
	}

	tenant := ctx.String("tenant")
	network := ctx.Args()[0]
	encap := ctx.String("encap")
	pktTag := ctx.Int("pkt-tag")

	url := fmt.Sprintf("%s%s:%s/", networkURL(ctx), tenant, network)

	out := map[string]interface{}{
		"tenantName":  tenant,
		"networkName": network,
		"encap":       encap,
		"pktTag":      pktTag,
		"subnet":      subnet,
		"gateway":     gateway,
	}
	postMap(ctx, url, out)
}
func getMetric(ctx *cli.Context) error {
	if !ctx.IsSet("metric-namespace") {
		return newUsageError("namespace is required\n\n", ctx)
	}
	ns := ctx.String("metric-namespace")
	ver := ctx.Int("metric-version")

	metric := pClient.GetMetric(ns, ver)
	switch mtype := metric.(type) {
	case []*client.GetMetricResult:
		// Multiple metrics: print each one, remembering the last error.
		var merr error
		for i, m := range mtype {
			if err := printMetric(m, i); err != nil {
				merr = err
			}
		}
		if merr != nil {
			return merr
		}
	case *client.GetMetricResult:
		// Single metric
		if err := printMetric(mtype, 0); err != nil {
			return err
		}
	default:
		return fmt.Errorf("unexpected response type %T", mtype)
	}
	return nil
}
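// A small standalone sketch of the type-switch binding used in getMetric
// above: the variable declared in `switch v := x.(type)` already has the
// concrete type within each case, so no second type assertion is needed.
// describe and its inputs are illustrative.
package main

import "fmt"

func describe(x interface{}) {
	switch v := x.(type) {
	case []int:
		fmt.Println("slice of", len(v), "ints") // v is []int here
	case int:
		fmt.Println("single int:", v) // v is int here
	default:
		fmt.Printf("unexpected type %T\n", v)
	}
}

func main() {
	describe([]int{1, 2, 3})
	describe(7)
	describe("oops")
}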
// Populate updates the specified project context based on command line
// arguments and subcommands.
func Populate(context *project.Context, c *cli.Context) {
	context.ComposeFiles = c.GlobalStringSlice("file")

	if len(context.ComposeFiles) == 0 {
		context.ComposeFiles = []string{"docker-compose.yml"}
		if _, err := os.Stat("docker-compose.override.yml"); err == nil {
			context.ComposeFiles = append(context.ComposeFiles, "docker-compose.override.yml")
		}
	}

	context.ProjectName = c.GlobalString("project-name")

	switch c.Command.Name {
	case "logs":
		context.Log = true
		context.FollowLog = c.Bool("follow")
	case "up", "create":
		context.Log = !c.Bool("d")
		context.NoRecreate = c.Bool("no-recreate")
		context.ForceRecreate = c.Bool("force-recreate")
		context.NoBuild = c.Bool("no-build")
	case "stop", "restart", "scale":
		context.Timeout = uint(c.Int("timeout"))
	case "kill":
		context.Signal = c.String("signal")
	case "rm":
		context.Volume = c.Bool("v")
	case "build":
		context.NoCache = c.Bool("no-cache")
	}
}
func GetAction(c *cli.Context) {
	if len(c.Args()) != 2 {
		log.Fatal("get s3path localpath")
	}

	s3path, err := NewS3Path(c.Args().Get(0))
	if err != nil {
		log.Fatal(err)
	}

	localpath := c.Args().Get(1)
	if localpath == "" {
		log.Fatal("get s3path localpath")
	}

	log.Printf("s3path Bucket:%v Prefix:%v", s3path.Bucket, s3path.Prefix)

	parallel := c.Int("parallel")
	manager := s3manager.NewDownloader(nil)
	d := NewDownloader(s3path, localpath, parallel, manager)

	client := s3.New(nil)
	params := &s3.ListObjectsInput{Bucket: &s3path.Bucket, Prefix: &s3path.Prefix}
	err = client.ListObjectsPages(params, d.eachPage)
	if err != nil {
		log.Fatal(err)
	}

	d.Wait()
}
func ListAction(c *cli.Context) {
	// Figure out input
	if len(c.Args()) < 1 {
		cli.ShowCommandHelp(c, c.Command.Name)
		return
	}
	jsonFile := c.Args()[0]
	data := FromJSON(GetFileContent(jsonFile))

	count := c.Int("count")
	threads := data.Threads
	if count > len(threads) {
		count = len(threads)
	}

	// Sort by message count
	sort.Sort(ByMessage(threads))

	// Reverse (top = more)
	for i, j := 0, len(threads)-1; i < j; i, j = i+1, j-1 {
		threads[i], threads[j] = threads[j], threads[i]
	}

	threads = threads[:count]
	for _, thread := range threads {
		fmt.Println(strings.Join(DeleteElementFromSlice(thread.Persons, data.WhoAmI), ", ")+" > Messages:",
			thread.CountMessages(), "Words:", thread.CountWords())
	}
}
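// A hedged sketch of an alternative to the "sort ascending, then reverse in
// place" approach in ListAction above: wrapping the sort.Interface in
// sort.Reverse sorts descending in one pass. The thread and byMessage types
// are stand-ins, not the original ByMessage implementation.
package main

import (
	"fmt"
	"sort"
)

type thread struct{ messages int }

type byMessage []thread

func (b byMessage) Len() int           { return len(b) }
func (b byMessage) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
func (b byMessage) Less(i, j int) bool { return b[i].messages < b[j].messages }

func main() {
	threads := []thread{{3}, {9}, {1}}
	sort.Sort(sort.Reverse(byMessage(threads))) // top = most messages
	fmt.Println(threads)                        // prints [{9} {3} {1}]
}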
func RunServer(c *cli.Context) *Server {
	s := new(Server)
	s.Host = c.String("bind-addr")
	s.Port = c.Int("port")
	s.Run()
	return s
}
// ServerFromCli uses the cli arguments to configure a server instance
func ServerFromCli(c *cli.Context) *gouncer.Server {
	CheckSSL(c)

	// Initialize configuration components from cli
	core := &gouncer.Core{c.String("hostname"), ":" + c.String("port"), c.Bool("jsonp"), c.String("log")}
	ssl := &gouncer.Ssl{c.String("certificate"), c.String("key")}
	backend := &gouncer.Backend{
		Couchdb:  c.String("couchdb"),
		Userdb:   c.String("userdb"),
		Groupdb:  c.String("groupdb"),
		Memcache: c.StringSlice("memcache"),
		Smtp:     c.String("smtp"),
	}
	token := &gouncer.Token{c.String("algorithm"), int32(c.Int("expiration"))}

	// Create configuration
	cfg := &gouncer.Config{
		Core:    core,
		Ssl:     ssl,
		Backend: backend,
		Token:   token,
	}

	return gouncer.NewServer(cfg)
}
// removeContainers deletes containers that are no longer running
func removeContainers(c *cli.Context) {
	dry := c.Bool("dry")
	exited := c.Bool("exited")
	hours := c.Int("hours")

	if !exited {
		cli.ShowCommandHelp(c, "rm")
		fmt.Println("EXAMPLE:")
		fmt.Println(" command rm --exited")
		return
	}
	if hours <= 0 {
		hours = 24
	}

	ctx := getUtilContext()
	if ctx == nil {
		return
	}

	// the early return above guarantees exited is true here
	ctx.DeleteExitedContainers(dry, hours)
}
// ParseParam validates the required CLI flags. Note: log.Fatalf exits the
// process, so any return statement after it is unreachable; per the original
// author's note, the checks below return errors instead. Flags with preset
// defaults never compare equal to the zero value when omitted, because the
// default is applied first.
func ParseParam(ctx *cli.Context) error {
	if ctx.String("host-interface") == "" {
		return errors.New("required flag [ host-interface ] is missing")
	}
	CliIF = ctx.String("host-interface")

	if ctx.String("ip") == "" || ctx.String("gateway") == "" || ctx.String("container-name") == "" {
		return errors.New("required flag [ ip or gateway or container-name ] is missing")
	}
	CliIP = ctx.String("ip")
	CligwIP = ctx.String("gateway")
	CliCName = ctx.String("container-name")

	if ctx.Int("mtu") <= 0 {
		// no MTU passed: keep the preset default in CliMTU
	} else if ctx.Int("mtu") >= minMTU {
		CliMTU = ctx.Int("mtu")
	} else {
		return fmt.Errorf("the MTU value passed [ %d ] must be at least [ %d ] bytes per RFC 791", ctx.Int("mtu"), minMTU)
	}

	return nil
}
func cmdRackScale(c *cli.Context) {
	count := 0
	typ := ""

	if c.IsSet("count") {
		count = c.Int("count")
	}
	if c.IsSet("type") {
		typ = c.String("type")
	}

	system, err := rackClient(c).ScaleSystem(count, typ)
	if err != nil {
		stdcli.Error(err)
		return
	}

	fmt.Printf("Name    %s\n", system.Name)
	fmt.Printf("Status  %s\n", system.Status)
	fmt.Printf("Version %s\n", system.Version)
	fmt.Printf("Count   %d\n", system.Count)
	fmt.Printf("Type    %s\n", system.Type)
}
func (cmd *Scale) Run(c *cli.Context) {
	currentApp := cmd.appReq.GetApplication()

	cmd.ui.Say("Scaling app %s in org %s / space %s as %s...",
		terminal.EntityNameColor(currentApp.Name),
		terminal.EntityNameColor(cmd.config.OrganizationFields.Name),
		terminal.EntityNameColor(cmd.config.SpaceFields.Name),
		terminal.EntityNameColor(cmd.config.Username()),
	)

	params := cf.NewEmptyAppParams()

	if c.String("m") != "" {
		memory, err := formatters.ToMegabytes(c.String("m"))
		if err != nil {
			cmd.ui.Say("Invalid value for memory")
			cmd.ui.FailWithUsage(c, "scale")
			return
		}
		params.Set("memory", memory)
	}

	if c.Int("i") != -1 {
		params.Set("instances", c.Int("i"))
	}

	_, apiResponse := cmd.appRepo.Update(currentApp.Guid, params)
	if apiResponse.IsNotSuccessful() {
		cmd.ui.Failed(apiResponse.Message)
		return
	}

	cmd.ui.Ok()
	cmd.ui.Say("")
}
func (cmd CreateQuota) Run(context *cli.Context) {
	name := context.Args()[0]

	cmd.ui.Say(T("Creating quota {{.QuotaName}} as {{.Username}}...", map[string]interface{}{
		"QuotaName": terminal.EntityNameColor(name),
		"Username":  terminal.EntityNameColor(cmd.config.Username()),
	}))

	quota := models.QuotaFields{
		Name: name,
	}

	memoryLimit := context.String("m")
	if memoryLimit != "" {
		parsedMemory, err := formatters.ToMegabytes(memoryLimit)
		if err != nil {
			cmd.ui.Failed(T("Invalid memory limit: {{.MemoryLimit}}\n{{.Err}}",
				map[string]interface{}{"MemoryLimit": memoryLimit, "Err": err}))
		}
		quota.MemoryLimit = parsedMemory
	}

	instanceMemoryLimit := context.String("i")
	if instanceMemoryLimit == "-1" || instanceMemoryLimit == "" {
		quota.InstanceMemoryLimit = -1
	} else {
		parsedMemory, err := formatters.ToMegabytes(instanceMemoryLimit)
		if err != nil {
			cmd.ui.Failed(T("Invalid instance memory limit: {{.MemoryLimit}}\n{{.Err}}",
				map[string]interface{}{"MemoryLimit": instanceMemoryLimit, "Err": err}))
		}
		quota.InstanceMemoryLimit = parsedMemory
	}

	if context.IsSet("r") {
		quota.RoutesLimit = context.Int("r")
	}

	if context.IsSet("s") {
		quota.ServicesLimit = context.Int("s")
	}

	if context.IsSet("allow-paid-service-plans") {
		quota.NonBasicServicesAllowed = true
	}

	err := cmd.quotaRepo.Create(quota)

	httpErr, ok := err.(errors.HttpError)
	if ok && httpErr.ErrorCode() == errors.QUOTA_EXISTS {
		cmd.ui.Ok()
		cmd.ui.Warn(T("Quota Definition {{.QuotaName}} already exists", map[string]interface{}{"QuotaName": quota.Name}))
		return
	}

	if err != nil {
		cmd.ui.Failed(err.Error())
	}

	cmd.ui.Ok()
}
func renew(c *cli.Context) {
	conf, _, client := setup(c)

	if len(c.GlobalStringSlice("domains")) <= 0 {
		logger().Fatal("Please specify at least one domain.")
	}

	domain := c.GlobalStringSlice("domains")[0]

	// Load the cert resource from files. We store the certificate, private
	// key and metadata in separate files because web servers cannot work
	// with a combined file.
	certPath := path.Join(conf.CertPath(), domain+".crt")
	privPath := path.Join(conf.CertPath(), domain+".key")
	metaPath := path.Join(conf.CertPath(), domain+".json")

	certBytes, err := ioutil.ReadFile(certPath)
	if err != nil {
		logger().Fatalf("Error while loading the certificate for domain %s\n\t%s", domain, err.Error())
	}

	if c.IsSet("days") {
		expTime, err := acme.GetPEMCertExpiration(certBytes)
		if err != nil {
			logger().Printf("Could not get certificate expiration for domain %s", domain)
		}

		// Skip renewal when more than the requested number of days remain.
		if int(expTime.Sub(time.Now()).Hours()/24.0) > c.Int("days") {
			return
		}
	}

	metaBytes, err := ioutil.ReadFile(metaPath)
	if err != nil {
		logger().Fatalf("Error while loading the meta data for domain %s\n\t%s", domain, err.Error())
	}

	var certRes acme.CertificateResource
	err = json.Unmarshal(metaBytes, &certRes)
	if err != nil {
		logger().Fatalf("Error while unmarshalling the meta data for domain %s\n\t%s", domain, err.Error())
	}

	if c.Bool("reuse-key") {
		keyBytes, err := ioutil.ReadFile(privPath)
		if err != nil {
			logger().Fatalf("Error while loading the private key for domain %s\n\t%s", domain, err.Error())
		}
		certRes.PrivateKey = keyBytes
	}

	certRes.Certificate = certBytes

	newCert, err := client.RenewCertificate(certRes, true)
	if err != nil {
		logger().Fatalf("%s", err.Error())
	}

	saveCertRes(newCert, conf)
}
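// A minimal sketch of the expiry-window check used in renew above: convert
// the time remaining on a certificate into whole days and compare it with
// the --days threshold. expTime and days are illustrative values.
package main

import (
	"fmt"
	"time"
)

func main() {
	expTime := time.Now().Add(45 * 24 * time.Hour) // hypothetical NotAfter
	days := 30                                     // renew only when fewer days than this remain

	remaining := int(time.Until(expTime).Hours() / 24)
	if remaining > days {
		fmt.Printf("certificate still valid for %d days; skipping renewal\n", remaining)
		return
	}
	fmt.Printf("only %d days left; renewing\n", remaining)
}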
func addRule(ctx *cli.Context) {
	argCheck(2, ctx)

	args := map[string]interface{}{
		"tenantName":    ctx.String("tenant"),
		"policyName":    ctx.Args()[0],
		"ruleId":        ctx.Args()[1],
		"priority":      ctx.Int("priority"),
		"direction":     ctx.String("direction"),
		"endpointGroup": ctx.String("epg"),
		"network":       ctx.String("network"),
		"ipAddress":     ctx.String("ip-address"),
		"protocol":      ctx.String("protocol"),
		"port":          ctx.Int("port"),
		"action":        ctx.String("action"),
	}

	url := fmt.Sprintf(
		"%s%s:%s:%s/",
		ruleURL(ctx),
		args["tenantName"].(string),
		args["policyName"].(string),
		args["ruleId"].(string),
	)

	postMap(ctx, url, args)
}
func dnsCreate(c *cli.Context) {
	if err := checkEnv(); err != nil {
		fmt.Println(err)
		return
	}
	if err := checkFlags(c, "zone", "name", "type", "content"); err != nil {
		return
	}
	zone := c.String("zone")
	name := c.String("name")
	rtype := c.String("type")
	content := c.String("content")
	ttl := c.Int("ttl")
	proxy := c.Bool("proxy")

	zoneID, err := api.ZoneIDByName(zone)
	if err != nil {
		fmt.Println(err)
		return
	}

	record := cloudflare.DNSRecord{
		Name:    name,
		Type:    strings.ToUpper(rtype),
		Content: content,
		TTL:     ttl,
		Proxied: proxy,
	}

	// TODO: Print the result.
	_, err = api.CreateDNSRecord(zoneID, record)
	if err != nil {
		fmt.Println("Error creating DNS record:", err)
	}
}
func ProjectPort(p *project.Project, c *cli.Context) {
	if len(c.Args()) != 2 {
		logrus.Fatalf("Please pass arguments in the form: SERVICE PORT")
	}

	index := c.Int("index")
	protocol := c.String("protocol")

	service, err := p.CreateService(c.Args()[0])
	if err != nil {
		logrus.Fatal(err)
	}

	containers, err := service.Containers()
	if err != nil {
		logrus.Fatal(err)
	}

	if index < 1 || index > len(containers) {
		logrus.Fatalf("Invalid index %d", index)
	}

	output, err := containers[index-1].Port(fmt.Sprintf("%s/%s", c.Args()[1], protocol))
	if err != nil {
		logrus.Fatal(err)
	}

	fmt.Println(output)
}
func dnsUpdate(c *cli.Context) {
	if err := checkEnv(); err != nil {
		fmt.Println(err)
		return
	}
	if err := checkFlags(c, "zone", "id"); err != nil {
		return
	}
	zone := c.String("zone")
	recordID := c.String("id")
	content := c.String("content")
	ttl := c.Int("ttl")
	proxy := c.Bool("proxy")

	zoneID, err := api.ZoneIDByName(zone)
	if err != nil {
		fmt.Println(err)
		return
	}

	record := cloudflare.DNSRecord{
		ID:      recordID,
		Content: content,
		TTL:     ttl,
		Proxied: proxy,
	}

	err = api.UpdateDNSRecord(zoneID, recordID, record)
	if err != nil {
		fmt.Println("Error updating DNS record:", err)
	}
}
func kill(c *cli.Context) {
	endpoint := c.String("e")
	requests := c.Int("r")
	done := make(chan bool)

	for i := 0; i < requests; i++ {
		fmt.Printf("Killing instance %v...\n", i)

		// Use goroutines to kill instances in parallel!
		go func() {
			resp, err := http.Get("http://" + endpoint + "/killSwitch")
			if err != nil {
				fmt.Printf("ERROR: %v\n", err)
			} else {
				fmt.Printf("Response: %v\n", resp.Status)
			}
			done <- true
		}()
	}

	// Allow all goroutines to finish executing.
	for i := 0; i < requests; i++ {
		<-done
	}
}
func unloadPlugin(ctx *cli.Context) {
	pType := ctx.String("plugin-type")
	pName := ctx.String("plugin-name")
	pVer := ctx.Int("plugin-version")

	if pName == "" {
		fmt.Println("Must provide plugin name")
		cli.ShowCommandHelp(ctx, ctx.Command.Name)
		os.Exit(1)
	}
	if pVer < 1 {
		fmt.Println("Must provide plugin version")
		cli.ShowCommandHelp(ctx, ctx.Command.Name)
		os.Exit(1)
	}

	r := pClient.UnloadPlugin(pType, pName, pVer)
	if r.Err != nil {
		fmt.Printf("Error unloading plugin:\n%v\n", r.Err.Error())
		os.Exit(1)
	}

	fmt.Println("Plugin unloaded")
	fmt.Printf("Name: %s\n", r.Name)
	fmt.Printf("Version: %d\n", r.Version)
	fmt.Printf("Type: %s\n", r.Type)
}
// createLocalConfig creates a config object that supports local mode.
func createLocalConfig(context *cli.Context) *lib.Config {
	return &lib.Config{
		NativeJobQueueSize:               uint(context.Int("native-job-queue-size")),
		NativePrinterPollInterval:        context.String("native-printer-poll-interval"),
		PrefixJobIDToJobTitle:            context.Bool("prefix-job-id-to-job-title"),
		DisplayNamePrefix:                context.String("display-name-prefix"),
		SNMPEnable:                       context.Bool("snmp-enable"),
		SNMPCommunity:                    context.String("snmp-community"),
		SNMPMaxConnections:               uint(context.Int("snmp-max-connections")),
		PrinterBlacklist:                 lib.DefaultConfig.PrinterBlacklist,
		LocalPrintingEnable:              true,
		CloudPrintingEnable:              false,
		LogLevel:                         context.String("log-level"),
		LogFileName:                      context.String("log-file-name"),
		LogFileMaxMegabytes:              uint(context.Int("log-file-max-megabytes")),
		LogMaxFiles:                      uint(context.Int("log-max-files")),
		LogToJournal:                     context.Bool("log-to-journal"),
		MonitorSocketFilename:            context.String("monitor-socket-filename"),
		CUPSMaxConnections:               uint(context.Int("cups-max-connections")),
		CUPSConnectTimeout:               context.String("cups-connect-timeout"),
		CUPSPrinterAttributes:            lib.DefaultConfig.CUPSPrinterAttributes,
		CUPSJobFullUsername:              context.Bool("cups-job-full-username"),
		CUPSIgnoreRawPrinters:            context.Bool("cups-ignore-raw-printers"),
		CUPSIgnoreClassPrinters:          context.Bool("cups-ignore-class-printers"),
		CUPSCopyPrinterInfoToDisplayName: context.Bool("cups-copy-printer-info-to-display-name"),
	}
}
func doVolumeCreate(c *cli.Context) error {
	var err error

	name := c.Args().First()
	size, err := getSize(c, err)
	driverName, err := util.GetFlag(c, "driver", false, err)
	backupURL, err := util.GetFlag(c, "backup", false, err)
	if err != nil {
		return err
	}

	driverVolumeID := c.String("id")
	volumeType := c.String("type")
	iops := c.Int("iops")
	prepareForVM := c.Bool("vm")

	request := &api.VolumeCreateRequest{
		Name:           name,
		DriverName:     driverName,
		Size:           size,
		BackupURL:      backupURL,
		DriverVolumeID: driverVolumeID,
		Type:           volumeType,
		IOPS:           int64(iops),
		PrepareForVM:   prepareForVM,
		Verbose:        c.GlobalBool(verboseFlag),
	}

	url := "/volumes/create"

	return sendRequestAndPrint("POST", url, request)
}
func doLbCreate(c *cli.Context) {
	if len(c.Args()) == 0 {
		displayWrongNumOfArgsAndExit(c)
	}
	lbname := c.Args().Get(0)

	path := "/load-balancer"
	if c.Int("subscription-id") > 0 {
		path += "/" + strconv.Itoa(c.Int("subscription-id"))
	}
	path += "/create/" + lbname

	resp, err := client.SendRequest("POST", path, nil)
	assert(err)
	if resp.StatusCode >= 400 {
		displayErrorAndExit(string(resp.Body))
	}

	pwd := lib.PasswordResponse{}
	assert(xml.Unmarshal(resp.Body, &pwd))

	outputResult(c, pwd, func(format string) {
		lib.PrintXMLStruct(pwd)
	})
}
// Run initializes the driver
func Run(ctx *cli.Context) {
	// validate the flag itself: once pluginPath has been prepended, the
	// concatenated string can never be empty
	if ctx.String("repo") == "" {
		log.Fatalf("Exiting: A git URL is required to be used as a shared datastore for the endpoints")
	}
	gitRepoURL := fmt.Sprint(pluginPath, ctx.String("repo"))
	log.Debugf("The plugin repo is [ %s ]", gitRepoURL)

	gitPollInterval := ctx.Int("poll-interval")
	if gitPollInterval == 0 {
		gitPollInterval = control.DefaultInterval
	}
	log.Debugf("The git polling interval is [ %d ]", gitPollInterval)

	var d ipvlan.Driver
	var err error
	if d, err = ipvlan.New(version, ctx); err != nil {
		log.Fatalf("unable to create driver: %s", err)
	}

	// concatenate the absolute path to the spec file handle
	absSocket := fmt.Sprint(pluginPath, ctx.String("socket"))
	log.Info("IPVlan network driver initialized successfully")
	log.Debugf("The plugin absolute path and handle is [ %s ]", absSocket)

	if err := d.Listen(absSocket); err != nil {
		log.Fatal(err)
	}
}
func newCassandraDriver(ctx *cli.Context) (drivers.Driver, error) {
	urls := strings.Split(ctx.String("cassandra-hosts"), ",")

	cluster := gocql.NewCluster()
	cluster.Hosts = urls
	cluster.Port = ctx.Int("cassandra-port")
	cluster.Keyspace = ctx.String("cassandra-keyspace")
	cluster.Consistency = gocql.Quorum

	session, err := cluster.CreateSession()
	if err != nil {
		log.Errorf("cassandra.go: Error while creating cassandra cluster: hosts:%v err:%v", cluster.Hosts, err)
		return nil, err
	}
	log.Infoln("cassandra.go: connected to cassandra cluster")

	d := &CassandraDriver{
		Session: session,
		Cluster: cluster,
	}
	return d, nil
}
func cmdAdd(c *cli.Context) {
	utils.FlagsRequired(c, []string{"cidr", "minPort", "maxPort", "ipProtocol"})

	// The API accepts only one rule at a time.
	newRule := &Rule{
		c.String("ipProtocol"),
		c.String("cidr"),
		c.Int("minPort"),
		c.Int("maxPort"),
	}

	policy := get()
	exists := check(policy, *newRule)
	if !exists {
		policy.Rules = append(policy.Rules, *newRule)

		// use distinct names so the webservice and json packages stay visible
		svc, err := webservice.NewWebService()
		utils.CheckError(err)

		nRule := make(map[string]Rule)
		nRule["rule"] = *newRule
		body, err := json.Marshal(nRule)
		utils.CheckError(err)

		err, res, code := svc.Post(fmt.Sprintf("%s/rules", endpoint), body)
		if res == nil {
			log.Fatal(err)
		}
		utils.CheckError(err)
		utils.CheckReturnCode(code, res)
	}
}