func parseLexRangeItem(buf []byte) ([]byte, bool, error) {
	if len(buf) == 0 {
		return nil, false, errors.Errorf("empty lex range item")
	}
	ex := false
	var dest []byte
	switch buf[0] {
	case '+':
		if len(buf) > 1 {
			return nil, false, errors.Errorf("invalid lex range item, only + allowed, but %s", buf)
		}
		dest = maxString
	case '-':
		if len(buf) > 1 {
			return nil, false, errors.Errorf("invalid lex range item, only - allowed, but %s", buf)
		}
		dest = minString
	case '(', '[':
		dest = buf[1:]
		if len(dest) == 0 {
			return nil, false, errors.Errorf("invalid empty lex range item %s", buf)
		}
		ex = buf[0] == '('
	default:
		return nil, false, errors.Errorf("invalid lex range item at first byte, %s", buf)
	}
	return dest, ex, nil
}
func (h *EventHeader) Decode(data []byte) error {
	if len(data) < EventHeaderSize {
		return errors.Errorf("header size too short %d, must be at least %d", len(data), EventHeaderSize)
	}

	pos := 0

	h.Timestamp = binary.LittleEndian.Uint32(data[pos:])
	pos += 4

	h.EventType = EventType(data[pos])
	pos++

	h.ServerID = binary.LittleEndian.Uint32(data[pos:])
	pos += 4

	h.EventSize = binary.LittleEndian.Uint32(data[pos:])
	pos += 4

	h.LogPos = binary.LittleEndian.Uint32(data[pos:])
	pos += 4

	h.Flags = binary.LittleEndian.Uint16(data[pos:])
	pos += 2

	if h.EventSize < uint32(EventHeaderSize) {
		return errors.Errorf("invalid event size %d, must >= %d", h.EventSize, EventHeaderSize)
	}

	return nil
}
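// Illustrative sketch (not part of the original package): decoding a
// hand-built 19-byte header using the layout implied by Decode above --
// timestamp (4), event type (1), server ID (4), event size (4), log
// position (4), flags (2), all little-endian. The concrete field values
// here are made up for demonstration.
func exampleDecodeEventHeader() error {
	data := make([]byte, 19)
	binary.LittleEndian.PutUint32(data[0:], 1700000000) // Timestamp
	data[4] = 0x0f                                      // EventType
	binary.LittleEndian.PutUint32(data[5:], 1)          // ServerID
	binary.LittleEndian.PutUint32(data[9:], 19)         // EventSize
	binary.LittleEndian.PutUint32(data[13:], 120)       // LogPos
	binary.LittleEndian.PutUint16(data[17:], 0)         // Flags

	var h EventHeader
	return h.Decode(data)
}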
// ValidateVolumeParams is specified on the storage.VolumeSource interface.
func (v *ebsVolumeSource) ValidateVolumeParams(params storage.VolumeParams) error {
	vol, err := parseVolumeOptions(params.Size, params.Attributes)
	if err != nil {
		return err
	}
	var minVolumeSize, maxVolumeSize int
	switch vol.VolumeType {
	case volumeTypeStandard:
		minVolumeSize = minMagneticVolumeSizeGiB
		maxVolumeSize = maxMagneticVolumeSizeGiB
	case volumeTypeGp2:
		minVolumeSize = minSsdVolumeSizeGiB
		maxVolumeSize = maxSsdVolumeSizeGiB
	case volumeTypeIo1:
		minVolumeSize = minProvisionedIopsVolumeSizeGiB
		maxVolumeSize = maxProvisionedIopsVolumeSizeGiB
	}
	if vol.VolumeSize < minVolumeSize {
		return errors.Errorf(
			"volume size is %d GiB, must be at least %d GiB",
			vol.VolumeSize, minVolumeSize,
		)
	}
	if vol.VolumeSize > maxVolumeSize {
		return errors.Errorf(
			"volume size %d GiB exceeds the maximum of %d GiB",
			vol.VolumeSize, maxVolumeSize,
		)
	}
	return nil
}
// Compose update stmt assignment list for column scope privilege update.
func composeColumnPrivUpdate(ctx context.Context, priv mysql.PrivilegeType, name string, host string, db string, tbl string, col string) (string, error) {
	newColumnPriv := ""
	if priv == mysql.AllPriv {
		for _, p := range mysql.AllColumnPrivs {
			v, ok := mysql.Priv2SetStr[p]
			if !ok {
				return "", errors.Errorf("Unknown column privilege %v", p)
			}
			if len(newColumnPriv) == 0 {
				newColumnPriv = v
			} else {
				newColumnPriv = fmt.Sprintf("%s,%s", newColumnPriv, v)
			}
		}
	} else {
		currColumnPriv, err := getColumnPriv(ctx, name, host, db, tbl, col)
		if err != nil {
			return "", errors.Trace(err)
		}
		p, ok := mysql.Priv2SetStr[priv]
		if !ok {
			return "", errors.Errorf("Unknown priv: %v", priv)
		}
		if len(currColumnPriv) == 0 {
			newColumnPriv = p
		} else {
			newColumnPriv = fmt.Sprintf("%s,%s", currColumnPriv, p)
		}
	}
	return fmt.Sprintf(`Column_priv="%s"`, newColumnPriv), nil
}
// CheckCIDRs parses the list of strings as CIDRs, checking for
// correct formatting, no duplication and no overlaps. Returns error
// if no CIDRs are provided, unless cidrsOptional is true.
func CheckCIDRs(args []string, cidrsOptional bool) (set.Strings, error) {
	// Validate any given CIDRs.
	CIDRs := set.NewStrings()
	for _, arg := range args {
		_, ipNet, err := net.ParseCIDR(arg)
		if err != nil {
			logger.Debugf("cannot parse %q: %v", arg, err)
			return CIDRs, errors.Errorf("%q is not a valid CIDR", arg)
		}
		cidr := ipNet.String()
		if CIDRs.Contains(cidr) {
			if cidr == arg {
				return CIDRs, errors.Errorf("duplicate subnet %q specified", cidr)
			}
			return CIDRs, errors.Errorf("subnet %q overlaps with %q", arg, cidr)
		}
		CIDRs.Add(cidr)
	}
	if CIDRs.IsEmpty() && !cidrsOptional {
		return CIDRs, errors.New("CIDRs required but not provided")
	}
	return CIDRs, nil
}
// decorateAndWriteInfo decorates the info struct with information
// from the given cfg, and then writes that out to the filesystem.
func decorateAndWriteInfo(info configstore.EnvironInfo, cfg *config.Config) error {
	// Sanity check our config.
	var endpoint configstore.APIEndpoint
	if cert, ok := cfg.CACert(); !ok {
		return errors.Errorf("CACert is not set")
	} else if uuid, ok := cfg.UUID(); !ok {
		return errors.Errorf("UUID is not set")
	} else if adminSecret := cfg.AdminSecret(); adminSecret == "" {
		return errors.Errorf("admin-secret is not set")
	} else {
		endpoint = configstore.APIEndpoint{
			CACert:      cert,
			EnvironUUID: uuid,
		}
	}
	creds := configstore.APICredentials{
		User:     "******", // TODO(waigani) admin@local once we have that set
		Password: cfg.AdminSecret(),
	}
	info.SetAPICredentials(creds)
	info.SetAPIEndpoint(endpoint)
	info.SetBootstrapConfig(cfg.AllAttrs())
	if err := info.Write(); err != nil {
		return errors.Annotatef(err, "cannot create environment info %q", cfg.Name())
	}
	return nil
}
func ensureUpgradeInfoUpdated(st *State, machineId string, previousVersion, targetVersion version.Number) (*UpgradeInfo, error) {
	var doc upgradeInfoDoc
	if pdoc, err := currentUpgradeInfoDoc(st); err != nil {
		return nil, errors.Trace(err)
	} else {
		doc = *pdoc
	}
	if doc.PreviousVersion != previousVersion {
		return nil, errors.Errorf(
			"current upgrade info mismatch: expected previous version %s, got %s",
			previousVersion, doc.PreviousVersion)
	}
	if doc.TargetVersion != targetVersion {
		return nil, errors.Errorf(
			"current upgrade info mismatch: expected target version %s, got %s",
			targetVersion, doc.TargetVersion)
	}
	controllersReady := set.NewStrings(doc.ControllersReady...)
	if !controllersReady.Contains(machineId) {
		return nil, errors.Trace(errUpgradeInfoNotUpdated)
	}
	return &UpgradeInfo{st: st, doc: doc}, nil
}
// requestStart invokes a runWorker goroutine for the manifold with the supplied
// name. It must only be called from the loop goroutine.
func (engine *engine) requestStart(name string, delay time.Duration) {

	// Check preconditions.
	manifold, found := engine.manifolds[name]
	if !found {
		engine.tomb.Kill(errors.Errorf("fatal: unknown manifold %q", name))
	}

	// Copy current info and check more preconditions.
	info := engine.current[name]
	if !info.stopped() {
		engine.tomb.Kill(errors.Errorf("fatal: trying to start a second %q manifold worker", name))
	}

	// Final check that we're not shutting down yet...
	if engine.isDying() {
		logger.Tracef("not starting %q manifold worker (shutting down)", name)
		return
	}

	// ...then update the info, copy it back to the engine, and start a worker
	// goroutine based on current known state.
	info.starting = true
	engine.current[name] = info
	resourceGetter := engine.resourceGetter(name, manifold.Inputs)
	go engine.runWorker(name, delay, manifold.Start, resourceGetter)
}
// fetchResult queries the given API for the given Action ID prefix, and
// makes sure the results are acceptable, returning an error if they are not.
func fetchResult(api APIClient, requestedId string) (params.ActionResult, error) {
	none := params.ActionResult{}

	actionTag, err := getActionTagByPrefix(api, requestedId)
	if err != nil {
		return none, err
	}

	actions, err := api.Actions(params.Entities{
		Entities: []params.Entity{{actionTag.String()}},
	})
	if err != nil {
		return none, err
	}
	actionResults := actions.Results
	numActionResults := len(actionResults)
	if numActionResults == 0 {
		return none, errors.Errorf("no results for action %s", requestedId)
	}
	if numActionResults != 1 {
		return none, errors.Errorf("too many results for action %s", requestedId)
	}

	result := actionResults[0]
	if result.Error != nil {
		return none, result.Error
	}

	return result, nil
}
func (r *JoinRset) checkTableDuplicate(t *TableSource, tr *TableRset) error {
	if len(t.Name) > 0 {
		// Use alias name.
		_, ok := r.tableNames[t.Name]
		if ok {
			return errors.Errorf("%s: duplicate name %s", r, t.Name)
		}
		r.tableNames[t.Name] = struct{}{}
		return nil
	}
	// First check ident name.
	identName := t.String()
	_, ok := r.tableNames[identName]
	if ok {
		return errors.Errorf("%s: duplicate name %s", r, identName)
	}
	r.tableNames[identName] = struct{}{}
	qualifiedName := tr.Schema + "." + tr.Name
	// We should check the qualified name too, e.g. select * from t1 join test.t1.
	if identName != qualifiedName {
		_, ok = r.tableNames[qualifiedName]
		if ok {
			return errors.Errorf("%s: duplicate name %s", r, identName)
		}
		r.tableNames[qualifiedName] = struct{}{}
	}
	return nil
}
func (o *setRow) getMembers(r storeReader, count int64) ([][]byte, error) {
	it := r.getIterator()
	defer r.putIterator(it)

	var members [][]byte
	for pfx := it.SeekTo(o.DataKeyPrefix()); count > 0 && it.Valid(); it.Next() {
		key := it.Key()
		if !bytes.HasPrefix(key, pfx) {
			break
		}
		sfx := key[len(pfx):]
		if err := o.ParseDataKeySuffix(sfx); err != nil {
			return nil, err
		}
		if err := o.ParseDataValue(it.Value()); err != nil {
			return nil, err
		}
		if len(o.Member) == 0 {
			return nil, errors.Errorf("len(member) = %d", len(o.Member))
		}
		members = append(members, o.Member)
		count--
	}
	if err := it.Error(); err != nil {
		return nil, err
	}
	if len(members) == 0 {
		return nil, errors.Errorf("len(members) = %d, set.size = %d", len(members), o.Size)
	}
	return members, nil
}
func buildIndexInfo(tblInfo *model.TableInfo, unique bool, indexName model.CIStr, indexID int64, idxColNames []*coldef.IndexColName) (*model.IndexInfo, error) {
	for _, col := range tblInfo.Columns {
		if col.Name.L == indexName.L {
			return nil, errors.Errorf("CREATE INDEX: index name collision with existing column: %s", indexName)
		}
	}

	// build offsets
	idxColumns := make([]*model.IndexColumn, 0, len(idxColNames))
	for _, ic := range idxColNames {
		col := findCol(tblInfo.Columns, ic.ColumnName)
		if col == nil {
			return nil, errors.Errorf("CREATE INDEX: column does not exist: %s", ic.ColumnName)
		}
		idxColumns = append(idxColumns, &model.IndexColumn{
			Name:   col.Name,
			Offset: col.Offset,
			Length: ic.Length,
		})
	}

	// create index info
	idxInfo := &model.IndexInfo{
		ID:      indexID,
		Name:    indexName,
		Columns: idxColumns,
		Unique:  unique,
		State:   model.StateNone,
	}
	return idxInfo, nil
}
// ParseMariadbGTIDSet parses a MariaDB GTID set. We don't support multi-source
// replication, so the MariaDB GTID set may contain only domain-server-sequence.
func ParseMariadbGTIDSet(str string) (GTIDSet, error) {
	if len(str) == 0 {
		return MariadbGTID{0, 0, 0}, nil
	}

	seps := strings.Split(str, "-")

	var gtid MariadbGTID

	if len(seps) != 3 {
		return gtid, errors.Errorf("invalid Mariadb GTID %v, must be domain-server-sequence", str)
	}

	domainID, err := strconv.ParseUint(seps[0], 10, 32)
	if err != nil {
		return gtid, errors.Errorf("invalid MariaDB GTID Domain ID (%v): %v", seps[0], err)
	}

	serverID, err := strconv.ParseUint(seps[1], 10, 32)
	if err != nil {
		return gtid, errors.Errorf("invalid MariaDB GTID Server ID (%v): %v", seps[1], err)
	}

	sequenceID, err := strconv.ParseUint(seps[2], 10, 64)
	if err != nil {
		return gtid, errors.Errorf("invalid MariaDB GTID Sequence number (%v): %v", seps[2], err)
	}

	return MariadbGTID{
		DomainID:       uint32(domainID),
		ServerID:       uint32(serverID),
		SequenceNumber: sequenceID}, nil
}
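// Illustrative sketch (not part of the original package): a well-formed
// MariaDB GTID string has the form "domain-server-sequence", so "1-2-3"
// parses to DomainID=1, ServerID=2, SequenceNumber=3, while an empty
// string yields the zero GTID. The example value is made up.
func exampleParseMariadbGTID() error {
	gtid, err := ParseMariadbGTIDSet("1-2-3")
	if err != nil {
		return err
	}
	fmt.Printf("parsed GTID: %v\n", gtid)
	return nil
}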
func (b *planBuilder) buildUpdateLists(list []*ast.Assignment, p LogicalPlan) ([]*expression.Assignment, LogicalPlan) {
	schema := p.GetSchema()
	newList := make([]*expression.Assignment, len(schema))
	for _, assign := range list {
		col, err := schema.FindColumn(assign.Column)
		if err != nil {
			b.err = errors.Trace(err)
			return nil, nil
		}
		if col == nil {
			b.err = errors.Trace(errors.Errorf("column %s not found", assign.Column.Name.O))
			return nil, nil
		}
		offset := schema.GetIndex(col)
		if offset == -1 {
			b.err = errors.Trace(errors.Errorf("could not find column %s.%s", col.TblName, col.ColName))
			// Return early like the other error paths, so the invalid offset
			// is never used to index newList.
			return nil, nil
		}
		newExpr, np, _, err := b.rewrite(assign.Expr, p, nil, false)
		if err != nil {
			b.err = errors.Trace(err)
			return nil, nil
		}
		p = np
		newList[offset] = &expression.Assignment{Col: col, Expr: newExpr}
	}
	return newList, p
}
// GetColDefaultValue gets default value of the column.
func GetColDefaultValue(ctx context.Context, col *model.ColumnInfo) (interface{}, bool, error) {
	// Check no default value flag.
	if mysql.HasNoDefaultValueFlag(col.Flag) && col.Tp != mysql.TypeEnum {
		return nil, false, errors.Errorf("Field '%s' doesn't have a default value", col.Name)
	}

	// Check and get timestamp/datetime default value.
	if col.Tp == mysql.TypeTimestamp || col.Tp == mysql.TypeDatetime {
		if col.DefaultValue == nil {
			return nil, true, nil
		}

		value, err := expression.GetTimeValue(ctx, col.DefaultValue, col.Tp, col.Decimal)
		if err != nil {
			return nil, true, errors.Errorf("Field '%s' get default value fail - %s", col.Name, errors.Trace(err))
		}
		return value, true, nil
	} else if col.Tp == mysql.TypeEnum {
		// For enum type, if no default value and not null is set,
		// the default value is the first element of the enum list.
		if col.DefaultValue == nil && mysql.HasNotNullFlag(col.Flag) {
			return col.FieldType.Elems[0], true, nil
		}
	}

	return col.DefaultValue, true, nil
}
func (c *addCloudCommand) Run(ctxt *cmd.Context) error {
	specifiedClouds, err := cloud.ParseCloudMetadataFile(c.CloudFile)
	if err != nil {
		return err
	}
	if specifiedClouds == nil {
		return errors.New("no personal clouds are defined")
	}
	newCloud, ok := specifiedClouds[c.Cloud]
	if !ok {
		return errors.Errorf("cloud %q not found in file %q", c.Cloud, c.CloudFile)
	}
	personalClouds, err := cloud.PersonalCloudMetadata()
	if err != nil {
		return err
	}
	if _, ok = personalClouds[c.Cloud]; ok && !c.Replace {
		return errors.Errorf("cloud called %q already exists; use --replace to replace this existing cloud", c.Cloud)
	}
	if personalClouds == nil {
		personalClouds = make(map[string]cloud.Cloud)
	}
	personalClouds[c.Cloud] = newCloud
	return cloud.WritePersonalCloudMetadata(personalClouds)
}
// DecodeRecordKey decodes the key and gets the tableID, handle and columnID.
func DecodeRecordKey(key kv.Key) (tableID int64, handle int64, columnID int64, err error) {
	k := key
	if !key.HasPrefix(TablePrefix) {
		return 0, 0, 0, errors.Errorf("invalid record key - %q", k)
	}

	key = key[len(TablePrefix):]
	key, tableID, err = codec.DecodeInt(key)
	if err != nil {
		return 0, 0, 0, errors.Trace(err)
	}

	if !key.HasPrefix(recordPrefixSep) {
		return 0, 0, 0, errors.Errorf("invalid record key - %q", k)
	}

	key = key[len(recordPrefixSep):]
	key, handle, err = codec.DecodeInt(key)
	if err != nil {
		return 0, 0, 0, errors.Trace(err)
	}
	if len(key) == 0 {
		return
	}

	key, columnID, err = codec.DecodeInt(key)
	if err != nil {
		return 0, 0, 0, errors.Trace(err)
	}

	return
}
func (o *hashRow) getAllFields(r storeReader) ([][]byte, error) {
	it := r.getIterator()
	defer r.putIterator(it)

	var fields [][]byte
	for pfx := it.SeekTo(o.DataKeyPrefix()); it.Valid(); it.Next() {
		key := it.Key()
		if !bytes.HasPrefix(key, pfx) {
			break
		}
		sfx := key[len(pfx):]
		if err := o.ParseDataKeySuffix(sfx); err != nil {
			return nil, errors.Trace(err)
		}
		if len(o.Field) == 0 {
			return nil, errors.Errorf("len(field) = %d", len(o.Field))
		}
		fields = append(fields, o.Field)
	}
	if err := it.Error(); err != nil {
		return nil, errors.Trace(err)
	}
	if len(fields) == 0 || int64(len(fields)) != o.Size {
		return nil, errors.Errorf("len(fields) = %d, hash.size = %d", len(fields), o.Size)
	}
	return fields, nil
}
func checkUnits(app PrecheckApplication, modelVersion version.Number) error {
	units, err := app.AllUnits()
	if err != nil {
		return errors.Annotatef(err, "retrieving units for %s", app.Name())
	}
	if len(units) < app.MinUnits() {
		return errors.Errorf("application %s is below its minimum units threshold", app.Name())
	}

	appCharmURL, _ := app.CharmURL()

	for _, unit := range units {
		if unit.Life() != state.Alive {
			return errors.Errorf("unit %s is %s", unit.Name(), unit.Life())
		}

		if err := checkUnitAgentStatus(unit); err != nil {
			return errors.Trace(err)
		}

		if err := checkAgentTools(modelVersion, unit, "unit "+unit.Name()); err != nil {
			return errors.Trace(err)
		}

		unitCharmURL, _ := unit.CharmURL()
		if appCharmURL.String() != unitCharmURL.String() {
			return errors.Errorf("unit %s is upgrading", unit.Name())
		}
	}
	return nil
}
func (o *hashRow) getAllValues(r storeReader) ([][]byte, error) {
	it := r.getIterator()
	defer r.putIterator(it)

	var values [][]byte
	for pfx := it.SeekTo(o.DataKeyPrefix()); it.Valid(); it.Next() {
		key := it.Key()
		if !bytes.HasPrefix(key, pfx) {
			break
		}
		if err := o.ParseDataValue(it.Value()); err != nil {
			return nil, errors.Trace(err)
		}
		if len(o.Value) == 0 {
			return nil, errors.Errorf("len(value) = %d", len(o.Value))
		}
		values = append(values, o.Value)
	}
	if err := it.Error(); err != nil {
		return nil, errors.Trace(err)
	}
	if len(values) == 0 || int64(len(values)) != o.Size {
		return nil, errors.Errorf("len(values) = %d, hash.size = %d", len(values), o.Size)
	}
	return values, nil
}
// TODO: handle overflow.
func Btoi(b []byte) (int, error) {
	n := 0
	sign := 1
	// Use an int index so inputs longer than 255 bytes don't wrap a uint8 counter.
	for i := 0; i < len(b); i++ {
		if i == 0 && b[i] == '-' {
			if len(b) == 1 {
				return 0, errors.Errorf("Invalid number %s", string(b))
			}
			sign = -1
			continue
		}

		if b[i] >= '0' && b[i] <= '9' {
			if i > 0 {
				n *= 10
			}
			n += int(b[i]) - '0'
			continue
		}

		return 0, errors.Errorf("Invalid number %s", string(b))
	}
	return sign * n, nil
}
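// Illustrative sketch (not part of the original package): Btoi accepts an
// optional leading '-' followed by ASCII digits, so "-42" parses to -42 and
// any non-digit byte is rejected. The inputs are made up for demonstration.
func exampleBtoi() {
	if n, err := Btoi([]byte("-42")); err == nil {
		fmt.Println(n) // -42
	}
	if _, err := Btoi([]byte("4x2")); err != nil {
		fmt.Println("rejected:", err) // contains a non-digit byte
	}
}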
func (c *envStateCollection) mungeInsert(inDoc interface{}) (bson.D, error) {
	outDoc, err := toBsonD(inDoc)
	if err != nil {
		return nil, errors.Trace(err)
	}

	uuidSeen := false
	for i, item := range outDoc {
		switch item.Name {
		case "_id":
			docId, ok := item.Value.(string)
			if ok { // tolerate non-string ids
				outDoc[i].Value = ensureEnvUUID(c.envUUID, docId)
			}
		case "env-uuid":
			docEnvUUID, ok := outDoc[i].Value.(string)
			if !ok {
				return nil, errors.Errorf("env-uuid is not a string: %v", outDoc[i].Value)
			}
			if docEnvUUID == "" {
				outDoc[i].Value = c.envUUID
			} else if docEnvUUID != c.envUUID {
				return nil, errors.Errorf("insert env-uuid is not correct: %q != %q", docEnvUUID, c.envUUID)
			}
			uuidSeen = true
		}
	}

	if !uuidSeen {
		outDoc = append(outDoc, bson.DocElem{"env-uuid", c.envUUID})
	}

	return outDoc, nil
}
func (c *createModelCommand) Init(args []string) error {
	if len(args) == 0 {
		return errors.New("model name is required")
	}
	c.Name, args = args[0], args[1:]

	if c.Owner != "" && !names.IsValidUser(c.Owner) {
		return errors.Errorf("%q is not a valid user", c.Owner)
	}

	if c.CredentialSpec != "" {
		parts := strings.Split(c.CredentialSpec, ":")
		if len(parts) < 2 {
			return errors.Errorf("invalid cloud credential %s, expected <cloud>:<credential-name>", c.CredentialSpec)
		}
		c.CloudName = parts[0]
		if cloud, err := common.CloudOrProvider(c.CloudName, cloud.CloudByName); err != nil {
			return errors.Trace(err)
		} else {
			c.CloudType = cloud.Type
		}
		c.CredentialName = parts[1]
	}
	return nil
}
func constructStartInstanceParams(
	machine *apiprovisioner.Machine,
	instanceConfig *instancecfg.InstanceConfig,
	provisioningInfo *params.ProvisioningInfo,
	possibleTools coretools.List,
) (environs.StartInstanceParams, error) {

	volumes := make([]storage.VolumeParams, len(provisioningInfo.Volumes))
	for i, v := range provisioningInfo.Volumes {
		volumeTag, err := names.ParseVolumeTag(v.VolumeTag)
		if err != nil {
			return environs.StartInstanceParams{}, errors.Trace(err)
		}
		if v.Attachment == nil {
			return environs.StartInstanceParams{}, errors.Errorf("volume params missing attachment")
		}
		machineTag, err := names.ParseMachineTag(v.Attachment.MachineTag)
		if err != nil {
			return environs.StartInstanceParams{}, errors.Trace(err)
		}
		if machineTag != machine.Tag() {
			return environs.StartInstanceParams{}, errors.Errorf("volume attachment params has invalid machine tag")
		}
		if v.Attachment.InstanceId != "" {
			return environs.StartInstanceParams{}, errors.Errorf("volume attachment params specifies instance ID")
		}
		volumes[i] = storage.VolumeParams{
			volumeTag,
			v.Size,
			storage.ProviderType(v.Provider),
			v.Attributes,
			v.Tags,
			&storage.VolumeAttachmentParams{
				AttachmentParams: storage.AttachmentParams{
					Machine:  machineTag,
					ReadOnly: v.Attachment.ReadOnly,
				},
				Volume: volumeTag,
			},
		}
	}

	var subnetsToZones map[network.Id][]string
	if provisioningInfo.SubnetsToZones != nil {
		// Convert subnet provider ids from string to network.Id.
		subnetsToZones = make(map[network.Id][]string, len(provisioningInfo.SubnetsToZones))
		for providerId, zones := range provisioningInfo.SubnetsToZones {
			subnetsToZones[network.Id(providerId)] = zones
		}
	}

	return environs.StartInstanceParams{
		Constraints:       provisioningInfo.Constraints,
		Tools:             possibleTools,
		InstanceConfig:    instanceConfig,
		Placement:         provisioningInfo.Placement,
		DistributionGroup: machine.DistributionGroup,
		Volumes:           volumes,
		SubnetsToZones:    subnetsToZones,
	}, nil
}
// settingsFromGetYaml will parse a yaml produced by juju get and generate
// charm.Settings from it that can then be sent to the service.
func settingsFromGetYaml(yamlContents map[string]interface{}) (charm.Settings, error) {
	onlySettings := charm.Settings{}
	settingsMap, ok := yamlContents["settings"].(map[interface{}]interface{})
	if !ok {
		return nil, errors.New("unknown format for settings")
	}

	for setting := range settingsMap {
		s, ok := settingsMap[setting].(map[interface{}]interface{})
		if !ok {
			return nil, errors.Errorf("unknown format for settings section %v", setting)
		}
		// Some keys might not have a value; we don't care about those.
		v, ok := s["value"]
		if !ok {
			continue
		}
		stringSetting, ok := setting.(string)
		if !ok {
			return nil, errors.Errorf("unexpected setting key, expected string got %T", setting)
		}
		onlySettings[stringSetting] = v
	}
	return onlySettings, nil
}
// AddPendingResources sends the provided resource info up to Juju
// without making it available yet.
func (c Client) AddPendingResources(args AddPendingResourcesArgs) (pendingIDs []string, err error) {
	apiArgs, err := api.NewAddPendingResourcesArgs(args.ServiceID, args.CharmID, args.CharmStoreMacaroon, args.Resources)
	if err != nil {
		return nil, errors.Trace(err)
	}

	var result api.AddPendingResourcesResult
	if err := c.FacadeCall("AddPendingResources", &apiArgs, &result); err != nil {
		return nil, errors.Trace(err)
	}
	if result.Error != nil {
		err := common.RestoreError(result.Error)
		return nil, errors.Trace(err)
	}

	if len(result.PendingIDs) != len(args.Resources) {
		return nil, errors.Errorf("bad data from server: expected %d IDs, got %d", len(args.Resources), len(result.PendingIDs))
	}
	for i, id := range result.PendingIDs {
		if id == "" {
			return nil, errors.Errorf("bad data from server: got an empty ID for resource %q", args.Resources[i].Name)
		}
		// TODO(ericsnow) Do other validation?
	}

	return result.PendingIDs, nil
}
func newEbsConfig(attrs map[string]interface{}) (*ebsConfig, error) {
	out, err := ebsConfigChecker.Coerce(attrs, nil)
	if err != nil {
		return nil, errors.Annotate(err, "validating EBS storage config")
	}
	coerced := out.(map[string]interface{})
	iops, _ := coerced[EBS_IOPS].(int)
	volumeType := coerced[EBS_VolumeType].(string)
	ebsConfig := &ebsConfig{
		volumeType: volumeType,
		iops:       iops,
		encrypted:  coerced[EBS_Encrypted].(bool),
	}
	switch ebsConfig.volumeType {
	case volumeTypeMagnetic:
		ebsConfig.volumeType = volumeTypeStandard
	case volumeTypeSsd:
		ebsConfig.volumeType = volumeTypeGp2
	case volumeTypeProvisionedIops:
		ebsConfig.volumeType = volumeTypeIo1
	}
	if ebsConfig.iops > 0 && ebsConfig.volumeType != volumeTypeIo1 {
		return nil, errors.Errorf("IOPS specified, but volume type is %q", volumeType)
	} else if ebsConfig.iops == 0 && ebsConfig.volumeType == volumeTypeIo1 {
		return nil, errors.Errorf("volume type is %q, IOPS unspecified or zero", volumeTypeIo1)
	}
	return ebsConfig, nil
}
// TableFromMeta creates a Table instance from model.TableInfo.
func TableFromMeta(alloc autoid.Allocator, tblInfo *model.TableInfo) (table.Table, error) {
	if tblInfo.State == model.StateNone {
		return nil, errors.Errorf("table %s can't be in none state", tblInfo.Name)
	}

	columns := make([]*column.Col, 0, len(tblInfo.Columns))
	for _, colInfo := range tblInfo.Columns {
		if colInfo.State == model.StateNone {
			return nil, errors.Errorf("column %s can't be in none state", colInfo.Name)
		}

		col := &column.Col{ColumnInfo: *colInfo}
		columns = append(columns, col)
	}

	t := newTable(tblInfo.ID, tblInfo.Name.O, columns, alloc)

	for _, idxInfo := range tblInfo.Indices {
		if idxInfo.State == model.StateNone {
			return nil, errors.Errorf("index %s can't be in none state", idxInfo.Name)
		}

		idx := &column.IndexedCol{
			IndexInfo: *idxInfo,
		}
		idx.X = kv.NewKVIndex(t.IndexPrefix(), idxInfo.Name.L, idxInfo.ID, idxInfo.Unique)
		t.AddIndex(idx)
	}

	t.meta = tblInfo
	return t, nil
}
// NewAgentConfig returns a new config object suitable for use for a
// machine or unit agent.
func NewAgentConfig(configParams AgentConfigParams) (ConfigSetterWriter, error) {
	if configParams.Paths.DataDir == "" {
		return nil, errors.Trace(requiredError("data directory"))
	}
	if configParams.Tag == nil {
		return nil, errors.Trace(requiredError("entity tag"))
	}
	switch configParams.Tag.(type) {
	case names.MachineTag, names.UnitTag:
		// These are the only two types of tag that can represent an agent.
	default:
		return nil, errors.Errorf("entity tag must be MachineTag or UnitTag, got %T", configParams.Tag)
	}
	if configParams.UpgradedToVersion == version.Zero {
		return nil, errors.Trace(requiredError("upgradedToVersion"))
	}
	if configParams.Password == "" {
		return nil, errors.Trace(requiredError("password"))
	}
	if uuid := configParams.Environment.Id(); uuid == "" {
		return nil, errors.Trace(requiredError("environment"))
	} else if !names.IsValidEnvironment(uuid) {
		return nil, errors.Errorf("%q is not a valid environment uuid", uuid)
	}
	if len(configParams.CACert) == 0 {
		return nil, errors.Trace(requiredError("CA certificate"))
	}
	// Note that the password parts of the state and api information are
	// blank. This is by design.
	config := &configInternal{
		paths:             NewPathsWithDefaults(configParams.Paths),
		jobs:              configParams.Jobs,
		upgradedToVersion: configParams.UpgradedToVersion,
		tag:               configParams.Tag,
		nonce:             configParams.Nonce,
		environment:       configParams.Environment,
		caCert:            configParams.CACert,
		oldPassword:       configParams.Password,
		values:            configParams.Values,
		preferIPv6:        configParams.PreferIPv6,
	}
	if len(configParams.StateAddresses) > 0 {
		config.stateDetails = &connectionDetails{
			addresses: configParams.StateAddresses,
		}
	}
	if len(configParams.APIAddresses) > 0 {
		config.apiDetails = &connectionDetails{
			addresses: configParams.APIAddresses,
		}
	}
	if err := config.check(); err != nil {
		return nil, err
	}
	if config.values == nil {
		config.values = make(map[string]string)
	}
	config.configFilePath = ConfigPath(config.paths.DataDir, config.tag)
	return config, nil
}
// setProxyCommand sets the proxy command option.
func (c *SSHCommon) setProxyCommand(options *ssh.Options) error {
	apiServerHost, _, err := net.SplitHostPort(c.apiAddr)
	if err != nil {
		return errors.Errorf("failed to get proxy address: %v", err)
	}
	juju, err := getJujuExecutable()
	if err != nil {
		return errors.Errorf("failed to get juju executable path: %v", err)
	}
	// TODO(mjs) 2016-05-09 LP #1579592 - It would be good to check the
	// host key of the controller machine being used for proxying
	// here. This isn't too serious as all traffic passing through the
	// controller host is encrypted and the host key of the ultimate
	// target host is verified but it would still be better to perform
	// this extra level of checking.
	options.SetProxyCommand(
		juju, "ssh", "--proxy=false", "--no-host-key-checks",
		"--pty=false", "ubuntu@"+apiServerHost, "-q", "nc %h %p",
	)
	return nil
}