// fieldHasDefault reports whether f has a default value, either at the
// SQL level (f.Default) or at the ORM level (fields.HasDefault).
func fieldHasDefault(m driver.Model, f *Field) bool {
	if f.Default != "" {
		return true
	}
	fields := m.Fields()
	idx := fields.MNameMap[f.Name]
	return fields.HasDefault(idx)
}

// less reports whether mi should be created before mj: a model that
// references another model, directly or transitively, must come after it.
func (s sortModels) less(mi, mj driver.Model) bool {
	for _, v := range mi.Fields().References {
		if v.Model == mj {
			return false
		}
		if v.Model != mi && !s.less(v.Model, mj) {
			return false
		}
	}
	return true
}
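
// A standalone toy of the ordering rule above, with hypothetical types
// that are not part of this package: a node is "less" than another when
// it does not reference it, directly or transitively, so foreign key
// targets sort first.
type node struct {
	name string
	refs []*node
}

func lessNode(a, b *node) bool {
	for _, r := range a.refs {
		if r == b {
			return false
		}
		if r != a && !lessNode(r, b) {
			return false
		}
	}
	return true
}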

func (b *Backend) Insert(db *sql.DB, m driver.Model, query string, args ...interface{}) (driver.Result, error) {
	fields := m.Fields()
	if fields.AutoincrementPk {
		q := query + " RETURNING " + fields.MNames[fields.PrimaryKey]
		var id int64
		err := db.QueryRow(q, args...).Scan(&id)
		// We need to perform a "real" insert to find the real error, so
		// just let the code fall through to the Exec at the end of the
		// function if there's an error.
		if err == nil {
			return insertResult(id), nil
		}
	}
	return db.Exec(query, args...)
}
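
// A minimal sketch of the same RETURNING technique with plain
// database/sql against PostgreSQL. The table, column and connection are
// hypothetical; only backends that support INSERT ... RETURNING can use
// this shortcut to fetch the generated primary key in one round trip.
func insertReturningID(db *sql.DB, name string) (int64, error) {
	var id int64
	err := db.QueryRow("INSERT INTO users (name) VALUES ($1) RETURNING id", name).Scan(&id)
	return id, err
}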

func (b *SqlBackend) AddFields(db *DB, m driver.Model, prevTable *Table, newTable *Table, fields []*Field) error {
	modelFields := m.Fields()
	tableName := db.QuoteIdentifier(m.Table())
	for _, v := range fields {
		idx := modelFields.MNameMap[v.Name]
		field := v
		hasDefault := modelFields.HasDefault(idx)
		if hasDefault && v.HasConstraint(ConstraintNotNull) {
			// ORM level default. The column must be added as nullable
			// first, then the default value must be set and finally the
			// field has to be altered to be NOT NULL.
			field = field.Copy()
			var constraints []*Constraint
			for _, c := range field.Constraints {
				if c.Type != ConstraintNotNull {
					constraints = append(constraints, c)
				}
			}
			field.Constraints = constraints
		}
		sql, cons, err := field.SQL(db, m, newTable)
		if err != nil {
			return err
		}
		if _, err = db.Exec(fmt.Sprintf("ALTER TABLE %s ADD COLUMN %s", tableName, sql)); err != nil {
			return err
		}
		if hasDefault {
			value := modelFields.DefaultValue(idx)
			fieldName := db.QuoteIdentifier(v.Name)
			if _, err := db.Exec(fmt.Sprintf("UPDATE %s SET %s = ?", tableName, fieldName), value); err != nil {
				return err
			}
			if v.HasConstraint(ConstraintNotNull) {
				if err := db.Backend().AlterField(db, m, newTable, field, v); err != nil {
					return err
				}
			}
		}
		for _, c := range cons {
			if _, err = db.Exec(fmt.Sprintf("ALTER TABLE %s ADD CONSTRAINT %s", tableName, c)); err != nil {
				return err
			}
		}
	}
	return nil
}
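
// Sketch of the statement sequence the loop above issues for a new
// NOT NULL column backed by an ORM-level default (identifiers are
// hypothetical, and the final ALTER is whatever AlterField emits for
// the concrete backend):
//
//	ALTER TABLE "users" ADD COLUMN "age" INTEGER;  -- added as nullable
//	UPDATE "users" SET "age" = ?;                  -- backfill the default
//	ALTER TABLE "users" ALTER COLUMN "age" SET NOT NULL;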

func (d *Driver) mergeTable(m driver.Model, prevTable *Table, newTable *Table) error {
	existing := make(map[string]*Field)
	for _, v := range prevTable.Fields {
		existing[v.Name] = v
	}
	var missing []*Field
	for _, v := range newTable.Fields {
		prev := existing[v.Name]
		if prev == nil {
			// Check if we can add the field
			if v.HasConstraint(ConstraintNotNull) && !fieldHasDefault(m, v) {
				return fmt.Errorf("can't add NOT NULL field %q to table %q without a default value", v.Name, m.Table())
			}
			if v.HasConstraint(ConstraintPrimaryKey) {
				return fmt.Errorf("can't add PRIMARY KEY field %q to table %q", v.Name, m.Table())
			}
			missing = append(missing, v)
		} else if prev.Type != v.Type {
			// Check the Kind
			k1, len1 := TypeKind(prev.Type)
			k2, len2 := TypeKind(v.Type)
			if k1 == k2 {
				// Same kind; only the lengths might differ.
				if len1 != len2 {
					// TODO: altering the column length is not
					// implemented yet, so the existing column is
					// kept as is.
				}
				continue
			}
			// The kinds are incompatible, so report the mismatch.
			fields := m.Fields()
			idx := fields.MNameMap[v.Name]
			modelName := fields.QNames[idx]
			modelType := fields.Types[idx]
			return fmt.Errorf("field %q on table %q is of type %s which is not compatible with the model field %q of type %s (%s)",
				v.Name, m.Table(), prev.Type, modelName, v.Type, modelType)
		}
	}
	if len(missing) > 0 {
		if err := d.backend.AddFields(d.db, m, prevTable, newTable, missing); err != nil {
			return err
		}
	}
	return nil
}
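
// Illustrative (hypothetical) outcomes of the type check above:
//
//	VARCHAR(32) -> VARCHAR(64): same kind, lengths differ; column kept as is
//	INTEGER     -> TEXT:        different kinds; returns an error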

func (d *Driver) outValues(m driver.Model, out interface{}) (reflect.Value, *driver.Fields, []interface{}, []*scanner, error) {
	val := reflect.ValueOf(out)
	if !val.IsValid() {
		// Untyped nil pointer
		return reflect.Value{}, nil, nil, nil, nil
	}
	vt := val.Type()
	if vt.Kind() != reflect.Ptr {
		return reflect.Value{}, nil, nil, nil, fmt.Errorf("can't set object of type %T. Please, pass a %v rather than a %v", out, reflect.PtrTo(vt), vt)
	}
	if vt.Elem().Kind() == reflect.Ptr && vt.Elem().Elem().Kind() == reflect.Struct {
		// Received a pointer to pointer. Always create a new object,
		// to avoid overwriting the previous result.
		val = val.Elem()
		el := reflect.New(val.Type().Elem())
		val.Set(el)
	}
	for val.Kind() == reflect.Ptr {
		el := val.Elem()
		if !el.IsValid() {
			if !val.CanSet() {
				// Typed nil pointer
				return reflect.Value{}, nil, nil, nil, nil
			}
			el = reflect.New(val.Type().Elem())
			val.Set(el)
		}
		val = el
	}
	fields := m.Fields()
	if fields == nil {
		// Skipped model
		return reflect.Value{}, nil, nil, nil, nil
	}
	values := make([]interface{}, len(fields.Indexes))
	scanners := make([]*scanner, len(fields.Indexes))
	for ii, v := range fields.Indexes {
		field := d.fieldByIndex(val, v, true)
		tag := fields.Tags[ii]
		s := newScanner(&field, tag, d.backend)
		scanners[ii] = s
		values[ii] = s
	}
	return val, fields, values, scanners, nil
}
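
// A standalone toy of the pointer-allocation loop above (hypothetical,
// not part of this package): walk pointer levels, allocating as needed,
// until a non-pointer value is reached. Passing the address of a nil *T
// leaves that pointer referring to a freshly allocated T.
func allocTarget(out interface{}) reflect.Value {
	val := reflect.ValueOf(out)
	for val.Kind() == reflect.Ptr {
		el := val.Elem()
		if !el.IsValid() {
			if !val.CanSet() {
				// Typed nil pointer with no way to allocate through it.
				return reflect.Value{}
			}
			el = reflect.New(val.Type().Elem())
			val.Set(el)
		}
		val = el
	}
	return val
}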

func (d *Driver) createIndex(m driver.Model, idx *index.Index, name string) error {
	has, err := d.backend.HasIndex(d.db, m, idx, name)
	if err != nil {
		return err
	}
	if has {
		return nil
	}
	buf := getBuffer()
	buf.WriteString("CREATE ")
	if idx.Unique {
		buf.WriteString("UNIQUE ")
	}
	buf.WriteString("INDEX ")
	buf.WriteString(name)
	buf.WriteString(" ON \"")
	buf.WriteString(m.Table())
	buf.WriteString("\" (")
	fields := m.Fields()
	for _, v := range idx.Fields {
		name, _, err := fields.Map(v)
		if err != nil {
			return err
		}
		buf.WriteByte('"')
		buf.WriteString(name)
		buf.WriteByte('"')
		if DescField(idx, v) {
			buf.WriteString(" DESC")
		}
		buf.WriteByte(',')
	}
	// Replace the trailing comma with the closing parenthesis.
	buf.Truncate(buf.Len() - 1)
	buf.WriteString(")")
	_, err = d.db.Exec(buftos(buf))
	putBuffer(buf)
	return err
}
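
// Example of the statement this builds for a hypothetical unique index
// named "users_email" on a "users" table, descending on "email":
//
//	CREATE UNIQUE INDEX users_email ON "users" ("email" DESC)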

func (d *Driver) Insert(m driver.Model, data interface{}) (driver.Result, error) {
	var id int64
	fields := m.Fields()
	var pkVal *reflect.Value
	// TODO: If the PK is supplied by the user rather than auto-assigned, it
	// might conflict with PKs generated by datastore.AllocateIDs().
	if fields.PrimaryKey >= 0 {
		p := d.primaryKey(fields, data)
		if p.IsValid() && types.Kind(p.Kind()) == types.Int {
			id = p.Int()
			if id == 0 {
				// Must assign the PK field value after calling AllocateIDs
				pkVal = &p
			}
		}
	}
	name := m.Table()
	// Make all objects of a given kind descendants of the same parent
	// key. While this hurts scalability, it makes all reads strongly
	// consistent.
	parent := d.parentKey(m)
	var err error
	if id == 0 {
		id, _, err = datastore.AllocateIDs(d.c, name, parent, 1)
		if err != nil {
			return nil, err
		}
	}
	if fields.AutoincrementPk && pkVal != nil {
		pkVal.SetInt(id)
	}
	key := datastore.NewKey(d.c, name, "", id, parent)
	log.Debugf("DATASTORE: put %s %v", key, data)
	_, err = datastore.Put(d.c, key, data)
	if err != nil {
		return nil, err
	}
	return &result{key: key, count: 1}, nil
}
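
// Note on the trade-off above: in the App Engine datastore, entities
// sharing an ancestor form a single entity group, and each group only
// sustains a limited write rate (historically on the order of one write
// per second), which is why the comment warns about scalability.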

func (d *Driver) makeTable(m driver.Model) (*Table, error) {
	fields := m.Fields()
	names := fields.MNames
	qnames := fields.QNames
	ftypes := fields.Types
	tags := fields.Tags
	dbFields := make([]*Field, len(names))
	for ii, v := range names {
		typ := ftypes[ii]
		tag := tags[ii]
		ft, err := d.backend.FieldType(typ, tag)
		if err != nil {
			return nil, err
		}
		def := tag.Value("default")
		if fields.HasDefault(ii) {
			// Handled by the ORM
			def = ""
		}
		if def != "" {
			if driver.IsFunc(def) {
				fname, _ := driver.SplitFuncArgs(def)
				fn, err := d.backend.Func(fname, ftypes[ii])
				if err != nil {
					if err == ErrFuncNotSupported {
						err = fmt.Errorf("backend %s does not support function %s", d.backend.Name(), tag.Value("default"))
					}
					return nil, err
				}
				def = fn
			} else {
				def = driver.UnescapeDefault(def)
				if typ.Kind() == reflect.String {
					def = d.db.QuoteString(def)
				}
			}
		}
		field := &Field{
			Name:    v,
			Type:    ft,
			Default: def,
		}
		if tag.Has("notnull") {
			field.AddConstraint(ConstraintNotNull)
		}
		if d.isPrimaryKey(fields, ii, tag) {
			field.AddConstraint(ConstraintPrimaryKey)
		} else if tag.Has("unique") {
			field.AddConstraint(ConstraintUnique)
		}
		if tag.Has("auto_increment") {
			field.AddOption(OptionAutoIncrement)
		}
		if ref := fields.References[qnames[ii]]; ref != nil {
			fk, _, err := ref.Model.Fields().Map(ref.Field)
			if err != nil {
				return nil, err
			}
			field.Constraints = append(field.Constraints, &Constraint{
				Type:       ConstraintForeignKey,
				References: MakeReference(ref.Model.Table(), fk),
			})
		}
		dbFields[ii] = field
	}
	return &Table{Fields: dbFields}, nil
}
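
// Summary of the tag switches the loop above understands (tag names as
// they appear in the code; this is a restatement, not new behavior):
//
//	default=...     -> SQL DEFAULT, unless the ORM handles the default
//	                   itself; function defaults resolve via backend.Func
//	notnull         -> ConstraintNotNull
//	unique          -> ConstraintUnique (when not the primary key)
//	auto_increment  -> OptionAutoIncrement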

func (d *Driver) saveParameters(m driver.Model, data interface{}) (reflect.Value, []string, []interface{}, error) {
	// data is guaranteed to be of m.Type()
	val := driver.Direct(reflect.ValueOf(data))
	fields := m.Fields()
	max := len(fields.MNames)
	names := make([]string, 0, max)
	values := make([]interface{}, 0, max)
	var err error
	if d.transforms != nil {
		for ii, v := range fields.Indexes {
			f := d.fieldByIndex(val, v, false)
			if !f.IsValid() {
				continue
			}
			if fields.OmitEmpty[ii] && driver.IsZero(f) {
				continue
			}
			ft := f.Type()
			var fval interface{}
			if _, ok := d.transforms[ft]; ok {
				fval, err = d.backend.TransformOutValue(f)
				if err != nil {
					return val, nil, nil, err
				}
				if fields.NullEmpty[ii] && driver.IsZero(reflect.ValueOf(fval)) {
					fval = nil
				}
			} else if !fields.NullEmpty[ii] || !driver.IsZero(f) {
				if c := codec.FromTag(fields.Tags[ii]); c != nil {
					fval, err = c.Encode(f.Interface())
					if err != nil {
						return val, nil, nil, err
					}
					if p := pipe.FromTag(fields.Tags[ii]); p != nil {
						piped, err := p.Encode(fval.([]byte))
						if err != nil {
							return val, nil, nil, err
						}
						fval = piped
					}
				} else {
					// Most sql drivers won't accept aliases for string type
					if ft.Kind() == reflect.String && ft != stringType {
						f = f.Convert(stringType)
					}
					fval = f.Interface()
				}
			}
			names = append(names, fields.MNames[ii])
			values = append(values, fval)
		}
	} else {
		for ii, v := range fields.Indexes {
			f := d.fieldByIndex(val, v, false)
			if !f.IsValid() {
				continue
			}
			if fields.OmitEmpty[ii] && driver.IsZero(f) {
				continue
			}
			var fval interface{}
			if !fields.NullEmpty[ii] || !driver.IsZero(f) {
				if c := codec.FromTag(fields.Tags[ii]); c != nil {
					fval, err = c.Encode(&f)
					if err != nil {
						return val, nil, nil, err
					}
				} else {
					ft := f.Type()
					// Most sql drivers won't accept aliases for string type
					if ft.Kind() == reflect.String && ft != stringType {
						f = f.Convert(stringType)
					}
					fval = f.Interface()
				}
			}
			names = append(names, fields.MNames[ii])
			values = append(values, fval)
		}
	}
	return val, names, values, nil
}
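
// Behavior summary for a single field f, per the checks above:
//
//	omitempty and f is zero -> column skipped entirely
//	nullempty and f is zero -> column written as NULL
//	codec tag present       -> value encoded (and piped, when a pipe tag
//	                           is set and transforms are enabled)
//	string alias            -> converted to plain string for the driver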

func (d *Driver) applyQuery(m driver.Model, dq *datastore.Query, q query.Q) (*datastore.Query, error) {
	var field *query.Field
	var op string
	switch x := q.(type) {
	case *query.Eq:
		field = &x.Field
		op = " ="
	case *query.Lt:
		field = &x.Field
		op = " <"
	case *query.Lte:
		field = &x.Field
		op = " <="
	case *query.Gt:
		field = &x.Field
		op = " >"
	case *query.Gte:
		field = &x.Field
		op = " >="
	case *query.And:
		var err error
		for _, v := range x.Conditions {
			dq, err = d.applyQuery(m, dq, v)
			if err != nil {
				return nil, err
			}
		}
	case nil:
	default:
		return nil, fmt.Errorf("datastore does not support %T queries", q)
	}
	if field != nil {
		if _, ok := field.Value.(query.F); ok {
			return nil, fmt.Errorf("datastore queries can't reference other properties (%v)", field.Value)
		}
		name := field.Field
		fields := m.Fields()
		idx, ok := fields.QNameMap[name]
		if !ok {
			return nil, fmt.Errorf("can't map field %q to a datastore name", name)
		}
		if strings.IndexByte(name, '.') >= 0 {
			// GAE flattens embedded fields, so we must remove
			// the parts of the field which refer to a flattened
			// field.
			indexes := fields.Indexes[idx]
			parts := strings.Split(name, ".")
			if len(indexes) == len(parts) {
				var final []string
				typ := fields.Type
				for ii, v := range indexes {
					f := typ.Field(v)
					if !f.Anonymous {
						final = append(final, parts[ii])
					}
					typ = f.Type
				}
				name = strings.Join(final, ".")
			}
		}
		log.Debugf("DATASTORE: filter %s %s %v", m, name+op, field.Value)
		dq = dq.Filter(name+op, field.Value)
	}
	return dq, nil
}
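
// Illustrative translation performed above, with hypothetical field
// names and schematic query literals:
//
//	And(Eq{Field: "Name", Value: "go"}, Gt{Field: "Age", Value: 21})
//	  -> dq.Filter("Name =", "go").Filter("Age >", 21)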