func StringValue(v interface{}, db string) string { var ret string switch v.(type) { case string: t, e := time.Parse(time.RFC3339, toolkit.ToString(v)) if e != nil { ret = toolkit.Sprintf("%s", "'"+v.(string)+"'") } else { if strings.Contains(db, "oci8") { // toolkit.Println(t.Format("2006-01-02 15:04:05")) ret = "to_date('" + t.Format("02-01-2006 15:04:05") + "','DD-MM-YYYY hh24:mi:ss')" } else { ret = "'" + t.Format("2006-01-02 15:04:05") + "'" } } case time.Time: t := v.(time.Time).UTC() if strings.Contains(db, "oci8") { ret = "to_date('" + t.Format("2006-01-02 15:04:05") + "','yyyy-mm-dd hh24:mi:ss')" } else { ret = "'" + t.Format("2006-01-02 15:04:05") + "'" } case int, int32, int64, uint, uint32, uint64: ret = toolkit.Sprintf("%d", v.(int)) case nil: ret = "" default: ret = toolkit.Sprintf("%v", v) } return ret }
func TestInsert(t *testing.T) { t.Skip() var e error skipIfConnectionIsNil(t) es := []string{} qinsert := ctx.NewQuery().From(tableName).Insert() for i := 1; i <= 3; i++ { qty := toolkit.RandInt(10) price := toolkit.RandInt(10) * 50 amount := qty * price u := &Orders{ toolkit.Sprintf("ord0%d", i+10), toolkit.Sprintf("item%d", i), qty, price, amount, toolkit.Sprintf("available"), } e = qinsert.Exec(toolkit.M{}.Set("data", u)) if e != nil { es = append(es, toolkit.Sprintf("Insert fail %d: %s \n", i, e.Error())) } } if len(es) > 0 { t.Fatal(es) } operation = "Test Insert" sintaks = ` ctx.NewQuery().From(tableName).Insert(). Exec(toolkit.M{}.Set("data", u))` TestSelect(t) }
func genGo(fi os.FileInfo, source, out string) error { log.Info("Processing " + fi.Name()) fn := filepath.Join(source, fi.Name()) var ( bs []byte e error ) if bs, e = ioutil.ReadFile(fn); e != nil { return errors.New("Open error " + e.Error()) } pkg := new(PackageModel) e = toolkit.UnjsonFromString(string(bs), pkg) if e != nil { return errors.New("Unmarshal JSON: " + e.Error()) } for _, sm := range pkg.Structs { e = sm.Write(pkg, out) if e != nil { return errors.New(toolkit.Sprintf("Write model %s: %s", sm.Name, e.Error())) } log.Info(toolkit.Sprintf("Writing %s.%s", pkg.Name, sm.Name)) } log.Info("Processing " + fi.Name() + " done") return nil }
func TestCRUD(t *testing.T) { skipIfConnectionIsNil(t) e := ctx.NewQuery().Delete().From(tableName).SetConfig("multiexec", true).Exec(nil) if e != nil { t.Fatalf("Delete fail: %s", e.Error()) } es := []string{} qinsert := ctx.NewQuery().From(tableName).SetConfig("multiexec", true).Insert() for i := 1; i <= 50; i++ { u := &testUser{ toolkit.Sprintf("user%d", i), toolkit.Sprintf("User %d", i), toolkit.RandInt(30) + 20, true} e = qinsert.Exec(toolkit.M{}.Set("data", u)) if e != nil { es = append(es, toolkit.Sprintf("Insert fail %d: %s \n", i, e.Error())) } } if len(es) > 0 { t.Fatal(es) } e = ctx.NewQuery().Update().From(tableName).Where(dbox.Lte("_id", "user2")).Exec(toolkit.M{}.Set("data", toolkit.M{}.Set("Enable", false))) if e != nil { t.Fatalf("Update fail: %s", e.Error()) } }
func (p *Page) DefineCommand(server *Server, sourceZipPath string, destZipPath string, appID string) (string, string, string, error) { var ext string if strings.Contains(server.CmdExtract, "7z") || strings.Contains(server.CmdExtract, "zip") { ext = ".zip" } else if strings.Contains(server.CmdExtract, "tar") { ext = ".tar" } else if strings.Contains(server.CmdExtract, "gz") { ext = ".gz" } sourceZipFile := toolkit.Sprintf("%s%s", sourceZipPath, ext) destZipFile := toolkit.Sprintf("%s%s", destZipPath, ext) var unzipCmd string // cmd /C 7z e -o %s -y %s if server.ServerType == "windows" { unzipCmd = toolkit.Sprintf("cmd /C %s", server.CmdExtract) unzipCmd = strings.Replace(unzipCmd, `%1`, destZipPath, -1) unzipCmd = strings.Replace(unzipCmd, `%2`, destZipFile, -1) } else { unzipCmd = strings.Replace(server.CmdExtract, `%1`, destZipFile, -1) unzipCmd = strings.Replace(unzipCmd, `%2`, destZipPath, -1) } return unzipCmd, sourceZipFile, destZipFile, nil }
func doParseSize(size float64, unit string) string { if unit == "" { unit = "B" } ret := "" if size > 1024 { size = size / 1024 if unit == "B" { unit = "K" } else if unit == "K" { unit = "M" } else if unit == "M" { unit = "G" } else if unit == "G" { unit = "T" } else { unit = "P" } if unit != "P" { ret = doParseSize(size, unit) } else { ret = toolkit.Sprintf("%2.2f%s", size, unit) } } else { ret = toolkit.Sprintf("%2.2f%s", size, unit) } return ret }
// MatchV reports whether a single value v satisfies the filter f.
// Comparison ops delegate to toolkit.Compare; in/nin test membership of v in
// f.Value (coerced to []interface{} via a bytes round-trip); contains /
// startwith / endwith match with regular expressions over the string form.
//
// NOTE(review): the regex branches assume v (and, for contains, each filter
// value) is a string — any other type panics on the assertion. Filter values
// are embedded in patterns without regexp.QuoteMeta, so regex metacharacters
// in the filter change matching semantics, and regexp.Match errors are
// discarded. Confirm these are acceptable for the callers.
func MatchV(v interface{}, f *Filter) bool {
	match := false
	/*
		rv0 := reflect.ValueOf(v)
		if rv0.Kind() == reflect.Ptr {
			rv0 = reflect.Indirect(rv0)
		}
		rv1 := reflect.ValueOf(f.Value)
		if rv1.Kind()==reflect.Ptr{
			rv1=reflect.Indirect(rv1)
		}
	*/
	//toolkit.Println("MatchV: ", f.Op, v, f.Value)
	if toolkit.HasMember([]string{FilterOpEqual, FilterOpNoEqual, FilterOpGt, FilterOpGte, FilterOpLt, FilterOpLte}, f.Op) {
		// Simple comparison operators are handled wholesale by toolkit.
		return toolkit.Compare(v, f.Value, f.Op)
	} else if f.Op == FilterOpIn {
		// Bytes round-trip coerces f.Value into []interface{}.
		var values []interface{}
		toolkit.FromBytes(toolkit.ToBytes(f.Value, ""), "", &values)
		return toolkit.HasMember(values, v)
	} else if f.Op == FilterOpNin {
		var values []interface{}
		toolkit.FromBytes(toolkit.ToBytes(f.Value, ""), "", &values)
		return !toolkit.HasMember(values, v)
	} else if f.Op == FilterOpContains {
		var values []interface{}
		var b bool
		toolkit.FromBytes(toolkit.ToBytes(f.Value, ""), "", &values)
		for _, val := range values {
			// value := toolkit.Sprintf(".*%s.*", val.(string))
			// b, _ = regexp.Match(value, []byte(v.(string)))
			// Case-insensitive substring match; any one value matching wins.
			r := regexp.MustCompile(`(?i)` + val.(string))
			b = r.Match([]byte(v.(string)))
			if b {
				return true
			}
		}
	} else if f.Op == FilterOpStartWith || f.Op == FilterOpEndWith {
		value := ""
		if f.Op == FilterOpStartWith {
			value = toolkit.Sprintf("^%s.*$", f.Value)
		} else {
			value = toolkit.Sprintf("^.*%s$", f.Value)
		}
		cond, _ := regexp.Match(value, []byte(v.(string)))
		return cond
	}
	return match
}
func TestSaveQuery(t *testing.T) { var e error for i := 1; i <= 5; i++ { ds := new(colonycore.DataSource) ds.ID = toolkit.Sprintf("ds%d", i) ds.ConnectionID = "conn1" ds.QueryInfo = toolkit.M{} ds.MetaData = nil e = colonycore.Save(ds) if e != nil { t.Fatalf("Save datasource fail. " + e.Error()) } } var dss []colonycore.DataSource c, e := colonycore.Find(new(colonycore.DataSource), nil) if e != nil { t.Fatalf("Load ds fail: " + e.Error()) } e = c.Fetch(&dss, 0, true) if e != nil { t.Fatalf("Ftech ds fail: " + e.Error()) } if len(dss) != 5 { t.Fatal("Fetch ds fail. Got %d records only", len(dss)) } toolkit.Println("Data:", toolkit.JsonString(dss)) }
func TestSaveApp(t *testing.T) { wd, _ := os.Getwd() colonycore.ConfigPath = filepath.Join(wd, "../config") for i := 1; i <= 5; i++ { appn := new(colonycore.Application) appn.ID = toolkit.Sprintf("appn%d", i) appn.Enable = true e = colonycore.Save(appn) if e != nil { t.Fatalf("Save %s fail: %s", appn.ID, e.Error()) } } appn := new(colonycore.Application) e := colonycore.Get(appn, "appn5") if e != nil { t.Fatal(e) } appn.ID = "appn3" e = colonycore.Delete(appn) if e != nil { t.Fatal(e) } }
func TestStorageWrite(t *testing.T) { skipIfClientNil(t) es := []string{} toolkit.Printf("Writing Data:\n") for i := 0; i < 200; i++ { dataku := toolkit.RandInt(1000) totalInt += dataku //toolkit.Printf("%d ", dataku) in := toolkit.M{}.Set("key", fmt.Sprintf("public.dataku.%d", i)).Set("data", toolkit.ToBytes(dataku, "")) writeResult := client.Call("set", in) if writeResult.Status != toolkit.Status_OK { es = append(es, toolkit.Sprintf("Fail to write data %d : %d => %s", i, dataku, writeResult.Message)) } } if len(es) > 0 { errorTxt := "" if len(es) <= 10 { errorTxt = strings.Join(es, "\n") } else { errorTxt = strings.Join(es[:10], "\n") + "\n... And others ..." } t.Errorf("Write data fail.\n%s", errorTxt) } }
func (s *SliceBase) Set(i int, d interface{}) error { e := toolkit.SliceSetItem(s.data, i, d) if e != nil { return errors.New(toolkit.Sprintf("SliceBase.Set: [%d] %s", i, e.Error())) } return nil }
func (c *Coordinator) getAvailableNode(data []byte) (nodeIndex int, e error) { var currentMax float64 found := false dataLength := float64(len(data)) nodes := c.Nodes(RoleStorage) for k, n := range nodes { resultAvail := n.Call("storagestatus", nil) if resultAvail.Status == toolkit.Status_OK { //m := toolkit.M{} sm := struct { Memory *StorageMedia Physical *StorageMedia }{} resultAvail.GetFromBytes(&sm) nodeAvailableSize := sm.Memory.Available() if nodeAvailableSize > dataLength && nodeAvailableSize > currentMax { found = true currentMax = nodeAvailableSize nodeIndex = k } } } if !found { e = errors.New(toolkit.Sprintf("No node available to hosts %s bytes of data", ParseSize(dataLength))) } return }
/* Write Write bytes of data into sebar storage. - Data need to be defined as []byte on in["data"] - To use memory or disk should be defined on in["storage"] as: MEM, DSK (sebar.StorageTypeMemory, sebar.StorageTypeMemory) - If no in["storage"] or the value is not eq to either disk or memory, it will be defaulted to memory */ func (s *Storage) Write(in toolkit.M) *toolkit.Result { r := toolkit.NewResult() key := in.Get("key").(string) storage := StorageTypeEnum(in.GetString("storage")) if storage != StorageTypeMemory && storage != StorageTypeDisk { storage = StorageTypeMemory } dataToWrite := in.Get("data").([]byte) dataLen := len(dataToWrite) // Validation nodeCoordinator := s.NodeByID(s.Coordinator) if nodeCoordinator == nil { return r.SetErrorTxt(s.Address + " no Coordinator has been setup") } // Since all is ok commit the change var ms *StorageMedia if storage == StorageTypeMemory { ms = s.MemoryStorage } else { ms = s.DiskStorage } ms.write(key, dataToWrite, nodeCoordinator) s.Log.Info(toolkit.Sprintf("Writing %s (%s) to node %s", key, ParseSize(float64(dataLen)), s.Address)) return r }
func TestSave(t *testing.T) { InitCall() for i := 1; i <= 1000; i++ { e := office.NewEmployee() e.ID = "emp" + strconv.Itoa(i) e.Title = toolkit.Sprintf("Test Title %d", i) e.Address = toolkit.Sprintf("Address %d", i) e.LastLogin = time.Now() if math.Mod(float64(i), 2) == 0 { e.Enable = true } else { e.Enable = false } //log.Printf("DB %+v", office.DB()) //log.Printf("e %+v", toolkit.JsonString(e)) office.DB().Save(e) } }
func TestInsert(t *testing.T) { // t.Skip("Skip : Comment this line to do test") skipIfConnectionIsNil(t) es := []string{} qinsert := ctx.NewQuery().From("Data_CUD").SetConfig("multiexec", true).Save() for i := 1; i <= 5; i++ { u := toolkit.M{}.Set("Id", toolkit.Sprintf("ID-1%d", i)). Set("Email", toolkit.Sprintf("user-1%d", i)). Set("FirstName", toolkit.Sprintf("User no.%d", i)). Set("LastName", toolkit.Sprintf("Test no.%d", i)) e := qinsert.Exec(toolkit.M{}.Set("data", u)) if e != nil { es = append(es, toolkit.Sprintf("Insert fail %d: %s \n", i, e.Error())) } } if len(es) > 0 { t.Fatal(es) } }
func main() { flag.Parse() source := makePath(*flagSource) outPath := makePath(*flagOut) log.Info(toolkit.Sprintf( "Generating *.go files\nSource: %s\nOutput Path: %s", source, outPath)) fileInfos, e := getOrms(source) check(e, true, "") for _, fi := range fileInfos { e := genGo(fi, source, outPath) check(e, true, "Gen-Go") } }
func (l *LoginController) Logout(r *knot.WebContext) interface{} { r.Config.OutputType = knot.OutputJson sessionId := toolkit.ToString(r.Session("sessionid", "")) if toolkit.ToString(sessionId) == "" { return helper.CreateResult(true, nil, "Active sessionid not found") } err := acl.Logout(sessionId) if err != nil && (err.Error() == "Session id not found" || err.Error() == "Session id is expired") { return helper.CreateResult(true, nil, "Active sessionid not found") } else if err != nil { return helper.CreateResult(true, nil, toolkit.Sprintf("Error found : %v", err.Error())) } r.SetSession("sessionid", "") return helper.CreateResult(true, nil, "Logout success") }
func (a *AclController) Logout(r *knot.WebContext) interface{} { r.Config.OutputType = knot.OutputJson payload := toolkit.M{} err := r.GetPayload(&payload) sessionid := "" switch { case err != nil: return helper.CreateResult(false, nil, err.Error()) case !payload.Has("username") && !payload.Has("sessionid"): return helper.CreateResult(false, nil, "username or session not found") case payload.Has("sessionid"): sessionid = toolkit.ToString(payload["sessionid"]) case payload.Has("username"): tUser := new(acl.User) err = acl.FindUserByLoginID(tUser, toolkit.ToString(payload["username"])) if err != nil { return helper.CreateResult(false, nil, "fail to get userid") } tSession := new(acl.Session) err = acl.FindActiveSessionByUser(tSession, tUser.ID) if err != nil { return helper.CreateResult(false, nil, "fail to get sessionid") } sessionid = tSession.ID } if sessionid == "" { return helper.CreateResult(true, nil, "Active sessionid not found") } err = acl.Logout(sessionid) if err != nil && (err.Error() == "Session id not found" || err.Error() == "Session id is expired") { return helper.CreateResult(true, nil, "Active sessionid not found") } else if err != nil { return helper.CreateResult(true, nil, toolkit.Sprintf("Error found : %v", err.Error())) } return helper.CreateResult(true, nil, "Logout success") }
func (p *Page) CopyFileToServer(server *Server, sourcePath string, destPath string, appID string, log *toolkit.LogEngine) error { var serverPathSeparator string if strings.Contains(destPath, "/") { serverPathSeparator = `/` } else { serverPathSeparator = "\\\\" } destZipPath := strings.Join([]string{destPath, appID}, serverPathSeparator) unzipCmd, sourceZipFile, destZipFile, err := p.DefineCommand(server, sourcePath, destZipPath, appID) log.AddLog(toolkit.Sprintf("Connect to server %v", server), "INFO") sshSetting, sshClient, err := p.connectSSH(server) defer sshClient.Close() log.AddLog(unzipCmd, "INFO") /*compress file on local colony manager*/ if strings.Contains(sourceZipFile, ".zip") { err = toolkit.ZipCompress(sourcePath, sourceZipFile) } else if strings.Contains(sourceZipFile, ".tar") { err = toolkit.TarCompress(sourcePath, sourceZipFile) } if err != nil { log.AddLog(err.Error(), "ERROR") return err } rmCmdZip := toolkit.Sprintf("rm -rf %s", destZipFile) log.AddLog(rmCmdZip, "INFO") _, err = sshSetting.GetOutputCommandSsh(rmCmdZip) /*delete zip file on server before copy file*/ if err != nil { log.AddLog(err.Error(), "ERROR") return err } log.AddLog(toolkit.Sprintf("scp from %s to %s", sourceZipFile, destPath), "INFO") err = sshSetting.SshCopyByPath(sourceZipFile, destPath) /*copy zip file from colony manager to server*/ if err != nil { log.AddLog(err.Error(), "ERROR") return err } rmCmdZipOutput := toolkit.Sprintf("rm -rf %s", destZipPath) log.AddLog(rmCmdZipOutput, "INFO") _, err = sshSetting.GetOutputCommandSsh(rmCmdZipOutput) /*delete folder before extract zip file on server*/ if err != nil { log.AddLog(err.Error(), "ERROR") return err } mkdirDestCmd := toolkit.Sprintf("%s %s%s%s", server.CmdMkDir, destZipPath, serverPathSeparator, appID) log.AddLog(mkdirDestCmd, "INFO") _, err = sshSetting.GetOutputCommandSsh(mkdirDestCmd) /*make new dest folder on server for folder extraction*/ if err != nil { log.AddLog(err.Error(), "ERROR") return err } chmodDestCmd 
:= toolkit.Sprintf("chmod -R 755 %s%s%s", destZipPath, serverPathSeparator, appID) log.AddLog(chmodDestCmd, "INFO") _, err = sshSetting.GetOutputCommandSsh(chmodDestCmd) /*set chmod on new folder extraction*/ if err != nil { log.AddLog(err.Error(), "ERROR") return err } log.AddLog(unzipCmd, "INFO") _, err = sshSetting.GetOutputCommandSsh(unzipCmd) /*extract zip file to server*/ if err != nil { log.AddLog(err.Error(), "ERROR") return err } log.AddLog(toolkit.Sprintf("remove %s", sourceZipFile), "INFO") err = os.Remove(sourceZipFile) /*remove zip file from local colony manager*/ if err != nil { log.AddLog(err.Error(), "ERROR") return err } log.AddLog(rmCmdZip, "INFO") _, err = sshSetting.GetOutputCommandSsh(rmCmdZip) /*delete zip file on server after folder extraction*/ if err != nil { log.AddLog(err.Error(), "ERROR") return err } return nil }
func (q *Query) Exec(in toolkit.M) error { setting, e := q.prepare(in) commandType := setting["commandtype"].(string) //toolkit.Printf("Command type: %s\n", commandType) if e != nil { return err.Error(packageName, modQuery, "Exec: "+commandType, e.Error()) } if setting.GetString("commandtype") == dbox.QueryPartSelect { return err.Error(packageName, modQuery, "Exec: "+commandType, "Exec is not working with select command, please use .Cursor instead") } q.Lock() defer q.Unlock() var dataM toolkit.M var dataMs []toolkit.M hasData := in.Has("data") dataIsSlice := false data := in.Get("data") if toolkit.IsSlice(data) { dataIsSlice = true e = toolkit.Unjson(toolkit.Jsonify(data), dataMs) if e != nil { return err.Error(packageName, modQuery, "Exec: "+commandType, "Data encoding error: "+e.Error()) } } else { dataM, e = toolkit.ToM(data) dataMs = append(dataMs, dataM) if e != nil { return err.Error(packageName, modQuery, "Exec: "+commandType, "Data encoding error: "+e.Error()) } } hasWhere := setting.Has("where") where := setting.Get("where", []*dbox.Filter{}).([]*dbox.Filter) if hasWhere && len(where) == 0 { inWhere := in.Get("where") if inWhere == nil { hasWhere = false where = nil } else { if !toolkit.IsSlice(inWhere) { where = append(where, inWhere.(*dbox.Filter)) } else { where = inWhere.([]*dbox.Filter) } } } if hasData && hasWhere == false && toolkit.HasMember([]interface{}{dbox.QueryPartInsert, dbox.QueryPartDelete, dbox.QueryPartUpdate, dbox.QueryPartSave}, commandType) { hasWhere = true //toolkit.Println("check where") if toolkit.IsSlice(data) { ids := []interface{}{} idField := "" if idField == "" { return err.Error(packageName, modQuery, "Exec: "+commandType, "Data send is a slice, but its element has no ID") } dataCount := toolkit.SliceLen(data) for i := 0; i < dataCount; i++ { dataI := toolkit.SliceItem(data, i) if i == 0 { idField = toolkit.IdField(dataI) } ids = append(ids, toolkit.Id(dataI)) } where = []*dbox.Filter{dbox.In(idField, ids)} } else { idfield 
:= "_id" id := toolkit.Id(data) if !toolkit.IsNilOrEmpty(id) { where = []*dbox.Filter{dbox.Eq(idfield, id)} } else { where = nil hasWhere = false } } } /* toolkit.Printf("CommandType: %s HasData: %v HasWhere: %v Where: %s\n", commandType, hasData, hasWhere, toolkit.JsonString(where)) */ e = q.openFile(commandType) //toolkit.Printf(commandType+" Open File, found record: %d\nData:%s\n", len(q.data), toolkit.JsonString(q.data)) if e != nil { return err.Error(packageName, modQuery, "Exec: "+commandType, e.Error()) } var indexes []interface{} if hasWhere && commandType != dbox.QueryPartInsert { whereIndex := dbox.Find(q.data, where) indexes = toolkit.ToInterfaceArray(&whereIndex) //toolkit.Printf("Where Index: %s Index:%s\n", toolkit.JsonString(whereIndex), toolkit.JsonString(indexes)) } if commandType == dbox.QueryPartInsert { if !hasData { return err.Error(packageName, modQuery, "Exec: "+commandType, "Data is empty") } if !dataIsSlice { dataMs = []toolkit.M{dataM} } //-- validate for _, datam := range dataMs { idField, idValue := toolkit.IdInfo(datam) toolkit.Serde(dbox.Find(q.data, []*dbox.Filter{dbox.Eq(idField, idValue)}), &indexes, "") if len(indexes) > 0 { return err.Error(packageName, modQuery, "Exec: "+commandType, toolkit.Sprintf("Data %v already exist", idValue)) } } //-- insert the data q.data = append(q.data, dataMs...) 
} else if commandType == dbox.QueryPartUpdate { //-- valida if !hasData { return err.Error(packageName, modQuery, "Exec: "+commandType, "Data is empty") } var dataUpdate toolkit.M var updateDataIndex int // if it is a slice then we need to update each data passed on its slice isDataSlice := toolkit.IsSlice(data) if isDataSlice == false { isDataSlice = false e = toolkit.Serde(data, &dataUpdate, "") if e != nil { return err.Error(packageName, modQuery, "Exec: "+commandType, "Serde data fail"+e.Error()) } } var idField string //toolkit.Printf("Indexes: %s\n", toolkit.JsonString(indexes)) for i, v := range q.data { // update only data that match given inde if toolkit.HasMember(indexes, i) || !hasWhere { if idField == "" { idField = toolkit.IdField(v) if idField == "" { return err.Error(packageName, modQuery, "Exec: "+commandType, "No ID") } } // If dataslice is sent, iterate f if isDataSlice { e = toolkit.Serde(toolkit.SliceItem(data, updateDataIndex), &dataUpdate, "") if e != nil { return err.Error(packageName, modQuery, "Exec: "+commandType, "Serde data fail "+e.Error()) } updateDataIndex++ } dataOrigin := q.data[i] toolkit.CopyM(&dataUpdate, &dataOrigin, false, []string{"_id"}) toolkit.Serde(dataOrigin, &v, "") q.data[i] = v } } } else if commandType == dbox.QueryPartDelete { if hasWhere && len(where) > 0 { indexes := dbox.Find(q.data, where) if len(indexes) > 0 { newdata := []toolkit.M{} for index, v := range q.data { partOfIndex := toolkit.HasMember(indexes, index) if partOfIndex == false { newdata = append(newdata, v) } //toolkit.Println("i:", indexes, ", index:", index, ", p.ofIndex: ", partOfIndex, ", data: ", toolkit.JsonString(newdata)) } q.data = newdata } } else { q.data = []toolkit.M{} } //toolkit.Printf("Data now: %s\n", toolkit.JsonString(q.data)) } else if commandType == dbox.QueryPartSave { if !hasData { return err.Error(packageName, modQuery, "Exec: "+commandType, "Data is empty") } var dataMs []toolkit.M var dataM toolkit.M if !toolkit.IsSlice(data) 
{ e = toolkit.Serde(&data, &dataM, "json") if e != nil { return err.Error(packageName, modQuery, "Exec: "+commandType+" Serde data fail", e.Error()) } dataMs = append(dataMs, dataM) } else { e = toolkit.Serde(&data, &dataMs, "json") if e != nil { return err.Error(packageName, modQuery, "Exec: "+commandType+" Serde data fail", e.Error()) } } //toolkit.Printf("Saving: %s\n", toolkit.JsonString(dataMs)) for _, v := range dataMs { idField, idValue := toolkit.IdInfo(v) indexes := dbox.Find(q.data, []*dbox.Filter{dbox.Eq(idField, idValue)}) if len(indexes) == 0 { q.data = append(q.data, v) } else { dataOrigin := q.data[indexes[0]] //toolkit.Printf("Copy data %s to %s\n", toolkit.JsonString(v), toolkit.JsonString(dataOrigin)) toolkit.CopyM(&v, &dataOrigin, false, []string{idField}) q.data[indexes[0]] = dataOrigin } } } e = q.writeFile() if e != nil { return err.Error(packageName, modQuery, "Exec: "+commandType+" Write fail", e.Error()) } return nil }
func (sm *StructModel) Write(pkg *PackageModel, path string) error { if pkg.Name == "" || sm.Name == "" { return toolkit.Errorf("Both package name and struct name should be defined") } //return toolkit.Errorf("Fail to write %s.%s : method Write is not yet implemented", pkg.Name, sm.Name) //-- write base e := pkg.WriteBase(path) if e != nil { return e } filename := filepath.Join(path, strings.ToLower(sm.Name)+".go") //currentCode := "" f, e := os.Open(filename) if e == nil { //bcurrent, _ := ioutil.ReadAll(f) //currentCode = string(bcurrent) os.Remove(filename) } f, e = os.Create(filename) if e != nil { return toolkit.Errorf("Failed to write %s.%s: %s", pkg.Name, sm.Name, e.Error()) } defer f.Close() txts := []string{} //--- package txts = append(txts, "package "+pkg.Name) //--- imports txts = append(txts, toolkit.Sprintf("import (%s)", libs(mandatoryLibs, pkg.ObjectLibs, sm.Libs))) //--- struct definition txts = append(txts, "type "+sm.Name+" struct {\n"+ "orm.ModelBase `bson:\"-\" json:\"-\"`") for _, f := range sm.Fields { if f.Type == "" { f.Type = "string" } fieldStr := toolkit.Sprintf("%s %s %s", f.Name, f.Type, f.Tag) txts = append(txts, fieldStr) } txts = append(txts, "}") //--- tablename pluralNames := strings.ToLower(sm.Name) if strings.HasSuffix(pluralNames, "s") { pluralNames = pluralNames + "es" } else { pluralNames = pluralNames + "s" } tablename := toolkit.Sprintf("func (o *%s) TableName()string{"+ "return \"%s\"\n"+ "}", sm.Name, pluralNames) txts = append(txts, tablename) //--- new fieldBuilders := "" for _, field := range sm.Fields { notEmpty := !toolkit.IsNilOrEmpty(field.Default) if notEmpty { def := toolkit.Sprintf("%v", field.Default) if field.Type == "string" { def = "\"" + def + "\"" } fieldBuilders += toolkit.Sprintf("o.%s=%s", field.Name, def) + "\n" } } newfn := "func New{0}() *{0}{\n" + "o:=new({0})\n" + fieldBuilders + "return o" + "}" newfn = toolkit.Formatf(newfn, sm.Name) txts = append(txts, newfn) //--- find tpl := `func 
{0}Find(filter *dbox.Filter, fields, orders string, limit, skip int) dbox.ICursor { config := makeFindConfig(fields, orders, skip, limit) if filter != nil { config.Set("where", filter) } c, _ := DB().Find(new({0}), config) return c }` txts = append(txts, toolkit.Formatf(tpl, sm.Name)) //--- get tpl = `func {0}Get(filter *dbox.Filter, orders string, skip int) (emp *{0}, err error) { config := makeFindConfig("", orders, skip, 1) if filter != nil { config.Set("where", filter) } c, ecursor := DB().Find(new({0}), config) if ecursor != nil { return nil, ecursor } defer c.Close() emp = new({0}) err = c.Fetch(emp, 1, false) return emp, err }` txts = append(txts, toolkit.Formatf(tpl, sm.Name)) //-- method & get for _, method := range sm.Methods { txts = append(txts, sm.buildMethod( pkg, method.Type, method.Field)) } b := bufio.NewWriter(f) for _, txt := range txts { b.WriteString(txt + "\n") } e = b.Flush() if e != nil { return toolkit.Errorf("Failed to write %s.%s: %s", pkg.Name, sm.Name, e.Error()) } toolkit.RunCommand("/bin/sh", "-c", "gofmt -w "+filename) return nil }
func StringValue(v interface{}, db string) string { var ret string switch v.(type) { case string: t, e := time.Parse(time.RFC3339, toolkit.ToString(v)) if e != nil { ret = toolkit.Sprintf("%s", "'"+strings.Replace(v.(string), "'", "''", -1)+"'") } else { nullDateTime := time.Time{} if t.Equal(nullDateTime) { ret = "NULL" } else if strings.Contains(db, "oci8") { ret = "to_date('" + t.Format("02-01-2006 15:04:05") + "','DD-MM-YYYY hh24:mi:ss')" } else { ret = "'" + t.Format("2006-01-02 15:04:05") + "'" } } break case time.Time: t := v.(time.Time).UTC() if strings.Contains(db, "oci8") { ret = "to_date('" + t.Format("2006-01-02 15:04:05") + "','yyyy-mm-dd hh24:mi:ss')" } else { ret = "'" + t.Format("2006-01-02 15:04:05") + "'" } break case int, int32, int64, uint, uint32, uint64: ret = toolkit.Sprintf("%d", v.(int)) break case nil: ret = "" break case bool: // Please check for other database switch strings.ToLower(db) { case "mssql": if v.(bool) { ret = toolkit.Sprintf("%v", 1) } else { ret = toolkit.Sprintf("%v", 0) } break default: ret = toolkit.Sprintf("%v", v) } break case []string: for x, i := range v.([]string) { if x == 0 { ret += "'" + i + "'" } else { ret += ",'" + i + "'" } } break default: ret = toolkit.Sprintf("%v", v) break } return ret }
func RandomIDWithPrefix(prefix string) string { timestamp := time.Now().UnixNano() / int64(time.Millisecond) return toolkit.Sprintf("%s%d", prefix, timestamp) }
// ConnectToDataSourceDB executes the data-browser query identified by
// payload["browserid"] against its configured connection and returns
// (total matching row count, page of rows, the DataBrowser definition, error).
// Filtering, paging, and optional distinct-value "lookup" mode are driven by
// the remaining payload keys and the browser's metadata.
//
// NOTE(review): `sorter` is assigned but not declared here — it appears to be
// a package-level variable, and the computed sort order is never applied to
// the query below (the .Order call is commented out). Confirm intent.
func (d *DataSourceController) ConnectToDataSourceDB(payload toolkit.M) (int, []toolkit.M, *colonycore.DataBrowser, error) {
	var hasLookup bool
	toolkit.Println("payload : ", payload)
	if payload.Has("haslookup") {
		// assumes payload["haslookup"] is a bool — panics otherwise; TODO confirm callers
		hasLookup = payload.Get("haslookup").(bool)
	}
	_id := toolkit.ToString(payload.Get("browserid", ""))
	sort := payload.Get("sort")
	search := payload.Get("search") // currently unused
	_ = search
	take := toolkit.ToInt(payload.Get("take", ""), toolkit.RoundingAuto)
	skip := toolkit.ToInt(payload.Get("skip", ""), toolkit.RoundingAuto)
	TblName := toolkit.M{}
	payload.Unset("browserid")
	//sorter = ""
	// Translate the grid's sort descriptor into "field"/"-field" form.
	if sort != nil {
		tmsort, _ := toolkit.ToM(sort.([]interface{})[0])
		fmt.Printf("====== sort %#v\n", tmsort["dir"])
		if tmsort["dir"] == "asc" {
			sorter = tmsort["field"].(string)
		} else if tmsort["dir"] == "desc" {
			sorter = "-" + tmsort["field"].(string)
		} else if tmsort["dir"] == nil {
			sorter = " "
		}
	} else {
		sorter = " "
	}
	// Load the browser definition, its connection, and open the datasource.
	dataDS := new(colonycore.DataBrowser)
	err := colonycore.Get(dataDS, _id)
	if err != nil {
		return 0, nil, nil, err
	}
	dataConn := new(colonycore.Connection)
	err = colonycore.Get(dataConn, dataDS.ConnectionID)
	if err != nil {
		return 0, nil, nil, err
	}
	if err := d.checkIfDriverIsSupported(dataConn.Driver); err != nil {
		return 0, nil, nil, err
	}
	connection, err := helper.ConnectUsingDataConn(dataConn).Connect()
	if err != nil {
		return 0, nil, nil, err
	}
	// Build the source clause depending on how the browser was defined:
	// a plain table name, a Dbox JSON query, or free-form SQL.
	if dataDS.QueryType == "" {
		TblName.Set("from", dataDS.TableNames)
		payload.Set("from", dataDS.TableNames)
	} else if dataDS.QueryType == "Dbox" {
		getTableName := toolkit.M{}
		toolkit.UnjsonFromString(dataDS.QueryText, &getTableName)
		payload.Set("from", getTableName.Get("from").(string))
		if qSelect := getTableName.Get("select", "").(string); qSelect != "" {
			payload.Set("select", getTableName.Get("select").(string))
		}
	} else if dataDS.QueryType == "SQL" {
		// Paging syntax differs per driver.
		var QueryString string
		if dataConn.Driver == "mysql" || dataConn.Driver == "hive" {
			QueryString = " LIMIT " + toolkit.ToString(take) + " OFFSET " + toolkit.ToString(skip)
		} else if dataConn.Driver == "mssql" {
			QueryString = " OFFSET " + toolkit.ToString(skip) + " ROWS FETCH NEXT " + toolkit.ToString(take) + " ROWS ONLY "
		} else if dataConn.Driver == "postgres" {
			QueryString = " LIMIT " + toolkit.ToString(take) + " OFFSET " + toolkit.ToString(skip)
		}
		stringQuery := toolkit.Sprintf("%s %s", dataDS.QueryText, QueryString)
		payload.Set("freetext", stringQuery)
		// toolkit.Println(stringQuery)
	}
	// qcount counts all matching rows; query fetches the requested page.
	qcount, _ := d.parseQuery(connection.NewQuery(), TblName)
	query, _ := d.parseQuery(connection.NewQuery() /*.Skip(skip).Take(take) .Order(sorter)*/, payload)
	// Apply per-field filters from the payload, typed per the field metadata.
	var selectfield string
	for _, metadata := range dataDS.MetaData {
		tField := metadata.Field
		if payload.Has(tField) {
			selectfield = toolkit.ToString(tField)
			if toolkit.IsSlice(payload[tField]) {
				query = query.Where(dbox.In(tField, payload[tField].([]interface{})...))
				qcount = qcount.Where(dbox.In(tField, payload[tField].([]interface{})...))
			} else if !toolkit.IsNilOrEmpty(payload[tField]) {
				// Values containing one of the recognized pattern tokens go
				// through dbox.ParseFilter instead of a plain comparison.
				var hasPattern bool
				for _, val := range querypattern {
					if strings.Contains(toolkit.ToString(payload[tField]), val) {
						hasPattern = true
					}
				}
				if hasPattern {
					query = query.Where(dbox.ParseFilter(toolkit.ToString(tField), toolkit.ToString(payload[tField]), toolkit.ToString(metadata.DataType), ""))
					qcount = qcount.Where(dbox.ParseFilter(toolkit.ToString(tField), toolkit.ToString(payload[tField]), toolkit.ToString(metadata.DataType), ""))
				} else {
					switch toolkit.ToString(metadata.DataType) {
					case "int":
						query = query.Where(dbox.Eq(tField, toolkit.ToInt(payload[tField], toolkit.RoundingAuto)))
						qcount = qcount.Where(dbox.Eq(tField, toolkit.ToInt(payload[tField], toolkit.RoundingAuto)))
					case "float32":
						query = query.Where(dbox.Eq(tField, toolkit.ToFloat32(payload[tField], 2, toolkit.RoundingAuto)))
						qcount = qcount.Where(dbox.Eq(tField, toolkit.ToFloat32(payload[tField], 2, toolkit.RoundingAuto)))
					case "float64":
						query = query.Where(dbox.Eq(tField, toolkit.ToFloat64(payload[tField], 2, toolkit.RoundingAuto)))
						qcount = qcount.Where(dbox.Eq(tField, toolkit.ToFloat64(payload[tField], 2, toolkit.RoundingAuto)))
					default:
						query = query.Where(dbox.Contains(tField, toolkit.ToString(payload[tField])))
						qcount = qcount.Where(dbox.Contains(tField, toolkit.ToString(payload[tField])))
					}
				}
			}
		}
	}
	// Lookup mode: return only the lookup column, grouped except for flat files.
	if hasLookup && selectfield != "" {
		if toolkit.HasMember(ds_flatfile, dataConn.Driver) {
			query = query.Select(selectfield)
			qcount = qcount.Select(selectfield)
		} else {
			query = query.Select(selectfield).Group(selectfield)
			qcount = qcount.Select(selectfield).Group(selectfield)
		}
	}
	ccount, err := qcount.Cursor(nil)
	if err != nil {
		return 0, nil, nil, err
	}
	defer ccount.Close()
	dcount := ccount.Count()
	cursor, err := query.Cursor(nil)
	if err != nil {
		return 0, nil, nil, err
	}
	defer cursor.Close()
	data := []toolkit.M{}
	// NOTE(review): Fetch's error is discarded; the check below re-tests the
	// stale err from Cursor, which is known to be nil here.
	cursor.Fetch(&data, 0, false)
	if err != nil {
		return 0, nil, nil, err
	}
	// Mongo-style lookups return distinct values under "_id"; flat files need
	// manual de-duplication of the selected column.
	if hasLookup && selectfield != "" && !toolkit.HasMember(ds_rdbms, dataConn.Driver) && !toolkit.HasMember(ds_flatfile, dataConn.Driver) {
		dataMongo := []toolkit.M{}
		for _, val := range data {
			mVal, _ := toolkit.ToM(val.Get("_id"))
			dataMongo = append(dataMongo, mVal)
		}
		data = dataMongo
	} else if hasLookup && selectfield != "" && toolkit.HasMember(ds_flatfile, dataConn.Driver) {
		/*distinct value for flat file*/
		dataFlat := []toolkit.M{}
		var existingVal = []string{""}
		for _, val := range data {
			valString := toolkit.ToString(val.Get(selectfield))
			if !toolkit.HasMember(existingVal, valString) {
				dataFlat = append(dataFlat, val)
				existingVal = append(existingVal, valString)
			}
		}
		data = dataFlat
	}
	return dcount, data, dataDS, nil
}
func (c *AccountController) ForgotPassword(k *knot.WebContext) interface{} { k.Config.OutputType = knot.OutputJson var e error result := "" d := struct { UserEmail string }{} e = k.GetPayload(&d) csr, e := c.Ctx.Find(new(UserModel), tk.M{}.Set("where", dbox.Eq("Email", d.UserEmail))) defer csr.Close() Users := []*UserModel{} e = csr.Fetch(&Users, 0, false) if e != nil { return ResultInfo(result, e) } if len(Users) > 0 { user := Users[0] var letterRunes = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") b := make([]rune, 5) for i := range b { b[i] = letterRunes[rand.Intn(len(letterRunes))] } newPassword := string(b) user.PasswordHash = GetMD5Hash(newPassword) e = c.Ctx.Delete(user) if e != nil { return ResultInfo(result, e) } e = c.Ctx.Save(user) if e != nil { return ResultInfo(result, e) } conf := gomail.NewDialer("smtp.office365.com", 587, "*****@*****.**", "B920Support") s, e := conf.Dial() if e != nil { return ResultInfo(result, e) } mailsubj := tk.Sprintf("%v", "[NOREPLY] Forgot Password SEC Apps") mailmsg := tk.Sprintf("Dear %v \n\n This is your new password : %v \n\nThis mail has been sent from forgot password feature and if you did not initiate this change, please contact your System Administrator", user.FullName, newPassword) m := gomail.NewMessage() m.SetHeader("From", "*****@*****.**") m.SetHeader("To", "*****@*****.**") m.SetHeader("Subject", mailsubj) m.SetBody("text/html", mailmsg) e = gomail.Send(s, m) m.Reset() if e != nil { return ResultInfo(result, e) } result = "OK" } else { result = "NOK" } return ResultInfo(result, e) }
func (d *DataBrowserController) hasAggr(ctx dbox.IConnection, data *colonycore.DataBrowser, conn *colonycore.Connection) (*colonycore.DataBrowser, error) { var fieldArr, aggrArr []string var indexAggr []map[int]string var query dbox.IQuery fieldAggr := toolkit.M{} for i, v := range data.MetaData { if v.Aggregate != "" { result := toolkit.M{} toolkit.UnjsonFromString(v.Aggregate, &result) cursor := []toolkit.M{} if data.QueryType == "" { aggregate, e := d.dboxAggr(data.TableNames, v.Field, ctx, query, result, fieldAggr, cursor, conn) if e != nil { return nil, e } v.Aggregate = toolkit.JsonString(aggregate) } else if data.QueryType == "SQL" { names := map[int]string{} fieldArr = append(fieldArr, v.Field) if _, sumOK := result["SUM"]; sumOK { aggrArr = append(aggrArr, "SUM("+v.Field+")") if len(result) > 1 { indexAggr = append(indexAggr, map[int]string{i: "sum"}) } else { names[i] = "sum" } } if _, avgOK := result["AVG"]; avgOK { aggrArr = append(aggrArr, "AVG("+v.Field+")") if len(result) > 1 { indexAggr = append(indexAggr, map[int]string{i: "avg"}) } else { names[i] = "avg" } } if _, maxOK := result["MAX"]; maxOK { aggrArr = append(aggrArr, "MAX("+v.Field+")") if len(result) > 1 { indexAggr = append(indexAggr, map[int]string{i: "max"}) } else { names[i] = "max" } } if _, minOK := result["MIN"]; minOK { aggrArr = append(aggrArr, "MIN("+v.Field+")") if len(result) > 1 { indexAggr = append(indexAggr, map[int]string{i: "min"}) } else { names[i] = "min" } } if _, minOK := result["COUNT"]; minOK { aggrArr = append(aggrArr, "COUNT("+v.Field+")") if len(result) > 1 { indexAggr = append(indexAggr, map[int]string{i: "count"}) } else { names[i] = "count" } } if len(result) > 1 { fieldAggr.Set(v.Field, indexAggr) } else { fieldAggr.Set(v.Field, names) } } else if data.QueryType == "Dbox" { getQuery := toolkit.M{} toolkit.UnjsonFromString(data.QueryText, &getQuery) aggregate, e := d.dboxAggr(getQuery.Get("from").(string), v.Field, ctx, query, result, fieldAggr, cursor, conn) if 
e != nil { return nil, e } v.Aggregate = toolkit.JsonString(aggregate) } } } if data.QueryType == "SQL" { // fieldString := strings.Join(fieldArr, ", ") aggrString := strings.Join(aggrArr, ", ") var queryText string r := regexp.MustCompile(`(([Ff][Rr][Oo][Mm])) (?P<from>([a-zA-Z][_a-zA-Z]+[_a-zA-Z0-1].*))`) temparray := r.FindStringSubmatch(data.QueryText) sqlpart := toolkit.M{} for i, val := range r.SubexpNames() { if val != "" { sqlpart.Set(val, temparray[i]) } } if fromOK := sqlpart.Get("from", "").(string); fromOK != "" { queryText = toolkit.Sprintf("select %s FROM %s", aggrString, sqlpart.Get("from", "").(string)) // toolkit.Printf("queryString:%v\n", queryString) } query = ctx.NewQuery().Command("freequery", toolkit.M{}. Set("syntax", queryText)) csr, e := query.Cursor(nil) if e != nil { return nil, e } defer csr.Close() cursor := []toolkit.M{} e = csr.Fetch(&cursor, 0, false) if e != nil { return nil, e } for f, m := range fieldAggr { aggrData := toolkit.M{} for _, aggs := range cursor { for k, agg := range aggs { if toolkit.SliceLen(m) > 0 { for _, vals := range m.([]map[int]string) { for key, val := range vals { if strings.Contains(k, f) && strings.Contains(k, data.MetaData[key].Field) && strings.Contains(k, val) { aggrData.Set(val, agg) data.MetaData[key].Aggregate = toolkit.JsonString(aggrData) } } } } else { for key, val := range m.(map[int]string) { if strings.Contains(k, f) && strings.Contains(k, data.MetaData[key].Field) && strings.Contains(k, val) { aggrData.Set(val, agg) data.MetaData[key].Aggregate = toolkit.JsonString(aggrData) // toolkit.Printf("k:%v f:%v key:%v val:%v agg:%v\n", k, f, key, val, data.MetaData[key].Aggregate) } } } } } } } return data, nil }
func (sm *StructModel) buildMethod( pkg *PackageModel, methodType string, fields string) string { fieldIds := strings.Split(fields, ",") fieldNameConcat := "" filter := "" filtersEq := []string{} parmNames := []string{} for _, fieldId := range fieldIds { fieldId = strings.Trim(fieldId, " ") field := sm.Field(fieldId) if field != nil { fieldNameConcat += field.Name parmNames = append(parmNames, toolkit.Sprintf("p%s %s", field.Name, field.Type)) fieldNameFn := strings.ToLower(field.Name) if fieldNameFn == "id" { fieldNameFn = "_id" } filtersEq = append(filtersEq, toolkit.Formatf(`dbox.Eq("{0}",{1})`, fieldNameFn, "p"+field.Name)) } } if len(filtersEq) == 1 { filter = filtersEq[0] } else if len(filtersEq) > 1 { filter = "dbox.And(" + strings.Join(filtersEq, ",") + ")" } var tpl string if methodType == MethodFind { tpl = ` func {0}FindBy{1}({2},fields string,limit,skip int) dbox.ICursor{ return {0}Find({3},orders,"",limit,skip) }` } else { tpl = ` func {0}GetBy{1}({2},orders string)(*{0},error){ return {0}Get({3},orders,0) } ` } return toolkit.Formatf(tpl, sm.Name, //--0 fieldNameConcat, //--1 strings.Join(parmNames, ","), //--2 filter) }