func (s *session) Auth(user string, auth []byte, salt []byte) bool {
    strs := strings.Split(user, "@")
    if len(strs) != 2 {
        log.Warnf("Invalid format for user: %s", user)
        return false
    }
    // Get user password.
    name := strs[0]
    host := strs[1]
    pwd, err := s.getPassword(name, host)
    if err != nil {
        if terror.ExecResultIsEmpty.Equal(err) {
            log.Errorf("User [%s] not exist %v", name, err)
        } else {
            log.Errorf("Get User [%s] password from SystemDB error %v", name, err)
        }
        return false
    }
    if len(pwd) != 0 && len(pwd) != 40 {
        log.Errorf("User [%s] password from SystemDB not like a sha1sum", name)
        return false
    }
    hpwd, err := util.DecodePassword(pwd)
    if err != nil {
        log.Errorf("Decode password string error %v", err)
        return false
    }
    checkAuth := util.CalcPassword(salt, hpwd)
    if !bytes.Equal(auth, checkAuth) {
        return false
    }
    variable.GetSessionVars(s).SetCurrentUser(user)
    return true
}
func (c *Conf) CheckConfigUpdate() {
    if c.proxyConfig.Global.ConfAutoload == 1 {
        for {
            time.Sleep(time.Minute)
            log.Infof("CheckConfigUpdate checking")
            fileinfo, err := os.Stat(c.path)
            if err != nil {
                log.Errorf("CheckConfigUpdate error %s", err.Error())
                continue
            }
            // The config file has been modified.
            if c.lastModifiedTime.Before(fileinfo.ModTime()) {
                log.Infof("CheckConfigUpdate config change and load new config")
                defaultProxyConfig := getDefaultProxyConfig()
                err = c.parseConfigFile(defaultProxyConfig)
                if err != nil {
                    log.Errorf("CheckConfigUpdate error %s", err.Error())
                    continue
                }
                c.lastModifiedTime = fileinfo.ModTime()
                // Other goroutines read proxyConfig, so guard the swap with the mutex.
                c.mu.Lock()
                c.proxyConfig = defaultProxyConfig
                c.mu.Unlock()
                log.Infof("CheckConfigUpdate new config load success")
            }
        }
    }
}
func (s *session) Execute(sql string) ([]ast.RecordSet, error) {
    if err := s.checkSchemaValidOrRollback(); err != nil {
        return nil, errors.Trace(err)
    }
    charset, collation := getCtxCharsetInfo(s)
    rawStmts, err := s.ParseSQL(sql, charset, collation)
    if err != nil {
        log.Warnf("compiling %s, error: %v", sql, err)
        return nil, errors.Trace(err)
    }
    var rs []ast.RecordSet
    ph := sessionctx.GetDomain(s).PerfSchema()
    for i, rst := range rawStmts {
        st, err1 := Compile(s, rst)
        if err1 != nil {
            log.Errorf("Syntax error: %s", sql)
            log.Errorf("Error occurs at %s.", err1)
            return nil, errors.Trace(err1)
        }
        id := variable.GetSessionVars(s).ConnectionID
        s.stmtState = ph.StartStatement(sql, id, perfschema.CallerNameSessionExecute, rawStmts[i])
        r, err := runStmt(s, st)
        ph.EndStatement(s.stmtState)
        if err != nil {
            log.Warnf("session:%v, err:%v", s, err)
            return nil, errors.Trace(err)
        }
        if r != nil {
            rs = append(rs, r)
        }
    }
    return rs, nil
}
// isAutocommit checks if it is in the auto-commit mode.
func (s *session) isAutocommit(ctx context.Context) bool {
    sessionVar := variable.GetSessionVars(ctx)
    autocommit := sessionVar.GetSystemVar("autocommit")
    if autocommit.IsNull() {
        if s.initing {
            return false
        }
        autocommitStr, err := s.GetGlobalSysVar(ctx, "autocommit")
        if err != nil {
            log.Errorf("Get global sys var error: %v", err)
            return false
        }
        autocommit.SetString(autocommitStr)
        err = sessionVar.SetSystemVar("autocommit", autocommit)
        if err != nil {
            log.Errorf("Set session sys var error: %v", err)
        }
    }
    autocommitStr := autocommit.GetString()
    if autocommitStr == "ON" || autocommitStr == "on" || autocommitStr == "1" {
        variable.GetSessionVars(ctx).SetStatusFlag(mysql.ServerStatusAutocommit, true)
        return true
    }
    variable.GetSessionVars(ctx).SetStatusFlag(mysql.ServerStatusAutocommit, false)
    return false
}
func (c *Conf) CheckConfigUpdate(notifyChans ...chan bool) {
    if c.proxyConfig.Global.ConfAutoload == 1 {
        for {
            // TODO: make the polling interval configurable via the config file.
            time.Sleep(time.Second * 10)
            //log.Infof("CheckConfigUpdate checking")
            fileinfo, err := os.Stat(c.path)
            if err != nil {
                log.Errorf("CheckConfigUpdate error %s", err.Error())
                continue
            }
            if c.lastModifiedTime.Before(fileinfo.ModTime()) {
                log.Infof("CheckConfigUpdate config change and load new config")
                defaultProxyConfig := getDefaultProxyConfig()
                err = c.parseConfigFile(defaultProxyConfig)
                if err != nil {
                    log.Errorf("CheckConfigUpdate error %s", err.Error())
                    continue
                }
                c.lastModifiedTime = fileinfo.ModTime()
                // Other goroutines read proxyConfig, so guard the swap with the mutex.
                c.mu.Lock()
                c.proxyConfig = defaultProxyConfig
                c.mu.Unlock()
                // Apply the log level from the newly loaded config.
                log.SetLevel(log.LogLevel(c.proxyConfig.Global.LogLevel))
                for _, notifyChan := range notifyChans {
                    notifyChan <- true
                }
            }
        }
    }
}
// ExecRestrictedSQL implements the SQLHelper interface.
// It is used for executing some restricted SQL statements.
func (s *session) ExecRestrictedSQL(ctx context.Context, sql string) (rset.Recordset, error) {
    if ctx.Value(&sqlexec.RestrictedSQLExecutorKeyType{}) != nil {
        // We do not support running this function concurrently.
        // TODO: Maybe we should remove this restriction later.
        return nil, errors.New("Should not call ExecRestrictedSQL concurrently.")
    }
    statements, err := Compile(ctx, sql)
    if err != nil {
        log.Errorf("Compile %s with error: %v", sql, err)
        return nil, errors.Trace(err)
    }
    if len(statements) != 1 {
        log.Errorf("ExecRestrictedSQL only executes one statement. Too many/few statement in %s", sql)
        return nil, errors.New("Wrong number of statement.")
    }
    st := statements[0]
    // Check the statement for some restrictions.
    // For example, only DML on system meta tables is supported.
    // TODO: Add more restrictions.
    log.Debugf("Executing %s [%s]", st.OriginText(), sql)
    ctx.SetValue(&sqlexec.RestrictedSQLExecutorKeyType{}, true)
    defer ctx.ClearValue(&sqlexec.RestrictedSQLExecutorKeyType{})
    rs, err := st.Exec(ctx)
    return rs, errors.Trace(err)
}
func (s *session) Execute(sql string) ([]rset.Recordset, error) {
    statements, err := Compile(s, sql)
    if err != nil {
        log.Errorf("Syntax error: %s", sql)
        log.Errorf("Error occurs at %s.", err)
        return nil, errors.Trace(err)
    }
    var rs []rset.Recordset
    for _, st := range statements {
        r, err := runStmt(s, st)
        if err != nil {
            log.Warnf("session:%v, err:%v", s, err)
            return nil, errors.Trace(err)
        }
        // Record executed query
        if isPreparedStmt(st) {
            ps := st.(*stmts.PreparedStmt)
            s.history.add(ps.ID, st)
        } else {
            s.history.add(0, st)
        }
        if r != nil {
            rs = append(rs, r)
        }
    }
    return rs, nil
}
// pathMap holds the inner paths.
func (b *planBuilder) nextPath(pathMap map[*ast.TableName]*joinPath, equivMap map[*Equiv]bool, prevs []*joinPath) *joinPath {
    cans := b.candidatePaths(pathMap)
    if len(cans) == 0 {
        for _, v := range pathMap {
            log.Errorf("index dep %v, prevs %v\n", v.idxDeps, len(prevs))
        }
        log.Errorf("%v\n", b.obj)
        panic(b.obj)
    }
    indexPath := b.nextIndexPath(cans)
    if indexPath != nil {
        return indexPath
    }
    var cansWithEquiv []*joinPath
    for _, can := range cans {
        if len(can.equivs) > 0 {
            cansWithEquiv = append(cansWithEquiv, can)
        }
    }
    if len(cansWithEquiv) > 0 {
        return b.pickPath(cansWithEquiv)
    }
    return b.pickPath(cans)
}
// ExecRestrictedSQL implements the RestrictedSQLExecutor interface.
// It is used for executing some restricted SQL statements, usually during a normal statement execution.
// Unlike normal Exec, it doesn't reset the statement status, doesn't commit or roll back the current transaction,
// and doesn't write binlog.
func (s *session) ExecRestrictedSQL(ctx context.Context, sql string) (ast.RecordSet, error) {
    if err := s.checkSchemaValidOrRollback(); err != nil {
        return nil, errors.Trace(err)
    }
    charset, collation := s.sessionVars.GetCharsetInfo()
    rawStmts, err := s.ParseSQL(sql, charset, collation)
    if err != nil {
        return nil, errors.Trace(err)
    }
    if len(rawStmts) != 1 {
        log.Errorf("ExecRestrictedSQL only executes one statement. Too many/few statement in %s", sql)
        return nil, errors.New("wrong number of statement")
    }
    // Some execution is done in the compile stage, so we reset it before compile.
    st, err := Compile(s, rawStmts[0])
    if err != nil {
        log.Errorf("Compile %s with error: %v", sql, err)
        return nil, errors.Trace(err)
    }
    // Check the statement for some restrictions.
    // For example, only DML on system meta tables is supported.
    // TODO: Add more restrictions.
    log.Debugf("Executing %s [%s]", st.OriginText(), sql)
    s.sessionVars.InRestrictedSQL = true
    rs, err := st.Exec(ctx)
    s.sessionVars.InRestrictedSQL = false
    return rs, errors.Trace(err)
}
// loadCommonGlobalVariablesIfNeeded loads and applies commonly used global variables for the session
// right before creating a transaction for the first time.
func (s *session) loadCommonGlobalVariablesIfNeeded() error {
    vars := s.sessionVars
    if vars.CommonGlobalLoaded {
        return nil
    }
    if s.Value(context.Initing) != nil {
        // When running bootstrap or upgrade, we should not access global storage.
        return nil
    }
    // Set the variable to true to prevent a cyclic recursive call.
    vars.CommonGlobalLoaded = true
    rs, err := s.ExecRestrictedSQL(s, loadCommonGlobalVarsSQL)
    if err != nil {
        vars.CommonGlobalLoaded = false
        log.Errorf("Failed to load common global variables.")
        return errors.Trace(err)
    }
    for {
        row, err1 := rs.Next()
        if err1 != nil {
            vars.CommonGlobalLoaded = false
            log.Errorf("Failed to load common global variables.")
            return errors.Trace(err1)
        }
        if row == nil {
            break
        }
        varName := row.Data[0].GetString()
        if d := varsutil.GetSystemVar(vars, varName); d.IsNull() {
            varsutil.SetSystemVar(s.sessionVars, varName, row.Data[1])
        }
    }
    vars.CommonGlobalLoaded = true
    return nil
}
func (s *Scan) nextBatch() int {
    startKey := s.nextStartKey
    if startKey == nil {
        startKey = s.StartRow
    }
    // Notice: errors are ignored here.
    // TODO: add error handling; for now we only log.
    rs, err := s.getData(startKey, 0)
    if err != nil {
        log.Errorf("scan next batch failed - [startKey=%q], %v", startKey, errors.ErrorStack(err))
    }
    // The current region returned no data; try switching to the next region.
    if len(rs) == 0 && len(s.nextStartKey) > 0 {
        // TODO: add error handling; for now we only log.
        rs, err = s.getData(s.nextStartKey, 0)
        if err != nil {
            log.Errorf("scan next batch failed - [startKey=%q], %v", s.nextStartKey, errors.ErrorStack(err))
        }
    }
    s.cache = rs
    return len(s.cache)
}
func (cc *clientConn) Run() {
    defer func() {
        r := recover()
        if r != nil {
            const size = 4096
            buf := make([]byte, size)
            buf = buf[:runtime.Stack(buf, false)]
            log.Errorf("lastCmd %s, %v, %s", cc.lastCmd, r, buf)
        }
        cc.Close()
    }()
    for {
        cc.alloc.Reset()
        data, err := cc.readPacket()
        if err != nil {
            if terror.ErrorNotEqual(err, io.EOF) {
                log.Error(err)
            }
            return
        }
        if err := cc.dispatch(data); err != nil {
            if terror.ErrorEqual(err, io.EOF) {
                return
            }
            log.Errorf("dispatch error %s, %s", errors.ErrorStack(err), cc)
            log.Errorf("cmd: %s", string(data[1:]))
            cc.writeError(err)
        }
        cc.pkg.sequence = 0
    }
}
func (s *Server) onConn(c net.Conn) {
    conn, err := s.newConn(c)
    if err != nil {
        log.Errorf("newConn error %s", errors.ErrorStack(err))
        return
    }
    if err := conn.handshake(); err != nil {
        log.Errorf("handshake error %s", errors.ErrorStack(err))
        c.Close()
        return
    }
    conn.ctx, err = s.driver.OpenCtx(conn.capability, uint8(conn.collation), conn.dbname)
    if err != nil {
        log.Errorf("open ctx error %s", errors.ErrorStack(err))
        c.Close()
        return
    }
    defer func() {
        log.Infof("close %s", conn)
    }()
    s.rwlock.Lock()
    s.clients[conn.connectionID] = conn
    s.rwlock.Unlock()
    conn.Run()
}
func (c *client) parseRegion(rr *ResultRow) *RegionInfo {
    if regionInfoCol, ok := rr.Columns["info:regioninfo"]; ok {
        offset := strings.Index(string(regionInfoCol.Value), "PBUF") + 4
        regionInfoBytes := regionInfoCol.Value[offset:]
        var info proto.RegionInfo
        err := pb.Unmarshal(regionInfoBytes, &info)
        if err != nil {
            log.Errorf("Unable to parse region location: %#v", err)
        }
        ret := &RegionInfo{
            StartKey:       info.GetStartKey(),
            EndKey:         info.GetEndKey(),
            Name:           bytes.NewBuffer(rr.Row).String(),
            TableNamespace: string(info.GetTableName().GetNamespace()),
            TableName:      string(info.GetTableName().GetQualifier()),
            Offline:        info.GetOffline(),
            Split:          info.GetSplit(),
        }
        if v, ok := rr.Columns["info:server"]; ok {
            ret.Server = string(v.Value)
        }
        return ret
    }
    log.Errorf("Unable to parse region location (no regioninfo column): %#v", rr)
    return nil
}
func (s *Server) handleMigrateState(slotIndex int, keys ...[]byte) error {
    shd := s.slots[slotIndex]
    if shd.slotInfo.State.Status != models.SLOT_STATUS_MIGRATE {
        return nil
    }
    if shd.migrateFrom == nil {
        log.Fatalf("migrateFrom not exist %+v", shd)
    }
    if shd.dst.Master() == shd.migrateFrom.Master() {
        log.Fatalf("the same migrate src and dst, %+v", shd)
    }
    redisConn, err := s.pools.GetConn(shd.migrateFrom.Master())
    if err != nil {
        return errors.Trace(err)
    }
    defer s.pools.PutConn(redisConn)
    err = writeMigrateKeyCmd(redisConn, shd.dst.Master(), MigrateKeyTimeoutMs, keys...)
    if err != nil {
        redisConn.Close()
        log.Errorf("migrate key %s error, from %s to %s, err:%v", string(keys[0]), shd.migrateFrom.Master(), shd.dst.Master(), err)
        return errors.Trace(err)
    }
    redisReader := redisConn.BufioReader()
    // handle migrate result
    for i := 0; i < len(keys); i++ {
        resp, err := parser.Parse(redisReader)
        if err != nil {
            log.Errorf("migrate key %s error, from %s to %s, err:%v", string(keys[i]), shd.migrateFrom.Master(), shd.dst.Master(), err)
            redisConn.Close()
            return errors.Trace(err)
        }
        result, err := resp.Bytes()
        log.Debug("migrate", string(keys[0]), "from", shd.migrateFrom.Master(), "to", shd.dst.Master(), string(result))
        if resp.Type == parser.ErrorResp {
            redisConn.Close()
            log.Error(string(keys[0]), string(resp.Raw), "migrateFrom", shd.migrateFrom.Master())
            return errors.New(string(resp.Raw))
        }
    }
    s.counter.Add("Migrate", int64(len(keys)))
    return nil
}
// some comments
func init() {
    log.Errorf("create conn")
    var err error
    c, err = hbase.NewClient([]string{"localhost"}, "/hbase")
    if err != nil {
        log.Fatal(err)
    }
    log.Errorf("create conn done")
}
// some comments
func init() {
    rand.Seed(time.Now().UnixNano())
    log.Errorf("create conn")
    var err error
    c, err = hbase.NewClient([]string{"localhost"}, "/hbase")
    if err != nil {
        log.Fatal(err)
    }
    log.Errorf("create conn done")
}
// Reload reloads InfoSchema.
func (do *Domain) Reload() error {
    // For testing only.
    if do.SchemaValidity.MockReloadFailed {
        err := kv.RunInNewTxn(do.store, false, func(txn kv.Transaction) error {
            do.SchemaValidity.setLastFailedTS(txn.StartTS())
            return nil
        })
        if err != nil {
            log.Errorf("mock reload failed err:%v", err)
            return errors.Trace(err)
        }
        return errors.New("mock reload failed")
    }
    // Lock here so that only one reload runs at a time.
    do.m.Lock()
    defer do.m.Unlock()
    timeout := do.ddl.GetLease() / 2
    if timeout < defaultMinReloadTimeout {
        timeout = defaultMinReloadTimeout
    }
    exit := int32(0)
    done := make(chan error, 1)
    go func() {
        var err error
        for {
            err = kv.RunInNewTxn(do.store, false, do.loadInfoSchema)
            if err == nil {
                atomic.StoreInt64(&do.lastLeaseTS, time.Now().UnixNano())
                break
            }
            log.Errorf("[ddl] load schema err %v, retry again", errors.ErrorStack(err))
            if atomic.LoadInt32(&exit) == 1 {
                return
            }
            // TODO: use a backoff algorithm.
            time.Sleep(500 * time.Millisecond)
            continue
        }
        done <- err
    }()
    select {
    case err := <-done:
        return errors.Trace(err)
    case <-time.After(timeout):
        atomic.StoreInt32(&exit, 1)
        return ErrLoadSchemaTimeOut
    }
}
func (h *Handler) parseFullResyncReply(resp string) (string, int64) {
    seps := strings.Split(resp, " ")
    if len(seps) != 3 || len(seps[1]) != 40 {
        log.Errorf("master %s returns invalid fullresync format %s", h.masterAddr, resp)
        // Bail out instead of indexing a malformed reply below.
        return "", -1
    }
    masterRunID := seps[1]
    initialSyncOffset, err := strconv.ParseInt(seps[2], 10, 64)
    if err != nil {
        log.Errorf("master %s returns invalid fullresync offset, err: %v", h.masterAddr, err)
        initialSyncOffset = -1
    }
    return masterRunID, initialSyncOffset
}
func V1POSTWorkspaceHandler(ctx *macaron.Context, workspace WorkspacePOSTJSON) (int, []byte) {
    w := models.Workspace{}
    if id, err := w.Create(workspace.Name, workspace.Description); err != nil {
        log.Errorf("[vessel] Create workspace error: %s", err.Error())
        result, _ := json.Marshal(map[string]string{"status": "Error", "message": err.Error()})
        return http.StatusBadRequest, result
    } else {
        log.Errorf("[vessel] Create workspace successfully, id is: %d", id)
        result, _ := json.Marshal(map[string]int64{"id": id})
        return http.StatusOK, result
    }
}
// Eval implements the Expression Eval interface.
func (f *FunctionConvert) Eval(ctx context.Context, args map[interface{}]interface{}) (interface{}, error) {
    value, err := f.Expr.Eval(ctx, args)
    if err != nil {
        return nil, err
    }
    // Casting nil to any type returns nil.
    if value == nil {
        return nil, nil
    }
    str, ok := value.(string)
    if !ok {
        return nil, nil
    }
    if strings.ToLower(f.Charset) == "ascii" {
        return value, nil
    } else if strings.ToLower(f.Charset) == "utf8mb4" {
        return value, nil
    }
    encoding, _ := Lookup(f.Charset)
    if encoding == nil {
        return nil, fmt.Errorf("unknown encoding: %s", f.Charset)
    }
    target, _, err := transform.String(encoding.NewDecoder(), str)
    if err != nil {
        log.Errorf("Convert %s to %s with error: %v", str, f.Charset, err)
        return nil, errors.Trace(err)
    }
    return target, nil
}
// See https://dev.mysql.com/doc/refman/5.7/en/cast-functions.html#function_convert
func builtinConvert(args []types.Datum, _ context.Context) (d types.Datum, err error) {
    // Casting nil to any type returns nil.
    if args[0].Kind() != types.KindString {
        return d, nil
    }
    str := args[0].GetString()
    Charset := args[1].GetString()
    if strings.ToLower(Charset) == "ascii" {
        d.SetString(str)
        return d, nil
    } else if strings.ToLower(Charset) == "utf8mb4" {
        d.SetString(str)
        return d, nil
    }
    encoding, _ := charset.Lookup(Charset)
    if encoding == nil {
        return d, errors.Errorf("unknown encoding: %s", Charset)
    }
    target, _, err := transform.String(encoding.NewDecoder(), str)
    if err != nil {
        log.Errorf("Convert %s to %s with error: %v", str, Charset, err)
        return d, errors.Trace(err)
    }
    d.SetString(target)
    return d, nil
}
func (e *Evaluator) funcConvert(f *ast.FuncConvertExpr) bool {
    value := f.Expr.GetValue()
    // Casting nil to any type returns nil.
    if value == nil {
        f.SetValue(nil)
        return true
    }
    str, ok := value.(string)
    if !ok {
        return true
    }
    if strings.ToLower(f.Charset) == "ascii" {
        f.SetValue(value)
        return true
    } else if strings.ToLower(f.Charset) == "utf8mb4" {
        f.SetValue(value)
        return true
    }
    encoding, _ := charset.Lookup(f.Charset)
    if encoding == nil {
        e.err = ErrInvalidOperation.Gen("unknown encoding: %s", f.Charset)
        return false
    }
    target, _, err := transform.String(encoding.NewDecoder(), str)
    if err != nil {
        log.Errorf("Convert %s to %s with error: %v", str, f.Charset, err)
        e.err = errors.Trace(err)
        return false
    }
    f.SetValue(target)
    return true
}
func (s *session) doCommit() error {
    if s.txn == nil {
        return nil
    }
    defer func() {
        s.txn = nil
        s.sessionVars.SetStatusFlag(mysql.ServerStatusInTrans, false)
    }()
    if binloginfo.PumpClient != nil {
        prewriteValue := binloginfo.GetPrewriteValue(s, false)
        if prewriteValue != nil {
            prewriteData, err := prewriteValue.Marshal()
            if err != nil {
                return errors.Trace(err)
            }
            bin := &binlog.Binlog{
                Tp:            binlog.BinlogType_Prewrite,
                PrewriteValue: prewriteData,
            }
            s.txn.SetOption(kv.BinlogData, bin)
        }
    }
    if err := s.checkSchemaValid(); err != nil {
        err1 := s.txn.Rollback()
        if err1 != nil {
            log.Errorf("rollback txn failed, err:%v", err1)
        }
        return errors.Trace(err)
    }
    if err := s.txn.Commit(); err != nil {
        return errors.Trace(err)
    }
    return nil
}
// isAutocommit checks if it is in the auto-commit mode.
func (s *session) isAutocommit(ctx context.Context) bool {
    if ctx.Value(&sqlexec.RestrictedSQLExecutorKeyType{}) != nil {
        return false
    }
    autocommit, ok := variable.GetSessionVars(ctx).Systems["autocommit"]
    if !ok {
        if s.initing {
            return false
        }
        var err error
        autocommit, err = s.GetGlobalSysVar(ctx, "autocommit")
        if err != nil {
            log.Errorf("Get global sys var error: %v", err)
            return false
        }
        variable.GetSessionVars(ctx).Systems["autocommit"] = autocommit
        ok = true
    }
    if ok && (autocommit == "ON" || autocommit == "on" || autocommit == "1") {
        variable.GetSessionVars(ctx).SetStatusFlag(mysql.ServerStatusAutocommit, true)
        return true
    }
    variable.GetSessionVars(ctx).SetStatusFlag(mysql.ServerStatusAutocommit, false)
    return false
}
// onWorker is for async online schema change. It tries to become the owner first,
// then waits on or pulls from the job queue to handle a schema change job.
func (d *ddl) onWorker() {
    defer d.wait.Done()
    // We use 4 * lease time to check the owner's timeout, so here we update the owner's status
    // every 2 * lease time. If the lease is 0, we use a default of 10s.
    checkTime := chooseLeaseTime(2*d.lease, 10*time.Second)
    ticker := time.NewTicker(checkTime)
    defer ticker.Stop()
    for {
        select {
        case <-ticker.C:
            log.Debugf("wait %s to check DDL status again", checkTime)
        case <-d.jobCh:
        case <-d.quitCh:
            return
        }
        err := d.handleJobQueue()
        if err != nil {
            log.Errorf("handle job err %v", errors.ErrorStack(err))
        }
    }
}
// RunInNewTxn will run f in a new transaction environment.
func RunInNewTxn(store Storage, retryable bool, f func(txn Transaction) error) error {
    for {
        txn, err := store.Begin()
        if err != nil {
            log.Errorf("RunInNewTxn error - %v", err)
            return errors.Trace(err)
        }
        err = f(txn)
        if retryable && IsRetryableError(err) {
            log.Warnf("Retry txn %v", txn)
            txn.Rollback()
            continue
        }
        if err != nil {
            return errors.Trace(err)
        }
        err = txn.Commit()
        if retryable && IsRetryableError(err) {
            log.Warnf("Retry txn %v", txn)
            txn.Rollback()
            continue
        }
        if err != nil {
            return errors.Trace(err)
        }
        break
    }
    return nil
}
func (c *txnCommitter) cleanupSingleRegion(bo *Backoffer, batch batchKeys) error {
    req := &pb.Request{
        Type: pb.MessageType_CmdBatchRollback,
        CmdBatchRollbackReq: &pb.CmdBatchRollbackRequest{
            Keys:         batch.keys,
            StartVersion: c.startTS,
        },
    }
    resp, err := c.store.SendKVReq(bo, req, batch.region)
    if err != nil {
        return errors.Trace(err)
    }
    if regionErr := resp.GetRegionError(); regionErr != nil {
        err = bo.Backoff(boRegionMiss, errors.New(regionErr.String()))
        if err != nil {
            return errors.Trace(err)
        }
        err = c.cleanupKeys(bo, batch.keys)
        return errors.Trace(err)
    }
    if keyErr := resp.GetCmdBatchRollbackResp().GetError(); keyErr != nil {
        err = errors.Errorf("cleanup failed: %s", keyErr)
        log.Errorf("txn failed cleanup key: %v, tid: %d", err, c.startTS)
        return errors.Trace(err)
    }
    return nil
}
func (s *Store) Reset() error {
    if err := s.acquire(); err != nil {
        return err
    }
    defer s.release()
    log.Infof("store is resetting...")
    for i := s.splist.Len(); i != 0; i-- {
        v := s.splist.Remove(s.splist.Front()).(*StoreSnapshot)
        v.Close()
    }
    for i := s.itlist.Len(); i != 0; i-- {
        v := s.itlist.Remove(s.itlist.Front()).(*storeIterator)
        v.Close()
    }
    if err := s.db.Clear(); err != nil {
        s.db.Close()
        s.db = nil
        log.Errorf("store reset failed - %s", err)
        return err
    } else {
        s.serial++
        log.Infof("store is reset")
        return nil
    }
}
// Less implements the sort.Interface Less method.
func (t *orderByTable) Less(i, j int) bool {
    for index, asc := range t.Ascs {
        v1 := t.Rows[i].Key[index]
        v2 := t.Rows[j].Key[index]
        ret, err := types.Compare(v1, v2)
        if err != nil {
            // We just have to log this error and skip it.
            // TODO: record this error and handle it later.
            log.Errorf("compare %v %v err %v", v1, v2, err)
        }
        if !asc {
            ret = -ret
        }
        if ret < 0 {
            return true
        } else if ret > 0 {
            return false
        }
    }
    return false
}