func (x singleWriter) Write(p []byte) (int, error) { w := x.w if w.seq != x.seq { return 0, errors.New("leveldb/journal: stale writer") } if w.err != nil { return 0, w.err } n0 := len(p) for len(p) > 0 { // Write a block, if it is full. if w.j == blockSize { w.fillHeader(false) w.writeBlock() if w.err != nil { return 0, w.err } w.first = false } // Copy bytes into the buffer. n := copy(w.buf[w.j:], p) w.j += n p = p[n:] } return n0, nil }
// readBlockCached returns the block at bh, consulting the block cache
// first when one is configured.
//
// With a cache: bh.offset is used as the cache key; on a miss the supplied
// closure reads the block from disk (only when fillCache is true) and
// charges the cache with the block's data capacity. The err variable is
// captured by the closure so a read failure during population surfaces
// after Get returns. A cached value of the wrong type is reported as an
// inconsistency. The returned util.Releaser is the cache handle, which
// the caller must Release when done with the block.
//
// Without a cache the block is read directly and acts as its own releaser.
func (r *Reader) readBlockCached(bh blockHandle, verifyChecksum, fillCache bool) (*block, util.Releaser, error) {
	if r.cache != nil {
		var err error
		ch := r.cache.Get(bh.offset, func() (charge int, value interface{}) {
			if !fillCache {
				// Probe-only: do not populate the cache on a miss.
				return 0, nil
			}
			var b *block
			b, err = r.readBlock(bh, verifyChecksum)
			if err != nil {
				return 0, nil
			}
			return cap(b.data), b
		})
		if ch != nil {
			b, ok := ch.Value().(*block)
			if !ok {
				ch.Release()
				return nil, nil, errors.New("leveldb/table: inconsistent block type")
			}
			return b, ch, err
		} else if err != nil {
			return nil, nil, err
		}
	}
	// No cache (or probe miss without error): read directly.
	b, err := r.readBlock(bh, verifyChecksum)
	return b, b, err
}
func (x *singleReader) ReadByte() (byte, error) { r := x.r if r.seq != x.seq { return 0, errors.New("leveldb/journal: stale reader") } if x.err != nil { return 0, x.err } if r.err != nil { return 0, r.err } for r.i == r.j { if r.last { return 0, io.EOF } x.err = r.nextChunk(false) if x.err != nil { if x.err == errSkip { x.err = io.ErrUnexpectedEOF } return 0, x.err } } c := r.buf[r.i] r.i++ return c, nil }
func upgradeViaRest() error { cfg, err := config.Load(locations[locConfigFile], protocol.LocalDeviceID) if err != nil { return err } target := cfg.GUI().URL() r, _ := http.NewRequest("POST", target+"/rest/system/upgrade", nil) r.Header.Set("X-API-Key", cfg.GUI().APIKey()) tr := &http.Transport{ Dial: dialer.Dial, Proxy: http.ProxyFromEnvironment, TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, } client := &http.Client{ Transport: tr, Timeout: 60 * time.Second, } resp, err := client.Do(r) if err != nil { return err } if resp.StatusCode != 200 { bs, err := ioutil.ReadAll(resp.Body) defer resp.Body.Close() if err != nil { return err } return errors.New(string(bs)) } return err }
func (x *singleReader) Read(p []byte) (int, error) { r := x.r if r.seq != x.seq { return 0, errors.New("leveldb/journal: stale reader") } if x.err != nil { return 0, x.err } if r.err != nil { return 0, r.err } for r.i == r.j { if r.last { return 0, io.EOF } x.err = r.nextChunk(false) if x.err != nil { if x.err == errSkip { x.err = io.ErrUnexpectedEOF } return 0, x.err } } n := copy(p, r.buf[r.i:r.j]) r.i += n return n, nil }
// readFilterBlockCached returns the filter block at bh, going through the
// configured cache when present.
//
// When fillCache is true a cache miss reads the filter block from disk
// and inserts it, charged at the block's data capacity; when false the
// cache is only probed (nil setter). The err variable is captured by the
// populate closure so a read failure surfaces after Get returns. The
// returned util.Releaser is the cache handle, which the caller must
// Release. Without a cache the block is read directly and acts as its own
// releaser.
func (r *Reader) readFilterBlockCached(bh blockHandle, fillCache bool) (*filterBlock, util.Releaser, error) {
	if r.cache != nil {
		var (
			err error
			ch  *cache.Handle
		)
		if fillCache {
			ch = r.cache.Get(bh.offset, func() (size int, value cache.Value) {
				var b *filterBlock
				b, err = r.readFilterBlock(bh)
				if err != nil {
					return 0, nil
				}
				return cap(b.data), b
			})
		} else {
			// Probe only; never populate.
			ch = r.cache.Get(bh.offset, nil)
		}
		if ch != nil {
			b, ok := ch.Value().(*filterBlock)
			if !ok {
				// Something other than a filter block lives under this key.
				ch.Release()
				return nil, nil, errors.New("leveldb/table: inconsistent block type")
			}
			return b, ch, err
		} else if err != nil {
			return nil, nil, err
		}
	}
	// No cache (or probe miss without error): read directly.
	b, err := r.readFilterBlock(bh)
	return b, b, err
}
// GetParentInfo scans DB_NODE for the node whose Childs list contains the
// given node name and returns that parent's NodeInfo. The iteration is
// stopped early by returning a sentinel error from the ForEach callback
// once a parent is found.
//
// NOTE(review): unmarshal errors returned from the callback end up in the
// outer err but are never propagated — when no parent is found an empty
// TreeError is returned regardless. Confirm this swallowing is intended.
func GetParentInfo(node string) (node_info.NodeInfo, tree_lib.TreeError) {
	var (
		err   tree_lib.TreeError
		pname string
	)
	err.From = tree_lib.FROM_GET_PARENT_INFO
	err = ForEach(DB_NODE, func(key []byte, val []byte) error {
		n := node_info.NodeInfo{}
		// This err is a plain error and shadows the outer TreeError.
		err := ffjson.Unmarshal(val, &n)
		if err != nil {
			return err
		}
		if _, ok := tree_lib.ArrayContains(n.Childs, node); ok {
			pname = n.Name
			// Sentinel error: only purpose is to stop the iteration.
			return errors.New("Just Err for break")
		}
		return nil
	})
	if len(pname) == 0 {
		// No parent found: report empty info and a null TreeError.
		return node_info.NodeInfo{}, tree_lib.TreeError{}
	}
	// Node relations first element should be parent node
	return GetNodeInfo(pname)
}
// Close finishes the current journal and closes the writer. func (w *Writer) Close() error { w.seq++ w.writePending() if w.err != nil { return w.err } w.err = errors.New("leveldb/journal: closed Writer") return nil }
// UpdateConfig writes the updated configuration data to the storage location. func UpdateConfig(conf Configuration) (err error) { cfgDir, _ := getConfigDir() d, err := json.Marshal(&conf) if err != nil { return errors.New("Could not save new json config") } ioutil.WriteFile(cfgDir+"/1Password.json", d, 0664) return nil }
func del(k string) error { if len(strings.TrimSpace(k)) == 0 { return errors.New("uid was nil") } _, err := redis.Int(rds.Do("DEL", k)) if err != nil { return err } return nil }
func (ms *MirageStorage) updateSubdomainMap(subdomainMap map[string]int) error { //dump.Dump(subdomainMap) subdomainData, err := json.Marshal(subdomainMap) err = ms.Set("subdomain-map", subdomainData) if err != nil { return errors.New(fmt.Sprintf("failed to update subdomain-map: %s", err.Error())) } return nil }
func getDataNodeFromId(topo *Topology, id string) (foundDn *DataNode) { nid := NodeId(id) topo.WalkDataNode(func(dn *DataNode) (e error) { if dn.Id() == nid { foundDn = dn e = errors.New("Found.") } return }) return }
func nodeCreate(ukey string, node *Node) error { v, err := Incr.NodeInrc() if err != nil { return err } node.Id = v json, err := json.Marshal(node) if err != nil { return err } key := ukey + ":" + strconv.FormatInt(node.HubId, 10) + ":" + strconv.FormatInt(node.Id, 10) _, err = rds.Do("SET", key, json) if err != nil { return err } //验证nodetype if NodeTypeEnum.GetName(node.Type-1) == "" { return errors.New("node type error") } //初始化控制器 if node.Type == NodeTypeEnum.Switcher { err := Controller.BeginSwitcher(ukey, node.HubId, node.Id) if err != nil { return errors.New("init error") } } else if node.Type == NodeTypeEnum.GenControl { err := Controller.BeginGenControl(ukey, node.HubId, node.Id) if err != nil { return errors.New("init error") } } else if node.Type == NodeTypeEnum.RangeControl { err := Controller.BeginRangeControl(ukey, node.HubId, node.Id) if err != nil { return errors.New("init error") } } return nil }
func (ms *MirageStorage) RemoveFromSubdomainMap(subdomain string) error { subdomainMap, err := ms.getSubdomainMap() if err != nil { return errors.New(fmt.Sprintf("failed to get subdomain-map: %s", err.Error())) } beforeLen := len(subdomainMap) delete(subdomainMap, subdomain) if beforeLen == len(subdomainMap) { return nil } return ms.updateSubdomainMap(subdomainMap) }
func nodeStartWith(k string) ([]*Node, error) { data, err := redis.Strings(rds.Do("KEYSSTART", k)) if err != nil { return nil, err } if len(data) <= 0 { return nil, errors.New("no data") } var ndata []*Node for _, v := range data { o, _ := redis.String(rds.Do("GET", v)) h := &Node{} json.Unmarshal([]byte(o), &h) ndata = append(ndata, h) } return ndata, nil }
func (ms *MirageStorage) AddToSubdomainMap(subdomain string) error { subdomainMap, err := ms.getSubdomainMap() if err != nil { return errors.New(fmt.Sprintf("failed to get subdomain-map: %s", err.Error())) } beforeLen := len(subdomainMap) subdomainMap[subdomain] = 1 // meanless value if beforeLen == len(subdomainMap) { // need not to update fmt.Println("subdomainMap length is not changed!") return nil } return ms.updateSubdomainMap(subdomainMap) }
func loadConfig(cfgFile string) (*config.Wrapper, string, error) { info, err := os.Stat(cfgFile) if err != nil { return nil, "", err } if !info.Mode().IsRegular() { return nil, "", errors.New("configuration is not a file") } cfg, err := config.Load(cfgFile, myID) if err != nil { return nil, "", err } myCfg := cfg.Devices()[myID] myName := myCfg.Name if myName == "" { myName, _ = os.Hostname() } return cfg, myName, nil }
// newBlockIter returns an iterator over block b, optionally restricted to
// the key range described by slice. bReleaser is released when the
// iterator is released. When inclLimit is true, the entry equal to the
// limit key is included in the range.
func (r *Reader) newBlockIter(b *block, bReleaser util.Releaser, slice *util.Range, inclLimit bool) *blockIter {
	bi := &blockIter{
		tr:            r,
		block:         b,
		blockReleaser: bReleaser,
		// Valid key should never be nil.
		key:             make([]byte, 0),
		dir:             dirSOI,
		riStart:         0,
		riLimit:         b.restartsLen,
		offsetStart:     0,
		offsetRealStart: 0,
		offsetLimit:     b.restartsOffset,
	}
	if slice != nil {
		if slice.Start != nil {
			if bi.Seek(slice.Start) {
				// Narrow the range to begin at the restart interval that
				// contains the start key.
				bi.riStart = b.restartIndex(bi.restartIndex, b.restartsLen, bi.prevOffset)
				bi.offsetStart = b.restartOffset(bi.riStart)
				bi.offsetRealStart = bi.prevOffset
			} else {
				// Start key lies past the last entry: empty range.
				bi.riStart = b.restartsLen
				bi.offsetStart = b.restartsOffset
				bi.offsetRealStart = b.restartsOffset
			}
		}
		if slice.Limit != nil {
			// For an inclusive limit, advance one entry past it.
			if bi.Seek(slice.Limit) && (!inclLimit || bi.Next()) {
				bi.offsetLimit = bi.prevOffset
				bi.riLimit = bi.restartIndex + 1
			}
		}
		bi.reset()
		if bi.offsetStart > bi.offsetLimit {
			bi.sErr(errors.New("leveldb/table: invalid slice range"))
		}
	}
	return bi
}
// SetRelations stores the relation list for node under DB_RELATIONS.
//
// Key -> value ..... node_name -> node1,node2,node3
// []byte -> []string{}.Join(",")
// First element of string array should be parent node
func SetRelations(node string) (err tree_lib.TreeError) {
	err.From = tree_lib.FROM_SET_RELATIONS
	parent_name := ""
	inf := node_info.NodeInfo{}
	inf, err = GetNodeInfo(node)
	if !err.IsNull() {
		return
	}
	rels := inf.Childs

	// Getting parent node
	err = ForEach(DB_NODE, func(key []byte, val []byte) error {
		nf := node_info.NodeInfo{}
		// This err is a plain error, shadowing the outer TreeError.
		err := ffjson.Unmarshal(val, &nf)
		if err != nil {
			return err
		}
		if _, ok := tree_lib.ArrayContains(nf.Childs, node); ok {
			parent_name = nf.Name
			return errors.New("") // Just ending the ForEach with empty error
		}
		return nil
	})
	// NOTE(review): this relies on the empty sentinel error above leaving
	// err.IsNull() true here — confirm, otherwise finding a parent would
	// abort the function before the relations are written.
	if !err.IsNull() {
		return
	}
	if len(parent_name) != 0 {
		// Prepend the parent so it is the first relation element.
		rels = append(append([]string{}, parent_name), rels...)
	}
	err = Set(DB_RELATIONS, []byte(node), []byte(strings.Join(rels, ",")))
	return
}
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

package leveldb

import (
	"sync"
	"time"

	"github.com/syndtr/goleveldb/leveldb/errors"
	"github.com/syndtr/goleveldb/leveldb/opt"
	"github.com/syndtr/goleveldb/leveldb/storage"
)

var (
	// errCompactionTransactExiting signals a compaction transaction to
	// stop because the database is exiting.
	errCompactionTransactExiting = errors.New("leveldb: compaction transact exiting")
)

// cStat accumulates compaction statistics.
type cStat struct {
	duration time.Duration // total time spent
	read     int64         // total bytes read
	write    int64         // total bytes written
}

// add folds a staging counter into the accumulated totals.
func (p *cStat) add(n *cStatStaging) {
	p.duration += n.duration
	p.read += n.read
	p.write += n.write
}

// get returns the accumulated totals.
// (Definition continues beyond this chunk.)
func (p *cStat) get() (duration time.Duration, read, write int64) {
// NOTE(review): this chunk contains UNRESOLVED git merge conflict markers
// (`=======`, `>>>>>>> 9bca75c4...`, `<<<<<<< HEAD`); the opening marker of
// the first conflict and the close of the last lie outside this chunk. The
// file cannot compile until the conflict is resolved — both sides are
// preserved untouched below so the resolution can be made deliberately.
const (
	batchHdrLen  = 8 + 4
	batchGrowRec = 3000
)

// BatchReplay wraps basic batch operations.
type BatchReplay interface {
	Put(key, value []byte)
	Delete(key []byte)
=======
	"errors"

	"github.com/syndtr/goleveldb/leveldb/memdb"
)

var (
	errBatchTooShort  = errors.New("leveldb: batch is too short")
	errBatchBadRecord = errors.New("leveldb: bad record in batch")
)

const kBatchHdrLen = 8 + 4

type batchReplay interface {
	put(key, value []byte, seq uint64)
	delete(key []byte, seq uint64)
>>>>>>> 9bca75c48d6c31becfbb127702b425e7226052e3
}

// Batch is a write batch.
type Batch struct {
<<<<<<< HEAD
	data []byte
// NewReader creates a new initialized table reader for the file.
// The fi, cache and bpool is optional and can be nil.
//
// The returned table reader instance is goroutine-safe.
func NewReader(f io.ReaderAt, size int64, fd storage.FileDesc, cache *cache.CacheGetter, bpool *util.BufferPool, o *opt.Options) (*Reader, error) {
	if f == nil {
		return nil, errors.New("leveldb/table: nil file")
	}

	r := &Reader{
		fd:             fd,
		reader:         f,
		cache:          cache,
		bpool:          bpool,
		o:              o,
		cmp:            o.GetComparer(),
		verifyChecksum: o.GetStrict(opt.StrictBlockChecksum),
	}

	// A valid table is at least one footer long. Corruption is recorded on
	// the reader (returned with nil error) rather than failing outright.
	if size < footerLen {
		r.err = r.newErrCorrupted(0, size, "table", "too small")
		return r, nil
	}

	footerPos := size - footerLen
	var footer [footerLen]byte
	if _, err := r.reader.ReadAt(footer[:], footerPos); err != nil && err != io.EOF {
		return nil, err
	}
	// The footer must end with the fixed magic number.
	if string(footer[footerLen-len(magic):footerLen]) != magic {
		r.err = r.newErrCorrupted(footerPos, footerLen, "table-footer", "bad magic number")
		return r, nil
	}

	var n int
	// Decode the metaindex block handle.
	r.metaBH, n = decodeBlockHandle(footer[:])
	if n == 0 {
		r.err = r.newErrCorrupted(footerPos, footerLen, "table-footer", "bad metaindex block handle")
		return r, nil
	}

	// Decode the index block handle.
	r.indexBH, n = decodeBlockHandle(footer[n:])
	if n == 0 {
		r.err = r.newErrCorrupted(footerPos, footerLen, "table-footer", "bad index block handle")
		return r, nil
	}

	// Read metaindex block.
	metaBlock, err := r.readBlock(r.metaBH, true)
	if err != nil {
		if errors.IsCorrupted(err) {
			r.err = err
			return r, nil
		} else {
			return nil, err
		}
	}

	// Set data end.
	r.dataEnd = int64(r.metaBH.offset)

	// Read metaindex: look for a "filter.<name>" entry that matches either
	// the configured filter or one of the alternative filters.
	metaIter := r.newBlockIter(metaBlock, nil, nil, true)
	for metaIter.Next() {
		key := string(metaIter.Key())
		if !strings.HasPrefix(key, "filter.") {
			continue
		}
		fn := key[7:]
		if f0 := o.GetFilter(); f0 != nil && f0.Name() == fn {
			r.filter = f0
		} else {
			for _, f0 := range o.GetAltFilters() {
				if f0.Name() == fn {
					r.filter = f0
					break
				}
			}
		}
		if r.filter != nil {
			filterBH, n := decodeBlockHandle(metaIter.Value())
			if n == 0 {
				continue
			}
			r.filterBH = filterBH
			// Update data end.
			r.dataEnd = int64(filterBH.offset)
			break
		}
	}
	metaIter.Release()
	metaBlock.Release()

	// Cache index and filter block locally, since we don't have global cache.
	if cache == nil {
		r.indexBlock, err = r.readBlock(r.indexBH, true)
		if err != nil {
			if errors.IsCorrupted(err) {
				r.err = err
				return r, nil
			} else {
				return nil, err
			}
		}
		if r.filter != nil {
			r.filterBlock, err = r.readFilterBlock(r.filterBH)
			if err != nil {
				if !errors.IsCorrupted(err) {
					return nil, err
				}
				// Don't use filter then.
				r.filter = nil
			}
		}
	}

	return r, nil
}
// main wires up the minion CLI — apply, provision-secrets, emit and server
// subcommands — then dispatches on os.Args.
func main() {
	app := &App{
		cli: cli.NewApp(),
	}
	app.configure()

	// "apply": read a builds YAML file and register each entry as a job
	// definition with the remote API.
	app.addCmd(cli.Command{
		Name:    "apply",
		Aliases: []string{"a"},
		Usage:   "register the requred builds",
		Flags: []cli.Flag{
			cli.StringFlag{
				Name:  "build-file, b",
				Value: "builds.yml",
				Usage: "specify build file to apply",
			},
		},
		Action: func(c *cli.Context) error {
			b := c.String("build-file")
			res := make(map[string]map[string]interface{})
			data, err := ioutil.ReadFile(b)
			app.handleErr(err)
			err = yaml.Unmarshal([]byte(data), &res)
			app.handleErr(err)
			for key, config := range res {
				// The YAML map key doubles as the job definition name.
				config["name"] = key
				fmt.Printf("creating job definition %s\n", key)
				err := app.post("/api/users/"+app.User+"/job_definitions", config, nil)
				app.handleErr(err)
			}
			return nil
		},
	})

	//app.addCmd(cli.Command{
	//	Name: "sandbox",
	//	Usage: "build up your test docker image to play around with",
	//	Flags: []cli.Flag{
	//		cli.StringFlag{Name: "build-file, b", Value: "builds.yml", Usage: "specify build file"},
	//		cli.StringFlag{Name: "job, j", Usage: "specify job to run, defaults to first"},
	//	},
	//	Action: func(c *cli.Context) {
	//		val := map[string] struct{
	//			Env map[string] string `json:"env"`
	//			Services []Service `json:"services"`
	//			Before []string `json:"before"`
	//			After []string `json:"after"`
	//			Main []string `json:"main"`
	//			OnSuccess []string `json:"on_success"`
	//			OnFailure []string `json:"on_failure"`
	//		}{}
	//
	//		// for k, v := range val[]
	//
	//		// Build{
	//		//	Env:
	//		// }
	//
	//	},
	//})

	// "provision-secrets": fetch secrets and print them as shell export
	// statements on stdout.
	app.addCmd(cli.Command{
		Name:  "provision-secrets",
		Usage: "load in secret environment variables",
		Action: func(c *cli.Context) error {
			res := make(map[string][]struct {
				Key   string `json:"key"`
				Value string `json:"value"`
			})
			err := app.get("/api/users/"+app.User+"/secrets", nil, &res)
			app.handleErr(err)
			for _, secret := range res["secrets"] {
				fmt.Printf("export %s=%s\n", secret.Key, secret.Value)
			}
			return nil
		},
	})

	// "emit": post a custom event; the optional second argument is a JSON
	// payload.
	app.addCmd(cli.Command{
		Name:      "emit",
		Usage:     "emit a custom event",
		ArgsUsage: "event [ payload ]",
		Action: func(c *cli.Context) error {
			if c.Args().First() == "" {
				app.handleErr(errors.New("requires event name"))
			}
			payload := make(map[string]interface{})
			if c.Args().Get(1) != "" {
				err := json.Unmarshal([]byte(c.Args().Get(1)), &payload)
				app.handleErr(err)
			}
			err := app.post("/api/users/"+app.User+"/events", map[string]map[string]interface{}{
				"event": map[string]interface{}{
					"name":    c.Args().First(),
					"payload": payload,
				},
			}, nil)
			app.handleErr(err)
			return nil
		},
	})

	// "server": run the minion build server until it is cancelled (or,
	// with -e, after the first build).
	app.addCmd(cli.Command{
		Name:  "server",
		Usage: "start the minion server",
		Flags: []cli.Flag{
			cli.BoolFlag{
				Name:  "exit-post-build, e",
				Usage: "should the server stop after a build",
			},
		},
		Action: func(c *cli.Context) {
			minion := &Minion{
				cancel:        make(chan bool),
				app:           app,
				exitPostBuild: c.Bool("exit-post-build"),
			}
			minion.Start()
		},
	})

	app.cli.Run(os.Args)
}
	"github.com/golang/snappy"

	"github.com/syndtr/goleveldb/leveldb/cache"
	"github.com/syndtr/goleveldb/leveldb/comparer"
	"github.com/syndtr/goleveldb/leveldb/errors"
	"github.com/syndtr/goleveldb/leveldb/filter"
	"github.com/syndtr/goleveldb/leveldb/iterator"
	"github.com/syndtr/goleveldb/leveldb/opt"
	"github.com/syndtr/goleveldb/leveldb/storage"
	"github.com/syndtr/goleveldb/leveldb/util"
)

// Errors returned by the table reader and its iterators.
var (
	ErrNotFound       = errors.ErrNotFound
	ErrReaderReleased = errors.New("leveldb/table: reader released")
	ErrIterReleased   = errors.New("leveldb/table: iterator released")
)

// ErrCorrupted describes corruption detected in a table file.
type ErrCorrupted struct {
	Pos    int64  // file offset of the corrupted region
	Size   int64  // size of the corrupted region
	Kind   string // which structure is corrupted (e.g. "table-footer")
	Reason string // human-readable cause
}

// Error implements the error interface.
func (e *ErrCorrupted) Error() string {
	return fmt.Sprintf("leveldb/table: corruption on %s (pos=%d): %s", e.Kind, e.Pos, e.Reason)
}

// max returns the larger of x and y.
// (Definition continues beyond this chunk.)
func max(x, y int) int {
// GetProperty returns value of the given property name.
//
// Property names:
//	leveldb.num-files-at-level{n}
//		Returns the number of files at level 'n'.
//	leveldb.stats
//		Returns statistics of the underlying DB.
//	leveldb.sstables
//		Returns sstables list for each level.
//	leveldb.blockpool
//		Returns block pool stats.
//	leveldb.cachedblock
//		Returns size of cached block.
//	leveldb.openedtables
//		Returns number of opened tables.
//	leveldb.alivesnaps
//		Returns number of alive snapshots.
//	leveldb.aliveiters
//		Returns number of alive iterators.
func (db *DB) GetProperty(name string) (value string, err error) {
	err = db.ok()
	if err != nil {
		return
	}

	const prefix = "leveldb."
	if !strings.HasPrefix(name, prefix) {
		return "", errors.New("leveldb: GetProperty: unknown property: " + name)
	}
	p := name[len(prefix):]

	// Pin the current version so the tables it references stay valid while
	// we report on them.
	v := db.s.version()
	defer v.release()

	numFilesPrefix := "num-files-at-level"
	switch {
	case strings.HasPrefix(p, numFilesPrefix):
		var level uint
		var rest string
		// Exactly the level number must parse, with nothing trailing.
		n, _ := fmt.Sscanf(p[len(numFilesPrefix):], "%d%s", &level, &rest)
		if n != 1 || int(level) >= db.s.o.GetNumLevel() {
			err = errors.New("leveldb: GetProperty: invalid property: " + name)
		} else {
			value = fmt.Sprint(v.tLen(int(level)))
		}
	case p == "stats":
		value = "Compactions\n" +
			" Level | Tables | Size(MB) | Time(sec) | Read(MB) | Write(MB)\n" +
			"-------+------------+---------------+---------------+---------------+---------------\n"
		for level, tables := range v.tables {
			duration, read, write := db.compStats[level].get()
			// Skip levels with no tables and no recorded compaction time.
			if len(tables) == 0 && duration == 0 {
				continue
			}
			value += fmt.Sprintf(" %3d | %10d | %13.5f | %13.5f | %13.5f | %13.5f\n",
				level, len(tables), float64(tables.size())/1048576.0, duration.Seconds(),
				float64(read)/1048576.0, float64(write)/1048576.0)
		}
	case p == "sstables":
		for level, tables := range v.tables {
			value += fmt.Sprintf("--- level %d ---\n", level)
			for _, t := range tables {
				value += fmt.Sprintf("%d:%d[%q .. %q]\n", t.file.Num(), t.size, t.imin, t.imax)
			}
		}
	case p == "blockpool":
		value = fmt.Sprintf("%v", db.s.tops.bpool)
	case p == "cachedblock":
		if bc := db.s.o.GetBlockCache(); bc != nil {
			value = fmt.Sprintf("%d", bc.Size())
		} else {
			value = "<nil>"
		}
	case p == "openedtables":
		value = fmt.Sprintf("%d", db.s.tops.cache.Size())
	case p == "alivesnaps":
		value = fmt.Sprintf("%d", atomic.LoadInt32(&db.aliveSnaps))
	case p == "aliveiters":
		value = fmt.Sprintf("%d", atomic.LoadInt32(&db.aliveIters))
	default:
		err = errors.New("leveldb: GetProperty: unknown property: " + name)
	}

	return
}
		c.Put(&p.Warnings, &p.Status)
	} else if c.HasCap(CLIENT_TRANSACTIONS) {
		c.Put(&p.Status)
	}
	if c.HasCap(CLIENT_SESSION_TRACK) {
		c.Put(&p.Info)
		if Status(p.Status).Has(SERVER_SESSION_STATE_CHANGED) {
			c.Put(&p.SessionState)
		}
	} else {
		c.Put(&p.Info, StrEof)
	}
}

// ErrNotStatePack is returned when the peeked packet is not an OK, ERR or
// EOF packet.
var ErrNotStatePack = errors.New("Not OK,ERR of EOF packet")

// ReadErrOk peeks the first byte of the next packet and dispatches to the
// matching status-packet type: 0x00 -> OK, 0xFF -> ERR, 0xFE -> EOF.
// (Definition continues beyond this chunk.)
func ReadErrOk(proto Proto) (p Pack, err error) {
	b, err := proto.PeekByte()
	if err != nil {
		return nil, err
	}
	switch b {
	case 0:
		p = &OKPack{}
	case 0xFF:
		p = &ERRPack{}
	case 0xFE:
		p = &EOFPack{}
	default:
		return nil, ErrNotStatePack
// Copyright (c) 2014, Suryandaru Triandana <*****@*****.**>
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

package leveldb

import (
	"github.com/syndtr/goleveldb/leveldb/errors"
)

// Common errors returned by the DB, its snapshots and iterators.
var (
	ErrNotFound         = errors.ErrNotFound
	ErrSnapshotReleased = errors.New("leveldb: snapshot released")
	ErrIterReleased     = errors.New("leveldb: iterator released")
	ErrClosed           = errors.New("leveldb: closed")
)
}

// NewReader returns a new reader. The dropper may be nil, and if
// strict is true then corrupted or invalid chunk will halt the journal
// reader entirely.
func NewReader(r io.Reader, dropper Dropper, strict, checksum bool) *Reader {
	return &Reader{
		r:        r,
		dropper:  dropper,
		strict:   strict,
		checksum: checksum,
		last:     true,
	}
}

// errSkip tells callers of nextChunk that the corrupted chunk should be
// skipped rather than treated as fatal.
var errSkip = errors.New("leveldb/journal: skipped")

// corrupt reports n bytes of corruption to the dropper (when one is set).
// In strict mode — unless skip is set — the corruption becomes the
// reader's sticky error and is returned; otherwise errSkip is returned so
// reading can continue past the bad chunk.
func (r *Reader) corrupt(n int, reason string, skip bool) error {
	if r.dropper != nil {
		r.dropper.Drop(&ErrCorrupted{n, reason})
	}
	if r.strict && !skip {
		r.err = errors.NewErrCorrupted(storage.FileDesc{}, &ErrCorrupted{n, reason})
		return r.err
	}
	return errSkip
}

// nextChunk sets r.buf[r.i:r.j] to hold the next chunk's payload, reading the
// next block into the buffer if necessary.
// (Definition continues beyond this chunk.)
func (r *Reader) nextChunk(first bool) error {
// Package memdb provides in-memory key/value database implementation.
package memdb

import (
	"math/rand"
	"sync"

	"github.com/syndtr/goleveldb/leveldb/comparer"
	"github.com/syndtr/goleveldb/leveldb/errors"
	"github.com/syndtr/goleveldb/leveldb/iterator"
	"github.com/syndtr/goleveldb/leveldb/util"
)

// Common errors returned by the memdb and its iterators.
var (
	ErrNotFound     = errors.ErrNotFound
	ErrIterReleased = errors.New("leveldb/memdb: iterator released")
)

// tMaxHeight is the maximum tower height used internally.
const tMaxHeight = 12

// dbIter iterates over the entries of a DB, optionally bounded by slice.
type dbIter struct {
	util.BasicReleaser
	p          *DB
	slice      *util.Range // optional key-range bound; nil means unbounded
	node       int
	forward    bool
	key, value []byte
	err        error
}

// fill loads the current node's key/value into the iterator, applying the
// slice bounds when requested.
// (Definition continues beyond this chunk.)
func (i *dbIter) fill(checkStart, checkLimit bool) bool {
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/errors"
	"github.com/syndtr/goleveldb/leveldb/opt"
	"github.com/syndtr/goleveldb/leveldb/storage"
)

// ErrNotFound is returned when a requested key is absent from storage.
var ErrNotFound = errors.New("Not Found")

// MirageStorage wraps a leveldb database used as the backing store.
type MirageStorage struct {
	storage *leveldb.DB
}

// NewMirageStorage opens (or creates) the leveldb database under
// cfg.Storage.DataDir. Any open failure is fatal (log.Fatal exits).
// (Definition continues beyond this chunk.)
func NewMirageStorage(cfg *Config) *MirageStorage {
	fileStorage, err := storage.OpenFile(cfg.Storage.DataDir)
	if err != nil {
		fmt.Println("cannot open leveldb fileStorage")
		log.Fatal(err)
	}
	// NOTE(review): this local `storage` shadows the imported storage
	// package from here on.
	storage, err := leveldb.Open(fileStorage, &opt.Options{})
	if err != nil {
		fmt.Println("cannot open leveldb")
		log.Fatal(err)
	}