// This returns a field descriptor for FieldType_BLOB (i.e., Field_blob)
func NewBlobFieldDescriptor(nullable NullableColumn, metadata []byte) (
	fd FieldDescriptor,
	remaining []byte,
	err error) {

	if len(metadata) < 1 {
		return nil, nil, errors.New("Metadata has too few bytes")
	}

	packedLen := LittleEndian.Uint8(metadata)
	if packedLen > 4 {
		return nil, nil, errors.New("Invalid packed length")
	}

	return &blobFieldDescriptor{
		packedLengthFieldDescriptor: packedLengthFieldDescriptor{
			baseFieldDescriptor: baseFieldDescriptor{
				fieldType:  mysql_proto.FieldType_BLOB,
				isNullable: nullable,
			},
			packedLength: int(packedLen),
		},
	}, metadata[1:], nil
}
// Lock for writing, waiting up to 'timeout' for successful exclusive
// acquisition of the lock.
func (rw *BoundedRWLock) WLock(timeout time.Duration) (err error) {
	deadline := time.After(timeout)

	rw.control.Lock()
	if rw.readers != 0 || rw.nextWriter != nil {
		me := newWait(true)
		if rw.nextWriter == nil {
			rw.nextWriter = me
		} else {
			select {
			case rw.waiters <- me:
			default:
				err = errors.New("Waiter capacity reached in WLock")
			}
		}
		rw.control.Unlock()
		if err != nil {
			return
		}

		woken := me.WaitAtomic(deadline)
		if !woken {
			return errors.New("Waiter timeout")
		}

		rw.control.Lock()
		if rw.readers != 0 {
			panic("readers??")
		}
	} else {
		rw.nextWriter = newWait(true)
	}
	rw.control.Unlock()
	return
}
func (d *usecTemporalFieldDescriptor) init(
	fieldType mysql_proto.FieldType_Type,
	nullable NullableColumn,
	fixedSize int,
	metadata []byte) (
	remaining []byte,
	err error) {

	d.fieldType = fieldType
	d.isNullable = nullable

	if len(metadata) < 1 {
		return nil, errors.New("Metadata has too few bytes")
	}

	d.fixedSize = fixedSize
	d.neededBytes = fixedSize
	d.microSecondPrecision = uint8(metadata[0])

	switch d.microSecondPrecision {
	case 0:
		// do nothing
	case 1, 2:
		d.neededBytes++
	case 3, 4:
		d.neededBytes += 2
	case 5, 6:
		d.neededBytes += 3
	default:
		return nil, errors.New("Invalid usec precision")
	}

	return metadata[1:], nil
}
// Wait for a read lock for up to 'timeout'.
//
// Error will be non-nil on timeout or when the wait list is at capacity.
func (rw *BoundedRWLock) RLock(timeout time.Duration) (err error) {
	deadline := time.After(timeout)

	rw.control.Lock()
	if rw.nextWriter != nil {
		me := newWait(false)
		select {
		case rw.waiters <- me:
		default:
			err = errors.New("Waiter capacity reached in RLock")
		}
		rw.control.Unlock()
		if err != nil {
			return
		}

		woken := me.WaitAtomic(deadline)
		if !woken {
			return errors.New("Waiter timeout")
		}

		rw.control.Lock()
	}
	rw.readers++
	rw.control.Unlock()
	return
}
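// Usage sketch (illustrative only, not part of the original source): acquire
// and release the lock in both modes with a bounded wait. The constructor name
// NewBoundedRWLock and the RUnlock/WUnlock method names are assumptions about
// the surrounding package, not confirmed by this listing.
func exampleBoundedRWLockUsage() error {
	rw := NewBoundedRWLock(128) // assumed constructor; argument is the waiter capacity

	if err := rw.RLock(50 * time.Millisecond); err != nil {
		return err // timed out, or the waiter list was full
	}
	// ... read shared state ...
	rw.RUnlock() // assumed method name

	if err := rw.WLock(time.Second); err != nil {
		return err
	}
	// ... mutate shared state ...
	rw.WUnlock() // assumed method name
	return nil
}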
func Page(q PageQuery) (*PageResponse, error) {
	if q.ID == 0 {
		return nil, errors.New("a page id is required")
	}

	resp, err := http.Get(q.urlString())
	if err != nil {
		return nil, errors.Wrap(err, "could not get http response")
	}
	defer resp.Body.Close()

	if resp.StatusCode != 200 {
		return nil, errors.Wrapf(
			errors.New(resp.Status),
			"StatusCode: %d; URL: %s",
			resp.StatusCode,
			q.urlString())
	}

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, errors.Wrap(err, "could not read http response body")
	}

	var response PageResponse
	if err := json.Unmarshal(body, &response); err != nil {
		return nil, errors.Wrap(err, "could not unmarshal http response")
	}

	return &response, nil
}
func (s *lockStatementImpl) String(database string) (sql string, err error) {
	if !validIdentifierName(database) {
		return "", errors.New("Invalid database name specified")
	}
	if len(s.locks) == 0 {
		return "", errors.New("No locks added")
	}

	buf := new(bytes.Buffer)
	buf.WriteString("LOCK TABLES ")

	for idx, lock := range s.locks {
		if lock.t == nil {
			return "", errors.Newf("nil table. Generated sql: %s", buf.String())
		}
		if err = lock.t.SerializeSql(database, buf); err != nil {
			return
		}
		if lock.w {
			buf.WriteString(" WRITE")
		} else {
			buf.WriteString(" READ")
		}
		if idx != len(s.locks)-1 {
			buf.WriteString(", ")
		}
	}

	return buf.String(), nil
}
// Request launches the search and compiles a list of weather records.
func Request(q *Query) ([]*Record, error) {
	if q.Start.IsZero() || q.End.IsZero() {
		return nil, errors.New("StartDate and EndDate (time.Time) values are required to compile weather data.")
	}
	if len(q.Stations) == 0 {
		return nil, errors.New("at least one weather station is required to provide weather response")
	}

	var list []*Record
	for _, station := range q.Stations {
		values, err := q.getAPIList(station)
		if err != nil {
			return nil, err
		}
		for _, v := range values {
			list = q.addToList(list, v, station)
		}
	}

	sort.Sort(weatherSorter{list})
	return list, nil
}
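// Usage sketch (illustrative only): fetch records for one station over a date
// range. The Query literal assumes Start, End, and Stations are exported
// fields, as the checks in Request suggest; the station identifier is made up
// and the element type of Stations is assumed to be string.
func exampleWeatherRequest() ([]*Record, error) {
	q := &Query{
		Start:    time.Date(2020, time.January, 1, 0, 0, 0, 0, time.UTC),
		End:      time.Date(2020, time.January, 31, 0, 0, 0, 0, time.UTC),
		Stations: []string{"KJFK"}, // hypothetical station ID
	}
	return Request(q)
}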
func (c *RawClient) sendCountRequest(
	code opCode,
	key string,
	delta uint64,
	initValue uint64,
	expiration uint32) CountResponse {

	if !c.validState {
		// An error has occurred in one of the previous requests. It's not
		// safe to continue sending.
		return NewCountErrorResponse(
			key,
			errors.New("Skipping due to previous error"))
	}

	if !isValidKeyString(key) {
		return NewCountErrorResponse(
			key,
			errors.New("Invalid key"))
	}

	err := c.sendRequest(
		code,
		0,
		[]byte(key),
		nil,
		delta,
		initValue,
		expiration)
	if err != nil {
		c.validState = false
		return NewCountErrorResponse(key, err)
	}

	return nil
}
func (p *QueryEventParser) parseUpdatedDbNames(data []byte, q *QueryEvent) (
	[]byte, error) {

	if len(data) == 0 {
		return data, errors.New("Not enough data")
	}

	q.numUpdatedDbs = new(uint8)
	data, err := readLittleEndian(data, q.numUpdatedDbs)
	if err != nil {
		return data, err
	}

	if *q.numUpdatedDbs < MaxDbsInEventMts {
		q.updatedDbNames = make([][]byte, *q.numUpdatedDbs, *q.numUpdatedDbs)
		for i := uint8(0); i < *q.numUpdatedDbs; i++ {
			idx := bytes.IndexByte(data, byte(0))
			if idx > -1 {
				q.updatedDbNames[i] = data[:idx]
				data = data[idx+1:]
			} else {
				return data, errors.New("Not enough data")
			}
		}
	}

	return data, nil
}
func (r *rawV4EventReader) NextEvent() (Event, error) {
	if r.isClosed {
		return nil, errors.New("Event reader is closed")
	}

	if r.nextEvent == nil { // new event
		r.nextEvent = &RawV4Event{
			sourceName:     r.srcName,
			sourcePosition: r.logPosition,
		}
	}

	if r.nextEvent.data == nil { // still parsing the header
		headerBytes, err := r.getHeaderBuffer().PeekAll()
		if err != nil {
			return nil, err
		}

		_, err = readLittleEndian(headerBytes, &r.nextEvent.header)
		if err != nil {
			return nil, err
		}

		bodySize := int(r.nextEvent.EventLength()) - sizeOfBasicV4EventHeader
		if bodySize < 0 { // should never happen
			return nil, errors.New("Invalid event size")
		}

		r.nextEvent.data = make(
			[]byte,
			r.nextEvent.EventLength(),
			r.nextEvent.EventLength())

		copied := copy(r.nextEvent.data, headerBytes)
		if copied != sizeOfBasicV4EventHeader { // should never happen
			panic("Failed to copy header")
		}

		r.bodyBuffer = bufio2.NewLookAheadBufferUsing(
			r.src,
			r.nextEvent.data[sizeOfBasicV4EventHeader:])
	}

	_, err := r.bodyBuffer.PeekAll()
	if err != nil {
		return nil, err
	}

	// consume the constructed event and clean the look ahead buffers
	event := r.nextEvent
	r.nextEvent = nil
	r.headerBuffer = nil
	r.bodyBuffer = nil

	r.logPosition += int64(event.EventLength())

	return event, nil
}
func (c *RawAsciiClient) DeleteMulti(keys []string) []MutateResponse {
	responses := make([]MutateResponse, len(keys), len(keys))

	c.mutex.Lock()
	defer c.mutex.Unlock()

	// NOTE: delete requests are pipelined.
	for i, key := range keys {
		if !isValidKeyString(key) {
			responses[i] = NewMutateErrorResponse(
				key,
				errors.New("Invalid key"))
			continue
		}

		err := c.writeStrings("delete ", key, "\r\n")
		if err != nil {
			responses[i] = NewMutateErrorResponse(key, err)
		}
	}

	err := c.flushWriter()
	if err != nil {
		// The delete requests may or may not have successfully reached the
		// memcached, just error out.
		for i, key := range keys {
			if responses[i] == nil {
				responses[i] = NewMutateErrorResponse(key, err)
			}
		}
	}

	for i, key := range keys {
		if responses[i] != nil {
			continue
		}

		line, err := c.readLine()
		if err != nil {
			responses[i] = NewMutateErrorResponse(key, err)
			continue
		}

		if line == "DELETED" {
			responses[i] = NewMutateResponse(key, StatusNoError, 0, true)
		} else if line == "NOT_FOUND" {
			responses[i] = NewMutateResponse(key, StatusKeyNotFound, 0, true)
		} else {
			// Unexpected error msg
			responses[i] = NewMutateErrorResponse(key, errors.New(line))
		}
	}

	_ = c.checkEmptyBuffers()

	return responses
}
func (c *RawAsciiClient) countRequest(
	cmd string,
	key string,
	delta uint64,
	initValue uint64,
	expiration uint32) CountResponse {

	if expiration != 0xffffffff {
		return NewCountErrorResponse(
			key,
			errors.New(
				"Ascii protocol does not support initial value / "+
					"expiration. expiration must be set to 0xffffffff."))
	}

	if !isValidKeyString(key) {
		return NewCountErrorResponse(
			key,
			errors.New("Invalid key"))
	}

	c.mutex.Lock()
	defer c.mutex.Unlock()

	err := c.writeStrings(
		cmd, " ", key, " ", strconv.FormatUint(delta, 10), "\r\n")
	if err != nil {
		return NewCountErrorResponse(key, err)
	}

	err = c.flushWriter()
	if err != nil {
		return NewCountErrorResponse(key, err)
	}

	line, err := c.readLine()
	if err != nil {
		return NewCountErrorResponse(key, err)
	}

	_ = c.checkEmptyBuffers()

	if line == "NOT_FOUND" {
		return NewCountResponse(key, StatusKeyNotFound, 0)
	}

	val, err := strconv.ParseUint(line, 10, 64)
	if err != nil {
		return NewCountErrorResponse(key, err)
	}

	return NewCountResponse(key, StatusNoError, val)
}
// checkExtensionTypes checks that the given extension is valid for pb.
func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
	// Check the extended type.
	if a, b := reflect.TypeOf(pb), reflect.TypeOf(extension.ExtendedType); a != b {
		return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String())
	}
	// Check the range.
	if !isExtensionField(pb, extension.Field) {
		return errors.New("proto: bad extension number; not in declared ranges")
	}
	return nil
}
func (c *RawAsciiClient) Stat(statsKey string) StatResponse {
	shardEntries := make(map[int](map[string]string))
	entries := make(map[string]string)
	shardEntries[c.ShardId()] = entries

	if statsKey != "" {
		return NewStatErrorResponse(
			errors.New("Ascii protocol does not support specific stats lookup"),
			shardEntries)
	}

	var err error

	c.mutex.Lock()
	defer c.mutex.Unlock()

	err = c.writeStrings("stats\r\n")
	if err != nil {
		return NewStatErrorResponse(err, shardEntries)
	}

	err = c.flushWriter()
	if err != nil {
		return NewStatErrorResponse(err, shardEntries)
	}

	for {
		line, err := c.readLine()
		if err != nil {
			return NewStatErrorResponse(err, shardEntries)
		}

		if line == "END" {
			break
		}

		// line is of the form: STAT <key> <value>
		slice := strings.SplitN(line, " ", 3)
		if len(slice) != 3 || slice[0] != "STAT" {
			// The channel is no longer in valid state since we're exiting
			// stats mid stream.
			c.validState = false
			return NewStatErrorResponse(errors.New(line), shardEntries)
		}

		entries[slice[1]] = slice[2]
	}

	_ = c.checkEmptyBuffers()

	return NewStatResponse(StatusNoError, shardEntries)
}
func (c *RawAsciiClient) checkEmptyBuffers() error {
	if c.writer.Buffered() != 0 {
		c.validState = false
		return errors.New("writer buffer not fully flushed")
	}

	if c.reader.Buffered() != 0 {
		c.validState = false
		return errors.New("reader buffer not fully drained")
	}

	return nil
}
func (s *CheckersSuite) TestNoErr(c *C) {
	// Test the true/false behavior.
	test(c, NoErr, true, "", nil)
	test(c, NoErr, true, "", 3)
	test(c, NoErr, true, "", error(nil))
	test(c, NoErr, false, "", stdlibErrors.New("message"))
	test(c, NoErr, false, "", errors.New("message"))

	// Test the message behavior.
	params := []interface{}{errors.New("1\n2\n3")}
	text := params[0].(error).Error()
	NoErr.Check(params, nil)
	c.Assert(fmt.Sprintf("%#v", params[0]), Equals, "\n"+text)
}
// Consume drops the first numBytes number of populated bytes from the look
// ahead buffer. NOTE: This is an O(n) operation since it requires shifting
// the remaining bytes to the beginning of the buffer. Avoid consuming the
// buffer byte by byte.
func (b *LookAheadBuffer) Consume(numBytes int) error {
	if numBytes == 0 {
		return nil
	}

	if numBytes < 0 {
		return errors.New("Cannot drop negative numBytes")
	}

	if b.bytesBuffered < numBytes {
		return errors.New("Consuming more bytes than bytes in buffer.")
	}

	copy(b.buffer, b.buffer[numBytes:b.bytesBuffered])
	b.bytesBuffered -= numBytes

	return nil
}
func (t *traffic) ClientReadErr(c lib.SSClienter, err error) {
	// err => EOF | i/o timeout | use of closed network connection
	if err != io.EOF {
		log.Println(errors.New(err.Error()))
	}
}
// Note: this is equivalent to net_field_length in sql-common/pack.c
func readFieldLength(valBytes []byte) (
	length uint64,
	remaining []byte,
	err error) {

	if len(valBytes) == 0 {
		return 0, nil, errors.New("Empty field length input")
	}

	val := uint64(valBytes[0])
	if val < 251 {
		return val, valBytes[1:], nil
	} else if val == 251 {
		return NullLength, valBytes[1:], nil
	}

	size := 9
	if val == 252 {
		size = 3
	} else if val == 253 {
		size = 4
	}

	if len(valBytes) < size {
		return 0, nil, errors.Newf(
			"Invalid field length input (expected at least %d bytes)",
			size)
	}

	// NOTE: mysql's net_field_length implementation is somewhat broken.
	// In particular, when net_store_length encodes a ulong using 8 bytes,
	// net_field_length will only read the first 4 bytes, and ignore the
	// rest ....
	return bytesToLEUint(valBytes[1:size]), valBytes[size:], nil
}
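// Worked example (illustrative only): decode a length that uses the 0xfc (252)
// two-byte encoding described above. The input bytes are made up for the
// example; bytesToLEUint and the error handling come from readFieldLength.
func exampleReadFieldLength() {
	// 0xfc marker followed by 0x2c 0x01 little-endian => length 300,
	// with one trailing payload byte left over in 'remaining'.
	input := []byte{0xfc, 0x2c, 0x01, 0xff}

	length, remaining, err := readFieldLength(input)
	if err != nil {
		panic(err)
	}
	fmt.Println(length, len(remaining)) // prints: 300 1
}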
func (d *newDecimalFieldDescriptor) ParseValue(data []byte) (
	value interface{},
	remaining []byte,
	err error) {

	return nil, nil, errors.New("TODO")
}
func (c *RawClient) sendCountRequest(
	code opCode,
	key string,
	delta uint64,
	initValue uint64,
	expiration uint32) CountResponse {

	if !isValidKeyString(key) {
		return NewCountErrorResponse(
			key,
			errors.New("Invalid key"))
	}

	err := c.sendRequest(
		code,
		0,
		[]byte(key),
		nil,
		delta,
		initValue,
		expiration)
	if err != nil {
		return NewCountErrorResponse(key, err)
	}

	return nil
}
// See ResourcePool for documentation.
func (p *MultiResourcePool) Register(resourceLocation string) error {
	if resourceLocation == "" {
		return errors.New("Registering invalid resource location")
	}

	p.rwMutex.Lock()
	defer p.rwMutex.Unlock()

	if p.isLameDuck {
		return errors.Newf(
			"Cannot register %s to lame duck resource pool",
			resourceLocation)
	}

	if _, inMap := p.locationPools[resourceLocation]; inMap {
		return nil
	}

	pool := p.createPool(p.options)
	if err := pool.Register(resourceLocation); err != nil {
		return err
	}

	p.locationPools[resourceLocation] = pool
	return nil
}
func (c *RawAsciiClient) Verbosity(verbosity uint32) Response {
	c.mutex.Lock()
	defer c.mutex.Unlock()

	err := c.writeStrings(
		"verbosity ",
		strconv.FormatUint(uint64(verbosity), 10),
		"\r\n")
	if err != nil {
		return NewErrorResponse(err)
	}

	err = c.flushWriter()
	if err != nil {
		return NewErrorResponse(err)
	}

	line, err := c.readLine()
	if err != nil {
		return NewErrorResponse(err)
	}

	_ = c.checkEmptyBuffers()

	if line != "OK" {
		// memcached returned an error message. This should never happen
		// according to the docs.
		return NewErrorResponse(errors.New(line))
	}

	return NewResponse(StatusNoError)
}
func (g *GoogleClient) Update(db *database.Database) (err error) {
	client := conf.NewClient(g.acct)

	err = client.Refresh(db)
	if err != nil {
		return
	}

	data := struct {
		Emails []struct {
			Type  string `json:"type"`
			Value string `json:"value"`
		} `json:"emails"`
	}{}

	err = client.GetJson(
		"https://www.googleapis.com/plus/v1/people/me",
		&data)
	if err != nil {
		return
	}

	if len(data.Emails) < 1 {
		err = errortypes.UnknownError{
			errors.New("Unable to get email from profile"),
		}
		return
	}

	g.acct.Id = data.Emails[0].Value

	return
}
// See ResourcePool for documentation.
func (p *RoundRobinResourcePool) Register(resourceLocation string) error {
	if resourceLocation == "" {
		return errors.New("Registering invalid resource location")
	}

	p.rwMutex.Lock()
	defer p.rwMutex.Unlock()

	if p.isLameDuck {
		return errors.Newf(
			"Cannot register %s to lame duck resource pool",
			resourceLocation)
	}

	for _, locPool := range p.pools {
		if locPool.ResourceLocation == resourceLocation {
			return nil
		}
	}

	pool := p.createPool(p.options)
	if err := pool.Register(resourceLocation); err != nil {
		return err
	}

	p.pools = append(
		p.pools,
		&ResourceLocationPool{
			ResourceLocation: resourceLocation,
			Pool:             pool,
		})

	shuffle(p.pools)

	return nil
}
func (this *watcher) doDel(path string) error {
	info, found := this.watchedByPath[path]
	if !found {
		return errors.Newf("can't remove non-existent kevent watch for: %s", path)
	}

	var kbuf [1]syscall.Kevent_t
	watchEntry := &kbuf[0]
	syscall.SetKevent(watchEntry, info.fd, syscall.EVFILT_VNODE, syscall.EV_DELETE)
	entryFlags := watchEntry.Flags

	success, errno := syscall.Kevent(this.kq, kbuf[:], nil, nil)
	if success == sysCallFailed {
		return os.NewSyscallError("kevent_rm_watch", errno)
	} else if entryFlags&syscall.EV_ERROR == syscall.EV_ERROR {
		return errors.New("kevent rm error")
	}

	syscall.Close(info.fd)

	// Remove children if it's a directory.
	for _, child := range info.childes {
		this.doDel(child)
	}

	delete(this.watchedByPath, path)
	delete(this.watchedByFD, info.fd)

	return nil
}
// Peek returns a slice of the look ahead buffer which holds numBytes
// number of bytes. If the look ahead buffer does not already hold enough
// bytes, it will read from the underlying reader to populate the rest.
// NOTE: the returned slice is not a copy of the raw buffer.
func (b *LookAheadBuffer) Peek(numBytes int) ([]byte, error) {
	if numBytes < 0 {
		return nil, errors.New("Cannot fill negative numBytes")
	}

	if numBytes > len(b.buffer) {
		return nil, errors.Newf(
			"Buffer full (buffer size: %d n: %d)",
			len(b.buffer),
			numBytes)
	}

	var err error
	var numRead int
	if b.bytesBuffered < numBytes {
		numRead, err = io.ReadAtLeast(
			b.src,
			b.buffer[b.bytesBuffered:],
			numBytes-b.bytesBuffered)
		if err != nil {
			if err == io.ErrUnexpectedEOF {
				// ErrUnexpectedEOF is a terrible error only returned by
				// ReadAtLeast. Return EOF (i.e., the original error) instead
				// of ErrUnexpectedEOF since no one ever checks for this.
				err = io.EOF
			}
		}
	}

	b.bytesBuffered += numRead

	if numBytes > b.bytesBuffered {
		numBytes = b.bytesBuffered
	}

	return b.buffer[:numBytes], err
}
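// Usage sketch (illustrative only): peek at a fixed-size header without
// consuming it, then drop it once parsed. The constructor is assumed to be the
// NewLookAheadBufferUsing(reader, backing slice) seen elsewhere in this listing
// (there qualified as bufio2.NewLookAheadBufferUsing); the 4-byte header size
// is made up for the example.
func exampleLookAheadBufferUsage(src io.Reader) error {
	backing := make([]byte, 1024)
	buf := NewLookAheadBufferUsing(src, backing) // assumed constructor

	header, err := buf.Peek(4) // does not consume; the slice aliases the buffer
	if err != nil {
		return err
	}
	_ = header // ... parse the header bytes ...

	return buf.Consume(4) // drop the header; O(n) shift of the remaining bytes
}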
func (c *RawAsciiClient) Version() VersionResponse {
	versions := make(map[int]string, 1)

	c.mutex.Lock()
	defer c.mutex.Unlock()

	err := c.writeStrings("version\r\n")
	if err != nil {
		return NewVersionErrorResponse(err, versions)
	}

	err = c.flushWriter()
	if err != nil {
		return NewVersionErrorResponse(err, versions)
	}

	line, err := c.readLine()
	if err != nil {
		return NewVersionErrorResponse(err, versions)
	}

	_ = c.checkEmptyBuffers()

	if !strings.HasPrefix(line, "VERSION ") {
		// memcached returned an error message.
		return NewVersionErrorResponse(errors.New(line), versions)
	}

	versions[c.ShardId()] = line[len("VERSION "):]

	return NewVersionResponse(StatusNoError, versions)
}
// See Client interface for documentation.
func (c *RawClient) Version() VersionResponse {
	versions := make(map[int]string)

	c.mutex.Lock()
	defer c.mutex.Unlock()

	if !c.validState {
		// An error has occurred in one of the previous requests. It's not
		// safe to continue sending.
		return NewVersionErrorResponse(
			errors.New("Skipping due to previous error"),
			versions)
	}

	err := c.sendRequest(opVersion, 0, nil, nil)
	if err != nil {
		c.validState = false
		return NewVersionErrorResponse(err, versions)
	}

	status, _, _, value, err := c.receiveResponse(opVersion)
	if err != nil {
		c.validState = false
		return NewVersionErrorResponse(err, versions)
	}

	versions[c.ShardId()] = string(value)

	return NewVersionResponse(status, versions)
}
// See ConnectionPool for documentation.
func (p *MultiConnectionPool) Register(network string, address string) error {
	if network == "" && address == "" {
		return errors.New("Registering invalid (network, address)")
	}

	key := NetworkAddress{
		Network: network,
		Address: address,
	}

	p.rwMutex.Lock()
	defer p.rwMutex.Unlock()

	if p.isLameDuck {
		return errors.Newf(
			"Cannot register (%s, %s) to lame duck connection pool",
			network,
			address)
	}

	if _, inMap := p.addressPools[key]; inMap {
		return nil
	}

	pool := p.createPool(p.options)
	if err := pool.Register(network, address); err != nil {
		return err
	}

	p.addressPools[key] = pool
	return nil
}