// Show prints the packet's fields for debugging.
func (mqtt *Mqtt) Show() {
	if mqtt.FixedHeader != nil {
		mqtt.FixedHeader.Show()
	} else {
		log.Debug("Fixed header is nil")
	}
	if mqtt.ConnectFlags != nil {
		mqtt.ConnectFlags.Show()
	} else {
		log.Debug("ConnectFlags is nil")
	}
	fmt.Println("ProtocolName:", mqtt.ProtocolName)
	fmt.Println("Version:", mqtt.ProtocolVersion)
	fmt.Println("TopicName:", mqtt.TopicName)
	fmt.Println("ClientId:", mqtt.ClientId)
	fmt.Println("WillTopic:", mqtt.WillTopic)
	fmt.Println("WillMessage:", mqtt.WillMessage)
	// The username/password output was redacted in the source; the field names
	// below are assumed from the adjacent labels.
	fmt.Println("Username:", mqtt.Username)
	fmt.Println("Password:", mqtt.Password)
	fmt.Println("KeepAliveTimer:", mqtt.KeepAliveTimer)
	fmt.Println("MessageId:", mqtt.MessageId)
	fmt.Println("Data:", mqtt.Data)
	fmt.Println("Topics:", len(mqtt.Topics))
	for i := 0; i < len(mqtt.Topics); i++ {
		fmt.Printf("(%s) (qos=%d)\n", mqtt.Topics[i], mqtt.Topics_qos[i])
	}
	fmt.Println("ReturnCode:", mqtt.ReturnCode)
}
// Mouse is the window's mouse handler.
// Drag only if clicked inside the title bar. Checking the Y position is enough,
// because X will be inside the window bounds anyway.
func (win *Window) Mouse(x int, y int, deltaX int, deltaY int, flags uint16) {
	if !win.tbHidden && win.Element.Y+deltaY <= y && y <= win.Element.Y+deltaY+win.titleBarHeight {
		if (flags & mouse.F_L_CLICK) != 0 {
			log.Debug("Window ms handler: click")
			win.wasClicked = true
		} else if win.wasClicked && (flags&mouse.F_L_HOLD) != 0 {
			log.Debug("Window ms handler: drag")
			win.Element.X += deltaX
			win.Element.Y += deltaY
			win.Element.ScreenX += deltaX
			win.Element.ScreenY += deltaY
			// update screen position for all the children
			for v := win.Children.Front(); v != nil; v = v.Next() {
				v.Value.(base.IElement).UpdateScreenX(deltaX)
				v.Value.(base.IElement).UpdateScreenY(deltaY)
			}
		} else if (flags & mouse.F_L_DBL_CLICK) != 0 {
			log.Debug("Window ms handler: double click")
		} else {
			log.Debug("Window ms handler: title do nothing...")
			win.wasClicked = false
		}
	}
}
// printSorted logs msg followed by the field paths in sorted order.
func printSorted(msg string, paths []FieldPath) {
	log.Debug(msg)
	sort.Sort(FieldPaths(paths))
	for _, p := range paths {
		log.Debug(" ", p)
	}
}
// Mouse is the button's mouse handler.
func (but *Button) Mouse(x int, y int, deltaX int, deltaY int, flags uint16) {
	if but.style&BS_TOGGLE != 0 {
		if (flags & mouse.F_L_CLICK) != 0 {
			but.pushed = !but.pushed
			but.Draw()
			but.clickHndr(but.pushed)
		}
	} else {
		if (flags & mouse.F_L_CLICK) != 0 {
			log.Debug("Button ms handler: click")
			but.pushed = true
			but.wasClicked = true
			but.Draw()
		} else if but.wasClicked && (flags&mouse.F_L_RELEASE) != 0 {
			log.Debug("Button ms handler: release")
			but.pushed = false
			but.wasClicked = false
			but.Draw()
			but.clickHndr(false)
		} else if but.wasClicked && (flags&mouse.F_EL_LEAVE) != 0 {
			// Release the button if the user clicked inside it and then dragged the
			// mouse outside without releasing the mouse button.
			log.Debug("Button ms handler: clicked inside. released outside.")
			but.pushed = false
			but.wasClicked = false
			but.Draw()
		}
	}
}
// Close all the objects at once and wait for them to finish with a channel.
func (d *Death) closeInMass(closable ...Closable) {
	count := len(closable)
	// call close async
	done := make(chan bool, count)
	for _, c := range closable {
		go d.closeObjects(c, done)
	}
	// wait on channel for notifications.
	timer := time.NewTimer(d.timeout)
	for {
		select {
		case <-timer.C:
			log.Warn(count, " object(s) remaining but timer expired.")
			return
		case <-done:
			count--
			log.Debug(count, " object(s) left")
			if count == 0 {
				log.Debug("Finished closing objects")
				return
			}
		}
	}
}
func (r *Consumer) handlerLoop(handler Handler) {
	clog.Debug("starting Handler")
	for {
		message, ok := <-r.incomingMessages
		if !ok {
			goto exit
		}
		if r.shouldFailMessage(message, handler) {
			message.Finish()
			continue
		}
		err := handler.HandleMessage(message)
		if err != nil {
			clog.Errorf("Handler returned error (%s) for msg %s", err, message.ID)
			if !message.IsAutoResponseDisabled() {
				message.Requeue(-1)
			}
			continue
		}
		if !message.IsAutoResponseDisabled() {
			message.Finish()
		}
	}
exit:
	clog.Debug("stopping Handler")
	if atomic.AddInt32(&r.runningHandlers, -1) == 0 {
		r.exit()
	}
}
func (a *run_tagger_action) Run() {
	SetupLogging(*a.verbosity)
	a.loadTokens()
	log.Debug("Tokens loaded")
	log.Debug("Filling connection pool")
	a.setupConnPool()
	taggers := new(tagger.Taggers)
	taggers.Init(a.connPool, a.workers)
	go taggers.Spawn()
	log.Debug("Tagging")
	// For each token, find it in the db.
	for i := range a.tokens {
		taggers.Queue <- &a.tokens[i]
	}
	close(taggers.Queue)
	<-taggers.Done
	// Write the tokens that were not found in the db to disk.
	fw := new(filewriter.TrecFileWriter)
	fw.Init("/tmp/missing_tokens")
	go fw.WriteAllTokens()
	for i := range taggers.MissingTokens {
		fw.StringChan <- &taggers.MissingTokens[i]
	}
	close(fw.StringChan)
	fw.Wait()
}
func (r *RabbitConnection) Connect(connected chan bool) {
	for {
		log.Debug("[Rabbit] Attempting to connect…")
		if err := r.tryToConnect(); err != nil {
			sleepFor := time.Second
			log.Debugf("[Rabbit] Failed to connect, sleeping %s…", sleepFor.String())
			time.Sleep(sleepFor)
			continue
		}
		connected <- true
		r.connected = true
		notifyClose := make(chan *amqp.Error)
		r.Connection.NotifyClose(notifyClose)

		// Block until we get disconnected, or shut down.
		select {
		case err := <-notifyClose:
			r.connected = false
			log.Debugf("[Rabbit] AMQP connection closed (notifyClose): %s", err.Error())
			return
		case <-r.closeChan:
			// Shut down connection.
			log.Debug("[Rabbit] Closing AMQP connection (closeChan closed)…")
			if err := r.Connection.Close(); err != nil {
				log.Errorf("Failed to close AMQP connection: %v", err)
			}
			r.connected = false
			return
		}
	}
}
func connPing() {
	timer := time.NewTicker(2 * time.Second)
	log.Debug("ping tcp ......")
	for {
		select {
		case <-timer.C:
			_, err := connection.Write([]byte(""))
			if err != nil {
				// The ping write failed, so try to re-establish the connection.
				if netType == "tcp" {
					conn, err := utils.ConnTCP(address)
					log.Debug("can connection tcp ", err)
					if err == nil {
						connection = conn
					}
				} else if netType == "tls" {
					conn, err := utils.ConnTLS(address)
					log.Debug("can connection tls ", err)
					if err == nil {
						connection = conn
					}
				}
			}
		}
	}
}
func (s *ESAPIV0) UpdateIndexSettings(name string, settings map[string]interface{}) error {
	log.Debug("update index: ", name, settings)
	cleanSettings(settings)
	url := fmt.Sprintf("%s/%s/_settings", s.Host, name)
	if _, ok := settings["settings"].(map[string]interface{})["index"]; ok {
		if set, ok := settings["settings"].(map[string]interface{})["index"].(map[string]interface{})["analysis"]; ok {
			log.Debug("update static index settings: ", name)
			staticIndexSettings := getEmptyIndexSettings()
			staticIndexSettings["settings"].(map[string]interface{})["index"].(map[string]interface{})["analysis"] = set
			Post(fmt.Sprintf("%s/%s/_close", s.Host, name), s.Auth, "", s.HttpProxy)
			body := bytes.Buffer{}
			enc := json.NewEncoder(&body)
			enc.Encode(staticIndexSettings)
			bodyStr, err := Request("PUT", url, s.Auth, &body, s.HttpProxy)
			if err != nil {
				log.Error(bodyStr, err)
				panic(err)
			}
			delete(settings["settings"].(map[string]interface{})["index"].(map[string]interface{}), "analysis")
			Post(fmt.Sprintf("%s/%s/_open", s.Host, name), s.Auth, "", s.HttpProxy)
		}
	}
	log.Debug("update dynamic index settings: ", name)
	body := bytes.Buffer{}
	enc := json.NewEncoder(&body)
	enc.Encode(settings)
	_, err := Request("PUT", url, s.Auth, &body, s.HttpProxy)
	return err
}
func (m *Migrator) NewFileReadWorker(pb *pb.ProgressBar, wg *sync.WaitGroup) {
	log.Debug("start reading file")
	f, err := os.Open(m.Config.DumpInputFile)
	if err != nil {
		log.Error(err)
		return
	}
	defer f.Close()

	r := bufio.NewReader(f)
	lineCount := 0
	for {
		line, err := r.ReadString('\n')
		if io.EOF == err || nil != err {
			break
		}
		lineCount += 1
		js := map[string]interface{}{}
		//log.Trace("reading file,",lineCount,",", line)
		err = json.Unmarshal([]byte(line), &js)
		if err != nil {
			log.Error(err)
			continue
		}
		m.DocChan <- js
		pb.Increment()
	}

	log.Debug("end reading file")
	close(m.DocChan)
	wg.Done()
}
func (s *RtmpNetStream) Play(streamName string, args ...Args) error {
	conn := s.conn
	s.mode = MODE_PRODUCER
	sendCreateStream(conn)
	for {
		msg, err := readMessage(conn)
		if err != nil {
			return err
		}
		if m, ok := msg.(*UnknowCommandMessage); ok {
			log.Debug(m)
			continue
		}
		reply := new(ReplyCreateStreamMessage)
		reply.Decode0(msg.Header(), msg.Body())
		log.Debug(reply)
		conn.streamid = reply.StreamId
		break
	}
	sendPlay(conn, streamName, 0, 0, false)
	for {
		msg, err := readMessage(conn)
		if err != nil {
			return err
		}
		if m, ok := msg.(*UnknowCommandMessage); ok {
			log.Debug(m)
			continue
		}
		result := new(ReplyPlayMessage)
		result.Decode0(msg.Header(), msg.Body())
		log.Debug(result)
		code := getString(result.Object, "code")
		if code == NetStream_Play_Reset {
			continue
		} else if code == NetStream_Play_Start {
			break
		} else {
			return errors.New(code)
		}
	}
	sendSetBufferMessage(conn)
	if strings.HasSuffix(conn.app, "/") {
		s.path = conn.app + strings.Split(streamName, "?")[0]
	} else {
		s.path = conn.app + "/" + strings.Split(streamName, "?")[0]
	}
	err := notifyPlaying(s)
	if err != nil {
		return err
	}
	go s.cserve()
	return nil
}
func (manager *ConnectionManager) listenOnUdpConnection() {
	var buffer [2048]byte
	// Listen forever
	// TODO: Revisit
	for {
		length, remoteAddr, err := manager.udpConn.ReadFromUDP(buffer[0:])
		if err != nil {
			panic(err.Error())
		}
		// Check if we've seen UDP packets from this address before - if so, reuse
		// existing client object
		client, ok := manager.udpClients[remoteAddr.String()]
		if !ok {
			log.Debug("New UDP client")
			writer := udp.NewUDPWriter(manager.udpConn, remoteAddr)
			bufferedWriter := bufio.NewWriter(writer)
			client = NewClient(strconv.Itoa(manager.rand.Int()), bufferedWriter, nil)
			manager.udpClients[remoteAddr.String()] = client
		} else {
			log.Debug("Found UDP client!")
		}
		// Log the number of bytes received
		manager.qm.metricsManager.metricsChannel <- NewMetric("bytesin.udp", "counter", int64(length))
		// TODO: Parse message, and check if we're expecting a message
		commandTokens := strings.Fields(string(buffer[:length]))
		var message []byte
		if commandTokens[0] == "pub" {
			// Use bytes.Equal until Go1.7 (https://github.com/golang/go/issues/14302)
			for {
				var err error
				length, _, err := manager.udpConn.ReadFromUDP(buffer[0:])
				if err != nil {
					return
				}
				// TODO: Is this cross platform? Needs testing
				if !bytes.Equal(buffer[:length], []byte{'.', '\r', '\n'}) {
					message = append(message, buffer[:length]...)
				} else {
					break
				}
			}
		}
		manager.parseClientCommand(commandTokens, &message, client)
		log.Debugf("Read %d bytes from %s: %s", length, remoteAddr, string(buffer[:length]))
	}
}
// Init initializes the frame buffer device.
func Init(fbdev, tty string) (*Framebuffer, error) {
	var fb = new(Framebuffer)
	var err error
	fb.tty, err = os.OpenFile(tty, os.O_RDWR, os.ModeDevice)
	if err != nil {
		return nil, err
	}
	// Switch to graphics mode. This prevents the kernel from modifying the video
	// ram (vt switching/blanking, cursor, gpm mouse cursor).
	err = ioctl(fb.tty.Fd(), KDSETMODE, unsafe.Pointer(uintptr(KD_GRAPHICS)))
	if err != nil {
		fb.tty.Close()
		return nil, err
	}
	fb.dev, err = os.OpenFile(fbdev, os.O_RDWR, os.ModeDevice)
	if err != nil {
		fb.tty.Close()
		return nil, err
	}
	err = ioctl(fb.dev.Fd(), FBIOGET_FSCREENINFO, unsafe.Pointer(&fb.finfo))
	if err != nil {
		fb.dev.Close()
		fb.tty.Close()
		return nil, err
	}
	log.Debug(utils.StructPrint(&fb.finfo))
	err = ioctl(fb.dev.Fd(), FBIOGET_VSCREENINFO, unsafe.Pointer(&fb.Vinfo))
	if err != nil {
		fb.dev.Close()
		fb.tty.Close()
		return nil, err
	}
	log.Debug(utils.StructPrint(&fb.Vinfo))
	memSize := int(fb.finfo.Smem_len + uint32(fb.finfo.Smem_start&uint64(syscall.Getpagesize()-1)))
	fb.Mem, err = syscall.Mmap(int(fb.dev.Fd()), 0, memSize, PROT_READ|PROT_WRITE, MAP_SHARED)
	if err != nil {
		fb.dev.Close()
		fb.tty.Close()
		return nil, err
	}
	fb.MemOffscreen = make([]byte, memSize)
	return fb, nil
}
func (s *ESAPIV0) DeleteIndex(name string) (err error) {
	log.Debug("start delete index: ", name)
	url := fmt.Sprintf("%s/%s", s.Host, name)
	Request("DELETE", url, s.Auth, nil, s.HttpProxy)
	log.Debug("delete index: ", name)
	return nil
}
func (imageManager *dockerImageManager) removeLeastRecentlyUsedImage() error {
	seelog.Debug("Attempting to obtain ImagePullDeleteLock for removing images")
	ImagePullDeleteLock.Lock()
	seelog.Debug("Obtained ImagePullDeleteLock for removing images")
	defer seelog.Debug("Released ImagePullDeleteLock after removing images")
	defer ImagePullDeleteLock.Unlock()
	leastRecentlyUsedImage := imageManager.getUnusedImageForDeletion()
	if leastRecentlyUsedImage == nil {
		return fmt.Errorf("No more eligible images for deletion")
	}
	imageManager.removeImage(leastRecentlyUsedImage)
	return nil
}
func CreateApp(app *model.App) (*model.App, error) {
	log.Debug(s.GetLeader())
	if _, err := s.CreateApp(app); err != nil {
		return nil, err
	}
	if app.Instance == 0 {
		log.Debug("====:", app.Instance)
		return app, nil
	}
	for i := uint64(0); i < app.Instance; i++ {
		go CreateAndStartContainer(app)
	}
	return app, nil
}
// Mouse is the title-bar button's mouse handler.
func (but *TitleBarButton) Mouse(x int, y int, deltaX int, deltaY int, flags uint16) {
	if (flags & mouse.F_L_CLICK) != 0 {
		log.Debug("TitleBarButton ms handler: click")
		but.wasClicked = true
		gfx.RectFilled(but.parent.Element.Buffer,
			but.Element.X, but.Element.Y,
			but.Element.X+but.Element.Width, but.Element.Y+but.Element.Height,
			but.parent.Element.Width, 49, 80, 0, gfx.A_OPAQUE)
		gfx.Rect(but.parent.Element.Buffer,
			but.Element.X, but.Element.Y,
			but.Element.X+but.Element.Width-1, but.Element.Y+but.Element.Height-1,
			but.parent.Element.Width, 0, 0, 0, gfx.A_OPAQUE)
	} else if but.wasClicked && (flags&mouse.F_L_RELEASE) != 0 {
		log.Debug("TitleBarButton ms handler: release")
		but.wasClicked = false
		but.Draw()
		but.clickHndr(false)
	}
}
func complex_handshake(rw *bufio.ReadWriter, input []byte) bool {
	result, scheme, challenge, digest := validateClient(input)
	if !result {
		return result
	}
	log.Debugf("Validate Client %v scheme %v challenge %0X digest %0X", result, scheme, challenge, digest)
	s1 := create_s1()
	log.Debug("s1 length", len(s1))
	off := getDigestOffset(s1, scheme)
	log.Debug("s1 digest offset", off)
	buf := new(bytes.Buffer)
	buf.Write(s1[:off])
	buf.Write(s1[off+32:])
	tempHash, _ := HMACsha256(buf.Bytes(), GENUINE_FMS_KEY[:36])
	copy(s1[off:], tempHash)
	log.Debug("s1 length", len(s1))
	// compute the challenge digest
	tempHash, _ = HMACsha256(digest, GENUINE_FMS_KEY[:68])
	log.Debug("s2 length tempHash", len(tempHash))
	randBytes := create_s2()
	log.Debug("s2 length", len(randBytes))
	lastHash, _ := HMACsha256(randBytes, tempHash)
	log.Debug("s2 length lastHash", len(lastHash))
	log.Debug("s2 length", len(randBytes))
	buf = new(bytes.Buffer)
	buf.WriteByte(0x03)
	buf.Write(s1)
	buf.Write(randBytes)
	buf.Write(lastHash)
	log.Debug("send s0s1s2", buf.Len())
	rw.Write(buf.Bytes())
	rw.Flush()
	ReadBuf(rw, HANDSHAKE_SIZE)
	return true
}
func serve(srv *Server, con net.Conn) {
	conn := newconn(con, srv)
	if !handshake1(conn.buf) {
		conn.Close()
		return
	}
	log.Debug("readMessage")
	msg, err := readMessage(conn)
	if err != nil {
		log.Error("NetConnecton read error", err)
		conn.Close()
		return
	}
	cmd, ok := msg.(*ConnectMessage)
	if !ok || cmd.Command != "connect" {
		log.Error("NetConnecton Received Invalid ConnectMessage ", msg)
		conn.Close()
		return
	}
	conn.app = getString(cmd.Object, "app")
	conn.objectEncoding = int(getNumber(cmd.Object, "objectEncoding"))
	log.Debug(cmd)
	err = sendAckWinsize(conn, 512<<10)
	if err != nil {
		log.Error("NetConnecton sendAckWinsize error", err)
		conn.Close()
		return
	}
	err = sendPeerBandwidth(conn, 512<<10)
	if err != nil {
		log.Error("NetConnecton sendPeerBandwidth error", err)
		conn.Close()
		return
	}
	err = sendStreamBegin(conn)
	if err != nil {
		log.Error("NetConnecton sendStreamBegin error", err)
		conn.Close()
		return
	}
	err = sendConnectSuccess(conn)
	if err != nil {
		log.Error("NetConnecton sendConnectSuccess error", err)
		conn.Close()
		return
	}
	conn.connected = true
	newNetStream(conn, shandler, nil).serve()
}
func CreateContainer(master string, container *model.Container) (*model.ContainerResponse, error) {
	c, err := json.Marshal(container)
	if err != nil {
		return nil, err
	}
	log.Debug(string(c))
	req, err := http.NewRequest("POST", master+containercreate, strings.NewReader(string(c)))
	if err != nil {
		return nil, err
	}
	req.Header.Add("Content-Type", "application/json")
	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	rbody, err := ReadResponseBody(resp.Body)
	if err != nil {
		return nil, err
	}
	var body model.ContainerResponse
	err = json.Unmarshal(rbody, &body)
	if err != nil {
		return nil, err
	}
	return &body, nil
}
func (s *ESAPIV0) GetIndexSettings(indexNames string) (*Indexes, error) {
	// get all settings
	allSettings := &Indexes{}
	url := fmt.Sprintf("%s/%s/_settings", s.Host, indexNames)
	resp, body, errs := Get(url, s.Auth, s.HttpProxy)
	if errs != nil {
		return nil, errs[0]
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		return nil, errors.New(body)
	}
	log.Debug(body)
	err := json.Unmarshal([]byte(body), allSettings)
	if err != nil {
		panic(err)
	}
	return allSettings, nil
}
func (fs *GDriveFileSystem) Get(p string) (webdav.StatusCode, io.ReadCloser, int64) {
	pFile := fs.getFile(p, false)
	if pFile == nil {
		return webdav.StatusCode(404), nil, -1
	}
	f := pFile.file
	downloadUrl := f.DownloadUrl
	log.Debug("downloadUrl=", downloadUrl)
	if downloadUrl == "" {
		log.Error("No download url: ", f)
		return webdav.StatusCode(500), nil, -1
	}
	req, err := http.NewRequest("GET", downloadUrl, nil)
	if err != nil {
		log.Error("NewRequest ", err)
		return webdav.StatusCode(500), nil, -1
	}
	resp, err := fs.transport.RoundTrip(req)
	if err != nil {
		log.Error("RoundTrip ", err)
		return webdav.StatusCode(500), nil, -1
	}
	return webdav.StatusCode(200), resp.Body, f.FileSize
}
func (slack *SlackNotifier) postToSlack(slackMessage *SlackMessage) error {
	data, err := json.Marshal(slackMessage)
	if err != nil {
		log.Errorf("Unable to marshal slack payload:%+v", err)
		return err
	}
	log.Debugf("struct = %+v, json = %s", slackMessage, string(data))
	b := bytes.NewBuffer(data)
	req, err := http.NewRequest("POST", slack.Url, b)
	if err != nil {
		log.Errorf("Unable to build slack request:%+v", err)
		return err
	}
	req.Header.Set("Content-Type", "application/json")
	if res, err := slack.HttpClient.Do(req); err != nil {
		log.Errorf("Unable to send data to slack:%+v", err)
		return err
	} else {
		defer res.Body.Close()
		statusCode := res.StatusCode
		if statusCode != 200 {
			body, _ := ioutil.ReadAll(res.Body)
			log.Errorf("Unable to notify slack:%s", string(body))
			return errors.New("Send to Slack failed")
		}
		log.Debug("Slack notification sent")
		return nil
	}
}
// rebalance recomputes the cache capacity allocations; it has to be called on each
// cache creation or deletion. 'shouldTrim', if true, causes trimCommitted() to be
// called on all the caches. Recommended if a new cache was created, because
// otherwise the old caches would stay over the new capacity until their next
// WriteAt happens.
func (r *RAMCacheProvider) rebalance(shouldTrim bool) {
	// Cache size is a diminishing-return thing: the more of it a torrent has, the
	// less of a difference additional cache makes. Thus, instead of scaling the
	// distribution linearly with torrent size, we'll do it by square root.
	log.Debug("Rebalancing caches...")
	var scalingTotal float64
	sqrts := make(map[string]float64)
	for i, cache := range r.caches {
		sqrts[i] = math.Sqrt(float64(cache.torrentLength))
		scalingTotal += sqrts[i]
	}
	scalingFactor := float64(r.capacity*1024*1024) / scalingTotal
	for i, cache := range r.caches {
		newCap := int(math.Floor(scalingFactor * sqrts[i] / float64(cache.pieceSize)))
		if newCap == 0 {
			newCap = 1 // Something's better than nothing!
		}
		log.Debugf("Setting cache '%s' to new capacity %v (%v MiB)", cache.infohash, newCap, float32(newCap*cache.pieceSize)/float32(1024*1024))
		cache.setCapacity(newCap)
	}
	if shouldTrim {
		for _, cache := range r.caches {
			cache.trimCommitted()
		}
	}
}
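// A minimal, self-contained sketch of the square-root allocation that rebalance()
// applies above. The sizes, capacity, and function name here are hypothetical and
// only mirror the arithmetic, not the provider's real types. It illustrates the
// intended effect: a torrent 100x larger only receives a 10x larger slice of the
// cache budget.
func sqrtShares(capacityMiB float64, sizesMiB map[string]float64) map[string]float64 {
	sqrts := make(map[string]float64, len(sizesMiB))
	var total float64
	for name, size := range sizesMiB {
		sqrts[name] = math.Sqrt(size)
		total += sqrts[name]
	}
	shares := make(map[string]float64, len(sizesMiB))
	for name := range sizesMiB {
		shares[name] = capacityMiB * sqrts[name] / total
	}
	return shares
}

// Example: sqrtShares(64, map[string]float64{"small": 100, "large": 10000})
// yields roughly {"small": 5.8, "large": 58.2} MiB.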
func marshal(obj interface{}) []byte {
	bytes, err := json.MarshalIndent(obj, "", " ")
	if err != nil {
		log.Debug("marshal:", err)
	}
	return bytes
}
func startSession(url string, region string, credentialProvider *credentials.Credentials, acceptInvalidCert bool, statsEngine stats.Engine, publishMetricsInterval time.Duration, deregisterInstanceEventStream *eventstream.EventStream) error {
	client := tcsclient.New(url, region, credentialProvider, acceptInvalidCert, statsEngine, publishMetricsInterval)
	defer client.Close()

	err := deregisterInstanceEventStream.Subscribe(deregisterContainerInstanceHandler, client.Disconnect)
	if err != nil {
		return err
	}
	defer deregisterInstanceEventStream.Unsubscribe(deregisterContainerInstanceHandler)

	// Start a timer and listen for tcs heartbeats/acks. The timer is reset when
	// we receive a heartbeat from the server or when a publish metrics message
	// is acked.
	timer := time.AfterFunc(utils.AddJitter(heartbeatTimeout, heartbeatJitter), func() {
		// Close the connection if there haven't been any messages received from
		// the backend for a long time.
		log.Debug("TCS Connection hasn't had a heartbeat or an ack message in too long of a timeout; disconnecting")
		client.Close()
	})
	defer timer.Stop()
	client.AddRequestHandler(heartbeatHandler(timer))
	client.AddRequestHandler(ackPublishMetricHandler(timer))
	err = client.Connect()
	if err != nil {
		log.Error("Error connecting to TCS: " + err.Error())
		return err
	}
	return client.Serve()
}
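// A minimal sketch of the disconnect-timer pattern used by startSession above:
// time.AfterFunc arms a callback, and every heartbeat/ack resets the timer so the
// callback only fires after a quiet period with no messages. The names here
// (newDisconnectTimer, onHeartbeat) are illustrative, not part of the agent's API.
func newDisconnectTimer(timeout time.Duration, disconnect func()) (timer *time.Timer, onHeartbeat func()) {
	timer = time.AfterFunc(timeout, disconnect)
	onHeartbeat = func() {
		// Reset re-arms the timer, pushing the disconnect out by another full
		// timeout each time a heartbeat or ack arrives.
		timer.Reset(timeout)
	}
	return timer, onHeartbeat
}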
func calcF2() {
	x := 1
	y := 2
	log.Debug("Calculating F")
	result := library.CalculateF(x, y)
	log.Debugf("Got F = %d", result)
}
func (emailer *EmailNotifier) sendConsumerGroupStatusNotify() error {
	var bytesToSend bytes.Buffer
	log.Debug("send email")
	msgs := make([]Message, len(emailer.Groups))
	i := 0
	for group, msg := range emailer.groupMsgs {
		msgs[i] = msg
		delete(emailer.groupMsgs, group)
		i++
	}
	err := emailer.template.Execute(&bytesToSend, struct {
		From    string
		To      string
		Results []Message
	}{
		From:    emailer.From,
		To:      emailer.To,
		Results: msgs,
	})
	if err != nil {
		log.Error("Failed to assemble email:", err)
		return err
	}
	err = smtp.SendMail(fmt.Sprintf("%s:%v", emailer.Server, emailer.Port), emailer.auth,
		emailer.From, []string{emailer.To}, bytesToSend.Bytes())
	if err != nil {
		log.Error("Failed to send email message:", err)
		return err
	}
	return nil
}
func (rm *RouteManager) Add(route *Route) error {
	rm.Lock()
	defer rm.Unlock()
	factory, found := AdapterFactories.Lookup(route.AdapterType())
	if !found {
		return errors.New("bad adapter: " + route.Adapter)
	}
	adapter, err := factory(route)
	if err != nil {
		return err
	}
	if route.ID == "" {
		h := sha1.New()
		io.WriteString(h, strconv.Itoa(int(time.Now().UnixNano())))
		route.ID = fmt.Sprintf("%x", h.Sum(nil))[:12]
	}
	route.closer = make(chan bool)
	route.adapter = adapter
	rm.routes[route.ID] = route
	if rm.persistor != nil {
		if err := rm.persistor.Add(route); err != nil {
			log.Debug("persistor:", err)
		}
	}
	if rm.routing {
		go rm.route(route)
	}
	return nil
}