func showHandler(backend appchilada.Backend) func(http.ResponseWriter, *http.Request) {
	getTemplate := getTemplateFunc("resources/show.html")
	return func(w http.ResponseWriter, r *http.Request) {
		r.ParseForm()
		name := r.URL.Path[len("/show/"):]
		var start, end int64
		if startVal := r.Form.Get("start"); startVal != "" {
			start, _ = strconv.Atoi64(startVal)
		} else {
			// Default to last 24 hours
			start = time.Seconds() - 86400
		}
		if endVal := r.Form.Get("end"); endVal != "" {
			end, _ = strconv.Atoi64(endVal)
		} else {
			// Default to now
			end = time.Seconds()
		}
		results, err := backend.Read(name, appchilada.Interval{start, end})
		if err != nil {
			// TODO Output error in response
			log.Printf("Error getting results for %s: %v", name, err)
			return
		}
		if err := getTemplate().Execute(w, results); err != nil {
			log.Printf("Error executing template: %v", err)
		}
	}
}
func (self *TarwFS) Create(name string, flags, mode uint32) (file fuse.File, eno fuse.Status) {
	//log.Printf("Create:%s", name)
	self.lock.Lock()
	_, exists := self.isDir[name]
	if !exists {
		self.files[name] = &os.FileInfo{Mode: mode | syscall.S_IFREG}
		self.isDir[name] = false
		self.lock.Unlock()
		start := time.Seconds()
		file = newFile(func(r io.Reader, rlen int64) (err os.Error) {
			self.lock.Lock()
			err = self.w.WriteHeader(&tar.Header{
				Typeflag: tar.TypeReg,
				Name:     name,
				Mode:     int64(mode),
				Size:     rlen,
				Ctime:    start,
				Mtime:    time.Seconds(),
				Atime:    time.Seconds(),
			})
			if err == nil {
				_, err = io.Copyn(self.w, r, rlen)
			}
			self.lock.Unlock()
			return
		})
		eno = fuse.OK
	} else {
		self.lock.Unlock()
		eno = fuse.EINVAL
	}
	return
}
// timer closes the session named sess if it is not cancelled via killme
// within 5 seconds.
func timer(sess string, killme chan bool) {
	limit := time.Seconds() + 5
	for {
		select {
		case <-killme:
			return
		default:
			if limit < time.Seconds() {
				if _, ok := sessions[sess]; ok {
					sessions[sess] <- "timeout"
				}
				close(killme)
				return
			}
			time.Sleep(1000000000) // 1 second
		}
	}
}
// Close connections that have been idle for > p.timeout seconds.
func timeoutCloser(p *pool) {
	for p != nil && !p.closed {
		p.cond.L.Lock()
		now := time.Seconds()
		delay := 1e9 * p.timeout
		for p.conns.Len() > 0 {
			front := p.conns.Front()
			pc := front.Value.(poolConn)
			atime := pc.atime
			if (now - atime) > p.timeout {
				pc.Conn.Close()
				p.conns.Remove(front)
				p.n--
				p.log("idle connection closed")
			} else {
				// Wait until first connection would timeout if it isn't used.
				delay = 1e9 * (p.timeout - (now - atime) + 1)
				break
			}
		}
		// Don't let the pool fall below the minimum number of connections.
		for i := p.n; i < p.min; i++ {
			c, err := Connect(p.params, LogError)
			if err != nil {
				p.log("can't create connection")
			} else {
				p.conns.PushFront(poolConn{c, time.Seconds()})
				p.n++
			}
		}
		p.cond.L.Unlock()
		time.Sleep(delay)
	}
	p.log("timeoutCloser finished")
}
// convert converts campbx.OpenOrders to xgen.OpenOrders.
func (h OpenOrders) convert() (o xgen.OpenOrders) {
	var err os.Error
	o.Sell = make(map[string]xgen.OpenOrder)
	o.Buy = make(map[string]xgen.OpenOrder)
	for _, order := range h.Buy {
		if order.Oid != "" {
			var t xgen.OpenOrder
			t.Date = time.Seconds() // Should use time.Parse to convert order.DateEntered to Unix time
			t.Price, err = strconv.Atof64(order.Price)
			check(err)
			t.Amount, err = strconv.Atof64(order.Quantity)
			check(err)
			o.Buy[order.Oid] = t
		}
	}
	for _, order := range h.Sell {
		if order.Oid != "" {
			var t xgen.OpenOrder
			t.Date = time.Seconds() // Should use time.Parse to convert order.DateEntered to Unix time
			t.Price, err = strconv.Atof64(order.Price)
			check(err)
			t.Amount, err = strconv.Atof64(order.Quantity)
			check(err)
			o.Sell[order.Oid] = t
		}
	}
	return
}
func TestObjectId(t *testing.T) {
	t1 := time.Seconds()
	min := MinObjectIdForTime(t1)
	id := NewObjectId()
	max := MaxObjectIdForTime(time.Seconds())
	if id < min {
		t.Errorf("%q < %q", id, min)
	}
	if id > max {
		t.Errorf("%q > %q", id, max)
	}
	if min.CreationTime() != t1 {
		t.Errorf("min.CreationTime() = %d, want %d", min.CreationTime(), t1)
	}
	id2, err := NewObjectIdHex(id.String())
	if err != nil {
		t.Errorf("NewObjectIdHex returned %q", err)
	}
	if id2 != id {
		t.Errorf("%q != %q", id2, id)
	}
	t2 := ObjectId("").CreationTime()
	if t2 != 0 {
		t.Errorf("creation time for invalid id = %d, want 0", t2)
	}
}
func init() {
	start := time.Seconds()
	Publish("runtime", map[string]interface{}{
		"cgocalls":   Func(func() interface{} { return runtime.Cgocalls() }),
		"goroutines": Func(func() interface{} { return runtime.Goroutines() }),
		"version":    runtime.Version(),
		"memstats":   &runtime.MemStats,
	})
	Publish("uptimeSeconds", Func(func() interface{} { return time.Seconds() - start }))
	Publish("cmdline", &os.Args)
}
// SetValue populates the task from the HTML form values in the request.
func (t *Tasks) SetValue(Id string, r *http.Request) os.Error {
	var err os.Error
	t.UserId = Id
	t.KeyID, err = strconv.Atoi64(r.FormValue(FORM_KEY))
	if err != nil {
		t.KeyID = 0
	}
	t.Status, err = strconv.Atoi(r.FormValue(FORM_STATUS))
	if err != nil {
		log.Println(err)
		return err
	}
	log.Println("Status")
	t.Context = html.EscapeString(r.FormValue(FORM_CONTEXT))
	t.IsUseLimit, err = strconv.Atob(r.FormValue(FORM_LIMIT))
	if err != nil {
		log.Println(err)
		return err
	}
	log.Println("IsUseLimit")
	t.IsComplete = (t.Status == 2)
	t.IsCanceld = (t.Status == 9)
	log.Println("Set Bool Value")
	if t.IsUseLimit {
		log.Println(r.FormValue(FORM_DATE))
		log.Println(time.RFC3339)
		var limit *time.Time
		limit, err = time.Parse("2006-01-02 15:04:05", r.FormValue(FORM_DATE))
		if err == nil {
			t.PlanDate = datastore.SecondsToTime(limit.Seconds())
		} else {
			log.Println(err)
			return err
		}
	}
	log.Println("PostDate")
	t.PostDate = datastore.SecondsToTime(time.Seconds())
	if t.IsComplete {
		t.CompleteDate = datastore.SecondsToTime(time.Seconds())
	}
	return nil
}
// WaitOnline polls until the node is online or maxWait seconds have elapsed.
func WaitOnline(id, maxWait int) os.Error {
	start := time.Seconds()
	for tc.nodes[id].Cluster.MyNode.Status != cluster.Status_Online {
		if time.Seconds()-start > int64(maxWait) {
			return os.NewError("Maximum wait time exceeded")
		}
		time.Sleep(1000000) // 1 ms
	}
	return nil
}
//
// Receive and process a new ping
//
// PING node nodeid nodetype hash
// PING 192.168.1.5 1 slave cf81a8580f8296424ee7589c3aca3b83981af958
//
func (myDHTServer *DHTServerStruct) newPing(con *net.TCPConn, s string, chall string) {
	fields := strings.Split(s, " ", 0)
	if len(fields) != 5 {
		// Not enough fields, just exit
		myDHTServer.logger.Logf(LMED, "Received ping with insufficient params")
		return
	}
	// Authenticate
	sharesp := fields[4]
	shap := SHA1String(fmt.Sprintf("%s%s", chall, G_clusterKey))
	shastr := fmt.Sprintf("%x", shap)
	if shastr != sharesp {
		myDHTServer.logger.Logf(LMIN, "Failed PING: %s %s", fields[1], fields[2])
		return
	}
	if compareConWithIPString(con, fields[1]) == false {
		myDHTServer.logger.Logf(LMIN, "Bad PING, RemoteAddr and IP parameter different: %s %s", con.RemoteAddr().String(), fields[1])
		return
	}
	// Does this IP address or node already exist?
	G_nodesLock.Lock()
	for c := range G_nodes.Iter() {
		if (c.(*nodesInClusterStruct).ip == fields[1]) && (c.(*nodesInClusterStruct).nodeid == fields[2]) {
			// Already exists; just update the time stamp
			c.(*nodesInClusterStruct).lastPing = time.Seconds()
			G_nodesLock.Unlock() // listCluster uses G_nodesLock
			myDHTServer.listCluster(con)
			return
		}
	}
	// Need to check for IP in list with different nodeid and vice versa
	// TBD
	n := new(nodesInClusterStruct)
	n.ip = fields[1]
	n.nodeid = fields[2]
	n.nodetype = fields[3]
	n.lastPing = time.Seconds()
	G_nodes.PushBack(n)
	G_nodesLock.Unlock()
	myDHTServer.logger.Logf(LMED, "New %s node joins cluster %s %s", fields[3], fields[1], fields[2])
	myDHTServer.listCluster(con)
}
// The prime sieve: daisy-chain filter processes together.
func sieves(w http.ResponseWriter, r *http.Request) {
	start := time.Seconds()
	fmt.Fprintf(w, "the first %v prime numbers are:<br />", NUM_PRIMES)
	ch := make(chan int) // Create a new channel.
	go generate(ch)      // Start generate() as a goroutine.
	for i := 0; i < NUM_PRIMES; i++ { // Print the first NUM_PRIMES primes.
		prime := <-ch
		fmt.Fprintf(w, "%v<br />", prime)
		ch1 := make(chan int)
		go filter(ch, ch1, prime)
		ch = ch1
	}
	end := time.Seconds()
	fmt.Fprintf(w, "<br /><br />the computation took %v seconds", end-start)
}
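The handler above daisy-chains goroutines in the style of the classic Go concurrent prime sieve; the generate and filter helpers it launches are not shown here. A minimal sketch of what they might look like, matching the call sites above (the project's own versions may differ):

// Sketch of the generate/filter helpers assumed by sieves above, following
// the classic Go concurrent prime sieve.

// generate sends the integers 2, 3, 4, ... on ch.
func generate(ch chan int) {
	for i := 2; ; i++ {
		ch <- i
	}
}

// filter copies values from in to out, dropping any multiple of prime.
func filter(in, out chan int, prime int) {
	for {
		if i := <-in; i%prime != 0 {
			out <- i
		}
	}
}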
func (ctx *Context) GetSecureCookie(name string) (string, bool) {
	for _, cookie := range ctx.Request.Cookie {
		if cookie.Name != name {
			continue
		}
		parts := strings.SplitN(cookie.Value, "|", 3)
		if len(parts) != 3 {
			// Malformed cookie value; treat it as invalid rather than panicking.
			return "", false
		}
		val := parts[0]
		timestamp := parts[1]
		sig := parts[2]
		if getCookieSig(ctx.Server.Config.CookieSecret, []byte(val), timestamp) != sig {
			return "", false
		}
		ts, _ := strconv.Atoi64(timestamp)
		if time.Seconds()-31*86400 > ts {
			// Reject cookies older than 31 days.
			return "", false
		}
		buf := bytes.NewBufferString(val)
		encoder := base64.NewDecoder(base64.StdEncoding, buf)
		res, _ := ioutil.ReadAll(encoder)
		return string(res), true
	}
	return "", false
}
func beginWatch() {
	//fmt.Println("Begin watching...")
	if total > 0.5 || start == 0 {
		fmt.Println("Begin watching...")
		start = time.Seconds()
	}
}
func detachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType) (err error) {
	if signer.PrivateKey == nil {
		return error_.InvalidArgumentError("signing key doesn't have a private key")
	}
	if signer.PrivateKey.Encrypted {
		return error_.InvalidArgumentError("signing key is encrypted")
	}
	sig := new(packet.Signature)
	sig.SigType = sigType
	sig.PubKeyAlgo = signer.PrivateKey.PubKeyAlgo
	sig.Hash = crypto.SHA256
	sig.CreationTime = uint32(time.Seconds())
	sig.IssuerKeyId = &signer.PrivateKey.KeyId
	h, wrappedHash, err := hashForSignature(sig.Hash, sig.SigType)
	if err != nil {
		return
	}
	io.Copy(wrappedHash, message)
	err = sig.Sign(h, signer.PrivateKey)
	if err != nil {
		return
	}
	return sig.Serialize(w)
}
func (t SingleElimination) Run(array player.Array, match Match) []int {
	rand := rand.New(rand.NewSource(time.Seconds()))
	results := make([]int, array.Len())
	num_ranked := len(results) - 1
	rounds := make([][]int, intlog(array.Len())+1)
	rounds[0] = initialSeeds(array.Len())
	for r := 1; ; r++ {
		if len(rounds[r-1])/2 == 0 {
			break
		}
		rounds[r] = make([]int, len(rounds[r-1])/2)
		for i := range rounds[r] {
			a := rounds[r-1][2*i]
			b := rounds[r-1][2*i+1]
			rounds[r][i], results[num_ranked], _ = match.Play(a, b, array, rand)
			num_ranked--
		}
	}
	results[num_ranked] = rounds[len(rounds)-1][0]
	if num_ranked != 0 {
		panic("Didn't rank all the players!")
	}
	return results
}
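The bracket code above leans on two helpers that are not shown here, intlog and initialSeeds. A minimal sketch under the assumption that intlog is a floor base-2 logarithm and that initialSeeds simply lists the player indices in natural order (a real seeding would likely be more elaborate):

// Hypothetical helpers assumed by SingleElimination.Run above; the real
// implementations may differ.

// intlog returns floor(log2(n)), i.e. the number of rounds needed for n players.
func intlog(n int) int {
	r := 0
	for n > 1 {
		n /= 2
		r++
	}
	return r
}

// initialSeeds returns the player indices that make up round zero. This sketch
// uses the natural order 0..n-1; a real bracket would seed players so that the
// strongest seeds meet as late as possible.
func initialSeeds(n int) []int {
	seeds := make([]int, n)
	for i := range seeds {
		seeds[i] = i
	}
	return seeds
}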
/*
parse a storage command's parameters and read the related data;
returns a flag indicating success
*/
func (self *StorageCommand) parse(line []string) bool {
	var flags, exptime, bytes, casuniq uint64
	var err os.Error
	if len(line) < 5 {
		return Error(self.session, ClientError, "Bad storage command: missing parameters")
	} else if flags, err = strconv.Atoui64(line[2]); err != nil {
		return Error(self.session, ClientError, "Bad storage command: bad flags")
	} else if exptime, err = strconv.Atoui64(line[3]); err != nil {
		return Error(self.session, ClientError, "Bad storage command: bad expiration time")
	} else if bytes, err = strconv.Atoui64(line[4]); err != nil {
		return Error(self.session, ClientError, "Bad storage command: bad byte-length")
	} else if line[0] == "cas" {
		if casuniq, err = strconv.Atoui64(line[5]); err != nil {
			return Error(self.session, ClientError, "Bad storage command: bad cas value")
		}
	}
	self.command = line[0]
	self.key = line[1]
	self.flags = uint32(flags)
	// An expiration of 0, or one beyond a month, is stored as-is (absolute);
	// anything else is treated as an offset from the current time.
	if exptime == 0 || exptime > secondsInMonth {
		self.exptime = uint32(exptime)
	} else {
		self.exptime = uint32(time.Seconds()) + uint32(exptime)
	}
	self.bytes = uint32(bytes)
	self.cas_unique = casuniq
	if line[len(line)-1] == "noreply" {
		self.noreply = true
	}
	return self.readData()
}
// NewPool returns a new Pool that will create new connections on demand
// using connectParams, up to a maximum of maxConns outstanding connections.
// An error is returned if an initial connection cannot be created.
// Connections that have been idle for idleTimeout seconds will be
// automatically closed.
func NewPool(connectParams string, maxConns, idleTimeout int) (p *Pool, err os.Error) {
	if maxConns < 1 {
		return nil, os.NewError("maxConns must be >= 1")
	}
	if idleTimeout < 5 {
		return nil, os.NewError("idleTimeout must be >= 5")
	}
	// Create initial connection to verify connectParams will work.
	c, err := Connect(connectParams)
	if err != nil {
		return
	}
	p = &Pool{
		&pool{
			params:  connectParams,
			conns:   list.New(),
			max:     maxConns,
			n:       1,
			cond:    sync.NewCond(new(sync.Mutex)),
			timeout: int64(idleTimeout),
		},
	}
	p.conns.PushFront(poolConn{c, time.Seconds()})
	go timeoutCloser(p.pool)
	runtime.SetFinalizer(p, (*Pool).close)
	return
}
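NewPool and timeoutCloser only make sense against the Pool, pool, and poolConn types, which are not shown here. A rough, hypothetical layout inferred from the fields the surrounding code touches (the Conn type and the log helper are assumptions, and the real declarations may differ; the usual container/list and sync imports are assumed):

// Hypothetical layout of the pool types used above, inferred from the fields
// the surrounding code references; the real declarations may differ.
type poolConn struct {
	Conn  *Conn // the underlying connection (type assumed)
	atime int64 // last time the connection was used, from time.Seconds()
}

type pool struct {
	params  string     // connection parameters handed to Connect
	conns   *list.List // idle connections, most recently used at the front
	max     int        // maximum number of outstanding connections
	min     int        // minimum number of connections to keep open
	n       int        // current number of connections
	cond    *sync.Cond // guards the fields above
	timeout int64      // idle timeout in seconds
	closed  bool       // set once the pool has been shut down
}

// Pool wraps pool so a finalizer can be attached to the public handle.
type Pool struct {
	*pool
}

// log is a small diagnostic helper assumed to exist on pool.
func (p *pool) log(msg string) {
	log.Println("pool:", msg)
}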
func (ctx *Context) GetSecureCookie(name string) (string, bool) {
	cookie, ok := ctx.Request.Cookies[name]
	if !ok {
		return "", false
	}
	parts := strings.Split(cookie, "|", 3)
	if len(parts) != 3 {
		// Malformed cookie value; treat it as invalid rather than panicking.
		return "", false
	}
	val := parts[0]
	timestamp := parts[1]
	sig := parts[2]
	if getCookieSig([]byte(val), timestamp) != sig {
		return "", false
	}
	ts, _ := strconv.Atoi64(timestamp)
	if time.Seconds()-31*86400 > ts {
		// Reject cookies older than 31 days.
		return "", false
	}
	buf := bytes.NewBufferString(val)
	encoder := base64.NewDecoder(base64.StdEncoding, buf)
	res, _ := ioutil.ReadAll(encoder)
	return string(res), true
}
func main() {
	api = twitter.NewApi()
	done = make(chan bool)
	r = rand.New(rand.NewSource(time.Seconds()))
	crawl(kStart, 0)
	<-done
}
// Add a new study group, allocating its id from a datastore counter.
func (group *Group) Add(c appengine.Context) os.Error {
	count := new(Counter)
	countKey := datastore.NewKey(c, "Counter", "mycounter", 0, nil)
	countErr := datastore.RunInTransaction(c, func(c appengine.Context) os.Error {
		err := datastore.Get(c, countKey, count)
		if err != nil && err != datastore.ErrNoSuchEntity {
			return err
		}
		count.GroupCount++
		_, err = datastore.Put(c, countKey, count)
		return err
	}, nil)
	if countErr != nil {
		return countErr
	}
	group.Id = count.GroupCount
	group.CreateDate = datastore.SecondsToTime(time.Seconds())
	key := datastore.NewKey(c, "Group", "", int64(group.Id), nil)
	_, err := datastore.Put(c, key, group)
	return err
}
func (c *Comment) Save() (err os.Error) {
	newRecord := false
	if c.Id == 0 {
		newRecord = true
	}
	if newRecord {
		// New record: get the next Id for it.
		id, err := client.Incr("global:nextCommentId")
		if err != nil {
			return err
		}
		c.Id = id
		c.CreatedAt = time.Seconds()
	}
	// Store it by the primary key.
	err = client.Set(fmt.Sprintf("comment:id:%d", c.Id), []uint8(c.ToJson()))
	if err != nil {
		return err
	}
	if newRecord {
		// New record: insert it into the page listing.
		err := client.Lpush(fmt.Sprintf("comment:page_url:%s", c.PageUrl), bytes.NewBufferString(strconv.Itoa64(c.Id)).Bytes())
		if err != nil {
			return err
		}
	}
	return nil
}
func LoadWorldStore(worldPath string) (world *WorldStore, err os.Error) {
	levelData, err := loadLevelData(worldPath)
	if err != nil {
		return
	}

	// In both single-player and SMP maps, the 'spawn position' is stored in
	// the level data.
	x, xok := levelData.Lookup("Data/SpawnX").(*nbt.Int)
	y, yok := levelData.Lookup("Data/SpawnY").(*nbt.Int)
	z, zok := levelData.Lookup("Data/SpawnZ").(*nbt.Int)
	if !xok || !yok || !zok {
		err = os.NewError("Invalid map level data: does not contain Spawn{X,Y,Z}")
		log.Printf("%#v", levelData)
		return
	}
	spawnPosition := BlockXyz{
		BlockCoord(x.Value),
		BlockYCoord(y.Value),
		BlockCoord(z.Value),
	}

	var timeTicks Ticks
	if timeTag, ok := levelData.Lookup("Data/Time").(*nbt.Long); ok {
		timeTicks = Ticks(timeTag.Value)
	}

	var chunkStores []chunkstore.IChunkStore
	persistantChunkStore, err := chunkstore.ChunkStoreForLevel(worldPath, levelData, DimensionNormal)
	if err != nil {
		return
	}
	chunkStores = append(chunkStores, chunkstore.NewChunkService(persistantChunkStore))

	var seed int64
	if seedNbt, ok := levelData.Lookup("Data/RandomSeed").(*nbt.Long); ok {
		seed = seedNbt.Value
	} else {
		seed = rand.NewSource(time.Seconds()).Int63()
	}
	chunkStores = append(chunkStores, chunkstore.NewChunkService(generation.NewTestGenerator(seed)))

	for _, store := range chunkStores {
		go store.Serve()
	}

	world = &WorldStore{
		WorldPath:     worldPath,
		Seed:          seed,
		Time:          timeTicks,
		LevelData:     levelData,
		ChunkStore:    chunkstore.NewChunkService(chunkstore.NewMultiStore(chunkStores)),
		SpawnPosition: spawnPosition,
	}
	go world.ChunkStore.Serve()
	return
}
// scrape scrapes the given URL and saves it to disk.
func scrape(url string, done chan bool) {
	// Notify main when we're done right after we return.
	defer func() { done <- true }() // Anonymous functions ftw!

	fmt.Printf("Scraping %s...\n", url)
	defer fmt.Printf("Finished scraping %s\n", url)

	// Don't make the user type "http://" for every freaking URL!
	if !strings.Contains(url, "://") {
		url = "http://" + url
	}

	// Download website contents
	req, err := http.Get(url)
	if somethingBroke(err) {
		return
	}

	// Save contents to variable
	contents, err := ioutil.ReadAll(req.Body)
	defer req.Body.Close()
	if somethingBroke(err) {
		return
	}

	// Write contents to disk. TODO: Store URL, text data in a DB
	url = strings.Replace(url, "/", "___", -1)
	filename := fmt.Sprintf("%v-%v", url, time.Seconds())
	err = ioutil.WriteFile(SCRAPES_DIR+filename, contents, 0644)
	if somethingBroke(err) {
		return
	}
}
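scrape signals completion on the done channel, so a caller can fan out one goroutine per URL and then drain the channel. A minimal, hypothetical driver for it (the somethingBroke helper and SCRAPES_DIR constant are assumed to exist elsewhere in the package):

// Hypothetical driver for scrape: one goroutine per URL, then wait for all of
// them by draining the shared done channel.
func main() {
	urls := os.Args[1:]
	done := make(chan bool)
	for _, u := range urls {
		go scrape(u, done)
	}
	for i := 0; i < len(urls); i++ {
		<-done
	}
}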
// Subscribe registers a new subscriber. It takes If-Modified-Since and Etag
// arguments to determine the requested message. If a suitable message is
// immediately available (or a conflict has occurred), only the message will
// be returned. If the interval polling mechanism is used, it will return
// immediately but with zero'd return values. Otherwise a list.Element is
// returned, whose value is a channel of *Message type, that might eventually
// receive the desired message.
func (c *channel) Subscribe(since int64, etag int) (*list.Element, *Message) {
	c.lock.Lock()
	defer c.lock.Unlock()
	c.stats.LastRequested = time.Seconds()
	switch c.config.ConcurrencyMode {
	case ConcurrencyModeLIFO:
		c.publish(conflictMessage, false)
	case ConcurrencyModeFILO:
		if c.stats.Subscribers > 0 {
			return nil, conflictMessage
		}
	}
	for _, m := range c.queue {
		if m.time >= since {
			if m.time == since && m.etag <= etag {
				continue
			}
			c.stats.Delivered++
			return nil, m
		}
	}
	if c.config.PollingMechanism == PollingMechanismInterval {
		return nil, nil
	}
	ch := make(chan *Message, 0)
	elem := c.subscribers.PushBack((chan *Message)(ch))
	c.stats.Subscribers++
	return elem, nil
}
// Close connections that have been idle for > p.timeout seconds.
func timeoutCloser(p *pool) {
	for p != nil && !p.closed {
		p.cond.L.Lock()
		now := time.Seconds()
		delay := 1e9 * p.timeout
		for p.conns.Len() > 0 {
			front := p.conns.Front()
			pc := front.Value.(poolConn)
			atime := pc.atime
			if (now - atime) > p.timeout {
				pc.Conn.Close()
				p.conns.Remove(front)
				p.n--
				p.log("idle connection closed")
			} else {
				// Wait until first connection would timeout if it isn't used.
				delay = 1e9 * (p.timeout - (now - atime) + 1)
				break
			}
		}
		p.cond.L.Unlock()
		time.Sleep(delay)
	}
	p.log("timeoutCloser finished")
}
func (t RoundRobin) Run(array player.Array, match Match) []int {
	rand := rand.New(rand.NewSource(time.Seconds()))
	results := make([]int, array.Len())
	wins := make([]int, array.Len())
	// Every player plays every other once; sort players by number of wins.
	for a := 0; a < array.Len(); a++ {
		for b := a + 1; b < array.Len(); b++ {
			winner, _, _ := match.Play(a, b, array, rand)
			wins[winner]++
		}
	}
	for i := 0; i < len(results); i++ {
		best := 0
		for j := range wins {
			if wins[j] > wins[best] {
				best = j
			}
		}
		results[i] = best
		wins[best] = -1
	}
	return results
}
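Both tournament runners above call match.Play with the same shape, but the Match interface itself is not shown. A plausible declaration inferred from the call sites; treating the third return value as an error is an assumption, and the real declaration may differ:

// Hypothetical shape of the Match interface used by the tournament runners
// above, inferred from the call sites.
type Match interface {
	// Play runs one match between the players at indices a and b in the
	// array, using r for any randomness, and reports the winner and loser.
	Play(a, b int, array player.Array, r *rand.Rand) (winner, loser int, err os.Error)
}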
// SaveNote stores a note in the database.
func (store *MysqlStore) SaveNote(db *mysql.Client, note *Note) error {
	if note.Id > 0 {
		// update
		// TODO use the id AND the author as the key, for safety (so that one
		// cannot erase somebody else's note) (same for delete)
		fmt.Println("update de note")
	} else {
		// insert
		sql := "insert into note (auteur, type_sujet, id_sujet, x_sujet, y_sujet, z_sujet, partage, date_changement, contenu, diplo)"
		sql += " values ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"
		stmt, err := db.Prepare(sql)
		if err != nil {
			return err
		}
		defer stmt.FreeResult()
		seconds := time.Seconds()
		err = stmt.BindParams(note.Auteur, note.TypeSujet, note.IdSujet, note.XSujet, note.YSujet, note.ZSujet, note.Partage, seconds, note.Contenu, note.Diplo)
		if err != nil {
			// FIXME the error does not seem to be passed back to the caller???
			fmt.Printf("Erreur stockage (in) note : %s\n", err.Error())
			return err
		}
		err = stmt.Execute()
		if err != nil {
			return err
		}
	}
	return nil
}
func (c *ChokeMgr) RequestPeers() []*PeerChoke {
	lastPiece := int64(0)

	// Request info
	//log.Println("ChokeMgr -> Receiving from channels")
	//c.inStats <- inStats
	stats := c.stats.GetStats()
	list := c.peerMgr.GetPeers()
	//log.Println("ChokeMgr -> Finished receiving")

	// Prepare peer array
	peers := make([]*PeerChoke, 0, 10)
	for addr, peer := range list {
		//log.Println("ChokeMgr -> Checking if completed")
		if peer.Connected() && !peer.Completed() {
			//log.Println("ChokeMgr -> Not completed, adding to list")
			p := new(PeerChoke)
			p.am_choking, p.am_interested, p.peer_choking, p.peer_interested, lastPiece = peer.Am_choking(), peer.Am_interested(), peer.Peer_choking(), peer.Peer_interested(), peer.LastPiece()
			now := time.Seconds()
			if ((now - lastPiece) > SNUBBED_PERIOD) && p.am_interested {
				p.snubbed = true
			}
			p.peer = peer
			if stat, ok := stats[addr]; ok {
				p.speed = stat.Speed
			}
			peers = append(peers, p)
		}
		//log.Println("ChokeMgr -> Finished processing peer")
	}
	//log.Println("ChokeMgr -> Returning list with len:", len(peers))
	return peers
}
func TestParse(t *testing.T) {
	// good
	sec := time.Seconds()
	login := "******"
	key := []byte("another secret key")
	c := New(login, sec, key)
	l, e, err := Parse(c, key)
	if err != nil {
		t.Errorf("error parsing valid cookie: %s", err)
	}
	if l != login {
		t.Errorf("login: expected %q, got %q", login, l)
	}
	if e != sec {
		t.Errorf("expiration: expected %d, got %d", sec, e)
	}
	// bad
	key = []byte("secret key")
	bad := []string{
		"",
		"AAAAKvgQ2I_RGePVk9oAu55q-Valnf__Fx_hlTM-dLwYxXOf",
		"badcookie",
		"AAAAAKmhlbGxvIHdvcmxk9p6koQvSacAeliAm445i7errSk1NPkYJGYZhF93wG9U=",
		"zAAAKmhlbGxvIHdvcmxk9p6koQvSacAeliAm445i7errSk1NPkYJGYZhF93wG9U=",
		"AAAAAKmhlbGxvIHdvcmxk9p6kiQvSacAeliAm445i7errSk1NPkYJGYZhF93wG9U=",
	}
	for _, v := range bad {
		_, _, err := Parse(v, key)
		if err == nil {
			t.Errorf("bad cookie didn't return error: %q", v)
		}
	}
}
func register(w http.ResponseWriter, r *http.Request) {
	c := appengine.NewContext(r)
	g := Member{
		Usern:    r.FormValue("usern"),
		Name:     r.FormValue("name"),
		Passwd:   r.FormValue("passwd"),
		Repasswd: r.FormValue("repasswd"),
		Phone:    r.FormValue("phone"),
		Email:    r.FormValue("email"),
		Study:    r.FormValue("study"),
		Address:  r.FormValue("address"),
		Date:     datastore.SecondsToTime(time.Seconds()),
	}
	if g.Passwd == g.Repasswd && g.Usern != "" && g.Name != "" && g.Phone != "" && g.Email != "" {
		_, err := datastore.Put(c, datastore.NewIncompleteKey("Member"), &g)
		if err != nil {
			http.Error(w, err.String(), http.StatusInternalServerError)
			return
		}
	} else {
		// Incomplete or mismatched form: send the user back to the sign-in page.
		http.Redirect(w, r, "/signin", http.StatusFound)
		return
	}
	http.Redirect(w, r, "/view", http.StatusFound)
}