Example #1
// shouldRunContinuousQuery returns true if the CQ should be scheduled to run. It uses the
// CQ's last run time and the run rules set through the query to determine
// whether this CQ should run now.
func (cq *ContinuousQuery) shouldRunContinuousQuery(now time.Time) (bool, time.Time, error) {
	// if it's not aggregated we don't run it
	if cq.q.IsRawQuery {
		return false, cq.LastRun, errors.New("continuous queries must be aggregate queries")
	}

	// since it's aggregated we need to figure how often it should be run
	interval, err := cq.q.GroupByInterval()
	if err != nil {
		return false, cq.LastRun, err
	}

	// allow the interval to be overwritten by the query's resample options
	resampleEvery := interval
	if cq.Resample.Every != 0 {
		resampleEvery = cq.Resample.Every
	}

	// if enough time has passed since the last run, or there was no last run, run it now
	if cq.HasRun {
		nextRun := cq.LastRun.Add(resampleEvery)
		if nextRun.UnixNano() <= now.UnixNano() {
			return true, nextRun, nil
		}
	} else {
		return true, now, nil
	}

	return false, cq.LastRun, nil
}
Example #2
func setClock(t time.Time) error {
	tv := syscall.NsecToTimeval(t.UnixNano())
	if err := syscall.Settimeofday(&tv); err != nil {
		return errors.New("settimeofday: " + err.Error())
	}
	return nil
}
Example #3
func (d *deadline) setTime(t time.Time) {
	if t.IsZero() {
		d.set(0)
	} else {
		d.set(t.UnixNano())
	}
}
Example #4
// Route routes an update packet to the correct server.
func (r *Router) Route(cancelSignal <-chan bool, uaid, chid string, version int64, sentAt time.Time, logID string) (err error) {
	startTime := time.Now()
	locator := r.Locator()
	if locator == nil {
		if r.logger.ShouldLog(ERROR) {
			r.logger.Error("router", "No discovery service set; unable to route message",
				LogFields{"rid": logID, "uaid": uaid, "chid": chid})
		}
		r.metrics.Increment("router.broadcast.error")
		return ErrNoLocator
	}
	segment := capn.NewBuffer(nil)
	routable := NewRootRoutable(segment)
	routable.SetChannelID(chid)
	routable.SetVersion(version)
	routable.SetTime(sentAt.UnixNano())
	contacts, err := locator.Contacts(uaid)
	if err != nil {
		if r.logger.ShouldLog(CRITICAL) {
			r.logger.Critical("router", "Could not query discovery service for contacts",
				LogFields{"rid": logID, "error": err.Error()})
		}
		r.metrics.Increment("router.broadcast.error")
		return err
	}
	if r.logger.ShouldLog(DEBUG) {
		r.logger.Debug("router", "Fetched contact list from discovery service",
			LogFields{"rid": logID, "servers": strings.Join(contacts, ", ")})
	}
	if r.logger.ShouldLog(INFO) {
		r.logger.Info("router", "Sending push...", LogFields{
			"rid":     logID,
			"uaid":    uaid,
			"chid":    chid,
			"version": strconv.FormatInt(version, 10),
			"time":    strconv.FormatInt(sentAt.UnixNano(), 10)})
	}
	ok, err := r.notifyAll(cancelSignal, contacts, uaid, segment, logID)
	endTime := time.Now()
	if err != nil {
		if r.logger.ShouldLog(WARNING) {
			r.logger.Warn("router", "Could not post to server",
				LogFields{"rid": logID, "error": err.Error()})
		}
		r.metrics.Increment("router.broadcast.error")
		return err
	}
	var counterName, timerName string
	if ok {
		counterName = "router.broadcast.hit"
		timerName = "updates.routed.hits"
	} else {
		counterName = "router.broadcast.miss"
		timerName = "updates.routed.misses"
	}
	r.metrics.Increment(counterName)
	r.metrics.Timer(timerName, endTime.Sub(sentAt))
	r.metrics.Timer("router.handled", endTime.Sub(startTime))
	return nil
}
Example #5
func (sl *scrapeLoop) report(start time.Time, duration time.Duration, err error) {
	sl.scraper.report(start, duration, err)

	ts := model.TimeFromUnixNano(start.UnixNano())

	var health model.SampleValue
	if err == nil {
		health = 1
	}

	healthSample := &model.Sample{
		Metric: model.Metric{
			model.MetricNameLabel: scrapeHealthMetricName,
		},
		Timestamp: ts,
		Value:     health,
	}
	durationSample := &model.Sample{
		Metric: model.Metric{
			model.MetricNameLabel: scrapeDurationMetricName,
		},
		Timestamp: ts,
		Value:     model.SampleValue(float64(duration) / float64(time.Second)),
	}

	sl.reportAppender.Append(healthSample)
	sl.reportAppender.Append(durationSample)
}
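Both synthetic samples above are stamped with model.TimeFromUnixNano(start.UnixNano()). As a minimal sketch, assuming Prometheus' model.Time is a millisecond-resolution Unix timestamp (which is what that helper yields), the conversion boils down to integer division by the number of nanoseconds per millisecond:

package main

import (
	"fmt"
	"time"
)

func main() {
	start := time.Date(2016, 3, 1, 12, 0, 0, 123456789, time.UTC)
	// Equivalent of model.TimeFromUnixNano(start.UnixNano()) under the
	// assumption stated above: milliseconds since the Unix epoch.
	ms := start.UnixNano() / int64(time.Millisecond)
	fmt.Println(ms) // 1456833600123
}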
Example #6
// advance cycles the buckets at each level until the latest bucket in
// each level can hold the time specified.
func (ts *timeSeries) advance(t time.Time) {
	if !t.After(ts.levels[0].end) {
		return
	}
	for i := 0; i < len(ts.levels); i++ {
		level := ts.levels[i]
		if !level.end.Before(t) {
			break
		}

		// If the time is sufficiently far, just clear the level and advance
		// directly.
		if !t.Before(level.end.Add(level.size * time.Duration(ts.numBuckets))) {
			for _, b := range level.buckets {
				ts.resetObservation(b)
			}
			level.end = time.Unix(0, (t.UnixNano()/level.size.Nanoseconds())*level.size.Nanoseconds())
		}

		for t.After(level.end) {
			level.end = level.end.Add(level.size)
			level.newest = level.oldest
			level.oldest = (level.oldest + 1) % ts.numBuckets
			ts.resetObservation(level.buckets[level.newest])
		}

		t = level.end
	}
}
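The expression time.Unix(0, (t.UnixNano()/level.size.Nanoseconds())*level.size.Nanoseconds()) rounds the time down to a bucket boundary measured from the Unix epoch. A small self-contained sketch of that alignment (the bucket size and time here are made up):

package main

import (
	"fmt"
	"time"
)

func main() {
	size := 5 * time.Minute
	t := time.Date(2016, 7, 1, 10, 13, 37, 0, time.UTC)
	aligned := time.Unix(0, (t.UnixNano()/size.Nanoseconds())*size.Nanoseconds())
	fmt.Println(aligned.UTC()) // 2016-07-01 10:10:00 +0000 UTC
}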
Example #7
// Backup will write a tar archive of any TSM files modified since the passed-
// in time to the passed-in writer. The basePath will be prepended to the names
// of the files in the archive. It will force a snapshot of the WAL first,
// then perform the backup with a read lock against the file store. This means
// that new TSM files will not be able to be created in this shard while the
// backup is running. For shards that are still actively getting writes, this
// could cause the WAL to back up, increasing memory usage and eventually rejecting writes.
func (e *Engine) Backup(w io.Writer, basePath string, since time.Time) error {
	if err := e.WriteSnapshot(); err != nil {
		return err
	}
	e.FileStore.mu.RLock()
	defer e.FileStore.mu.RUnlock()

	var files []FileStat

	// grab all the files and tombstones that have a modified time after since
	for _, f := range e.FileStore.files {
		if stat := f.Stats(); stat.LastModified > since.UnixNano() {
			files = append(files, f.Stats())
		}
		for _, t := range f.TombstoneFiles() {
			if t.LastModified > since.UnixNano() {
				files = append(files, f.Stats())
			}
		}
	}

	tw := tar.NewWriter(w)
	defer tw.Close()

	for _, f := range files {
		if err := e.writeFileToBackup(f, basePath, tw); err != nil {
			return err
		}
	}

	return nil
}
Example #8
// Given a set of arguments for index, type, id, and data, create a set of bytes formatted for a bulk index request
// http://www.elasticsearch.org/guide/reference/api/bulk.html
func WriteBulkBytes(op string, index string, _type string, id, parent, ttl string, date *time.Time, data interface{}) ([]byte, error) {
	// only index and update are currently supported
	if op != "index" && op != "update" && op != "create" {
		return nil, errors.New(fmt.Sprintf("Operation '%s' is not yet supported", op))
	}

	// First line
	buf := bytes.Buffer{}
	buf.WriteString(fmt.Sprintf(`{"%s":{"_index":"`, op))
	buf.WriteString(index)
	buf.WriteString(`","_type":"`)
	buf.WriteString(_type)
	buf.WriteString(`"`)
	if len(id) > 0 {
		buf.WriteString(`,"_id":"`)
		buf.WriteString(id)
		buf.WriteString(`"`)
	}

	if len(parent) > 0 {
		buf.WriteString(`,"_parent":"`)
		buf.WriteString(parent)
		buf.WriteString(`"`)
	}

	if op == "update" {
		buf.WriteString(`,"_retry_on_conflict":3`)
	}

	if len(ttl) > 0 {
		buf.WriteString(`,"ttl":"`)
		buf.WriteString(ttl)
		buf.WriteString(`"`)
	}
	if date != nil {
		buf.WriteString(`,"_timestamp":"`)
		buf.WriteString(strconv.FormatInt(date.UnixNano()/1e6, 10))
		buf.WriteString(`"`)
	}

	buf.WriteString(`}}`)
	buf.WriteRune('\n')
	//buf.WriteByte('\n')
	switch v := data.(type) {
	case *bytes.Buffer:
		io.Copy(&buf, v)
	case []byte:
		buf.Write(v)
	case string:
		buf.WriteString(v)
	default:
		body, jsonErr := json.Marshal(data)
		if jsonErr != nil {
			return nil, jsonErr
		}
		buf.Write(body)
	}
	buf.WriteRune('\n')
	return buf.Bytes(), nil
}
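For reference, a hedged usage sketch of the function above; the index, type, id, and document values are invented for illustration, and the snippet assumes it lives in the same package with log and os imported. With these arguments it should produce the two newline-delimited JSON lines that the Elasticsearch bulk endpoint expects:

func exampleBulkLines() {
	// Illustrative values only; op "index" with no parent, ttl, or timestamp.
	b, err := WriteBulkBytes("index", "logs", "event", "42", "", "", nil, `{"msg":"hello"}`)
	if err != nil {
		log.Fatal(err)
	}
	os.Stdout.Write(b)
	// Output:
	// {"index":{"_index":"logs","_type":"event","_id":"42"}}
	// {"msg":"hello"}
}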
Example #9
func (sl *scrapeLoop) report(start time.Time, duration time.Duration, err error) {
	sl.scraper.report(start, duration, err)

	ts := model.TimeFromUnixNano(start.UnixNano())

	var health model.SampleValue
	if err == nil {
		health = 1
	}

	healthSample := &model.Sample{
		Metric: model.Metric{
			model.MetricNameLabel: scrapeHealthMetricName,
		},
		Timestamp: ts,
		Value:     health,
	}
	durationSample := &model.Sample{
		Metric: model.Metric{
			model.MetricNameLabel: scrapeDurationMetricName,
		},
		Timestamp: ts,
		Value:     model.SampleValue(float64(duration) / float64(time.Second)),
	}

	if err := sl.reportAppender.Append(healthSample); err != nil {
		log.With("sample", healthSample).With("error", err).Warn("Scrape health sample discarded")
	}
	if err := sl.reportAppender.Append(durationSample); err != nil {
		log.With("sample", durationSample).With("error", err).Warn("Scrape duration sample discarded")
	}
}
Example #10
func (ss *SimpleRate) Touch(key string, nowTs time.Time) {
	var (
		found  bool
		bucket *srateBucket
		now    = nowTs.UnixNano()
	)
	bucket, found = ss.hash[key]
	if found {
		// we already have the correct bucket
	} else if len(ss.heap) < ss.size {
		// create new bucket
		bucket = &srateBucket{}
		ss.hash[key] = bucket
		bucket.key = key
		heap.Push(&ss.heap, bucket)
	} else {
		// use minimum bucket
		bucket = ss.heap[0]
		delete(ss.hash, bucket.key)
		ss.hash[key] = bucket
		bucket.error, bucket.errorTs, bucket.errorRate =
			bucket.count, bucket.countTs, bucket.countRate
		bucket.key = key
	}

	bucket.count += 1
	bucket.countRate = ss.count(bucket.countRate, bucket.countTs, now)
	bucket.countTs = now

	heap.Fix(&ss.heap, bucket.index)
}
Example #11
func passDetails(
	binary_path string,
	begin_time time.Time,
	duration time.Duration,
	latitude_degrees,
	longitude_degrees,
	elevation_metres float64,
	tle string,
	resolution_seconds float64) ([]SatPoint, error) {

	begin_timestamp := 1e-9 * float64(begin_time.UnixNano())

	c := exec.Command("python", binary_path)
	in_pipe, err := c.StdinPipe()
	if err != nil {
		return nil, err
	}
	defer in_pipe.Close()
	out_pipe, err := c.StdoutPipe()
	if err != nil {
		return nil, err
	}
	defer out_pipe.Close()
	if err := c.Start(); err != nil {
		return nil, err
	}
	defer c.Wait()

	fmt.Fprintf(in_pipe, "%f\n%f\n%f\n%f\n%f\n%s\n%f\n",
		begin_timestamp,
		duration.Seconds(),
		latitude_degrees,
		longitude_degrees,
		elevation_metres,
		tle,
		resolution_seconds)
	in_pipe.Close()

	var n int
	_, err = fmt.Fscanln(out_pipe, &n)
	if err != nil {
		return nil, err
	}

	r := make([]SatPoint, n)
	for i := 0; i < n; i++ {
		p := &r[i]
		fmt.Fscanln(out_pipe,
			&p.Timestamp,
			&p.AzimuthDegrees,
			&p.AltitudeDegrees,
			&p.Range,
			&p.RangeVelocity,
			&p.LatitudeDegrees,
			&p.LongitudeDegrees,
			&p.Elevation,
			&p.IsEclipsed)
	}
	return r, nil
}
Example #12
func (h *MatrixLoginHandler) makeLoginResponse(userID string, expires time.Time, nonce string) (*matrixLoginResponse, error) {

	var response matrixLoginResponse

	accessToken, err := macaroon.New([]byte(h.macaroonSecret), "key", h.serverName)
	if err != nil {
		return nil, err
	}
	accessToken.AddFirstPartyCaveat("gen = 1")
	accessToken.AddFirstPartyCaveat(fmt.Sprintf("user_id = %s", userID))
	refreshToken := accessToken.Clone()

	accessToken.AddFirstPartyCaveat("type = access")
	accessToken.AddFirstPartyCaveat(fmt.Sprintf("time < %d", expires.UnixNano()/1000000))

	refreshToken.AddFirstPartyCaveat("type = refresh")
	refreshToken.AddFirstPartyCaveat(fmt.Sprintf("nonce = %s", nonce))

	if response.AccessToken, err = encodeMacaroon(accessToken); err != nil {
		return nil, err
	}

	if response.RefreshToken, err = encodeMacaroon(refreshToken); err != nil {
		return nil, err
	}

	response.HomeServer = h.serverName
	response.UserID = userID

	return &response, nil
}
Example #13
// TimeToTimestamp is a utility function that converts a time.Time into
// a *google_protobuf.Timestamp.
func TimeToTimestamp(t time.Time) *google_protobuf.Timestamp {
	unixNano := t.UnixNano()
	return &google_protobuf.Timestamp{
		Seconds: unixNano / int64(time.Second),
		Nanos:   int32(unixNano % int64(time.Second)),
	}
}
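A minimal, self-contained sketch of the same decomposition without the protobuf dependency: UnixNano is split into whole seconds and the nanosecond remainder, which is what the Seconds and Nanos fields carry. Note that for instants before the Unix epoch the remainder is negative, which a well-formed Timestamp does not allow; the sketch only covers the post-1970 case.

package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.Date(2015, 6, 1, 0, 0, 0, 500000000, time.UTC)
	unixNano := t.UnixNano()
	secs := unixNano / int64(time.Second)  // 1433116800
	nanos := unixNano % int64(time.Second) // 500000000
	fmt.Println(secs, nanos)
}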
Example #14
func (d *Datum) stamp(timestamp time.Time) {
	if timestamp.IsZero() {
		atomic.StoreInt64(&d.Time, time.Now().UTC().UnixNano())
	} else {
		atomic.StoreInt64(&d.Time, timestamp.UnixNano())
	}
}
Example #15
// pnTime converts Go time.Time to Proton millisecond Unix time.
func pnTime(t time.Time) C.pn_timestamp_t {
	secs := t.Unix()
	// Note: sub-second accuracy is not guaranteed if the Unix time in
	// nanoseconds cannot be represented by an int64 (sometime around year 2262)
	msecs := (t.UnixNano() % int64(time.Second)) / int64(time.Millisecond)
	return C.pn_timestamp_t(secs*1000 + msecs)
}
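For comparison, a cgo-free sketch of the same arithmetic (the helper name unixMillis is mine, not from the source): whole seconds times 1000 plus the millisecond part of the sub-second remainder.

package main

import (
	"fmt"
	"time"
)

func unixMillis(t time.Time) int64 {
	secs := t.Unix()
	msecs := (t.UnixNano() % int64(time.Second)) / int64(time.Millisecond)
	return secs*1000 + msecs
}

func main() {
	fmt.Println(unixMillis(time.Date(2016, 1, 1, 0, 0, 0, 250000000, time.UTC))) // 1451606400250
}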
Example #16
func (actual ActualLRP) ShouldRestartCrash(now time.Time, calc RestartCalculator) bool {
	if actual.State != ActualLRPStateCrashed {
		return false
	}

	return calc.ShouldRestart(now.UnixNano(), actual.Since, actual.CrashCount)
}
Example #17
// findTraceTimes finds the minimum and maximum timespan event times for the
// given set of events, or returns ok == false if there are no such events.
func findTraceTimes(events []Event) (start, end time.Time, ok bool) {
	// Find the start and end time of the trace.
	var (
		eStart, eEnd time.Time
		haveTimes    = false
	)
	for _, e := range events {
		e, ok := e.(TimespanEvent)
		if !ok {
			continue
		}
		if !haveTimes {
			haveTimes = true
			eStart = e.Start()
			eEnd = e.End()
			continue
		}
		if v := e.Start(); v.UnixNano() < eStart.UnixNano() {
			eStart = v
		}
		if v := e.End(); v.UnixNano() > eEnd.UnixNano() {
			eEnd = v
		}
	}
	if !haveTimes {
		// We didn't find any timespan events at all, so we're done here.
		ok = false
		return
	}
	return eStart, eEnd, true
}
Example #18
// Writing to a Socket channel is slightly more involved than reading from
// it. If the channel was closed, it must be reestablished.
func (t *SocketChannel) Write(p []byte) (n int, err error) {
	var deadline time.Time
	if t.timeout > 0 {
		deadline = time.Now().Add(t.timeout)
	}
	for {
		if deadline.UnixNano() > 0 {
			t.conn.SetWriteDeadline(deadline)
		}
		n, err = t.conn.Write(p)
		if err == nil {
			return
		} else {
			if err == io.ErrClosedPipe {
				t.conn.Close()
				// Don't try to wait beyond the deadline, that's pointless.
				if time.Now().After(deadline) {
					return
				}
				if t.timeout > 0 {
					t.conn, err = net.DialTimeout(t.proto, t.peerAddr, t.timeout)
				} else {
					t.conn, err = net.Dial(t.proto, t.peerAddr)
				}
				if err != nil {
					return
				}
			} else {
				return
			}
		}
	}
}
Example #19
func recordScrapeHealth(
	sampleAppender storage.SampleAppender,
	timestamp time.Time,
	baseLabels model.LabelSet,
	health TargetHealth,
	scrapeDuration time.Duration,
) {
	healthMetric := make(model.Metric, len(baseLabels)+1)
	durationMetric := make(model.Metric, len(baseLabels)+1)

	healthMetric[model.MetricNameLabel] = scrapeHealthMetricName
	durationMetric[model.MetricNameLabel] = scrapeDurationMetricName

	for ln, lv := range baseLabels {
		healthMetric[ln] = lv
		durationMetric[ln] = lv
	}

	ts := model.TimeFromUnixNano(timestamp.UnixNano())

	healthSample := &model.Sample{
		Metric:    healthMetric,
		Timestamp: ts,
		Value:     health.value(),
	}
	durationSample := &model.Sample{
		Metric:    durationMetric,
		Timestamp: ts,
		Value:     model.SampleValue(float64(scrapeDuration) / float64(time.Second)),
	}

	sampleAppender.Append(healthSample)
	sampleAppender.Append(durationSample)
}
Example #20
// Slot returns a pointer to the Moment structure corresponding to time t.
func (x *SlidingMoment) Slot(t time.Time) *Moment {
	slot := t.UnixNano() / x.slotdur
	if !x.spin(slot) {
		return nil
	}
	return &x.slots[int(slot%int64(len(x.slots)))]
}
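A self-contained sketch of the indexing above: divide the absolute nanosecond timestamp by the slot width to get a global slot number, then wrap it into the ring. The slot width and ring size here are invented for illustration.

package main

import (
	"fmt"
	"time"
)

func main() {
	slotdur := int64(time.Second) // stand-in for x.slotdur: width of one slot in nanoseconds
	ring := make([]int, 8)        // stand-in for x.slots
	t := time.Now()
	slot := t.UnixNano() / slotdur
	idx := int(slot % int64(len(ring)))
	fmt.Println(slot, idx)
}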
Example #21
// loadBufferedEvents iterates over the cached events in the buffer
// and returns those that were emitted between two specific dates.
// It uses `time.Unix(seconds, nanoseconds)` to generate valid dates with those arguments.
// It filters those buffered messages with a topic function if it's not nil, otherwise it adds all messages.
func (e *Events) loadBufferedEvents(since, until time.Time, topic func(interface{}) bool) []eventtypes.Message {
	var buffered []eventtypes.Message
	if since.IsZero() && until.IsZero() {
		return buffered
	}

	var sinceNanoUnix int64
	if !since.IsZero() {
		sinceNanoUnix = since.UnixNano()
	}

	var untilNanoUnix int64
	if !until.IsZero() {
		untilNanoUnix = until.UnixNano()
	}

	for i := len(e.events) - 1; i >= 0; i-- {
		ev := e.events[i]

		if ev.TimeNano < sinceNanoUnix {
			break
		}

		if untilNanoUnix > 0 && ev.TimeNano > untilNanoUnix {
			continue
		}

		if topic == nil || topic(ev) {
			buffered = append([]eventtypes.Message{ev}, buffered...)
		}
	}
	return buffered
}
Example #22
func (t *Target) report(app storage.SampleAppender, start time.Time, duration time.Duration, err error) {
	t.status.setLastScrape(start)
	t.status.setLastError(err)

	ts := model.TimeFromUnixNano(start.UnixNano())

	var health model.SampleValue
	if err == nil {
		health = 1
	}

	healthSample := &model.Sample{
		Metric: model.Metric{
			model.MetricNameLabel: scrapeHealthMetricName,
		},
		Timestamp: ts,
		Value:     health,
	}
	durationSample := &model.Sample{
		Metric: model.Metric{
			model.MetricNameLabel: scrapeDurationMetricName,
		},
		Timestamp: ts,
		Value:     model.SampleValue(float64(duration) / float64(time.Second)),
	}

	app = t.wrapReportingAppender(app)

	app.Append(healthSample)
	app.Append(durationSample)
}
Example #23
// evictBefore evicts traces that were created before t. The rs.mu lock
// must be held while calling evictBefore.
func (rs *RecentStore) evictBefore(t time.Time) {
	evictStart := time.Now()
	rs.lastEvicted = evictStart
	tnano := t.UnixNano()
	var toEvict []ID
	for id, ct := range rs.created {
		if ct < tnano {
			toEvict = append(toEvict, id)
			delete(rs.created, id)
		}
	}
	if len(toEvict) == 0 {
		return
	}

	if rs.Debug {
		log.Printf("RecentStore: deleting %d traces created before %s (age check took %s)", len(toEvict), t, time.Since(evictStart))
	}

	// Spawn separate goroutine so we don't hold the rs.mu lock.
	go func() {
		deleteStart := time.Now()
		if err := rs.DeleteStore.Delete(toEvict...); err != nil {
			log.Printf("RecentStore: failed to delete traces: %s", err)
		}
		if rs.Debug {
			log.Printf("RecentStore: finished deleting %d traces created before %s (took %s)", len(toEvict), t, time.Since(deleteStart))
		}
	}()
}
Example #24
func (s *scheduler) schedule(e event) (err error) {
	var on time.Time
	var duration time.Duration
	now := time.Now()
	zone, _ := now.Zone()
	if on, err = time.Parse("2006-01-02 "+time.Kitchen+" MST", now.Format("2006-01-02 ")+e.When+" "+zone); err != nil {
		log.Println("could not parse when of '" + e.When + "' for " + e.What)
		return
	}
	if duration, err = time.ParseDuration(e.Interval); err != nil {
		log.Println("could not parse interval of '" + e.Interval + "' for " + e.What)
		return
	}

	go func() {
		log.Println("scheduled '" + e.What + "' for: " + on.String())
		wait := time.Duration((on.UnixNano() - time.Now().UnixNano()) % int64(duration))
		if wait < 0 {
			wait += duration
		}
		time.Sleep(wait)
		s.maybeRun(time.Now(), e)
		for t := range time.NewTicker(duration).C {
			s.maybeRun(t, e)
		}
	}()
	return
}
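The wait calculation above finds the delay until the next occurrence of the parsed wall-clock time, allowing for an anchor that already lies in the past. A self-contained sketch of that arithmetic (the times and the untilNext name are made up for illustration):

package main

import (
	"fmt"
	"time"
)

func untilNext(on, now time.Time, interval time.Duration) time.Duration {
	wait := time.Duration((on.UnixNano() - now.UnixNano()) % int64(interval))
	if wait < 0 {
		wait += interval
	}
	return wait
}

func main() {
	now := time.Date(2016, 5, 1, 10, 30, 0, 0, time.UTC)
	on := time.Date(2016, 5, 1, 9, 0, 0, 0, time.UTC) // today's occurrence already passed
	fmt.Println(untilNext(on, now, 24*time.Hour)) // 22h30m0s
}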
Example #25
// ===== CFDate =====
func convertTimeToCFDate(t time.Time) C.CFDateRef {
	// truncate to milliseconds, to get a more predictable conversion
	ms := int64(time.Duration(t.UnixNano()) / time.Millisecond * time.Millisecond)
	nano := C.double(ms) / C.double(time.Second)
	nano -= C.kCFAbsoluteTimeIntervalSince1970
	return C.CFDateCreate(nil, C.CFAbsoluteTime(nano))
}
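A cgo-free sketch of the epoch shift performed above: CFAbsoluteTime counts seconds since 2001-01-01 00:00:00 GMT, so the Unix-based seconds are reduced by the 1970-to-2001 interval (978307200 s, the value of kCFAbsoluteTimeIntervalSince1970). The millisecond truncation from the original is omitted here for brevity.

package main

import (
	"fmt"
	"time"
)

const absoluteTimeIntervalSince1970 = 978307200 // seconds between the Unix and Core Foundation epochs

func toCFAbsoluteTime(t time.Time) float64 {
	return float64(t.UnixNano())/float64(time.Second) - absoluteTimeIntervalSince1970
}

func main() {
	ref := time.Date(2001, 1, 1, 0, 0, 0, 0, time.UTC)
	fmt.Println(toCFAbsoluteTime(ref)) // 0
}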
Example #26
func buildExpectedLog(timestamp time.Time, requestId, method, path, sourceHost, sourcePort, dstHost, dstPort string) string {
	extensions := []string{
		fmt.Sprintf("rt=%d", timestamp.UnixNano()/int64(time.Millisecond)),
		"cs1Label=userAuthenticationMechanism",
		"cs1=oauth-access-token",
		"cs2Label=vcapRequestId",
		"cs2=" + requestId,
		"request=" + path,
		"requestMethod=" + method,
		"src=" + sourceHost,
		"spt=" + sourcePort,
		"dst=" + dstHost,
		"dpt=" + dstPort,
	}
	fields := []string{
		"0",
		"cloud_foundry",
		"loggregator_trafficcontroller",
		"1.0",
		method + " " + path,
		method + " " + path,
		"0",
		strings.Join(extensions, " "),
	}
	return "CEF:" + strings.Join(fields, "|")
}
Example #27
func dateParse(date string) (epoch float64) {
	// YYYY-MM-DDTHH:mm:ss.sssZ
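	// Note: this snippet appears to assume the standard library time package is
	// imported under the alias Time, which frees the identifier time for use as
	// a local variable below.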
	var time Time.Time
	var err error
	{
		date := date
		if match := matchDateTimeZone.FindStringSubmatch(date); match != nil {
			if match[2] == "Z" {
				date = match[1] + "+0000"
			} else {
				date = match[1] + match[3] + match[4]
			}
		}
		for _, layout := range dateLayoutList {
			time, err = Time.Parse(layout, date)
			if err == nil {
				break
			}
		}
	}
	if err != nil {
		return math.NaN()
	}
	return float64(time.UnixNano()) / (1000 * 1000) // UnixMilli()
}
Example #28
// InitConfig initializes the configuration file.
func InitConfig() (err error) {
	var twepoch time.Time
	MyConf = &Config{
		PidFile:      "/tmp/gosnowflake.pid",
		Dir:          "/dev/null",
		Log:          "./log/xml",
		MaxProc:      runtime.NumCPU(),
		RPCBind:      []string{"localhost:8080"},
		ThriftBind:   []string{"localhost:8081"},
		DatacenterId: 0,
		WorkerId:     []int64{0},
		Start:        "2010-11-04 09:42:54",
		ZKAddr:       []string{"localhost:2181"},
		ZKTimeout:    time.Second * 15,
		ZKPath:       "/gosnowflake-servers",
	}
	if err = goConf.Parse(confPath); err != nil {
		return
	}
	if err = goConf.Unmarshal(MyConf); err != nil {
		return
	}
	if twepoch, err = time.Parse("2006-01-02 15:04:05", MyConf.Start); err != nil {
		return
	} else {
		MyConf.Twepoch = twepoch.UnixNano() / int64(time.Millisecond)
	}
	return
}
Example #29
func main() {
	var t1 time.Time

	t := time.Now()

	log.Println(t.Unix(), t.UTC().Unix(), t.Local().Unix())

	log.Println(t.Unix())
	log.Println(t.UnixNano())
	log.Println(time.Unix(0, t.UnixNano()).UTC())
	log.Println(time.Unix(t.Unix(), 0).UTC())
	log.Println(t.UTC())
	log.Println(t.UTC().Local())
	log.Println(t)
	log.Println(t.Location())
	log.Println(t.UTC().Location())
	log.Println(t.Format("2006-01-02 15:04:05"))

	log.Println(t1)
	log.Println(t1.Format("2006-01-02 15:04:05"))
	log.Println("default uinx nano ", t1.UnixNano())

	log.Println(t.Format("15小时20分钟"))

	td := time.Duration(9000000000000000000)

	log.Println(td.Hours(), td.Minutes(), td.String())

	log.Println(TranDurationToTime(td))

	log.Println(t.UTC().UnixNano())

	log.Println(time.Now().Date())
}
Example #30
func (j *Jchan) WriteTime(t time.Time) error {
	i := t.UnixNano() / 1000000
	if err := binary.Write(j.w, binary.BigEndian, i); err != nil {
		return err
	}
	return nil
}