// SetStateFromCapnp restores the server's job state (run queue, waiting
// jobs, finished ring, and counters) from a capnproto stream previously
// written to disk. fn is only used for error reporting.
func (js *JobServ) SetStateFromCapnp(r io.Reader, fn string) {
	capMsg, err := capn.ReadFromStream(r, nil)
	if err != nil {
		panic(fmt.Errorf("capnp problem reading from file '%s': %s", fn, err))
	}

	z := schema.ReadRootZ(capMsg)
	d3 := z.Which()
	if d3 != schema.Z_GOQSERVER {
		panic(fmt.Sprintf("expected schema.Z_GOQSERVER, got %d", d3))
	}

	zjs := z.Goqserver()
	js.NextJobId = zjs.Nextjobid()

	runqlist := zjs.Runq().ToArray()
	for _, zjob := range runqlist {
		j := CapnpZjobToJob(zjob)
		// don't kill jobs prematurely just because the server
		// just started back up!
		j.Unansweredping = 0
		j.Lastpingtm = time.Now().UnixNano()
		js.RunQ[j.Id] = j
		js.KnownJobHash[j.Id] = j
		js.RegisterWho(j)
	}

	waitlist := zjs.Waitingjobs().ToArray()
	for _, zjob := range waitlist {
		j := CapnpZjobToJob(zjob)
		js.WaitingJobs = append(js.WaitingJobs, j)
		js.KnownJobHash[j.Id] = j
		// getting too many open files errors, try doing
		// this on demand instead of all at once: comment it out.
		// js.RegisterWho(j)
	}

	js.FinishedJobsCount = zjs.Finishedjobscount()
	js.BadSgtCount = zjs.Badsgtcount()
	js.CancelledJobCount = zjs.Cancelledjobcount()
	js.BadNonceCount = zjs.Badnoncecount()

	finishedlist := zjs.Finishedjobs().ToArray()
	for _, zjob := range finishedlist {
		j := CapnpZjobToJob(zjob)
		js.FinishedRing = append(js.FinishedRing, j)
	}
}
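// Usage sketch (illustrative, not part of the original source): restoring
// server state from a snapshot file on disk. The function name, the js
// argument (an already-constructed *JobServ), and stateFile are assumptions
// made for demonstration; also assumes "os" is in the import list.
func restoreServerState(js *JobServ, stateFile string) {
	f, err := os.Open(stateFile)
	if err != nil {
		panic(fmt.Errorf("could not open state file '%s': %s", stateFile, err))
	}
	defer f.Close()

	// hand the open file to the server so it can rebuild its queues.
	js.SetStateFromCapnp(f, stateFile)
}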
// CapnpToJob deserializes a single Job from a capnproto-encoded buffer.
func CapnpToJob(buf *bytes.Buffer) *Job {
	capMsg, err := capn.ReadFromStream(buf, nil)
	if err != nil {
		panic(err)
	}

	z := schema.ReadRootZ(capMsg)
	d3 := z.Which()
	if d3 != schema.Z_JOB {
		panic(fmt.Sprintf("expected schema.Z_JOB, got %d", d3))
	}

	zj := z.Job()
	job := CapnpZjobToJob(zj)
	return job
}
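// Usage sketch (illustrative, not part of the original source): reading one
// serialized Job back from disk and decoding it. jobFile and the function
// name are assumptions; also assumes "os", "io", and "bytes" are imported.
func readJobFromFile(jobFile string) *Job {
	f, err := os.Open(jobFile)
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// CapnpToJob expects a *bytes.Buffer, so slurp the file into one first.
	var buf bytes.Buffer
	if _, err := io.Copy(&buf, f); err != nil {
		panic(err)
	}
	return CapnpToJob(&buf)
}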