func assertLayerDiff(t *testing.T, expected []byte, layer Layer) { expectedDigest := digest.FromBytes(expected) if digest.Digest(layer.DiffID()) != expectedDigest { t.Fatalf("Mismatched diff id for %s, got %s, expected %s", layer.ChainID(), layer.DiffID(), expected) } ts, err := layer.TarStream() if err != nil { t.Fatal(err) } defer ts.Close() actual, err := ioutil.ReadAll(ts) if err != nil { t.Fatal(err) } if len(actual) != len(expected) { logByteDiff(t, actual, expected) t.Fatalf("Mismatched tar stream size for %s, got %d, expected %d", layer.ChainID(), len(actual), len(expected)) } actualDigest := digest.FromBytes(actual) if actualDigest != expectedDigest { logByteDiff(t, actual, expected) t.Fatalf("Wrong digest of tar stream, got %s, expected %s", actualDigest, expectedDigest) } }
// setupSuccessfulV2Fetch registers ghttp handlers emulating a v2 registry
// serving a two-layer image for some-repo:some-tag. When layer1Cached is
// true, no blob handler is registered for layer 1 — the client under test is
// expected to skip fetching a layer it already has.
func setupSuccessfulV2Fetch(server *ghttp.Server, layer1Cached bool) {
	layer1Data := "banana-1-flan"
	layer1Dgst, _ := digest.FromBytes([]byte(layer1Data))

	layer2Data := "banana-2-flan"
	layer2Dgst, _ := digest.FromBytes([]byte(layer2Data))

	server.AppendHandlers(
		ghttp.CombineHandlers(
			ghttp.VerifyRequest("GET", "/v2/some-repo/manifests/some-tag"),
			http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
				// v1-style manifest: fsLayers are listed topmost-first, so
				// layer 2's digest is interpolated before layer 1's, and the
				// history entries mirror that ordering.
				w.Write([]byte(fmt.Sprintf(` { "name":"some-repo", "tag":"some-tag", "fsLayers":[ { "blobSum":"%s" }, { "blobSum":"%s" } ], "history":[ { "v1Compatibility": "{\"id\":\"banana-pie-2\", \"parent\":\"banana-pie-1\"}" }, { "v1Compatibility": "{\"id\":\"banana-pie-1\"}" } ] } `, layer2Dgst.String(), layer1Dgst.String())))
			}),
		),
	)

	// Only serve the layer-1 blob when the test scenario says it is not
	// already cached locally.
	if !layer1Cached {
		server.AppendHandlers(
			ghttp.CombineHandlers(
				ghttp.VerifyRequest("GET", fmt.Sprintf("/v2/some-repo/blobs/%s", layer1Dgst)),
				http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
					w.Write([]byte(layer1Data))
				}),
			),
		)
	}

	server.AppendHandlers(
		ghttp.CombineHandlers(
			ghttp.VerifyRequest("GET", fmt.Sprintf("/v2/some-repo/blobs/%s", layer2Dgst)),
			http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
				w.Write([]byte(layer2Data))
			}),
		),
	)
}
// loadManifest loads a manifest from a byte array and verifies its content,
// returning the local digest, the manifest itself, whether or not it was
// verified. If ref is a digest, rather than a tag, this will be treated as
// the local digest. An error will be returned if the signature verification
// fails, local digest verification fails and, if provided, the remote digest
// verification fails. The boolean return will only be false without error on
// the failure of signatures trust check.
func (s *TagStore) loadManifest(manifestBytes []byte, ref string, remoteDigest digest.Digest) (digest.Digest, *registry.ManifestData, bool, error) {
	payload, keys, err := unpackSignedManifest(manifestBytes)
	if err != nil {
		return "", nil, false, fmt.Errorf("error unpacking manifest: %v", err)
	}

	// TODO(stevvooe): It would be a lot better here to build up a stack of
	// verifiers, then push the bytes one time for signatures and digests, but
	// the manifests are typically small, so this optimization is not worth
	// hacking this code without further refactoring.
	var localDigest digest.Digest

	// Verify the local digest, if present in ref. ParseDigest will validate
	// that the ref is a digest and verify against that if present. Otherwise
	// (on error), we simply compute the localDigest and proceed.
	if dgst, err := digest.ParseDigest(ref); err == nil {
		// verify the manifest against local ref
		if err := verifyDigest(dgst, payload); err != nil {
			return "", nil, false, fmt.Errorf("verifying local digest: %v", err)
		}

		localDigest = dgst
	} else {
		// We don't have a local digest, since we are working from a tag.
		// Compute the digest of the payload and return that.
		logrus.Debugf("provided manifest reference %q is not a digest: %v", ref, err)
		localDigest, err = digest.FromBytes(payload)
		if err != nil {
			// near impossible
			logrus.Errorf("error calculating local digest during tag pull: %v", err)
			return "", nil, false, err
		}
	}

	// verify against the remote digest, if available
	if remoteDigest != "" {
		if err := verifyDigest(remoteDigest, payload); err != nil {
			return "", nil, false, fmt.Errorf("verifying remote digest: %v", err)
		}
	}

	var manifest registry.ManifestData
	if err := json.Unmarshal(payload, &manifest); err != nil {
		return "", nil, false, fmt.Errorf("error unmarshalling manifest: %s", err)
	}

	// validate the contents of the manifest
	if err := validateManifest(&manifest); err != nil {
		return "", nil, false, err
	}

	// Trust-check the signing keys last; a false result here is the only
	// path that returns verified == false without an error.
	var verified bool
	verified, err = s.verifyTrustedKeys(manifest.Name, keys)
	if err != nil {
		return "", nil, false, fmt.Errorf("error verifying trusted keys: %v", err)
	}

	return localDigest, &manifest, verified, nil
}
// Upload is called to perform the upload. The mock enforces the
// maxUploadConcurrency limit, emits progress in ten timed steps (observing
// ctx cancellation), optionally fails to simulate retryable errors, and
// finally returns SHA256 of the DiffID string as the "uploaded" digest.
func (u *mockUploadDescriptor) Upload(ctx context.Context, progressOutput progress.Output) (digest.Digest, error) {
	if u.currentUploads != nil {
		// Decrement on exit; increment-and-check must happen in this order
		// so the counter is always balanced even when we fail below.
		defer atomic.AddInt32(u.currentUploads, -1)

		if atomic.AddInt32(u.currentUploads, 1) > maxUploadConcurrency {
			return "", errors.New("concurrency limit exceeded")
		}
	}

	// Sleep a bit to simulate a time-consuming upload.
	for i := int64(0); i <= 10; i++ {
		select {
		case <-ctx.Done():
			return "", ctx.Err()
		case <-time.After(10 * time.Millisecond):
			progressOutput.WriteProgress(progress.Progress{ID: u.ID(), Current: i, Total: 10})
		}
	}

	// Each call consumes one simulated retry until the counter hits zero.
	if u.simulateRetries != 0 {
		u.simulateRetries--
		return "", errors.New("simulating retry")
	}

	// For the mock implementation, use SHA256(DiffID) as the returned
	// digest.
	return digest.FromBytes([]byte(u.diffID.String()))
}
func init() { schema1Func := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) { sm := new(SignedManifest) err := sm.UnmarshalJSON(b) if err != nil { return nil, distribution.Descriptor{}, err } desc := distribution.Descriptor{ Digest: digest.FromBytes(sm.Canonical), Size: int64(len(sm.Canonical)), MediaType: MediaTypeManifest, } return sm, desc, err } err := distribution.RegisterManifestSchema(MediaTypeManifest, schema1Func) if err != nil { panic(fmt.Sprintf("Unable to register manifest: %s", err)) } err = distribution.RegisterManifestSchema("", schema1Func) if err != nil { panic(fmt.Sprintf("Unable to register manifest: %s", err)) } err = distribution.RegisterManifestSchema("application/json", schema1Func) if err != nil { panic(fmt.Sprintf("Unable to register manifest: %s", err)) } }
// TestFSInvalidSet ensures Set fails when the on-disk path reserved for the
// digest's content already exists as a directory (corrupt store state).
func TestFSInvalidSet(t *testing.T) {
	tmpdir, err := ioutil.TempDir("", "images-fs-store")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(tmpdir)

	fs, err := NewFSStoreBackend(tmpdir)
	if err != nil {
		t.Fatal(err)
	}

	// Compute the digest Set will derive for this payload so we can
	// pre-poison its content path.
	id, err := digest.FromBytes([]byte("foobar"))
	if err != nil {
		t.Fatal(err)
	}

	// Create a directory where the store expects to write the blob file,
	// forcing the subsequent Set to fail.
	err = os.Mkdir(filepath.Join(tmpdir, contentDirName, string(id.Algorithm()), id.Hex()), 0700)
	if err != nil {
		t.Fatal(err)
	}

	_, err = fs.Set([]byte("foobar"))
	if err == nil {
		t.Fatal("Expecting error from invalid filesystem data.")
	}
}
func schema2ToImage(manifest *schema2.DeserializedManifest, imageConfig []byte, d digest.Digest) (*api.Image, error) { mediatype, payload, err := manifest.Payload() if err != nil { return nil, err } dockerImage, err := unmarshalDockerImage(imageConfig) if err != nil { return nil, err } if len(d) > 0 { dockerImage.ID = d.String() } else { dockerImage.ID = digest.FromBytes(payload).String() } image := &api.Image{ ObjectMeta: kapi.ObjectMeta{ Name: dockerImage.ID, }, DockerImageMetadata: *dockerImage, DockerImageManifest: string(payload), DockerImageConfig: string(imageConfig), DockerImageManifestMediaType: mediatype, DockerImageMetadataVersion: "1.0", } return image, nil }
func schema1ToImage(manifest *schema1.SignedManifest, d digest.Digest) (*api.Image, error) { if len(manifest.History) == 0 { return nil, fmt.Errorf("image has no v1Compatibility history and cannot be used") } dockerImage, err := unmarshalDockerImage([]byte(manifest.History[0].V1Compatibility)) if err != nil { return nil, err } mediatype, payload, err := manifest.Payload() if err != nil { return nil, err } if len(d) > 0 { dockerImage.ID = d.String() } else { dockerImage.ID = digest.FromBytes(manifest.Canonical).String() } image := &api.Image{ ObjectMeta: kapi.ObjectMeta{ Name: dockerImage.ID, }, DockerImageMetadata: *dockerImage, DockerImageManifest: string(payload), DockerImageManifestMediaType: mediatype, DockerImageMetadataVersion: "1.0", } return image, nil }
// TestEmptyTar checks two invariants of the gzippedEmptyTar fixture: it
// decompresses to exactly 1024 NUL bytes, and its digest equals
// digestSHA256GzippedEmptyTar.
func TestEmptyTar(t *testing.T) {
	// Confirm that gzippedEmptyTar expands to 1024 NULL bytes.
	var decompressed [2048]byte
	gzipReader, err := gzip.NewReader(bytes.NewReader(gzippedEmptyTar))
	if err != nil {
		t.Fatalf("NewReader returned error: %v", err)
	}
	n, err := gzipReader.Read(decompressed[:])
	if n != 1024 {
		t.Fatalf("read returned %d bytes; expected 1024", n)
	}
	// A second read must yield no data and io.EOF, proving the stream ends
	// exactly at 1024 bytes.
	n, err = gzipReader.Read(decompressed[1024:])
	if n != 0 {
		t.Fatalf("read returned %d bytes; expected 0", n)
	}
	if err != io.EOF {
		t.Fatal("read did not return io.EOF")
	}
	gzipReader.Close()

	for _, b := range decompressed[:1024] {
		if b != 0 {
			t.Fatal("nonzero byte in decompressed tar")
		}
	}

	// Confirm that digestSHA256EmptyTar is the digest of gzippedEmptyTar.
	dgst := digest.FromBytes(gzippedEmptyTar)
	if dgst != digestSHA256GzippedEmptyTar {
		t.Fatalf("digest mismatch for empty tar: expected %s got %s", digestSHA256GzippedEmptyTar, dgst)
	}
}
func (ls *mockLayerStore) Register(reader io.Reader, parentID layer.ChainID) (layer.Layer, error) { var ( parent layer.Layer err error ) if parentID != "" { parent, err = ls.Get(parentID) if err != nil { return nil, err } } l := &mockLayer{parent: parent} _, err = l.layerData.ReadFrom(reader) if err != nil { return nil, err } diffID, err := digest.FromBytes(l.layerData.Bytes()) if err != nil { return nil, err } l.diffID = layer.DiffID(diffID) l.chainID = createChainIDFromParent(parentID, l.diffID) ls.layers[l.chainID] = l return l, nil }
func (h *manifestSchema2Handler) Digest() (digest.Digest, error) { _, p, err := h.manifest.Payload() if err != nil { return "", err } return digest.FromBytes(p), nil }
// Build produces a final manifest from the given references.
func (mb *builder) Build(ctx context.Context) (distribution.Manifest, error) {
	m := Manifest{
		Versioned: SchemaVersion,
		Layers:    make([]distribution.Descriptor, len(mb.layers)),
	}
	copy(m.Layers, mb.layers)

	configDigest := digest.FromBytes(mb.configJSON)

	var err error
	m.Config, err = mb.bs.Stat(ctx, configDigest)
	switch err {
	case nil:
		// Config blob already exists in the blob store; reuse its descriptor.
		return FromStruct(m)
	case distribution.ErrBlobUnknown:
		// nop — fall through and upload the config blob below.
	default:
		return nil, err
	}

	// Add config to the blob store
	m.Config, err = mb.bs.Put(ctx, MediaTypeConfig, mb.configJSON)
	if err != nil {
		return nil, err
	}

	return FromStruct(m)
}
// GetRemoteCA returns the remote endpoint's CA certificate func GetRemoteCA(ctx context.Context, d digest.Digest, picker *picker.Picker) (RootCA, error) { // We need a valid picker to be able to Dial to a remote CA if picker == nil { return RootCA{}, fmt.Errorf("valid remote address picker required") } // This TLS Config is intentionally using InsecureSkipVerify. Either we're // doing TOFU, in which case we don't validate the remote CA, or we're using // a user supplied hash to check the integrity of the CA certificate. insecureCreds := credentials.NewTLS(&tls.Config{InsecureSkipVerify: true}) opts := []grpc.DialOption{ grpc.WithTransportCredentials(insecureCreds), grpc.WithBackoffMaxDelay(10 * time.Second), grpc.WithPicker(picker)} firstAddr, err := picker.PickAddr() if err != nil { return RootCA{}, err } conn, err := grpc.Dial(firstAddr, opts...) if err != nil { return RootCA{}, err } defer conn.Close() client := api.NewCAClient(conn) response, err := client.GetRootCACertificate(ctx, &api.GetRootCACertificateRequest{}) if err != nil { return RootCA{}, err } if d != "" { verifier, err := digest.NewDigestVerifier(d) if err != nil { return RootCA{}, fmt.Errorf("unexpected error getting digest verifier: %v", err) } io.Copy(verifier, bytes.NewReader(response.Certificate)) if !verifier.Verified() { return RootCA{}, fmt.Errorf("remote CA does not match fingerprint. Expected: %s", d.Hex()) } } // Check the validity of the remote Cert _, err = helpers.ParseCertificatePEM(response.Certificate) if err != nil { return RootCA{}, err } // Create a Pool with our RootCACertificate pool := x509.NewCertPool() if !pool.AppendCertsFromPEM(response.Certificate) { return RootCA{}, fmt.Errorf("failed to append certificate to cert pool") } return RootCA{Cert: response.Certificate, Digest: digest.FromBytes(response.Certificate), Pool: pool}, nil }
// convertSchema2Manifest down-converts a schema2 manifest to schema1 so it
// can be served to older clients: it fetches the image config blob, builds a
// schema1 manifest signed with the registry's trust key (tagged when a tag
// is present on the request), appends every reference from the schema2
// manifest, and records the resulting canonical digest on the handler.
func (imh *imageManifestHandler) convertSchema2Manifest(schema2Manifest *schema2.DeserializedManifest) (distribution.Manifest, error) {
	targetDescriptor := schema2Manifest.Target()
	blobs := imh.Repository.Blobs(imh)
	configJSON, err := blobs.Get(imh, targetDescriptor.Digest)
	if err != nil {
		imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err))
		return nil, err
	}

	ref := imh.Repository.Named()

	// Attach the request's tag to the reference when present; the schema1
	// manifest embeds the tag.
	if imh.Tag != "" {
		ref, err = reference.WithTag(ref, imh.Tag)
		if err != nil {
			imh.Errors = append(imh.Errors, v2.ErrorCodeTagInvalid.WithDetail(err))
			return nil, err
		}
	}

	builder := schema1.NewConfigManifestBuilder(imh.Repository.Blobs(imh), imh.Context.App.trustKey, ref, configJSON)
	for _, d := range schema2Manifest.References() {
		if err := builder.AppendReference(d); err != nil {
			imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err))
			return nil, err
		}
	}
	manifest, err := builder.Build(imh)
	if err != nil {
		imh.Errors = append(imh.Errors, v2.ErrorCodeManifestInvalid.WithDetail(err))
		return nil, err
	}
	// The served digest is that of the converted (schema1) manifest, not the
	// original schema2 one.
	imh.Digest = digest.FromBytes(manifest.(*schema1.SignedManifest).Canonical)

	return manifest, nil
}
// addTestManifestWithEtag registers a GET manifest request/response pair
// that honors If-None-Match: a 304 Not Modified when the supplied etag
// equals the content's actual digest, otherwise a 200 carrying the content.
func addTestManifestWithEtag(repo, reference string, content []byte, m *testutil.RequestResponseMap, dgst string) {
	actualDigest, _ := digest.FromBytes(content)
	getReqWithEtag := testutil.Request{
		Method: "GET",
		Route:  "/v2/" + repo + "/manifests/" + reference,
		Headers: http.Header(map[string][]string{
			// Etags are quoted per RFC 7232.
			"If-None-Match": {fmt.Sprintf(`"%s"`, dgst)},
		}),
	}

	var getRespWithEtag testutil.Response
	if actualDigest.String() == dgst {
		// Etag matches: empty 304 response.
		getRespWithEtag = testutil.Response{
			StatusCode: http.StatusNotModified,
			Body:       []byte{},
			Headers: http.Header(map[string][]string{
				"Content-Length": {"0"},
				"Last-Modified":  {time.Now().Add(-1 * time.Second).Format(time.ANSIC)},
			}),
		}
	} else {
		// Etag mismatch: serve the full manifest body.
		getRespWithEtag = testutil.Response{
			StatusCode: http.StatusOK,
			Body:       content,
			Headers: http.Header(map[string][]string{
				"Content-Length": {fmt.Sprint(len(content))},
				"Last-Modified":  {time.Now().Add(-1 * time.Second).Format(time.ANSIC)},
			}),
		}
	}
	*m = append(*m, testutil.RequestResponseMapping{Request: getReqWithEtag, Response: getRespWithEtag})
}
// schema2ManifestDigest computes the manifest digest, and, if pulling by // digest, ensures that it matches the requested digest. func schema2ManifestDigest(ref reference.Named, mfst distribution.Manifest) (digest.Digest, error) { _, canonical, err := mfst.Payload() if err != nil { return "", err } // If pull by digest, then verify the manifest digest. if digested, isDigested := ref.(reference.Canonical); isDigested { verifier, err := digest.NewDigestVerifier(digested.Digest()) if err != nil { return "", err } if _, err := verifier.Write(canonical); err != nil { return "", err } if !verifier.Verified() { err := fmt.Errorf("manifest verification failed for digest %s", digested.Digest()) logrus.Error(err) return "", err } return digested.Digest(), nil } return digest.FromBytes(canonical), nil }
// CreateID creates an ID from v1 image, layerID and parent ID.
// Used for backwards compatibility with old clients.
func CreateID(v1Image image.V1Image, layerID layer.ChainID, parent digest.Digest) (digest.Digest, error) {
	// Clear the v1 ID so it cannot influence the computed digest.
	v1Image.ID = ""
	v1JSON, err := json.Marshal(v1Image)
	if err != nil {
		return "", err
	}

	// Round-trip through a raw-message map so extra fields can be injected
	// without disturbing the marshaled representation of the rest.
	var config map[string]*json.RawMessage
	if err := json.Unmarshal(v1JSON, &config); err != nil {
		return "", err
	}

	// FIXME: note that this is slightly incompatible with RootFS logic
	config["layer_id"] = rawJSON(layerID)
	if parent != "" {
		config["parent"] = rawJSON(parent)
	}

	configJSON, err := json.Marshal(config)
	if err != nil {
		return "", err
	}
	logrus.Debugf("CreateV1ID %s", configJSON)

	return digest.FromBytes(configJSON), nil
}
// digestManifest is a modified version of: // https://github.com/docker/distribution/blob/6ba799b/registry/handlers/images.go#L228-L251 func digestManifest(manifest *manifest.SignedManifest) (digest.Digest, error) { p, err := manifest.Payload() if err != nil { return "", err } return digest.FromBytes(p) }
// Put stores the content p in the blob store, calculating the digest. If the
// content is already present, only the digest will be returned. This should
// only be used for small objects, such as manifests. This implemented as a
// convenience for other Put implementations
func (bs *blobStore) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) {
	dgst, err := digest.FromBytes(p)
	if err != nil {
		context.GetLogger(ctx).Errorf("blobStore: error digesting content: %v, %s", err, string(p))
		return distribution.Descriptor{}, err
	}

	desc, err := bs.statter.Stat(ctx, dgst)
	if err == nil {
		// content already present
		return desc, nil
	} else if err != distribution.ErrBlobUnknown {
		context.GetLogger(ctx).Errorf("blobStore: error stating content (%v): %#v", dgst, err)
		// real error, return it
		return distribution.Descriptor{}, err
	}

	bp, err := bs.path(dgst)
	if err != nil {
		return distribution.Descriptor{}, err
	}

	// TODO(stevvooe): Write out mediatype here, as well.

	// The content write happens in the return statement; PutContent's error
	// (if any) is returned alongside the descriptor.
	return distribution.Descriptor{
		Size: int64(len(p)),

		// NOTE(stevvooe): The central blob store firewalls media types from
		// other users. The caller should look this up and override the value
		// for the specific repository.
		MediaType: "application/octet-stream",
		Digest:    dgst,
	}, bs.driver.PutContent(ctx, bp, p)
}
// Build produces a final manifest from the given references. func (mb *builder) Build(ctx context.Context) (distribution.Manifest, error) { m := Manifest{ Versioned: SchemaVersion, Layers: make([]distribution.Descriptor, len(mb.layers)), } copy(m.Layers, mb.layers) configDigest := digest.FromBytes(mb.configJSON) var err error m.Config, err = mb.bs.Stat(ctx, configDigest) switch err { case nil: // Override MediaType, since Put always replaces the specified media // type with application/octet-stream in the descriptor it returns. m.Config.MediaType = MediaTypeConfig return FromStruct(m) case distribution.ErrBlobUnknown: // nop default: return nil, err } // Add config to the blob store m.Config, err = mb.bs.Put(ctx, MediaTypeConfig, mb.configJSON) // Override MediaType, since Put always replaces the specified media // type with application/octet-stream in the descriptor it returns. m.Config.MediaType = MediaTypeConfig if err != nil { return nil, err } return FromStruct(m) }
// newRandomSchemaV1Manifest constructs a signed schema1 manifest containing
// blobCount random layers (with matching history entries), returning the
// signed manifest, its canonical digest, and the canonical payload bytes.
func newRandomSchemaV1Manifest(name reference.Named, tag string, blobCount int) (*schema1.SignedManifest, digest.Digest, []byte) {
	blobs := make([]schema1.FSLayer, blobCount)
	history := make([]schema1.History, blobCount)

	for i := 0; i < blobCount; i++ {
		// Blob sizes cycle through 0, 16, 32, 48, 64 bytes.
		dgst, blob := newRandomBlob((i % 5) * 16)

		blobs[i] = schema1.FSLayer{BlobSum: dgst}
		history[i] = schema1.History{V1Compatibility: fmt.Sprintf("{\"Hex\": \"%x\"}", blob)}
	}

	m := schema1.Manifest{
		Name:         name.String(),
		Tag:          tag,
		Architecture: "x86",
		FSLayers:     blobs,
		History:      history,
		Versioned: manifest.Versioned{
			SchemaVersion: 1,
		},
	}

	pk, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		panic(err)
	}

	sm, err := schema1.Sign(&m, pk)
	if err != nil {
		panic(err)
	}

	// The returned digest is computed over the canonical (unsigned) bytes.
	return sm, digest.FromBytes(sm.Canonical), sm.Canonical
}
// createManifestEvent builds a notification Event for the given action on a
// signed manifest, filling in the target's media type, repository, payload
// length, digest, and (tag-based) URL.
func (b *bridge) createManifestEvent(action string, repo distribution.Repository, sm *manifest.SignedManifest) (*Event, error) {
	event := b.createEvent(action)
	event.Target.MediaType = manifest.ManifestMediaType
	event.Target.Repository = repo.Name()

	p, err := sm.Payload()
	if err != nil {
		return nil, err
	}

	event.Target.Length = int64(len(p))

	event.Target.Digest, err = digest.FromBytes(p)
	if err != nil {
		return nil, err
	}

	// TODO(stevvooe): Currently, the is the "tag" url: once the digest url is
	// implemented, this should be replaced.
	event.Target.URL, err = b.ub.BuildManifestURL(sm.Name, sm.Tag)
	if err != nil {
		return nil, err
	}

	return event, nil
}
// assumes spec is not nil func secretFromSecretSpec(spec *api.SecretSpec) *api.Secret { return &api.Secret{ ID: identity.NewID(), Spec: *spec, SecretSize: int64(len(spec.Data)), Digest: digest.FromBytes(spec.Data).String(), } }
func digestFromManifest(m *schema1.SignedManifest) (digest.Digest, error) { payload, err := m.Payload() if err != nil { return "", err } manifestDigest, err := digest.FromBytes(payload) return manifestDigest, nil }
func checkWrite(t *testing.T, cs *ContentStore, p []byte) digest.Digest { dgst := digest.FromBytes(p) if err := WriteBlob(cs, bytes.NewReader(p), int64(len(p)), dgst); err != nil { t.Fatal(err) } return dgst }
func (s *tempConfigStore) Put(c []byte) (digest.Digest, error) { dgst := digest.FromBytes(c) s.config = c s.configDigest = dgst return dgst, nil }
// createRepository pushes a tagged image named imageName:tag (two random
// layers plus a signed schema1 manifest) into the test registry, verifying
// the put response's Location and Docker-Content-Digest headers, and
// returns the manifest's canonical digest.
func createRepository(env *testEnv, t *testing.T, imageName string, tag string) digest.Digest {
	unsignedManifest := &schema1.Manifest{
		Versioned: manifest.Versioned{
			SchemaVersion: 1,
		},
		Name: imageName,
		Tag:  tag,
		FSLayers: []schema1.FSLayer{
			{
				// Placeholder; replaced with a real blob sum below.
				BlobSum: "asdf",
			},
		},
		History: []schema1.History{
			{
				V1Compatibility: "",
			},
		},
	}

	// Push 2 random layers
	expectedLayers := make(map[digest.Digest]io.ReadSeeker)

	for i := range unsignedManifest.FSLayers {
		rs, dgstStr, err := testutil.CreateRandomTarFile()
		if err != nil {
			t.Fatalf("error creating random layer %d: %v", i, err)
		}
		dgst := digest.Digest(dgstStr)

		expectedLayers[dgst] = rs
		unsignedManifest.FSLayers[i].BlobSum = dgst
		uploadURLBase, _ := startPushLayer(t, env.builder, imageName)
		pushLayer(t, env.builder, imageName, dgst, uploadURLBase, rs)
	}

	signedManifest, err := schema1.Sign(unsignedManifest, env.pk)
	if err != nil {
		t.Fatalf("unexpected error signing manifest: %v", err)
	}

	// The returned/advertised digest is over the canonical (unsigned) bytes.
	dgst := digest.FromBytes(signedManifest.Canonical)

	// Create this repository by tag to ensure the tag mapping is made in the registry
	manifestDigestURL, err := env.builder.BuildManifestURL(imageName, tag)
	checkErr(t, err, "building manifest url")
	location, err := env.builder.BuildManifestURL(imageName, dgst.String())
	checkErr(t, err, "building location URL")

	resp := putManifest(t, "putting signed manifest", manifestDigestURL, signedManifest)
	checkResponse(t, "putting signed manifest", resp, http.StatusCreated)
	checkHeaders(t, resp, http.Header{
		"Location":              []string{location},
		"Docker-Content-Digest": []string{dgst.String()},
	})
	return dgst
}
// ChainID returns the ChainID for the top layer in RootFS. func (r *RootFS) ChainID() layer.ChainID { ids := r.DiffIDs if r.Type == TypeLayersWithBase { // Add an extra ID for the base. baseDiffID := layer.DiffID(digest.FromBytes([]byte(r.BaseLayerID()))) ids = append([]layer.DiffID{baseDiffID}, ids...) } return layer.CreateChainID(ids) }
func (bs *mockBlobService) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { d := distribution.Descriptor{ Digest: digest.FromBytes(p), Size: int64(len(p)), MediaType: mediaType, } bs.descriptors[d.Digest] = d return d, nil }
func randomLayerID(seed int64) ChainID { r := rand.New(rand.NewSource(seed)) dgst, err := digest.FromBytes([]byte(fmt.Sprintf("%d", r.Int63()))) if err != nil { panic(err) } return ChainID(dgst) }