func dbUpdateFromV15(currentVersion int, version int, d *Daemon) error { // munge all LVM-backed containers' LV names to match what is // required for snapshot support cNames, err := dbContainersList(d.db, cTypeRegular) if err != nil { return err } err = daemonConfigInit(d.db) if err != nil { return err } vgName := daemonConfig["storage.lvm_vg_name"].Get() for _, cName := range cNames { var lvLinkPath string if strings.Contains(cName, shared.SnapshotDelimiter) { lvLinkPath = shared.VarPath("snapshots", fmt.Sprintf("%s.lv", cName)) } else { lvLinkPath = shared.VarPath("containers", fmt.Sprintf("%s.lv", cName)) } if !shared.PathExists(lvLinkPath) { continue } newLVName := strings.Replace(cName, "-", "--", -1) newLVName = strings.Replace(newLVName, shared.SnapshotDelimiter, "-", -1) if cName == newLVName { shared.LogDebug("No need to rename, skipping", log.Ctx{"cName": cName, "newLVName": newLVName}) continue } shared.LogDebug("About to rename cName in lv upgrade", log.Ctx{"lvLinkPath": lvLinkPath, "cName": cName, "newLVName": newLVName}) output, err := exec.Command("lvrename", vgName, cName, newLVName).CombinedOutput() if err != nil { return fmt.Errorf("Could not rename LV '%s' to '%s': %v\noutput:%s", cName, newLVName, err, string(output)) } if err := os.Remove(lvLinkPath); err != nil { return fmt.Errorf("Couldn't remove lvLinkPath '%s'", lvLinkPath) } newLinkDest := fmt.Sprintf("/dev/%s/%s", vgName, newLVName) if err := os.Symlink(newLinkDest, lvLinkPath); err != nil { return fmt.Errorf("Couldn't recreate symlink '%s'->'%s'", lvLinkPath, newLinkDest) } } return nil }
func containerDeleteSnapshots(d *Daemon, cname string) error { shared.LogDebug("containerDeleteSnapshots", log.Ctx{"container": cname}) results, err := dbContainerGetSnapshots(d.db, cname) if err != nil { return err } for _, sname := range results { sc, err := containerLoadByName(d, sname) if err != nil { shared.LogError( "containerDeleteSnapshots: Failed to load the snapshotcontainer", log.Ctx{"container": cname, "snapshot": sname}) continue } if err := sc.Delete(); err != nil { shared.LogError( "containerDeleteSnapshots: Failed to delete a snapshotcontainer", log.Ctx{"container": cname, "snapshot": sname, "err": err}) } } return nil }
func dbContainerRename(db *sql.DB, oldName string, newName string) error { tx, err := dbBegin(db) if err != nil { return err } str := fmt.Sprintf("UPDATE containers SET name = ? WHERE name = ?") stmt, err := tx.Prepare(str) if err != nil { tx.Rollback() return err } defer stmt.Close() shared.LogDebug( "Calling SQL Query", log.Ctx{ "query": "UPDATE containers SET name = ? WHERE name = ?", "oldName": oldName, "newName": newName}) if _, err := stmt.Exec(newName, oldName); err != nil { tx.Rollback() return err } return txCommit(tx) }
func readMyCert() (string, string, error) { certf := shared.VarPath("server.crt") keyf := shared.VarPath("server.key") shared.LogDebug("Looking for existing certificates", log.Ctx{"cert": certf, "key": keyf}) err := shared.FindOrGenCert(certf, keyf, false) return certf, keyf, err }
func storageLVMCheckVolumeGroup(vgName string) error { output, err := exec.Command("vgdisplay", "-s", vgName).CombinedOutput() if err != nil { shared.LogDebug("vgdisplay failed to find vg", log.Ctx{"output": string(output)}) return fmt.Errorf("LVM volume group '%s' not found", vgName) } return nil }
// CheckTrustState returns True if the client is trusted else false. func (d *Daemon) CheckTrustState(cert x509.Certificate) bool { for k, v := range d.clientCerts { if bytes.Compare(cert.Raw, v.Raw) == 0 { shared.LogDebug("Found cert", log.Ctx{"k": k}) return true } } return false }
func (s *storageLvm) ContainerSnapshotStart(container container) error { srcName := containerNameToLVName(container.Name()) destName := containerNameToLVName(container.Name() + "/rw") shared.LogDebug( "Creating snapshot", log.Ctx{"srcName": srcName, "destName": destName}) lvpath, err := s.createSnapshotLV(destName, srcName, false) if err != nil { return fmt.Errorf("Error creating snapshot LV: %v", err) } destPath := container.Path() if !shared.PathExists(destPath) { if err := os.MkdirAll(destPath, 0755); err != nil { return fmt.Errorf("Error creating container directory: %v", err) } } // Generate a new xfs's UUID fstype := daemonConfig["storage.lvm_fstype"].Get() if fstype == "xfs" { err := xfsGenerateNewUUID(lvpath) if err != nil { s.ContainerDelete(container) return err } } mountOptions := daemonConfig["storage.lvm_mount_options"].Get() err = tryMount(lvpath, container.Path(), fstype, 0, mountOptions) if err != nil { return fmt.Errorf( "Error mounting snapshot LV path='%s': %v", container.Path(), err) } return nil }
func (s *storageLvm) createSnapshotContainer( snapshotContainer container, sourceContainer container, readonly bool) error { srcName := containerNameToLVName(sourceContainer.Name()) destName := containerNameToLVName(snapshotContainer.Name()) shared.LogDebug( "Creating snapshot", log.Ctx{"srcName": srcName, "destName": destName}) lvpath, err := s.createSnapshotLV(destName, srcName, readonly) if err != nil { return fmt.Errorf("Error creating snapshot LV: %v", err) } destPath := snapshotContainer.Path() if err := os.MkdirAll(destPath, 0755); err != nil { return fmt.Errorf("Error creating container directory: %v", err) } var mode os.FileMode if snapshotContainer.IsPrivileged() { mode = 0700 } else { mode = 0755 } err = os.Chmod(destPath, mode) if err != nil { return err } dest := fmt.Sprintf("%s.lv", snapshotContainer.Path()) err = os.Symlink(lvpath, dest) if err != nil { return err } return nil }
func (ss *storageShared) shiftRootfs(c container) error { dpath := c.Path() rpath := c.RootfsPath() shared.LogDebug("Shifting root filesystem", log.Ctx{"container": c.Name(), "rootfs": rpath}) idmapset := c.IdmapSet() if idmapset == nil { return fmt.Errorf("IdmapSet of container '%s' is nil", c.Name()) } err := idmapset.ShiftRootfs(rpath) if err != nil { shared.LogDebugf("Shift of rootfs %s failed: %s", rpath, err) return err } /* Set an acl so the container root can descend the container dir */ // TODO: i changed this so it calls ss.setUnprivUserAcl, which does // the acl change only if the container is not privileged, think thats right. return ss.setUnprivUserAcl(c, dpath) }
// MigrationSink receives a container (and optionally its snapshots) over a
// websocket as `zfs send` streams and reconstructs them locally via
// `zfs receive`. Snapshots are received first (oldest to newest per the
// provided slice order), then the container filesystem itself, and again
// for the post-running state when `live` is true.
func (s *storageZfs) MigrationSink(live bool, container container, snapshots []*Snapshot, conn *websocket.Conn, srcIdmap *shared.IdmapSet, op *operation) error {
	// zfsRecv pipes one websocket stream into `zfs receive -F -u` for the
	// given dataset name, optionally wrapping the stdin pipe (used below to
	// attach a progress writer).
	zfsRecv := func(zfsName string, writeWrapper func(io.WriteCloser) io.WriteCloser) error {
		zfsFsName := fmt.Sprintf("%s/%s", s.zfsPool, zfsName)
		args := []string{"receive", "-F", "-u", zfsFsName}
		cmd := exec.Command("zfs", args...)

		stdin, err := cmd.StdinPipe()
		if err != nil {
			return err
		}

		stderr, err := cmd.StderrPipe()
		if err != nil {
			return err
		}

		if err := cmd.Start(); err != nil {
			return err
		}

		writePipe := io.WriteCloser(stdin)
		if writeWrapper != nil {
			writePipe = writeWrapper(stdin)
		}

		// Block until the sender's stream has been fully copied into
		// the zfs recv process.
		<-shared.WebsocketRecvStream(writePipe, conn)

		output, err := ioutil.ReadAll(stderr)
		if err != nil {
			// NOTE(review): the "%s" in this message is never substituted —
			// LogDebug takes a context map, not format args; confirm intent.
			shared.LogDebug("problem reading zfs recv stderr %s", log.Ctx{"err": err})
		}

		err = cmd.Wait()
		if err != nil {
			shared.LogError("problem with zfs recv", log.Ctx{"output": string(output)})
		}
		return err
	}

	/* In some versions of zfs we can write `zfs recv -F` to mounted
	 * filesystems, and in some versions we can't. So, let's always unmount
	 * this fs (it's empty anyway) before we zfs recv. N.B. that `zfs recv`
	 * of a snapshot also needs tha actual fs that it has snapshotted
	 * unmounted, so we do this before receiving anything.
	 */
	zfsName := fmt.Sprintf("containers/%s", container.Name())
	err := s.zfsUnmount(zfsName)
	if err != nil {
		return err
	}

	for _, snap := range snapshots {
		// Create the database entry / empty snapshot first so the received
		// stream has something to attach to.
		args := snapshotProtobufToContainerArgs(container.Name(), snap)
		_, err := containerCreateEmptySnapshot(container.Daemon(), args)
		if err != nil {
			return err
		}

		wrapper := StorageProgressWriter(op, "fs_progress", snap.GetName())
		name := fmt.Sprintf("containers/%s@snapshot-%s", container.Name(), snap.GetName())
		if err := zfsRecv(name, wrapper); err != nil {
			return err
		}

		err = os.MkdirAll(shared.VarPath(fmt.Sprintf("snapshots/%s", container.Name())), 0700)
		if err != nil {
			return err
		}

		// Marker symlink indicating the snapshot lives on ZFS rather than
		// in the snapshots directory itself.
		err = os.Symlink("on-zfs", shared.VarPath(fmt.Sprintf("snapshots/%s/%s.zfs", container.Name(), snap.GetName())))
		if err != nil {
			return err
		}
	}

	// NOTE(review): this defer is registered only after the snapshot loop,
	// so an error while receiving snapshots skips the cleanup below —
	// confirm whether that is intentional.
	defer func() {
		/* clean up our migration-send snapshots that we got from recv. */
		zfsSnapshots, err := s.zfsListSnapshots(fmt.Sprintf("containers/%s", container.Name()))
		if err != nil {
			shared.LogError("failed listing snapshots post migration", log.Ctx{"err": err})
			return
		}

		for _, snap := range zfsSnapshots {
			// If we received a bunch of snapshots, remove the migration-send-* ones, if not, wipe any snapshot we got
			if snapshots != nil && len(snapshots) > 0 && !strings.HasPrefix(snap, "migration-send") {
				continue
			}

			s.zfsSnapshotDestroy(fmt.Sprintf("containers/%s", container.Name()), snap)
		}
	}()

	/* finally, do the real container */
	wrapper := StorageProgressWriter(op, "fs_progress", container.Name())
	if err := zfsRecv(zfsName, wrapper); err != nil {
		return err
	}

	if live {
		/* and again for the post-running snapshot if this was a live migration */
		wrapper := StorageProgressWriter(op, "fs_progress", container.Name())
		if err := zfsRecv(zfsName, wrapper); err != nil {
			return err
		}
	}

	/* Sometimes, zfs recv mounts this anyway, even if we pass -u
	 * (https://forums.freebsd.org/threads/zfs-receive-u-shouldnt-mount-received-filesystem-right.36844/)
	 * but sometimes it doesn't. Let's try to mount, but not complain about
	 * failure.
	 */
	s.zfsMount(zfsName)
	return nil
}
// createCmd registers an HTTP handler for the given API command under
// /<version>[/<name>] on the daemon's mux. The handler enforces the
// client trust policy, optionally tees JSON request bodies for debug
// logging, dispatches to the per-method callbacks, and renders the
// resulting Response.
func (d *Daemon) createCmd(version string, c Command) {
	var uri string
	if c.name == "" {
		uri = fmt.Sprintf("/%s", version)
	} else {
		uri = fmt.Sprintf("/%s/%s", version, c.name)
	}

	d.mux.HandleFunc(uri, func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")

		// Trust gate: trusted clients may use any method; untrusted
		// clients are only allowed where the command explicitly opts in
		// to untrusted GET or POST. Everything else is rejected.
		if d.isTrustedClient(r) {
			shared.LogDebug(
				"handling",
				log.Ctx{"method": r.Method, "url": r.URL.RequestURI(), "ip": r.RemoteAddr})
		} else if r.Method == "GET" && c.untrustedGet {
			shared.LogDebug(
				"allowing untrusted GET",
				log.Ctx{"url": r.URL.RequestURI(), "ip": r.RemoteAddr})
		} else if r.Method == "POST" && c.untrustedPost {
			shared.LogDebug(
				"allowing untrusted POST",
				log.Ctx{"url": r.URL.RequestURI(), "ip": r.RemoteAddr})
		} else {
			shared.LogWarn(
				"rejecting request from untrusted client",
				log.Ctx{"ip": r.RemoteAddr})
			Forbidden.Render(w)
			return
		}

		// In debug mode, tee the JSON body into two buffers: one replaces
		// r.Body so the handler can still read it, the other is dumped to
		// the debug log.
		if debug && r.Method != "GET" && isJSONRequest(r) {
			newBody := &bytes.Buffer{}
			captured := &bytes.Buffer{}
			multiW := io.MultiWriter(newBody, captured)
			if _, err := io.Copy(multiW, r.Body); err != nil {
				InternalError(err).Render(w)
				return
			}

			r.Body = shared.BytesReadCloser{Buf: newBody}
			shared.DebugJson(captured)
		}

		// Dispatch to the method-specific callback; commands that don't
		// implement a method fall through to NotImplemented.
		var resp Response
		resp = NotImplemented

		switch r.Method {
		case "GET":
			if c.get != nil {
				resp = c.get(d, r)
			}
		case "PUT":
			if c.put != nil {
				resp = c.put(d, r)
			}
		case "POST":
			if c.post != nil {
				resp = c.post(d, r)
			}
		case "DELETE":
			if c.delete != nil {
				resp = c.delete(d, r)
			}
		case "PATCH":
			if c.patch != nil {
				resp = c.patch(d, r)
			}
		default:
			resp = NotFound
		}

		// If rendering the response fails, fall back to rendering an
		// internal error; if even that fails, just log and give up.
		if err := resp.Render(w); err != nil {
			err := InternalError(err).Render(w)
			if err != nil {
				shared.LogErrorf("Failed writing error for error, giving up")
			}
		}

		/*
		 * When we create a new lxc.Container, it adds a finalizer (via
		 * SetFinalizer) that frees the struct. However, it sometimes
		 * takes the go GC a while to actually free the struct,
		 * presumably since it is a small amount of memory.
		 * Unfortunately, the struct also keeps the log fd open, so if
		 * we leave too many of these around, we end up running out of
		 * fds. So, let's explicitly do a GC to collect these at the
		 * end of each request.
		 */
		runtime.GC()
	})
}
// ImageDownload checks if we have that Image Fingerprint else
// downloads the image from a remote server.
//
// The alias is resolved to a fingerprint (via the simplestreams cache or
// the remote LXD server), concurrent downloads of the same fingerprint
// are coalesced through d.imagesDownloading, and the fetched tarball(s)
// are registered in the image database. Returns the resolved fingerprint.
func (d *Daemon) ImageDownload(op *operation, server string, protocol string, certificate string, secret string, alias string, forContainer bool, autoUpdate bool) (string, error) {
	var err error
	var ss *shared.SimpleStreams
	var ctxMap log.Ctx

	if protocol == "" {
		protocol = "lxd"
	}

	fp := alias

	// Expand aliases
	if protocol == "simplestreams" {
		imageStreamCacheLock.Lock()
		entry, _ := imageStreamCache[server]
		// Refresh the cache entry when missing or expired; on refresh
		// failure fall back to a stale entry if one exists.
		if entry == nil || entry.expiry.Before(time.Now()) {
			refresh := func() (*imageStreamCacheEntry, error) {
				// Setup simplestreams client
				ss, err = shared.SimpleStreamsClient(server, d.proxy)
				if err != nil {
					return nil, err
				}

				// Get all aliases
				aliases, err := ss.ListAliases()
				if err != nil {
					return nil, err
				}

				// Get all fingerprints
				images, err := ss.ListImages()
				if err != nil {
					return nil, err
				}

				fingerprints := []string{}
				for _, image := range images {
					fingerprints = append(fingerprints, image.Fingerprint)
				}

				// Generate cache entry
				entry = &imageStreamCacheEntry{ss: ss, Aliases: aliases, Fingerprints: fingerprints, expiry: time.Now().Add(time.Hour)}
				imageStreamCache[server] = entry
				imageSaveStreamCache()

				return entry, nil
			}

			newEntry, err := refresh()
			if err == nil {
				// Cache refreshed
				entry = newEntry
			} else if entry != nil {
				// Failed to fetch entry but existing cache
				shared.LogWarn("Unable to refresh cache, using stale entry", log.Ctx{"server": server})
				entry.expiry = time.Now().Add(time.Hour)
			} else {
				// Failed to fetch entry and nothing in cache
				imageStreamCacheLock.Unlock()
				return "", err
			}
		} else {
			shared.LogDebug("Using SimpleStreams cache entry", log.Ctx{"server": server, "expiry": entry.expiry})
			ss = entry.ss
		}
		imageStreamCacheLock.Unlock()

		// Expand aliases
		for _, alias := range entry.Aliases {
			if alias.Name != fp {
				continue
			}

			fp = alias.Target
			break
		}

		// Expand fingerprint (a short fingerprint prefix is expanded to
		// the first full fingerprint that matches it).
		for _, fingerprint := range entry.Fingerprints {
			if !strings.HasPrefix(fingerprint, fp) {
				continue
			}

			if fp == alias {
				alias = fingerprint
			}

			fp = fingerprint
			break
		}
	} else if protocol == "lxd" {
		// Ask the remote LXD to resolve the alias; best-effort — on
		// failure we keep the alias as the candidate fingerprint.
		target, err := remoteGetImageFingerprint(d, server, certificate, fp)
		if err == nil && target != "" {
			fp = target
		}
	}

	if _, _, err := dbImageGet(d.db, fp, false, false); err == nil {
		shared.LogDebug("Image already exists in the db", log.Ctx{"image": fp})
		// already have it
		return fp, nil
	}

	// Now check if we already downloading the image
	d.imagesDownloadingLock.RLock()
	if waitChannel, ok := d.imagesDownloading[fp]; ok {
		// We already download the image
		d.imagesDownloadingLock.RUnlock()

		shared.LogDebug(
			"Already downloading the image, waiting for it to succeed",
			log.Ctx{"image": fp})

		// Wait until the download finishes (channel closes)
		if _, ok := <-waitChannel; ok {
			shared.LogWarnf("Value transmitted over image lock semaphore?")
		}

		// The other download's success is determined by whether the
		// image made it into the database.
		if _, _, err := dbImageGet(d.db, fp, false, true); err != nil {
			shared.LogError(
				"Previous download didn't succeed",
				log.Ctx{"image": fp})

			return "", fmt.Errorf("Previous download didn't succeed")
		}

		shared.LogDebug(
			"Previous download succeeded",
			log.Ctx{"image": fp})

		return fp, nil
	}

	d.imagesDownloadingLock.RUnlock()

	if op == nil {
		ctxMap = log.Ctx{"alias": alias, "server": server}
	} else {
		ctxMap = log.Ctx{"trigger": op.url, "image": fp, "operation": op.id, "alias": alias, "server": server}
	}

	shared.LogInfo("Downloading image", ctxMap)

	// Add the download to the queue
	d.imagesDownloadingLock.Lock()
	d.imagesDownloading[fp] = make(chan bool)
	d.imagesDownloadingLock.Unlock()

	// Unlock once this func ends.
	defer func() {
		d.imagesDownloadingLock.Lock()
		if waitChannel, ok := d.imagesDownloading[fp]; ok {
			close(waitChannel)
			delete(d.imagesDownloading, fp)
		}
		d.imagesDownloadingLock.Unlock()
	}()

	exporturl := server

	var info shared.ImageInfo
	info.Fingerprint = fp

	destDir := shared.VarPath("images")
	destName := filepath.Join(destDir, fp)
	// Clear any leftover partial download for this fingerprint.
	if shared.PathExists(destName) {
		d.Storage.ImageDelete(fp)
	}

	// progress publishes download percentage/speed into the operation's
	// metadata (no-op when there is no operation).
	progress := func(progressInt int64, speedInt int64) {
		if op == nil {
			return
		}

		meta := op.metadata
		if meta == nil {
			meta = make(map[string]interface{})
		}

		progress := fmt.Sprintf("%d%% (%s/s)", progressInt, shared.GetByteSizeString(speedInt))

		if meta["download_progress"] != progress {
			meta["download_progress"] = progress
			op.UpdateMetadata(meta)
		}
	}

	if protocol == "lxd" {
		/* grab the metadata from /1.0/images/%s */
		var url string
		if secret != "" {
			url = fmt.Sprintf(
				"%s/%s/images/%s?secret=%s",
				server, shared.APIVersion, fp, secret)
		} else {
			url = fmt.Sprintf("%s/%s/images/%s", server, shared.APIVersion, fp)
		}

		resp, err := d.httpGetSync(url, certificate)
		if err != nil {
			shared.LogError(
				"Failed to download image metadata",
				log.Ctx{"image": fp, "err": err})

			return "", err
		}

		if err := json.Unmarshal(resp.Metadata, &info); err != nil {
			return "", err
		}

		/* now grab the actual file from /1.0/images/%s/export */
		if secret != "" {
			exporturl = fmt.Sprintf(
				"%s/%s/images/%s/export?secret=%s",
				server, shared.APIVersion, fp, secret)
		} else {
			exporturl = fmt.Sprintf(
				"%s/%s/images/%s/export",
				server, shared.APIVersion, fp)
		}
	} else if protocol == "simplestreams" {
		// Simplestreams delivers metadata and rootfs as separate files
		// and finishes here — the generic download path below is for the
		// lxd/direct protocols only.
		err := ss.Download(fp, "meta", destName, nil)
		if err != nil {
			return "", err
		}

		err = ss.Download(fp, "root", destName+".rootfs", progress)
		if err != nil {
			return "", err
		}

		info, err := ss.GetImageInfo(fp)
		if err != nil {
			return "", err
		}

		info.Public = false
		info.AutoUpdate = autoUpdate

		_, err = imageBuildFromInfo(d, *info)
		if err != nil {
			return "", err
		}

		if alias != fp {
			id, _, err := dbImageGet(d.db, fp, false, true)
			if err != nil {
				return "", err
			}

			err = dbImageSourceInsert(d.db, id, server, protocol, "", alias)
			if err != nil {
				return "", err
			}
		}

		shared.LogInfo("Image downloaded", ctxMap)

		if forContainer {
			return fp, dbImageLastAccessInit(d.db, fp)
		}

		return fp, nil
	}

	raw, err := d.httpGetFile(exporturl, certificate)
	if err != nil {
		shared.LogError(
			"Failed to download image",
			log.Ctx{"image": fp, "err": err})
		return "", err
	}
	info.Size = raw.ContentLength

	ctype, ctypeParams, err := mime.ParseMediaType(raw.Header.Get("Content-Type"))
	if err != nil {
		ctype = "application/octet-stream"
	}

	// Wrap the response body so reads feed the progress callback.
	body := &shared.ProgressReader{
		ReadCloser: raw.Body,
		Tracker: &shared.ProgressTracker{
			Length:  raw.ContentLength,
			Handler: progress,
		},
	}

	if ctype == "multipart/form-data" {
		// Split images arrive as two multipart parts: "metadata" first,
		// then "rootfs".
		// Parse the POST data
		mr := multipart.NewReader(body, ctypeParams["boundary"])

		// Get the metadata tarball
		part, err := mr.NextPart()
		if err != nil {
			shared.LogError(
				"Invalid multipart image",
				log.Ctx{"image": fp, "err": err})

			return "", err
		}

		if part.FormName() != "metadata" {
			shared.LogError(
				"Invalid multipart image",
				log.Ctx{"image": fp, "err": err})
			return "", fmt.Errorf("Invalid multipart image")
		}

		destName = filepath.Join(destDir, info.Fingerprint)
		f, err := os.Create(destName)
		if err != nil {
			shared.LogError(
				"Failed to save image",
				log.Ctx{"image": fp, "err": err})
			return "", err
		}

		_, err = io.Copy(f, part)
		f.Close()

		if err != nil {
			shared.LogError(
				"Failed to save image",
				log.Ctx{"image": fp, "err": err})
			return "", err
		}

		// Get the rootfs tarball
		part, err = mr.NextPart()
		if err != nil {
			shared.LogError(
				"Invalid multipart image",
				log.Ctx{"image": fp, "err": err})
			return "", err
		}

		if part.FormName() != "rootfs" {
			shared.LogError(
				"Invalid multipart image",
				log.Ctx{"image": fp})
			return "", fmt.Errorf("Invalid multipart image")
		}

		destName = filepath.Join(destDir, info.Fingerprint+".rootfs")
		f, err = os.Create(destName)
		if err != nil {
			shared.LogError(
				"Failed to save image",
				log.Ctx{"image": fp, "err": err})
			return "", err
		}

		_, err = io.Copy(f, part)
		f.Close()

		if err != nil {
			shared.LogError(
				"Failed to save image",
				log.Ctx{"image": fp, "err": err})
			return "", err
		}
	} else {
		// Unified image: single tarball containing everything.
		destName = filepath.Join(destDir, info.Fingerprint)

		f, err := os.Create(destName)
		if err != nil {
			shared.LogError(
				"Failed to save image",
				log.Ctx{"image": fp, "err": err})
			return "", err
		}

		_, err = io.Copy(f, body)
		f.Close()

		if err != nil {
			shared.LogError(
				"Failed to save image",
				log.Ctx{"image": fp, "err": err})
			return "", err
		}
	}

	if protocol == "direct" {
		// Direct downloads carry no LXD metadata; extract it from the
		// tarball itself.
		imageMeta, err := getImageMetadata(destName)
		if err != nil {
			return "", err
		}

		info.Architecture = imageMeta.Architecture
		info.CreationDate = time.Unix(imageMeta.CreationDate, 0)
		info.ExpiryDate = time.Unix(imageMeta.ExpiryDate, 0)
		info.Properties = imageMeta.Properties
	}

	// By default, make all downloaded images private
	info.Public = false

	if alias != fp && secret == "" {
		info.AutoUpdate = autoUpdate
	}

	_, err = imageBuildFromInfo(d, info)
	if err != nil {
		shared.LogError(
			"Failed to create image",
			log.Ctx{"image": fp, "err": err})

		return "", err
	}

	// Record where the image came from so auto-update can refresh it.
	if alias != fp {
		id, _, err := dbImageGet(d.db, fp, false, true)
		if err != nil {
			return "", err
		}

		err = dbImageSourceInsert(d.db, id, server, protocol, "", alias)
		if err != nil {
			return "", err
		}
	}

	shared.LogInfo("Image downloaded", ctxMap)

	if forContainer {
		return fp, dbImageLastAccessInit(d.db, fp)
	}

	return fp, nil
}
func autoUpdateImages(d *Daemon) { shared.LogInfof("Updating images") images, err := dbImagesGet(d.db, false) if err != nil { shared.LogError("Unable to retrieve the list of images", log.Ctx{"err": err}) return } for _, fp := range images { id, info, err := dbImageGet(d.db, fp, false, true) if err != nil { shared.LogError("Error loading image", log.Ctx{"err": err, "fp": fp}) continue } if !info.AutoUpdate { continue } _, source, err := dbImageSourceGet(d.db, id) if err != nil { continue } shared.LogDebug("Processing image", log.Ctx{"fp": fp, "server": source.Server, "protocol": source.Protocol, "alias": source.Alias}) hash, err := d.ImageDownload(nil, source.Server, source.Protocol, "", "", source.Alias, false, true) if hash == fp { shared.LogDebug("Already up to date", log.Ctx{"fp": fp}) continue } else if err != nil { shared.LogError("Failed to update the image", log.Ctx{"err": err, "fp": fp}) continue } newId, _, err := dbImageGet(d.db, hash, false, true) if err != nil { shared.LogError("Error loading image", log.Ctx{"err": err, "fp": hash}) continue } err = dbImageLastAccessUpdate(d.db, hash, info.LastUsedDate) if err != nil { shared.LogError("Error setting last use date", log.Ctx{"err": err, "fp": hash}) continue } err = dbImageAliasesMove(d.db, id, newId) if err != nil { shared.LogError("Error moving aliases", log.Ctx{"err": err, "fp": hash}) continue } err = doDeleteImage(d, fp) if err != nil { shared.LogError("Error deleting image", log.Ctx{"err": err, "fp": fp}) } } shared.LogInfof("Done updating images") }
func createFromCopy(d *Daemon, req *containerPostReq) Response { if req.Source.Source == "" { return BadRequest(fmt.Errorf("must specify a source container")) } source, err := containerLoadByName(d, req.Source.Source) if err != nil { return SmartError(err) } // Config override sourceConfig := source.LocalConfig() if req.Config == nil { req.Config = make(map[string]string) } for key, value := range sourceConfig { if len(key) > 8 && key[0:8] == "volatile" && key[9:] != "base_image" { shared.LogDebug("Skipping volatile key from copy source", log.Ctx{"key": key}) continue } _, exists := req.Config[key] if exists { continue } req.Config[key] = value } // Profiles override if req.Profiles == nil { req.Profiles = source.Profiles() } args := containerArgs{ Architecture: source.Architecture(), BaseImage: req.Source.BaseImage, Config: req.Config, Ctype: cTypeRegular, Devices: source.LocalDevices(), Ephemeral: req.Ephemeral, Name: req.Name, Profiles: req.Profiles, } run := func(op *operation) error { _, err := containerCreateAsCopy(d, args, source) if err != nil { return err } return nil } resources := map[string][]string{} resources["containers"] = []string{req.Name, req.Source.Source} op, err := operationCreate(operationClassTask, resources, nil, run, nil, nil) if err != nil { return InternalError(err) } return OperationResponse(op) }