Example #1
// REST: /pipelines
func (s *HttpServer) CreatePipeline(resp http.ResponseWriter, req *http.Request) {
	body, err := ioutil.ReadAll(req.Body)
	if err != nil {
		log.Warnf("Failed to read request body : %v", err)
		http.Error(resp, err.Error(), http.StatusInternalServerError)
		return
	}
	log.Println(string(body))
	var pipeline structs.Pipeline
	if err := yaml.Unmarshal(body, &pipeline); err != nil {
		log.Warnf("Failed to unmarshal request : %v", err)
		http.Error(resp, err.Error(), http.StatusBadRequest)
		return
	}
	if err := req.ParseForm(); err != nil {
		log.Warnf("Failed to parse form: %v", err)
		http.Error(resp, err.Error(), http.StatusInternalServerError)
		return
	}
	err1 := s.db.Update(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte("pipelines"))
		log.Printf("Creating pipeline: %s", pipeline.Name)
		return b.Put([]byte(pipeline.Name), body)
	})
	if err1 != nil {
		log.Warnf("Failed to create pipeline: %v", err1)
		http.Error(resp, err1.Error(), http.StatusInternalServerError)
		return
	}
}
Example #2
func (m DefaultManager) uploadUsage() {
	id := "anon"
	ifaces, err := net.Interfaces()
	if err == nil {
		for _, iface := range ifaces {
			if iface.Name != "lo" {
				hw := iface.HardwareAddr.String()
				id = strings.Replace(hw, ":", "", -1)
				break
			}
		}
	}
	usage := &shipyard.Usage{
		ID:      id,
		Version: version.Version,
	}
	b, err := json.Marshal(usage)
	if err != nil {
		log.Warnf("error serializing usage info: %s", err)
		// Nothing useful to send if serialization failed; don't post an empty body.
		return
	}
	buf := bytes.NewBuffer(b)
	if _, err := http.Post(fmt.Sprintf("%s/update", trackerHost), "application/json", buf); err != nil {
		log.Warnf("error sending usage info: %s", err)
	}
}
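The function above builds an anonymous ID by taking the first interface whose name is not "lo" and stripping the colons from its MAC address. Below is a minimal, self-contained sketch of the same idea; note that it filters on the loopback flag and an empty hardware address instead of the interface name, which is my own substitution rather than what the code above does.

package main

import (
	"fmt"
	"net"
	"strings"
)

// anonID returns the first non-loopback MAC address with the colons stripped,
// or "anon" if no usable interface is found.
func anonID() string {
	id := "anon"
	ifaces, err := net.Interfaces()
	if err != nil {
		return id
	}
	for _, iface := range ifaces {
		if iface.Flags&net.FlagLoopback != 0 || len(iface.HardwareAddr) == 0 {
			continue
		}
		id = strings.Replace(iface.HardwareAddr.String(), ":", "", -1)
		break
	}
	return id
}

func main() {
	fmt.Println(anonID())
}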
Example #3
func (c *controller) cleanupLocalEndpoints() {
	nl, err := c.getNetworksForScope(datastore.LocalScope)
	if err != nil {
		log.Warnf("Could not get list of networks during endpoint cleanup: %v", err)
		return
	}

	for _, n := range nl {
		epl, err := n.getEndpointsFromStore()
		if err != nil {
			log.Warnf("Could not get list of endpoints in network %s during endpoint cleanup: %v", n.name, err)
			continue
		}

		for _, ep := range epl {
			log.Infof("Removing stale endpoint %s (%s)", ep.name, ep.id)
			if err := ep.Delete(true); err != nil {
				log.Warnf("Could not delete local endpoint %s during endpoint cleanup: %v", ep.name, err)
			}
		}

		epl, err = n.getEndpointsFromStore()
		if err != nil {
			log.Warnf("Could not get list of endpoints in network %s for count update: %v", n.name, err)
			continue
		}

		epCnt := n.getEpCnt().EndpointCnt()
		if epCnt != uint64(len(epl)) {
			log.Infof("Fixing inconsistent endpoint_cnt for network %s. Expected=%d, Actual=%d", n.name, len(epl), epCnt)
			n.getEpCnt().setCnt(uint64(len(epl)))
		}
	}
}
Example #4
func getRootUserDetails(readPasswdCmd []string) (uid, gid, shell string) {
	uid = "0"
	gid = "0"
	shell = "/bin/sh"

	cmd := exec.Command(readPasswdCmd[0], readPasswdCmd[1:]...)
	cmdBuffer := &bytes.Buffer{}
	cmd.Stdout = cmdBuffer
	if err := cmd.Run(); err != nil {
		log.Warnf(
			"getRootUserDetails(): error running read passwd command %q: %s",
			strings.Join(readPasswdCmd, " "),
			err,
		)
		return
	}

	entries, err := passwd.ParseReader(cmdBuffer)
	if err != nil {
		log.Warnf("getRootUserDetails(): error parsing passwd: %s", err)
		return
	}

	entry, ok := entries["root"]
	if !ok {
		log.Warnf("getRootUserDetails(): no root entry in passwd")
		return
	}

	return entry.Uid, entry.Gid, entry.Shell
}
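A standalone sketch of the same pattern follows: run a command, capture its stdout in a bytes.Buffer, and pick the root entry out of passwd-formatted output. It assumes plain /etc/passwd text read via a hypothetical "cat /etc/passwd" command and splits the columns by hand instead of using the project's passwd.ParseReader helper.

package main

import (
	"bufio"
	"bytes"
	"fmt"
	"os/exec"
	"strings"
)

// rootUserDetails returns the uid, gid and shell of the root entry,
// falling back to safe defaults when anything goes wrong.
func rootUserDetails() (uid, gid, shell string) {
	uid, gid, shell = "0", "0", "/bin/sh"

	cmd := exec.Command("cat", "/etc/passwd") // hypothetical readPasswdCmd
	out := &bytes.Buffer{}
	cmd.Stdout = out
	if err := cmd.Run(); err != nil {
		return
	}

	sc := bufio.NewScanner(out)
	for sc.Scan() {
		// /etc/passwd layout: name:password:uid:gid:gecos:home:shell
		fields := strings.Split(sc.Text(), ":")
		if len(fields) >= 7 && fields[0] == "root" {
			return fields[2], fields[3], fields[6]
		}
	}
	return
}

func main() {
	fmt.Println(rootUserDetails())
}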
Example #5
func (l *Layer0) Remove(id string) error {
	if !l.isLayer0(id) {
		return l.Driver.Remove(l.realID(id))
	}
	l.Lock()
	defer l.Unlock()
	var err error
	v, ok := l.volumes[id]

	if ok {
		atomic.AddInt32(&v.ref, -1)
		if v.ref == 0 {
			// Save the upper dir and blow away the rest.
			upperDir := path.Join(path.Join(l.home, l.realID(id)), "upper")
			// Assign to the outer err (no ":=") so failures can reach the caller.
			err = os.Rename(upperDir, path.Join(v.path, "upper"))
			if err != nil {
				log.Warnf("Failed in rename(%v): %v", id, err)
			}
			l.Driver.Remove(l.realID(id))
			err = l.volDriver.Unmount(v.volumeID, v.path)
			if l.volDriver.Type()&api.Block != 0 {
				_ = l.volDriver.Detach(v.volumeID)
			}
			err = os.RemoveAll(v.path)
			delete(l.volumes, v.id)
		}
	} else {
		log.Warnf("Failed to find layer0 vol for id %v", id)
	}
	return err
}
Example #6
// adaptContainerSettings is called during container creation to modify any
// settings necessary in the HostConfig structure.
func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error {
	if adjustCPUShares && hostConfig.CPUShares > 0 {
		// Handle unsupported CPUShares
		if hostConfig.CPUShares < linuxMinCPUShares {
			logrus.Warnf("Changing requested CPUShares of %d to minimum allowed of %d", hostConfig.CPUShares, linuxMinCPUShares)
			hostConfig.CPUShares = linuxMinCPUShares
		} else if hostConfig.CPUShares > linuxMaxCPUShares {
			logrus.Warnf("Changing requested CPUShares of %d to maximum allowed of %d", hostConfig.CPUShares, linuxMaxCPUShares)
			hostConfig.CPUShares = linuxMaxCPUShares
		}
	}
	if hostConfig.Memory > 0 && hostConfig.MemorySwap == 0 {
		// By default, MemorySwap is set to twice the size of Memory.
		hostConfig.MemorySwap = hostConfig.Memory * 2
	}
	if hostConfig.ShmSize == 0 {
		hostConfig.ShmSize = container.DefaultSHMSize
	}
	var err error
	opts, err := daemon.generateSecurityOpt(hostConfig.IpcMode, hostConfig.PidMode, hostConfig.Privileged)
	if err != nil {
		return err
	}
	hostConfig.SecurityOpt = append(hostConfig.SecurityOpt, opts...)
	if hostConfig.MemorySwappiness == nil {
		defaultSwappiness := int64(-1)
		hostConfig.MemorySwappiness = &defaultSwappiness
	}
	if hostConfig.OomKillDisable == nil {
		defaultOomKillDisable := false
		hostConfig.OomKillDisable = &defaultOomKillDisable
	}

	return nil
}
Example #7
// New returns a new SysInfo, using the filesystem to detect which features the kernel supports.
func New(quiet bool) *SysInfo {
	sysInfo := &SysInfo{}
	if cgroupMemoryMountpoint, err := cgroups.FindCgroupMountpoint("memory"); err != nil {
		if !quiet {
			logrus.Warnf("Your kernel does not support cgroup memory limit: %v", err)
		}
	} else {
		// If memory cgroup is mounted, MemoryLimit is always enabled.
		sysInfo.MemoryLimit = true

		_, err1 := ioutil.ReadFile(path.Join(cgroupMemoryMountpoint, "memory.memsw.limit_in_bytes"))
		sysInfo.SwapLimit = err1 == nil
		if !sysInfo.SwapLimit && !quiet {
			logrus.Warn("Your kernel does not support swap memory limit.")
		}

		_, err = ioutil.ReadFile(path.Join(cgroupMemoryMountpoint, "memory.oom_control"))
		sysInfo.OomKillDisable = err == nil
		if !sysInfo.OomKillDisable && !quiet {
			logrus.Warnf("Your kernel does not support oom control.")
		}
	}

	if cgroupCpuMountpoint, err := cgroups.FindCgroupMountpoint("cpu"); err != nil {
		if !quiet {
			logrus.Warnf("%v", err)
		}
	} else {
		_, err1 := ioutil.ReadFile(path.Join(cgroupCpuMountpoint, "cpu.cfs_quota_us"))
		sysInfo.CpuCfsQuota = err1 == nil
		if !sysInfo.CpuCfsQuota && !quiet {
			logrus.Warn("Your kernel does not support cgroup cfs quotas")
		}
	}

	// Check if ipv4_forward is disabled.
	if data, err := ioutil.ReadFile("/proc/sys/net/ipv4/ip_forward"); os.IsNotExist(err) {
		sysInfo.IPv4ForwardingDisabled = true
	} else {
		if enabled, _ := strconv.Atoi(strings.TrimSpace(string(data))); enabled == 0 {
			sysInfo.IPv4ForwardingDisabled = true
		} else {
			sysInfo.IPv4ForwardingDisabled = false
		}
	}

	// Check if AppArmor is supported.
	if _, err := os.Stat("/sys/kernel/security/apparmor"); os.IsNotExist(err) {
		sysInfo.AppArmor = false
	} else {
		sysInfo.AppArmor = true
	}

	// Check if Devices cgroup is mounted, it is hard requirement for container security.
	if _, err := cgroups.FindCgroupMountpoint("devices"); err != nil {
		logrus.Fatalf("Error mounting devices cgroup: %v", err)
	}

	return sysInfo
}
Example #8
func setupIPv6Forwarding(config *networkConfiguration, i *bridgeInterface) error {
	// Get current IPv6 default forwarding setup
	ipv6ForwardDataDefault, err := ioutil.ReadFile(ipv6ForwardConfDefault)
	if err != nil {
		return fmt.Errorf("Cannot read IPv6 default forwarding setup: %v", err)
	}
	// Enable IPv6 default forwarding only if it is not already enabled
	if ipv6ForwardDataDefault[0] != '1' {
		if err := ioutil.WriteFile(ipv6ForwardConfDefault, []byte{'1', '\n'}, ipv6ForwardConfPerm); err != nil {
			logrus.Warnf("Unable to enable IPv6 default forwarding: %v", err)
		}
	}

	// Get current IPv6 all forwarding setup
	ipv6ForwardDataAll, err := ioutil.ReadFile(ipv6ForwardConfAll)
	if err != nil {
		return fmt.Errorf("Cannot read IPv6 all forwarding setup: %v", err)
	}
	// Enable IPv6 all forwarding only if it is not already enabled
	if ipv6ForwardDataAll[0] != '1' {
		if err := ioutil.WriteFile(ipv6ForwardConfAll, []byte{'1', '\n'}, ipv6ForwardConfPerm); err != nil {
			logrus.Warnf("Unable to enable IPv6 all forwarding: %v", err)
		}
	}

	return nil
}
Example #9
// New creates a new instance of network controller.
func New(configFile string) (NetworkController, error) {
	c := &controller{
		networks:  networkTable{},
		sandboxes: sandboxTable{},
		drivers:   driverTable{}}
	if err := initDrivers(c); err != nil {
		return nil, err
	}

	if err := c.initConfig(configFile); err == nil {
		if err := c.initDataStore(); err != nil {
			// Failing to initialize the datastore is a bad situation to be in,
			// but it must not prevent the controller from being created.
			log.Warnf("Failed to Initialize Datastore due to %v. Operating in non-clustered mode", err)
		}
		if err := c.initDiscovery(); err != nil {
			// Failing to initialize discovery is a bad situation to be in,
			// but it must not prevent the controller from being created.
			log.Warnf("Failed to Initialize Discovery : %v", err)
		}
	} else {
		// A missing configuration file is not a failure scenario,
		// but without it the datastore cannot be initialized.
		log.Debugf("Unable to Parse LibNetwork Config file : %v", err)
	}

	return c, nil
}
Example #10
func UpdateGitHub(gh *github.Client, cache *Cache) {
	for {
		logrus.Infof("Fetching GitHub...")
		for cache.Manifest == nil || len(cache.Manifest.Images) == 0 {
			time.Sleep(time.Second)
		}
		changes := 0
		for _, image := range cache.Manifest.Images {
			if _, found := cache.GithubRepos[image.RepoPath()]; !found {
				repo, err := image.GithubGetRepo(gh)
				if err != nil {
					logrus.Warnf("Failed to fetch repo %q: %v", image.RepoPath(), err)
				} else {
					cache.GithubRepos[image.RepoPath()] = repo
					changes++
				}
			}
			if _, found := cache.GithubLastRefs[image.RepoPath()]; !found {
				ref, err := image.GithubGetLastRef(gh)
				if err != nil {
					logrus.Warnf("Failed to fetch repo last reference %q: %v", image.RepoPath(), err)
				} else {
					cache.GithubLastRefs[image.RepoPath()] = ref
					changes++
				}
			}
		}
		if changes > 0 {
			cache.MapImages()
		}
		time.Sleep(5 * time.Minute)
	}
}
Example #11
// Write a set of annotations associated with a piece of content. Any annotations
// already there will be removed.
func (s service) Write(contentUUID string, thing interface{}) (err error) {
	annotationsToWrite := thing.(annotations)

	if contentUUID == "" {
		return errors.New("Content uuid is required")
	}
	if err := validateAnnotations(&annotationsToWrite); err != nil {
		log.Warnf("Validation of supplied annotations failed")
		return err
	}

	if len(annotationsToWrite) == 0 {
		log.Warnf("No new annotations supplied for content uuid: %s", contentUUID)
	}

	queries := append([]*neoism.CypherQuery{}, dropAllAnnotationsQuery(contentUUID, s.platformVersion))

	var statements = []string{}
	for _, annotationToWrite := range annotationsToWrite {
		query, err := createAnnotationQuery(contentUUID, annotationToWrite, s.platformVersion)
		if err != nil {
			return err
		}
		statements = append(statements, query.Statement)
		queries = append(queries, query)
	}
	log.Infof("Updated Annotations for content uuid: %s", contentUUID)
	log.Debugf("For update, ran statements: %+v", statements)

	return s.conn.CypherBatch(queries)
}
Example #12
func bridgeCleanup(config *networkConfiguration, logErr bool) {
	var err error

	bridgeName := config.BridgeName
	tableName := "bridge_nw_subnets"
	gwName := fmt.Sprintf("%s_gw0", bridgeName)
	gwIP := config.AddressIPv4.String()
	pfAnchor := fmt.Sprintf("_auto/docker/%s", bridgeName)
	tableAnchor := fmt.Sprintf("_auto/docker/%s", tableName)

	err = exec.Command("/usr/sbin/pfctl", "-a", pfAnchor, "-F", "all").Run()
	if err != nil && logErr {
		logrus.Warnf("cannot flush firewall rules")
	}
	err = exec.Command("/usr/sbin/ifconfig", gwName, "unplumb").Run()
	if err != nil && logErr {
		logrus.Warnf("cannot remove gateway interface")
	}
	err = exec.Command("/usr/sbin/dladm", "delete-vnic",
		"-t", gwName).Run()
	if err != nil && logErr {
		logrus.Warnf("cannot delete vnic")
	}
	err = exec.Command("/usr/sbin/dladm", "delete-etherstub",
		"-t", config.BridgeNameInternal).Run()
	if err != nil && logErr {
		logrus.Warnf("cannot delete etherstub")
	}
	err = exec.Command("/usr/sbin/pfctl", "-a", tableAnchor, "-t", tableName, "-T", "delete", gwIP).Run()
	if err != nil && logErr {
		logrus.Warnf("cannot remove bridge network '%s' from PF table", bridgeName)
	}
}
Example #13
func (r *remote) getLastEventTimestamp() time.Time {
	t := time.Now()

	fi, err := os.Stat(r.eventTsPath)
	// Treat any stat failure (not just "does not exist") as "no stored timestamp";
	// checking err first also avoids dereferencing a nil fi.
	if err != nil || fi.Size() == 0 {
		return t
	}

	f, err := os.Open(r.eventTsPath)
	if err != nil {
		logrus.Warnf("libcontainerd: Unable to access last event ts: %v", err)
		return t
	}
	defer f.Close()

	b := make([]byte, fi.Size())
	n, err := f.Read(b)
	if err != nil || n != len(b) {
		logrus.Warnf("libcontainerd: Unable to read last event ts: %v", err)
		return t
	}

	t.UnmarshalText(b)

	return t
}
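The fallback-to-now behaviour above leans on time.Time's text round-trip. Here is a tiny sketch of that round-trip on its own, with an illustrative file path standing in for r.eventTsPath.

package main

import (
	"fmt"
	"io/ioutil"
	"time"
)

func main() {
	path := "/tmp/event.ts" // illustrative stand-in for r.eventTsPath

	// time.Time implements encoding.TextMarshaler, so it serializes as RFC 3339 text.
	b, err := time.Now().MarshalText()
	if err == nil {
		_ = ioutil.WriteFile(path, b, 0600)
	}

	// Read it back; on any failure keep "now", mirroring the fallback above.
	t := time.Now()
	if data, err := ioutil.ReadFile(path); err == nil {
		if uerr := t.UnmarshalText(data); uerr != nil {
			t = time.Now()
		}
	}
	fmt.Println(t)
}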
Example #14
// adaptContainerSettings is called during container creation to modify any
// settings necessary in the HostConfig structure.
func (daemon *Daemon) adaptContainerSettings(hostConfig *runconfig.HostConfig, adjustCPUShares bool) error {
	if adjustCPUShares && hostConfig.CPUShares > 0 {
		// Handle unsupported CPUShares
		if hostConfig.CPUShares < linuxMinCPUShares {
			logrus.Warnf("Changing requested CPUShares of %d to minimum allowed of %d", hostConfig.CPUShares, linuxMinCPUShares)
			hostConfig.CPUShares = linuxMinCPUShares
		} else if hostConfig.CPUShares > linuxMaxCPUShares {
			logrus.Warnf("Changing requested CPUShares of %d to maximum allowed of %d", hostConfig.CPUShares, linuxMaxCPUShares)
			hostConfig.CPUShares = linuxMaxCPUShares
		}
	}
	if hostConfig.Memory > 0 && hostConfig.MemorySwap == 0 {
		// By default, MemorySwap is set to twice the size of Memory.
		hostConfig.MemorySwap = hostConfig.Memory * 2
	}
	if hostConfig.ShmSize == nil {
		shmSize := container.DefaultSHMSize
		hostConfig.ShmSize = &shmSize
	}
	var err error
	if hostConfig.SecurityOpt == nil {
		hostConfig.SecurityOpt, err = daemon.generateSecurityOpt(hostConfig.IpcMode, hostConfig.PidMode)
		if err != nil {
			return err
		}
	}
	if hostConfig.MemorySwappiness == nil {
		defaultSwappiness := int64(-1)
		hostConfig.MemorySwappiness = &defaultSwappiness
	}

	return nil
}
Example #15
func (p peer) handleIncoming() {
	defer p.isRunning.Done()
	for {
		var msg map[string]interface{}
		err := p.conn.ReadJSON(&msg)
		if err != nil {
			logrus.Warnf("%s|%s (%s) disconnected: %s", p.remote, p.component.Name, p.component.Type, err)
			p.conn.Close()
			p.component.StateSink <- fmt.Errorf("Connection lost")
			return
		}
		logrus.Infof("%s|%s (%s) sent: %s", p.remote, p.component.Name, p.component.Type, msg)

		switch msg["topic"] {
		case "state_changed":
			p.component.StateSink <- msg["state"]

		case "register_service":
			err = p.handleServiceRegistration(msg)
			if err != nil {
				logrus.Warnf("Component: %s failed to register a service call: %s", p.component.URN(), err)
			}

		default:
			logrus.Warnf("%s|%s (%s) sent bogus topic: %s", p.remote, p.component.Name, p.component.Type, msg["topic"])
		}
	}
}
Example #16
func byteArrayToSubnets(ba []byte) map[subnetKey]*SubnetInfo {
	m := map[subnetKey]*SubnetInfo{}

	if ba == nil || len(ba) == 0 {
		return m
	}

	var mm map[string]string
	err := json.Unmarshal(ba, &mm)
	if err != nil {
		log.Warnf("Failed to decode subnets byte array: %v", err)
		return m
	}
	for ks, vs := range mm {
		sk := subnetKey{}
		if err := sk.FromString(ks); err != nil {
			log.Warnf("Failed to decode subnets map entry: (%s, %s)", ks, vs)
			continue
		}
		si := &SubnetInfo{}
		_, nw, err := net.ParseCIDR(vs)
		if err != nil {
			log.Warnf("Failed to decode subnets map entry value: (%s, %s)", ks, vs)
			continue
		}
		si.Subnet = nw
		m[sk] = si
	}
	return m
}
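A self-contained sketch of the same decode path, assuming the stored bytes are a JSON object mapping arbitrary keys to CIDR strings; entries that fail to parse are skipped, as above.

package main

import (
	"encoding/json"
	"fmt"
	"net"
)

func main() {
	raw := []byte(`{"eth0": "10.0.0.0/24", "bad": "not-a-cidr"}`)

	var mm map[string]string
	if err := json.Unmarshal(raw, &mm); err != nil {
		fmt.Println("failed to decode subnets:", err)
		return
	}

	subnets := map[string]*net.IPNet{}
	for k, v := range mm {
		_, nw, err := net.ParseCIDR(v)
		if err != nil {
			fmt.Printf("skipping entry (%s, %s): %v\n", k, v, err)
			continue
		}
		subnets[k] = nw
	}
	fmt.Println(subnets)
}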
Example #17
// init compiles checks and sanitizes the condition before returning.
func (c *Condition) init(groupBy *event.TagSet) {
	c.checks = c.compileChecks()
	c.eventTrackers = make(map[string]*eventTracker)

	// fixes an issue where occurrences are hit even when the event doesn't satisfy the condition
	if c.Occurences < 1 {
		logrus.Warnf("Occurences must be > 1. %d given. Occurences for this condition will be set to 1.", c.Occurences)
		c.Occurences = 1
	}

	// if we have no trackers already, make an empty map of them
	if c.eventTrackers == nil {
		c.eventTrackers = make(map[string]*eventTracker)
	}

	// WindowSize must be at least 2; at least one piece of data is needed for historical checks.
	if c.WindowSize < 2 {
		logrus.Warnf("WindowSize must be >= 2. %d given. Window size for this condition will be set to %d", c.WindowSize, DEFAULT_WINDOW_SIZE)
		c.WindowSize = DEFAULT_WINDOW_SIZE
	}

	// decide which tracking method we will use
	c.trackFunc = getTrackingFunc(c)

	c.ready = true
}
Example #18
func (c *containerBase) kill(ctx context.Context) error {
	// make sure we have vm
	if c.vm == nil {
		return NotYetExistError{c.ExecConfig.ID}
	}

	wait := 10 * time.Second // default
	sig := string(ssh.SIGKILL)
	log.Infof("sending kill -%s %s", sig, c.ExecConfig.ID)

	err := c.startGuestProgram(ctx, "kill", sig)
	if err == nil {
		log.Infof("waiting %s for %s to power off", wait, c.ExecConfig.ID)
		timeout, err := c.waitForPowerState(ctx, wait, types.VirtualMachinePowerStatePoweredOff)
		if err == nil {
			return nil // VM has powered off
		}

		if timeout {
			log.Warnf("timeout (%s) waiting for %s to power off via SIG%s", wait, c.ExecConfig.ID, sig)
		}
	}

	if err != nil {
		log.Warnf("killing %s attempt resulted in: %s", c.ExecConfig.ID, err)
	}

	log.Warnf("killing %s via hard power off", c.ExecConfig.ID)

	return c.poweroff(ctx)
}
Example #19
// RealAPIContext returns a CommandContext with a configured API
func RealAPIContext() *CommandContext {
	if os.Getenv("TEST_WITH_REAL_API") == "0" {
		return nil
	}
	config, err := config.GetConfig()
	if err != nil {
		logrus.Warnf("RealAPIContext: failed to call config.GetConfig(): %v", err)
		return nil
	}

	apiClient, err := api.NewScalewayAPI(config.Organization, config.Token, scwversion.UserAgent(), "par1")
	if err != nil {
		logrus.Warnf("RealAPIContext: failed to call api.NewScalewayAPI(): %v", err)
		return nil
	}

	stdout := bytes.Buffer{}
	stderr := bytes.Buffer{}

	ctx := CommandContext{
		Streams: Streams{
			Stdin:  os.Stdin,
			Stdout: &stdout,
			Stderr: &stderr,
		},
		Env: []string{
			"HOME" + os.Getenv("HOME"),
		},
		RawArgs: []string{},
		API:     apiClient,
	}
	return &ctx
}
Example #20
func procesImage(path string, f os.FileInfo, err error) error {
	if f.IsDir() {
		return nil
	}

	log.Debugf("Processing %s", path)

	extension := filepath.Ext(f.Name())
	if !isSupportPhotoType(strings.ToLower(extension)) {
		log.Warnf("%s's file type %s is unsupported", path, extension)
		return nil
	}

	reader := exif.New()

	err = reader.Open(path)
	if err != nil {
		log.Fatal(err)
	}

	str := fmt.Sprintf("%s", reader.Tags["Date and Time"])
	t := f.ModTime()

	if len(str) == 0 {
		log.Warnf("Date and Time EXIF tag missing for %s", path)
	} else {
		layout := "2006:01:02 15:04:05"
		t, err = time.Parse(layout, str)
		if err != nil {
			log.Fatal(err)
		}
	}

	newDir := fmt.Sprintf("%s/%4d/%02d/%02d", destPath, t.Year(), t.Month(), t.Day())

	err = os.MkdirAll(newDir, 0777)
	if err != nil {
		log.Fatal(err)
	}

	newFile := fmt.Sprintf("%s/%s", newDir, f.Name())

	if mode == "move" {
		log.Debugf("Moving %s %s", path, newFile)
		err = os.Rename(path, newFile)
	} else {
		if _, err := os.Stat(newFile); err == nil {
			log.Warnf("Photo %s already exists", newFile)
		} else {
			log.Debugf("Copying %s %s", path, newFile)
			err = copyFile(path, newFile)
		}
	}

	if err != nil {
		log.Fatal(err)
	}

	return nil
}
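The date handling above hinges on the EXIF reference layout "2006:01:02 15:04:05" and the year/month/day directory format. A minimal sketch of just that piece, with destPath as an illustrative stand-in for the package-level variable:

package main

import (
	"fmt"
	"time"
)

func main() {
	destPath := "/photos"                 // illustrative destination root
	const layout = "2006:01:02 15:04:05" // EXIF "Date and Time" reference layout

	t, err := time.Parse(layout, "2017:06:03 14:31:07")
	if err != nil {
		t = time.Now() // fall back when the tag is missing or malformed
	}

	newDir := fmt.Sprintf("%s/%4d/%02d/%02d", destPath, t.Year(), t.Month(), t.Day())
	fmt.Println(newDir) // /photos/2017/06/03
}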
Example #21
func doWithRetry(reqS requestSupplier, retries int) (*http.Response, error) {
	req, err := reqS()
	if err != nil {
		return nil, err
	}
	resp, err := http.DefaultClient.Do(req)
	if err == nil && resp.StatusCode == 429 {
		if retries < maxRetries {
			// too many requests, retry if we can after a timeout
			// the timeout is in the Retry-After and it is in number of seconds
			timeout, err := strconv.Atoi(resp.Header.Get("Retry-After"))
			if err != nil {
				return nil, err
			}
			resp.Body.Close()
			if timeout > 60 {
				log.Warnf("Long timeout of %d seconds for url %s. Giving up.", timeout, req.URL.String())
				return resp, err
			}
			time.Sleep(time.Duration(timeout) * time.Second)
			return doWithRetry(reqS, retries+1)
		} else {
			log.Warnf("Too many retries. Giving up %s", req.URL.String())
			return resp, err
		}
	}
	return resp, err
}
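A standalone sketch of the retry-on-429 idea follows. It uses a plain http.Get with an illustrative URL and retry budget rather than the requestSupplier mechanism above, but the back-off logic (honour Retry-After in seconds, give up on long waits or an exhausted budget) is the same.

package main

import (
	"fmt"
	"net/http"
	"strconv"
	"time"
)

const maxRetries = 3 // illustrative retry budget

// getWithRetry retries a GET on HTTP 429, sleeping for the number of seconds
// given in Retry-After and giving up on long or unparsable waits.
func getWithRetry(url string, retries int) (*http.Response, error) {
	resp, err := http.Get(url)
	if err != nil || resp.StatusCode != http.StatusTooManyRequests {
		return resp, err
	}
	if retries >= maxRetries {
		return resp, fmt.Errorf("too many retries for %s", url)
	}
	secs, convErr := strconv.Atoi(resp.Header.Get("Retry-After"))
	resp.Body.Close()
	if convErr != nil || secs > 60 {
		return resp, fmt.Errorf("unusable Retry-After for %s", url)
	}
	time.Sleep(time.Duration(secs) * time.Second)
	return getWithRetry(url, retries+1)
}

func main() {
	resp, err := getWithRetry("https://example.com/api", 0) // illustrative URL
	fmt.Println(resp, err)
}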
Example #22
// IsAciConfigured returns true if aci is configured on netmaster.
func IsAciConfigured() (res bool, err error) {
	// Get the state driver
	stateDriver, uErr := utils.GetStateDriver()
	if uErr != nil {
		log.Warnf("Couldn't read global config %v", uErr)
		return false, uErr
	}

	// read global config
	masterGc := &mastercfg.GlobConfig{}
	masterGc.StateDriver = stateDriver
	uErr = masterGc.Read("config")
	if core.ErrIfKeyExists(uErr) != nil {
		log.Errorf("Couldn't read global config %v", uErr)
		return false, uErr
	}

	if uErr != nil {
		log.Warnf("Couldn't read global config %v", uErr)
		return false, nil
	}

	if masterGc.NwInfraType != "aci" {
		log.Debugf("NwInfra type is %v, no ACI", masterGc.NwInfraType)
		return false, nil
	}

	return true, nil
}
Example #23
func (a *volumeDriverAdapter) getCapabilities() volume.Capability {
	if a.capabilities != nil {
		return *a.capabilities
	}
	cap, err := a.proxy.Capabilities()
	if err != nil {
		// `GetCapabilities` is not a required endpoint.
		// On error assume it's a local-only driver.
		logrus.Warnf("Volume driver %s returned an error while trying to query its capabilities, using default capabilities: %v", a.name, err)
		return volume.Capability{Scope: volume.LocalScope}
	}

	// don't spam the warn log below just because the plugin didn't provide a scope
	if len(cap.Scope) == 0 {
		cap.Scope = volume.LocalScope
	}

	cap.Scope = strings.ToLower(cap.Scope)
	if cap.Scope != volume.LocalScope && cap.Scope != volume.GlobalScope {
		logrus.Warnf("Volume driver %q returned an invalid scope: %q", a.Name(), cap.Scope)
		cap.Scope = volume.LocalScope
	}

	a.capabilities = &cap
	return cap
}
Example #24
func parseQuery(queryStr string) (readFilters, error) {
	manager := GetQPManager()
	keys, parsers := manager.GetActiveParsers()
	for idx, parser := range parsers {
		if parser.CanParseString(queryStr) {
			log.Debugf("parser.CanParseString(%s) true for %s\n", queryStr, keys[idx])
			rf, err := parser.ParseString(queryStr)
			if err != nil {
				log.Warnf("Parser.ParseString(%s) failed with:%s", queryStr, err.Error())
				return rf, queryParseError{
					StatusCode: http.StatusBadRequest,
					Message:    err.Error(),
				}
			}
			return rf, nil
		}
		log.Debugf("parser.CanParseString(%s) false for %s\n", queryStr, keys[idx])

	}

	log.Warnf("parseQuery - Failed to find parser for:%s. Tried %s\n", queryStr, keys)

	return newReadFilters(), queryParseError{
		StatusCode: http.StatusBadRequest,
		Message:    "No QueryFormats can parse input",
	}
}
Example #25
// waitProcessExitCode will wait for the given process to exit and return its error code.
func (ctr *container) waitProcessExitCode(process *process) int {
	// Block indefinitely for the process to exit.
	err := process.hcsProcess.Wait()
	if err != nil {
		if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != syscall.ERROR_BROKEN_PIPE {
			logrus.Warnf("Wait failed (container may have been killed): %s", err)
		}
		// Fall through here, do not return. This ensures we attempt to continue the
		// shutdown in HCS and tell the docker engine that the process/container
		// has exited to avoid a container being dropped on the floor.
	}

	exitCode, err := process.hcsProcess.ExitCode()
	if err != nil {
		if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != syscall.ERROR_BROKEN_PIPE {
			logrus.Warnf("Unable to get exit code from container %s", ctr.containerID)
		}
		// Since we got an error retrieving the exit code, make sure that the code we return
		// doesn't incorrectly indicate success.
		exitCode = -1

		// Fall through here, do not return. This ensures we attempt to continue the
		// shutdown in HCS and tell the docker engine that the process/container
		// has exited to avoid a container being dropped on the floor.
	}

	if err := process.hcsProcess.Close(); err != nil {
		logrus.Error(err)
	}

	return exitCode
}
Example #26
// Init registers a new instance of bridge driver
func Init(dc driverapi.DriverCallback, config map[string]interface{}) error {
	if _, err := os.Stat("/proc/sys/net/bridge"); err != nil {
		if out, err := exec.Command("modprobe", "-va", "bridge", "br_netfilter").CombinedOutput(); err != nil {
			logrus.Warnf("Running modprobe bridge br_netfilter failed with message: %s, error: %v", out, err)
		}
	}
	if out, err := exec.Command("modprobe", "-va", "nf_nat").CombinedOutput(); err != nil {
		logrus.Warnf("Running modprobe nf_nat failed with message: `%s`, error: %v", strings.TrimSpace(string(out)), err)
	}
	if out, err := exec.Command("modprobe", "-va", "xt_conntrack").CombinedOutput(); err != nil {
		logrus.Warnf("Running modprobe xt_conntrack failed with message: `%s`, error: %v", strings.TrimSpace(string(out)), err)
	}
	if err := iptables.FirewalldInit(); err != nil {
		logrus.Debugf("Fail to initialize firewalld: %v, using raw iptables instead", err)
	}

	d := newDriver()
	if err := d.configure(config); err != nil {
		return err
	}

	c := driverapi.Capability{
		DataScope: datastore.LocalScope,
	}
	return dc.RegisterDriver(networkType, d, c)
}
Example #27
func (sb *sandbox) Refresh(options ...SandboxOption) error {
	// Store connected endpoints
	epList := sb.getConnectedEndpoints()

	// Detach from all endpoints
	for _, ep := range epList {
		if err := ep.Leave(sb); err != nil {
			log.Warnf("Failed detaching sandbox %s from endpoint %s: %v\n", sb.ID(), ep.ID(), err)
		}
	}

	// Re-apply options
	sb.config = containerConfig{}
	sb.processOptions(options...)

	// Setup discovery files
	if err := sb.setupResolutionFiles(); err != nil {
		return err
	}

	// Re-connect to all endpoints
	for _, ep := range epList {
		if err := ep.Join(sb); err != nil {
			log.Warnf("Failed attach sandbox %s to endpoint %s: %v\n", sb.ID(), ep.ID(), err)
		}
	}

	return nil
}
Example #28
func runPs(cmd *Command, rawArgs []string) error {
	if psHelp {
		return cmd.PrintUsage()
	}
	if len(rawArgs) != 0 {
		return cmd.PrintShortUsage()
	}

	args := commands.PsArgs{
		All:     psA,
		Latest:  psL,
		Quiet:   psQ,
		NoTrunc: psNoTrunc,
		NLast:   psN,
		Filters: make(map[string]string, 0),
	}
	if psFilters != "" {
		for _, filter := range strings.Split(psFilters, " ") {
			parts := strings.SplitN(filter, "=", 2)
			if len(parts) != 2 {
				logrus.Warnf("Invalid filter '%s', should be in the form 'key=value'", filter)
				continue
			}
			if _, ok := args.Filters[parts[0]]; ok {
				logrus.Warnf("Duplicated filter: %q", parts[0])
			} else {
				args.Filters[parts[0]] = parts[1]
			}
		}
	}
	ctx := cmd.GetContext(rawArgs)
	return commands.RunPs(ctx, args)
}
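A self-contained sketch of just the filter parsing, kept outside the command plumbing; the printed messages stand in for the logrus warnings.

package main

import (
	"fmt"
	"strings"
)

// parseFilters splits a space-separated "key=value" list, skipping malformed
// entries and keeping the first value when a key is repeated.
func parseFilters(s string) map[string]string {
	filters := make(map[string]string)
	if s == "" {
		return filters
	}
	for _, f := range strings.Split(s, " ") {
		parts := strings.SplitN(f, "=", 2)
		if len(parts) != 2 {
			fmt.Printf("invalid filter %q, should be in the form 'key=value'\n", f)
			continue
		}
		if _, ok := filters[parts[0]]; ok {
			fmt.Printf("duplicated filter: %q\n", parts[0])
			continue
		}
		filters[parts[0]] = parts[1]
	}
	return filters
}

func main() {
	fmt.Println(parseFilters("state=running name=web state=stopped"))
}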
Example #29
func (ep *endpoint) releaseAddress() {
	n := ep.getNetwork()
	if n.hasSpecialDriver() {
		return
	}

	log.Debugf("Releasing addresses for endpoint %s's interface on network %s", ep.Name(), n.Name())

	ipam, _, err := n.getController().getIPAMDriver(n.ipamType)
	if err != nil {
		log.Warnf("Failed to retrieve ipam driver to release interface address on delete of endpoint %s (%s): %v", ep.Name(), ep.ID(), err)
		return
	}

	if ep.iface.addr != nil {
		if err := ipam.ReleaseAddress(ep.iface.v4PoolID, ep.iface.addr.IP); err != nil {
			log.Warnf("Failed to release ip address %s on delete of endpoint %s (%s): %v", ep.iface.addr.IP, ep.Name(), ep.ID(), err)
		}
	}

	if ep.iface.addrv6 != nil && ep.iface.addrv6.IP.IsGlobalUnicast() {
		if err := ipam.ReleaseAddress(ep.iface.v6PoolID, ep.iface.addrv6.IP); err != nil {
			log.Warnf("Failed to release ip address %s on delete of endpoint %s (%s): %v", ep.iface.addrv6.IP, ep.Name(), ep.ID(), err)
		}
	}
}
Example #30
// CreateService creates a new service in a managed swarm cluster.
func (c *Cluster) CreateService(s types.ServiceSpec, encodedAuth string) (*apitypes.ServiceCreateResponse, error) {
	c.RLock()
	defer c.RUnlock()

	if !c.isActiveManager() {
		return nil, c.errNoManager()
	}

	ctx, cancel := c.getRequestContext()
	defer cancel()

	err := c.populateNetworkID(ctx, c.client, &s)
	if err != nil {
		return nil, err
	}

	serviceSpec, err := convert.ServiceSpecToGRPC(s)
	if err != nil {
		return nil, err
	}

	ctnr := serviceSpec.Task.GetContainer()
	if ctnr == nil {
		return nil, fmt.Errorf("service does not use container tasks")
	}

	if encodedAuth != "" {
		ctnr.PullOptions = &swarmapi.ContainerSpec_PullOptions{RegistryAuth: encodedAuth}
	}

	// retrieve auth config from encoded auth
	authConfig := &apitypes.AuthConfig{}
	if encodedAuth != "" {
		if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuth))).Decode(authConfig); err != nil {
			logrus.Warnf("invalid authconfig: %v", err)
		}
	}

	resp := &apitypes.ServiceCreateResponse{}

	// pin image by digest
	if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" {
		digestImage, err := c.imageWithDigestString(ctx, ctnr.Image, authConfig)
		if err != nil {
			logrus.Warnf("unable to pin image %s to digest: %s", ctnr.Image, err.Error())
			resp.Warnings = append(resp.Warnings, fmt.Sprintf("unable to pin image %s to digest: %s", ctnr.Image, err.Error()))
		} else {
			logrus.Debugf("pinning image %s by digest: %s", ctnr.Image, digestImage)
			ctnr.Image = digestImage
		}
	}

	r, err := c.client.CreateService(ctx, &swarmapi.CreateServiceRequest{Spec: &serviceSpec})
	if err != nil {
		return nil, err
	}

	resp.ID = r.Service.ID
	return resp, nil
}