Example #1
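From an older revision of Docker's bridge network driver: the Allocate job asks ipallocator.RequestIP either for the address given in RequestedIP or for any free address on the bridge network, records the result for the container, and writes the interface settings (IP, mask, gateway, bridge, prefix length) to the job's stdout. In this revision the allocator works with *net.IP values.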
// Allocate a network interface
func Allocate(job *engine.Job) engine.Status {
	var (
		ip          *net.IP
		err         error
		id          = job.Args[0]
		requestedIP = net.ParseIP(job.Getenv("RequestedIP"))
	)

	if requestedIP != nil {
		ip, err = ipallocator.RequestIP(bridgeNetwork, &requestedIP)
	} else {
		ip, err = ipallocator.RequestIP(bridgeNetwork, nil)
	}
	if err != nil {
		return job.Error(err)
	}

	out := engine.Env{}
	out.Set("IP", ip.String())
	out.Set("Mask", bridgeNetwork.Mask.String())
	out.Set("Gateway", bridgeNetwork.IP.String())
	out.Set("Bridge", bridgeIface)

	size, _ := bridgeNetwork.Mask.Size()
	out.SetInt("IPPrefixLen", size)

	currentInterfaces.Set(id, &networkInterface{
		IP: *ip,
	})

	out.WriteTo(job.Stdout)

	return engine.StatusOK
}
Example #2
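A later revision of the same job: RequestIP now takes and returns plain net.IP values, and the driver also assigns a MAC address, either parsed from RequestedMac or derived from the allocated IP, before reporting the interface settings.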
// Allocate a network interface
func Allocate(job *engine.Job) engine.Status {
	var (
		ip          net.IP
		mac         net.HardwareAddr
		err         error
		id          = job.Args[0]
		requestedIP = net.ParseIP(job.Getenv("RequestedIP"))
	)

	if requestedIP != nil {
		ip, err = ipallocator.RequestIP(bridgeNetwork, requestedIP)
	} else {
		ip, err = ipallocator.RequestIP(bridgeNetwork, nil)
	}
	if err != nil {
		return job.Error(err)
	}

	// If no valid mac address was given, derive one from the allocated IP.
	if mac, err = net.ParseMAC(job.Getenv("RequestedMac")); err != nil {
		mac = generateMacAddr(ip)
	}

	out := engine.Env{}
	out.Set("IP", ip.String())
	out.Set("Mask", bridgeNetwork.Mask.String())
	out.Set("Gateway", bridgeNetwork.IP.String())
	out.Set("MacAddress", mac.String())
	out.Set("Bridge", bridgeIface)

	size, _ := bridgeNetwork.Mask.Size()
	out.SetInt("IPPrefixLen", size)

	currentInterfaces.Set(id, &networkInterface{
		IP: ip,
	})

	out.WriteTo(job.Stdout)

	return engine.StatusOK
}
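The allocation calls above are normally paired with ipallocator.ReleaseIP when the interface is torn down. The standalone sketch below shows the request/release cycle; the import path and the plain net.IP signatures match Example #2 and are assumptions that vary between versions (older releases, as in Example #1, work with *net.IP instead), and the 172.17.0.0/16 subnet is just an illustrative value.

package main

import (
	"fmt"
	"net"

	// Assumed import path of the allocator package; it has moved between releases.
	"github.com/docker/docker/daemon/networkdriver/ipallocator"
)

func main() {
	// Illustrative subnet; any IPv4 network works.
	_, network, err := net.ParseCIDR("172.17.0.0/16")
	if err != nil {
		panic(err)
	}

	// Ask for any free address (pass a specific IP instead of nil to request it).
	ip, err := ipallocator.RequestIP(network, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println("allocated", ip)

	// Hand the address back to the pool once the interface is gone.
	if err := ipallocator.ReleaseIP(network, ip); err != nil {
		panic(err)
	}
}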
Example #3
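The two functions in this example are from Flynn's host agent. TapManager.NewTap allocates a local and a remote address for a freshly created tap device and attaches the device to the bridge; LibvirtLXCBackend.Run allocates the container's IP from the default network before building and booting the libvirt-lxc domain.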
func (t *TapManager) NewTap(uid, gid int) (*Tap, error) {
	tap := &Tap{Name: "flynntap." + random.String(5), bridge: t.bridge}

	if err := createTap(tap.Name, uid, gid); err != nil {
		return nil, err
	}

	var err error
	tap.LocalIP, err = ipallocator.RequestIP(t.bridge.ipNet, nil)
	if err != nil {
		tap.Close()
		return nil, err
	}

	tap.RemoteIP, err = ipallocator.RequestIP(t.bridge.ipNet, nil)
	if err != nil {
		tap.Close()
		return nil, err
	}

	iface, err := net.InterfaceByName(tap.Name)
	if err != nil {
		tap.Close()
		return nil, err
	}
	if err := netlink.NetworkLinkAddIp(iface, *tap.LocalIP, t.bridge.ipNet); err != nil {
		tap.Close()
		return nil, err
	}
	if err := netlink.NetworkLinkUp(iface); err != nil {
		tap.Close()
		return nil, err
	}
	if err := netlink.AddToBridge(iface, t.bridge.iface); err != nil {
		tap.Close()
		return nil, err
	}

	return tap, nil
}
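Every error path in NewTap calls tap.Close, which is expected to hand the two allocated addresses back to the pool. Below is a minimal sketch of that release step, assuming the pointer-based allocator API this code uses; the hypothetical releaseIPs helper covers only the addresses, not the removal of the tap device itself.

// releaseIPs is a hypothetical helper showing the release side of the two
// RequestIP calls in NewTap; errors are ignored because teardown is best-effort.
func (t *Tap) releaseIPs() {
	if t.LocalIP != nil {
		ipallocator.ReleaseIP(t.bridge.ipNet, t.LocalIP)
	}
	if t.RemoteIP != nil {
		ipallocator.ReleaseIP(t.bridge.ipNet, t.RemoteIP)
	}
}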
// Run sets up and starts a libvirt-lxc container for the given job: it
// allocates an IP, pulls and checks out the image, prepares mounts, ports
// and environment, then defines the domain and boots it.
func (l *LibvirtLXCBackend) Run(job *host.Job) (err error) {
	g := grohl.NewContext(grohl.Data{"backend": "libvirt-lxc", "fn": "run", "job.id": job.ID})
	g.Log(grohl.Data{"at": "start", "job.artifact.uri": job.Artifact.URI, "job.cmd": job.Config.Cmd})

	ip, err := ipallocator.RequestIP(defaultNet, nil)
	if err != nil {
		g.Log(grohl.Data{"at": "request_ip", "status": "error", "err": err})
		return err
	}
	container := &libvirtContainer{
		l:    l,
		job:  job,
		IP:   *ip,
		done: make(chan struct{}),
	}
	defer func() {
		if err != nil {
			go container.cleanup()
		}
	}()

	g.Log(grohl.Data{"at": "pull_image"})
	layers, err := pinkerton.Pull(job.Artifact.URI)
	if err != nil {
		g.Log(grohl.Data{"at": "pull_image", "status": "error", "err": err})
		return err
	}
	imageID, err := pinkerton.ImageID(job.Artifact.URI)
	if err == pinkerton.ErrNoImageID && len(layers) > 0 {
		imageID = layers[len(layers)-1].ID
	} else if err != nil {
		g.Log(grohl.Data{"at": "image_id", "status": "error", "err": err})
		return err
	}

	g.Log(grohl.Data{"at": "read_config"})
	imageConfig, err := readDockerImageConfig(imageID)
	if err != nil {
		g.Log(grohl.Data{"at": "read_config", "status": "error", "err": err})
		return err
	}

	g.Log(grohl.Data{"at": "checkout"})
	rootPath, err := pinkerton.Checkout(job.ID, imageID)
	if err != nil {
		g.Log(grohl.Data{"at": "checkout", "status": "error", "err": err})
		return err
	}
	container.RootPath = rootPath

	g.Log(grohl.Data{"at": "mount"})
	if err := bindMount(l.InitPath, filepath.Join(rootPath, ".containerinit"), false, true); err != nil {
		g.Log(grohl.Data{"at": "mount", "file": ".containerinit", "status": "error", "err": err})
		return err
	}
	if err := os.MkdirAll(filepath.Join(rootPath, "etc"), 0755); err != nil {
		g.Log(grohl.Data{"at": "mkdir", "dir": "etc", "status": "error", "err": err})
		return err
	}
	if err := bindMount("/etc/resolv.conf", filepath.Join(rootPath, "etc/resolv.conf"), false, true); err != nil {
		g.Log(grohl.Data{"at": "mount", "file": "resolv.conf", "status": "error", "err": err})
		return err
	}
	if err := writeHostname(filepath.Join(rootPath, "etc/hosts"), job.ID); err != nil {
		g.Log(grohl.Data{"at": "write_hosts", "status": "error", "err": err})
		return err
	}
	if err := os.MkdirAll(filepath.Join(rootPath, ".container-shared"), 0700); err != nil {
		g.Log(grohl.Data{"at": "mkdir", "dir": ".container-shared", "status": "error", "err": err})
		return err
	}
	for i, m := range job.Config.Mounts {
		if err := os.MkdirAll(filepath.Join(rootPath, m.Location), 0755); err != nil {
			g.Log(grohl.Data{"at": "mkdir_mount", "dir": m.Location, "status": "error", "err": err})
			return err
		}
		if m.Target == "" {
			m.Target = filepath.Join(l.VolPath, cluster.RandomJobID(""))
			job.Config.Mounts[i].Target = m.Target
			if err := os.MkdirAll(m.Target, 0755); err != nil {
				g.Log(grohl.Data{"at": "mkdir_vol", "dir": m.Target, "status": "error", "err": err})
				return err
			}
		}
		if err := bindMount(m.Target, filepath.Join(rootPath, m.Location), m.Writeable, true); err != nil {
			g.Log(grohl.Data{"at": "mount", "target": m.Target, "location": m.Location, "status": "error", "err": err})
			return err
		}
	}

	if job.Config.Env == nil {
		job.Config.Env = make(map[string]string)
	}
	for i, p := range job.Config.Ports {
		if p.Proto != "tcp" && p.Proto != "udp" {
			return fmt.Errorf("unknown port proto %q", p.Proto)
		}

		var port uint16
		if p.Port <= 0 {
			job.Config.Ports[i].RangeEnd = 0
			port, err = l.ports[p.Proto].Get()
		} else {
			port, err = l.ports[p.Proto].GetPort(uint16(p.Port))
		}
		if err != nil {
			g.Log(grohl.Data{"at": "alloc_port", "status": "error", "err": err})
			return err
		}
		job.Config.Ports[i].Port = int(port)
		if job.Config.Ports[i].RangeEnd == 0 {
			job.Config.Ports[i].RangeEnd = int(port)
		}

		if i == 0 {
			job.Config.Env["PORT"] = strconv.Itoa(int(port))
		}
		job.Config.Env[fmt.Sprintf("PORT_%d", i)] = strconv.Itoa(int(port))
	}

	g.Log(grohl.Data{"at": "write_env"})
	err = writeContainerEnv(filepath.Join(rootPath, ".containerenv"),
		map[string]string{
			"PATH": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
			"TERM": "xterm",
			"HOME": "/",
		},
		job.Config.Env,
		map[string]string{
			"HOSTNAME": job.ID,
		},
	)
	if err != nil {
		g.Log(grohl.Data{"at": "write_env", "status": "error", "err": err})
		return err
	}

	args := []string{
		"-i", ip.String() + "/24",
		"-g", defaultGW.String(),
	}
	if job.Config.TTY {
		args = append(args, "-tty")
	}
	if job.Config.Stdin {
		args = append(args, "-stdin")
	}
	if job.Config.WorkingDir != "" {
		args = append(args, "-w", job.Config.WorkingDir)
	} else if imageConfig.WorkingDir != "" {
		args = append(args, "-w", imageConfig.WorkingDir)
	}
	if job.Config.Uid > 0 {
		args = append(args, "-u", strconv.Itoa(job.Config.Uid))
	} else if imageConfig.User != "" {
		// TODO: check and lookup user from image config
	}
	if len(job.Config.Entrypoint) > 0 {
		args = append(args, job.Config.Entrypoint...)
		args = append(args, job.Config.Cmd...)
	} else {
		args = append(args, imageConfig.Entrypoint...)
		if len(job.Config.Cmd) > 0 {
			args = append(args, job.Config.Cmd...)
		} else {
			args = append(args, imageConfig.Cmd...)
		}
	}

	l.state.AddJob(job)
	l.state.SetInternalIP(job.ID, ip.String())
	domain := &lt.Domain{
		Type:   "lxc",
		Name:   job.ID,
		Memory: lt.UnitInt{Value: 1, Unit: "GiB"},
		VCPU:   1,
		OS: lt.OS{
			Type:     lt.OSType{Value: "exe"},
			Init:     "/.containerinit",
			InitArgs: args,
		},
		Devices: lt.Devices{
			Filesystems: []lt.Filesystem{{
				Type:   "mount",
				Source: lt.FSRef{Dir: rootPath},
				Target: lt.FSRef{Dir: "/"},
			}},
			Interfaces: []lt.Interface{{
				Type:   "network",
				Source: lt.InterfaceSrc{Network: "default"},
			}},
			Consoles: []lt.Console{{Type: "pty"}},
		},
		OnPoweroff: "preserve",
		OnCrash:    "preserve",
	}

	g.Log(grohl.Data{"at": "define_domain"})
	vd, err := l.libvirt.DomainDefineXML(string(domain.XML()))
	if err != nil {
		g.Log(grohl.Data{"at": "define_domain", "status": "error", "err": err})
		return err
	}

	g.Log(grohl.Data{"at": "create_domain"})
	if err := vd.Create(); err != nil {
		g.Log(grohl.Data{"at": "create_domain", "status": "error", "err": err})
		return err
	}
	uuid, err := vd.GetUUIDString()
	if err != nil {
		g.Log(grohl.Data{"at": "get_domain_uuid", "status": "error", "err": err})
		return err
	}
	g.Log(grohl.Data{"at": "get_uuid", "uuid": uuid})
	l.state.SetContainerID(job.ID, uuid)

	domainXML, err := vd.GetXMLDesc(0)
	if err != nil {
		g.Log(grohl.Data{"at": "get_domain_xml", "status": "error", "err": err})
		return err
	}
	domain = &lt.Domain{}
	if err := xml.Unmarshal([]byte(domainXML), domain); err != nil {
		g.Log(grohl.Data{"at": "unmarshal_domain_xml", "status": "error", "err": err})
		return err
	}

	if len(domain.Devices.Interfaces) == 0 || domain.Devices.Interfaces[0].Target == nil ||
		domain.Devices.Interfaces[0].Target.Dev == "" {
		err = errors.New("domain config missing interface")
		g.Log(grohl.Data{"at": "enable_hairpin", "status": "error", "err": err})
		return err
	}
	iface := domain.Devices.Interfaces[0].Target.Dev
	if err := enableHairpinMode(iface); err != nil {
		g.Log(grohl.Data{"at": "enable_hairpin", "status": "error", "err": err})
		return err
	}

	for _, p := range job.Config.Ports {
		if err := l.forwarder.Add(&net.TCPAddr{IP: *ip, Port: p.Port}, p.RangeEnd, p.Proto); err != nil {
			g.Log(grohl.Data{"at": "forward_port", "port": p.Port, "status": "error", "err": err})
			return err
		}
	}

	go container.watch(nil)

	g.Log(grohl.Data{"at": "finish"})
	return nil
}
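The address obtained at the start of Run is only handed back in the container's cleanup path, which the deferred call triggers when Run fails. A minimal sketch of that release, assuming the pointer-based allocator API used here and a hypothetical releaseIP helper; the real cleanup is responsible for more than just the address.

// releaseIP is a hypothetical helper: it returns the container's address to
// the default network's pool so it can be reused by a later job.
func (c *libvirtContainer) releaseIP() error {
	return ipallocator.ReleaseIP(defaultNet, &c.IP)
}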