// Init initializes the daemon in dependency order: runtime state, executable
// path, logging, feature detection (user namespaces, AppArmor, CGroup
// controllers), supported architectures, on-disk directory layout, database,
// storage, config cache, /dev/lxd, TLS and finally the REST API listeners
// (systemd socket activation or self-created sockets). Ordering matters:
// later steps read package globals (aa*, cg*, runningInUserns) and daemon
// fields set by earlier steps.
func (d *Daemon) Init() error {
	/* Initialize some variables */
	d.imagesDownloading = map[string]chan bool{}
	d.readyChan = make(chan bool)
	d.shutdownChan = make(chan bool)

	/* Set the executable path */
	// Resolve our own binary through /proc so re-exec uses the exact same
	// executable even if the path it was started from changes.
	absPath, err := os.Readlink("/proc/self/exe")
	if err != nil {
		return err
	}
	d.execPath = absPath

	/* Set the LVM environment */
	// Silences LVM warnings about leaked file descriptors when we shell out.
	err = os.Setenv("LVM_SUPPRESS_FD_WARNINGS", "1")
	if err != nil {
		return err
	}

	/* Setup logging if that wasn't done before */
	if shared.Log == nil {
		shared.Log, err = logging.GetLogger("", "", true, true, nil)
		if err != nil {
			return err
		}
	}

	/* Print welcome message */
	if d.MockMode {
		shared.Log.Info("LXD is starting in mock mode", log.Ctx{"path": shared.VarPath("")})
	} else if d.SetupMode {
		shared.Log.Info("LXD is starting in setup mode", log.Ctx{"path": shared.VarPath("")})
	} else {
		shared.Log.Info("LXD is starting in normal mode", log.Ctx{"path": shared.VarPath("")})
	}

	/* Detect user namespaces */
	runningInUserns = shared.RunningInUserNS()

	/* Detect AppArmor support */
	// Each failed check downgrades the aa* globals; later checks are skipped
	// once aaAvailable is false, so order is significant.
	if aaAvailable && os.Getenv("LXD_SECURITY_APPARMOR") == "false" {
		aaAvailable = false
		aaAdmin = false
		shared.Log.Warn("AppArmor support has been manually disabled")
	}

	if aaAvailable && !shared.IsDir("/sys/kernel/security/apparmor") {
		aaAvailable = false
		aaAdmin = false
		shared.Log.Warn("AppArmor support has been disabled because of lack of kernel support")
	}

	_, err = exec.LookPath("apparmor_parser")
	if aaAvailable && err != nil {
		aaAvailable = false
		aaAdmin = false
		shared.Log.Warn("AppArmor support has been disabled because 'apparmor_parser' couldn't be found")
	}

	/* Detect AppArmor admin support */
	if aaAdmin && !haveMacAdmin() {
		aaAdmin = false
		shared.Log.Warn("Per-container AppArmor profiles are disabled because the mac_admin capability is missing.")
	}

	if aaAdmin && runningInUserns {
		aaAdmin = false
		shared.Log.Warn("Per-container AppArmor profiles are disabled because LXD is running in an unprivileged container.")
	}

	/* Detect AppArmor confinment */
	// If LXD itself already runs under a non-default AppArmor profile we
	// record that as "confined" and skip per-container profiles.
	if !aaConfined {
		profile := aaProfile()
		if profile != "unconfined" && profile != "" {
			aaConfined = true
			shared.Log.Warn("Per-container AppArmor profiles are disabled because LXD is already protected by AppArmor.")
		}
	}

	/* Detect CGroup support */
	// Purely presence checks; a missing controller disables the matching
	// feature with a warning rather than failing startup.
	cgBlkioController = shared.PathExists("/sys/fs/cgroup/blkio/")
	if !cgBlkioController {
		shared.Log.Warn("Couldn't find the CGroup blkio controller, I/O limits will be ignored.")
	}

	cgCpuController = shared.PathExists("/sys/fs/cgroup/cpu/")
	if !cgCpuController {
		shared.Log.Warn("Couldn't find the CGroup CPU controller, CPU time limits will be ignored.")
	}

	cgCpusetController = shared.PathExists("/sys/fs/cgroup/cpuset/")
	if !cgCpusetController {
		shared.Log.Warn("Couldn't find the CGroup CPUset controller, CPU pinning will be ignored.")
	}

	cgDevicesController = shared.PathExists("/sys/fs/cgroup/devices/")
	if !cgDevicesController {
		shared.Log.Warn("Couldn't find the CGroup devices controller, device access control won't work.")
	}

	cgMemoryController = shared.PathExists("/sys/fs/cgroup/memory/")
	if !cgMemoryController {
		shared.Log.Warn("Couldn't find the CGroup memory controller, memory limits will be ignored.")
	}

	cgNetPrioController = shared.PathExists("/sys/fs/cgroup/net_prio/")
	if !cgNetPrioController {
		shared.Log.Warn("Couldn't find the CGroup network class controller, network limits will be ignored.")
	}

	cgPidsController = shared.PathExists("/sys/fs/cgroup/pids/")
	if !cgPidsController {
		shared.Log.Warn("Couldn't find the CGroup pids controller, process limits will be ignored.")
	}

	cgSwapAccounting = shared.PathExists("/sys/fs/cgroup/memory/memory.memsw.limit_in_bytes")
	if !cgSwapAccounting {
		shared.Log.Warn("CGroup memory swap accounting is disabled, swap limits will be ignored.")
	}

	/* Get the list of supported architectures */
	// Native architecture first, then its personalities (e.g. 32-bit on a
	// 64-bit kernel).
	var architectures = []int{}

	architectureName, err := shared.ArchitectureGetLocal()
	if err != nil {
		return err
	}

	architecture, err := shared.ArchitectureId(architectureName)
	if err != nil {
		return err
	}
	architectures = append(architectures, architecture)

	personalities, err := shared.ArchitecturePersonalities(architecture)
	if err != nil {
		return err
	}
	for _, personality := range personalities {
		architectures = append(architectures, personality)
	}
	d.architectures = architectures

	/* Set container path */
	d.lxcpath = shared.VarPath("containers")

	/* Make sure all our directories are available */
	// Modes differ on purpose: 0711 allows traversal into container dirs,
	// 0755 makes devlxd reachable from containers, 0700 keeps
	// images/logs/security/snapshots root-only.
	if err := os.MkdirAll(shared.VarPath("containers"), 0711); err != nil {
		return err
	}
	if err := os.MkdirAll(shared.VarPath("devices"), 0711); err != nil {
		return err
	}
	if err := os.MkdirAll(shared.VarPath("devlxd"), 0755); err != nil {
		return err
	}
	if err := os.MkdirAll(shared.VarPath("images"), 0700); err != nil {
		return err
	}
	if err := os.MkdirAll(shared.LogPath(), 0700); err != nil {
		return err
	}
	if err := os.MkdirAll(shared.VarPath("security"), 0700); err != nil {
		return err
	}
	if err := os.MkdirAll(shared.VarPath("shmounts"), 0711); err != nil {
		return err
	}
	if err := os.MkdirAll(shared.VarPath("snapshots"), 0700); err != nil {
		return err
	}

	/* Detect the filesystem */
	// Non-fatal: failure to identify the backing filesystem only loses
	// filesystem-specific optimizations.
	d.BackingFs, err = filesystemDetect(d.lxcpath)
	if err != nil {
		shared.Log.Error("Error detecting backing fs", log.Ctx{"err": err})
	}

	/* Read the uid/gid allocation */
	// Also non-fatal: without an idmap only privileged containers work.
	d.IdmapSet, err = shared.DefaultIdmapSet()
	if err != nil {
		shared.Log.Warn("Error reading idmap", log.Ctx{"err": err.Error()})
		shared.Log.Warn("Only privileged containers will be able to run")
	} else {
		shared.Log.Info("Default uid/gid map:")
		for _, lxcmap := range d.IdmapSet.ToLxcString() {
			shared.Log.Info(strings.TrimRight(" - "+lxcmap, "\n"))
		}
	}

	/* Initialize the database */
	err = initializeDbObject(d, shared.VarPath("lxd.db"))
	if err != nil {
		return err
	}

	/* Setup the storage driver */
	if !d.MockMode {
		err = d.SetupStorageDriver()
		if err != nil {
			return fmt.Errorf("Failed to setup storage: %s", err)
		}
	}

	/* Load all config values from the database */
	// Result is discarded — presumably this primes the daemon's config
	// cache as a side effect; TODO confirm against ConfigValuesGet.
	_, err = d.ConfigValuesGet()
	if err != nil {
		return err
	}

	/* set the initial proxy function based on config values in the DB */
	d.updateProxy()

	/* Setup /dev/lxd */
	d.devlxd, err = createAndBindDevLxd()
	if err != nil {
		return err
	}

	if err := setupSharedMounts(); err != nil {
		return err
	}

	if !d.MockMode {
		/* Start the scheduler */
		go deviceEventListener(d)

		/* Setup the TLS authentication */
		certf, keyf, err := readMyCert()
		if err != nil {
			return err
		}
		cert, err := tls.LoadX509KeyPair(certf, keyf)
		if err != nil {
			return err
		}

		// InsecureSkipVerify + RequestClientCert: the TLS layer accepts any
		// client certificate; trust is checked later against the saved
		// client CA list (readSavedClientCAList below) — TODO confirm.
		tlsConfig := &tls.Config{
			InsecureSkipVerify: true,
			ClientAuth:         tls.RequestClientCert,
			Certificates:       []tls.Certificate{cert},
			MinVersion:         tls.VersionTLS12,
			MaxVersion:         tls.VersionTLS12,
			CipherSuites: []uint16{
				tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
				tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256},
			PreferServerCipherSuites: true,
		}
		tlsConfig.BuildNameToCertificate()

		d.tlsConfig = tlsConfig

		readSavedClientCAList(d)
	}

	/* Setup the web server */
	d.mux = mux.NewRouter()
	d.mux.StrictSlash(false)

	d.mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		SyncResponse(true, []string{"/1.0"}).Render(w)
	})

	for _, c := range api10 {
		d.createCmd("1.0", c)
	}

	for _, c := range apiInternal {
		d.createCmd("internal", c)
	}

	d.mux.NotFoundHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		shared.Log.Debug("Sending top level 404", log.Ctx{"url": r.URL})
		w.Header().Set("Content-Type", "application/json")
		NotFound.Render(w)
	})

	listeners, err := activation.Listeners(false)
	if err != nil {
		return err
	}

	if len(listeners) > 0 {
		shared.Log.Info("LXD is socket activated")

		// Heuristic: a listener whose address exists as a filesystem path
		// is the Unix socket; anything else is treated as the TCP socket.
		for _, listener := range listeners {
			if shared.PathExists(listener.Addr().String()) {
				d.UnixSocket = &Socket{Socket: listener, CloseOnExit: false}
			} else {
				tlsListener := tls.NewListener(listener, d.tlsConfig)
				d.TCPSocket = &Socket{Socket: tlsListener, CloseOnExit: false}
			}
		}
	} else {
		shared.Log.Info("LXD isn't socket activated")

		localSocketPath := shared.VarPath("unix.socket")

		// If the socket exists, let's try to connect to it
		// and see if there's a lxd running.
		if shared.PathExists(localSocketPath) {
			_, err := lxd.NewClient(&lxd.DefaultConfig, "local")
			if err != nil {
				shared.Log.Debug("Detected stale unix socket, deleting")
				// Connecting failed, so let's delete the socket and
				// listen on it ourselves.
				err = os.Remove(localSocketPath)
				if err != nil {
					return err
				}
			} else {
				return fmt.Errorf("LXD is already running.")
			}
		}

		unixAddr, err := net.ResolveUnixAddr("unix", localSocketPath)
		if err != nil {
			return fmt.Errorf("cannot resolve unix socket address: %v", err)
		}

		unixl, err := net.ListenUnix("unix", unixAddr)
		if err != nil {
			return fmt.Errorf("cannot listen on unix socket: %v", err)
		}

		// Restrict the socket to root and the configured admin group.
		if err := os.Chmod(localSocketPath, 0660); err != nil {
			return err
		}

		var gid int
		if d.group != "" {
			gid, err = shared.GroupId(d.group)
			if err != nil {
				return err
			}
		} else {
			gid = os.Getgid()
		}

		if err := os.Chown(localSocketPath, os.Getuid(), gid); err != nil {
			return err
		}

		d.UnixSocket = &Socket{Socket: unixl, CloseOnExit: true}
	}

	listenAddr, err := d.ConfigValueGet("core.https_address")
	if err != nil {
		return err
	}

	if listenAddr != "" {
		// Append the default port when the configured address has none.
		_, _, err := net.SplitHostPort(listenAddr)
		if err != nil {
			listenAddr = fmt.Sprintf("%s:%s", listenAddr, shared.DefaultPort)
		}

		// A failed HTTPS bind is deliberately non-fatal: the daemon still
		// serves the Unix socket.
		tcpl, err := tls.Listen("tcp", listenAddr, d.tlsConfig)
		if err != nil {
			shared.Log.Error("cannot listen on https socket, skipping...", log.Ctx{"err": err})
		} else {
			// The configured address takes precedence over a
			// systemd-activated TCP socket.
			if d.TCPSocket != nil {
				shared.Log.Info("Replacing systemd TCP socket by configure one")
				d.TCPSocket.Socket.Close()
			}
			d.TCPSocket = &Socket{Socket: tcpl, CloseOnExit: true}
		}
	}

	d.tomb.Go(func() error {
		shared.Log.Info("REST API daemon:")
		if d.UnixSocket != nil {
			shared.Log.Info(" - binding Unix socket", log.Ctx{"socket": d.UnixSocket.Socket.Addr()})
			d.tomb.Go(func() error { return http.Serve(d.UnixSocket.Socket, &lxdHttpServer{d.mux, d}) })
		}

		if d.TCPSocket != nil {
			shared.Log.Info(" - binding TCP socket", log.Ctx{"socket": d.TCPSocket.Socket.Addr()})
			d.tomb.Go(func() error { return http.Serve(d.TCPSocket.Socket, &lxdHttpServer{d.mux, d}) })
		}

		d.tomb.Go(func() error {
			server := devLxdServer(d)
			return server.Serve(d.devlxd)
		})
		return nil
	})

	if !d.MockMode && !d.SetupMode {
		err := d.Ready()
		if err != nil {
			return err
		}
	}

	return nil
}
// Init initializes the daemon: executable path, logging, feature detection
// (user namespaces, AppArmor, a subset of CGroup controllers), supported
// architectures via uname, directory layout, database, the image-pruning
// goroutine, /dev/lxd, storage/TLS (skipped in mock mode) and the REST API
// sockets collected into d.Sockets and served under d.tomb.
func (d *Daemon) Init() error {
	d.shutdownChan = make(chan bool)

	/* Set the executable path */
	// Resolve our own binary through /proc so re-exec uses the same file.
	absPath, err := os.Readlink("/proc/self/exe")
	if err != nil {
		return err
	}
	d.execPath = absPath

	/* Set the LVM environment */
	// Silences LVM warnings about leaked file descriptors when shelling out.
	err = os.Setenv("LVM_SUPPRESS_FD_WARNINGS", "1")
	if err != nil {
		return err
	}

	/* Setup logging if that wasn't done before */
	if shared.Log == nil {
		shared.Log, err = logging.GetLogger("", "", true, true, nil)
		if err != nil {
			return err
		}
	}

	if !d.IsMock {
		shared.Log.Info("LXD is starting", log.Ctx{"path": shared.VarPath("")})
	} else {
		shared.Log.Info("Mock LXD is starting", log.Ctx{"path": shared.VarPath("")})
	}

	/* Detect user namespaces */
	runningInUserns = shared.RunningInUserNS()

	/* Detect AppArmor support */
	// Each failed check downgrades the aa* globals; order is significant.
	if aaAvailable && os.Getenv("LXD_SECURITY_APPARMOR") == "false" {
		aaAvailable = false
		aaAdmin = false
		shared.Log.Warn("AppArmor support has been manually disabled")
	}

	if aaAvailable && !shared.IsDir("/sys/kernel/security/apparmor") {
		aaAvailable = false
		aaAdmin = false
		shared.Log.Warn("AppArmor support has been disabled because of lack of kernel support")
	}

	_, err = exec.LookPath("apparmor_parser")
	if aaAvailable && err != nil {
		aaAvailable = false
		aaAdmin = false
		shared.Log.Warn("AppArmor support has been disabled because 'apparmor_parser' couldn't be found")
	}

	/* Detect AppArmor admin support */
	if aaAdmin && !haveMacAdmin() {
		aaAdmin = false
		shared.Log.Warn("Per-container AppArmor profiles are disabled because the mac_admin capability is missing.")
	}

	if aaAdmin && runningInUserns {
		aaAdmin = false
		shared.Log.Warn("Per-container AppArmor profiles are disabled because LXD is running in an unprivileged container.")
	}

	/* Detect AppArmor confinment */
	if !aaConfined {
		profile := aaProfile()
		if profile != "unconfined" && profile != "" {
			aaConfined = true
			shared.Log.Warn("Per-container AppArmor profiles are disabled because LXD is already protected by AppArmor.")
		}
	}

	/* Detect CGroup support */
	// Presence checks only; a missing controller disables the feature with
	// a warning instead of failing startup.
	cgCpuController = shared.PathExists("/sys/fs/cgroup/cpu/")
	if !cgCpuController {
		shared.Log.Warn("Couldn't find the CGroup CPU controller, CPU time limits will be ignored.")
	}

	cgCpusetController = shared.PathExists("/sys/fs/cgroup/cpuset/")
	if !cgCpusetController {
		shared.Log.Warn("Couldn't find the CGroup CPUset controller, CPU pinning will be ignored.")
	}

	cgMemoryController = shared.PathExists("/sys/fs/cgroup/memory/")
	if !cgMemoryController {
		shared.Log.Warn("Couldn't find the CGroup memory controller, memory limits will be ignored.")
	}

	cgSwapAccounting = shared.PathExists("/sys/fs/cgroup/memory/memory.memsw.limit_in_bytes")
	if !cgSwapAccounting {
		shared.Log.Warn("CGroup memory swap accounting is disabled, swap limits will be ignored.")
	}

	/* Get the list of supported architectures */
	var architectures = []int{}

	// uname.Machine is a fixed-size, NUL-padded C char array; convert it to
	// a Go string byte by byte, stopping at the first NUL.
	uname := syscall.Utsname{}
	if err := syscall.Uname(&uname); err != nil {
		return err
	}

	architectureName := ""
	for _, c := range uname.Machine {
		if c == 0 {
			break
		}
		architectureName += string(byte(c))
	}

	architecture, err := shared.ArchitectureId(architectureName)
	if err != nil {
		return err
	}
	architectures = append(architectures, architecture)

	// Personalities are the additional ABIs the kernel can run (e.g. 32-bit
	// binaries on a 64-bit kernel).
	personalities, err := shared.ArchitecturePersonalities(architecture)
	if err != nil {
		return err
	}
	for _, personality := range personalities {
		architectures = append(architectures, personality)
	}
	d.architectures = architectures

	/* Set container path */
	d.lxcpath = shared.VarPath("containers")

	/* Make sure all our directories are available */
	// 0711 allows traversal, 0755 makes devlxd reachable from containers,
	// 0700 keeps images/security/snapshots root-only.
	if err := os.MkdirAll(shared.VarPath("containers"), 0711); err != nil {
		return err
	}
	if err := os.MkdirAll(shared.VarPath("devices"), 0711); err != nil {
		return err
	}
	if err := os.MkdirAll(shared.VarPath("devlxd"), 0755); err != nil {
		return err
	}
	if err := os.MkdirAll(shared.VarPath("images"), 0700); err != nil {
		return err
	}
	if err := os.MkdirAll(shared.VarPath("security"), 0700); err != nil {
		return err
	}
	if err := os.MkdirAll(shared.VarPath("shmounts"), 0711); err != nil {
		return err
	}
	if err := os.MkdirAll(shared.VarPath("snapshots"), 0700); err != nil {
		return err
	}

	/* Detect the filesystem */
	// Non-fatal: only loses filesystem-specific optimizations.
	d.BackingFs, err = filesystemDetect(d.lxcpath)
	if err != nil {
		shared.Log.Error("Error detecting backing fs", log.Ctx{"err": err})
	}

	/* Read the uid/gid allocation */
	// Non-fatal: without an idmap only privileged containers work.
	d.IdmapSet, err = shared.DefaultIdmapSet()
	if err != nil {
		shared.Log.Warn("Error reading idmap", log.Ctx{"err": err.Error()})
		shared.Log.Warn("Only privileged containers will be able to run")
	} else {
		shared.Log.Info("Default uid/gid map:")
		for _, lxcmap := range d.IdmapSet.ToLxcString() {
			shared.Log.Info(strings.TrimRight(" - "+lxcmap, "\n"))
		}
	}

	/* Initialize the database */
	err = initializeDbObject(d, shared.VarPath("lxd.db"))
	if err != nil {
		return err
	}

	/* Prune images */
	// Background pruner: once immediately, then once per day, and on demand
	// whenever pruneChan is signalled (image.remote_cache_expiry change).
	// NOTE(review): goroutine has no stop condition; it lives for the
	// daemon's whole lifetime.
	d.pruneChan = make(chan bool)
	go func() {
		d.pruneExpiredImages()
		for {
			timer := time.NewTimer(24 * time.Hour)
			timeChan := timer.C
			select {
			case <-timeChan:
				/* run once per day */
				d.pruneExpiredImages()
			case <-d.pruneChan:
				/* run when image.remote_cache_expiry is changed */
				d.pruneExpiredImages()
				timer.Stop()
			}
		}
	}()

	/* Setup /dev/lxd */
	d.devlxd, err = createAndBindDevLxd()
	if err != nil {
		return err
	}

	if err := setupSharedMounts(); err != nil {
		return err
	}

	var tlsConfig *tls.Config
	if !d.IsMock {
		err = d.SetupStorageDriver()
		if err != nil {
			return fmt.Errorf("Failed to setup storage: %s", err)
		}

		/* Restart containers */
		go func() {
			containersRestart(d)
		}()

		/* Start the scheduler */
		go deviceTaskScheduler(d)

		/* Setup the TLS authentication */
		certf, keyf, err := readMyCert()
		if err != nil {
			return err
		}
		d.certf = certf
		d.keyf = keyf
		readSavedClientCAList(d)

		tlsConfig, err = shared.GetTLSConfig(d.certf, d.keyf)
		if err != nil {
			return err
		}
	}

	/* Setup the web server */
	d.mux = mux.NewRouter()

	d.mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		SyncResponse(true, []string{"/1.0"}).Render(w)
	})

	for _, c := range api10 {
		d.createCmd("1.0", c)
	}

	for _, c := range apiInternal {
		d.createCmd("internal", c)
	}

	d.mux.NotFoundHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		shared.Log.Debug("Sending top level 404", log.Ctx{"url": r.URL})
		w.Header().Set("Content-Type", "application/json")
		NotFound.Render(w)
	})

	listeners, err := activation.Listeners(false)
	if err != nil {
		return err
	}

	var sockets []Socket

	if len(listeners) > 0 {
		shared.Log.Info("LXD is socket activated")

		// Heuristic: a listener whose address exists as a filesystem path
		// is the Unix socket; anything else gets wrapped in TLS.
		for _, listener := range listeners {
			if shared.PathExists(listener.Addr().String()) {
				sockets = append(sockets, Socket{Socket: listener, CloseOnExit: false})
			} else {
				tlsListener := tls.NewListener(listener, tlsConfig)
				sockets = append(sockets, Socket{Socket: tlsListener, CloseOnExit: false})
			}
		}
	} else {
		shared.Log.Info("LXD isn't socket activated")

		localSocketPath := shared.VarPath("unix.socket")

		// If the socket exists, let's try to connect to it and see if there's
		// a lxd running.
		if shared.PathExists(localSocketPath) {
			c, err := lxd.NewClient(&lxd.DefaultConfig, "local")
			if err != nil {
				return err
			}

			// Finger distinguishes a live daemon from a stale socket file.
			err = c.Finger()
			if err != nil {
				shared.Log.Debug("Detected stale unix socket, deleting")
				// Connecting failed, so let's delete the socket and
				// listen on it ourselves.
				err = os.Remove(localSocketPath)
				if err != nil {
					return err
				}
			} else {
				return fmt.Errorf("LXD is already running.")
			}
		}

		unixAddr, err := net.ResolveUnixAddr("unix", localSocketPath)
		if err != nil {
			return fmt.Errorf("cannot resolve unix socket address: %v", err)
		}

		unixl, err := net.ListenUnix("unix", unixAddr)
		if err != nil {
			return fmt.Errorf("cannot listen on unix socket: %v", err)
		}

		// Restrict the socket to root and the configured admin group.
		if err := os.Chmod(localSocketPath, 0660); err != nil {
			return err
		}

		var gid int
		if d.group != "" {
			gid, err = shared.GroupId(d.group)
			if err != nil {
				return err
			}
		} else {
			gid = os.Getgid()
		}

		if err := os.Chown(localSocketPath, os.Getuid(), gid); err != nil {
			return err
		}

		sockets = append(sockets, Socket{Socket: unixl, CloseOnExit: true})
	}

	listenAddr, err := d.ConfigValueGet("core.https_address")
	if err != nil {
		return err
	}

	if listenAddr != "" {
		// Append the default port when the configured address has none.
		_, _, err := net.SplitHostPort(listenAddr)
		if err != nil {
			listenAddr = fmt.Sprintf("%s:%s", listenAddr, shared.DefaultPort)
		}

		// A failed HTTPS bind is deliberately non-fatal: the daemon still
		// serves the Unix socket.
		tcpl, err := tls.Listen("tcp", listenAddr, tlsConfig)
		if err != nil {
			shared.Log.Error("cannot listen on https socket, skipping...", log.Ctx{"err": err})
		} else {
			sockets = append(sockets, Socket{Socket: tcpl, CloseOnExit: true})
		}
	}

	// Mock mode keeps the listeners out of d.Sockets so nothing is served.
	if !d.IsMock {
		d.Sockets = sockets
	} else {
		d.Sockets = []Socket{}
	}

	d.tomb.Go(func() error {
		shared.Log.Info("REST API daemon:")
		for _, socket := range d.Sockets {
			shared.Log.Info(" - binding socket", log.Ctx{"socket": socket.Socket.Addr()})
			// Copy the loop variable so each goroutine serves its own
			// socket (closures share the loop variable in this Go version).
			current_socket := socket
			d.tomb.Go(func() error { return http.Serve(current_socket.Socket, d.mux) })
		}

		d.tomb.Go(func() error {
			server := devLxdServer(d)
			return server.Serve(d.devlxd)
		})
		return nil
	})

	return nil
}
// Init initializes the daemon: logging, user-namespace and AppArmor
// detection (single aaEnabled flag in this version), supported architectures
// via uname, required directories, database, the expiry-driven image pruner,
// /dev/lxd, storage/TLS (skipped in mock mode) and the REST API sockets
// served under d.tomb.
func (d *Daemon) Init() error {
	/* Setup logging */
	if shared.Log == nil {
		shared.SetLogger("", "", true, true)
	}

	if !d.IsMock {
		shared.Log.Info("LXD is starting", log.Ctx{"path": shared.VarPath("")})
	} else {
		shared.Log.Info("Mock LXD is starting", log.Ctx{"path": shared.VarPath("")})
	}

	/* Detect user namespaces */
	runningInUserns = shared.RunningInUserNS()

	/* Detect apparmor support */
	// Each failed check clears aaEnabled; order is significant since later
	// checks are skipped once it is false.
	if aaEnabled && os.Getenv("LXD_SECURITY_APPARMOR") == "false" {
		aaEnabled = false
		shared.Log.Warn("Per-container AppArmor profiles have been manually disabled")
	}

	if aaEnabled && !shared.IsDir("/sys/kernel/security/apparmor") {
		aaEnabled = false
		shared.Log.Warn("Per-container AppArmor profiles disabled because of lack of kernel support")
	}

	if aaEnabled && !haveMacAdmin() {
		shared.Log.Warn("Per-container AppArmor profiles are disabled because mac_admin capability is missing.")
		aaEnabled = false
	}

	_, err := exec.LookPath("apparmor_parser")
	if aaEnabled && err != nil {
		aaEnabled = false
		shared.Log.Warn("Per-container AppArmor profiles disabled because 'apparmor_parser' couldn't be found")
	}

	if aaEnabled && runningInUserns {
		aaEnabled = false
		shared.Log.Warn("Per-container AppArmor profiles disabled because LXD is running inside a user namespace")
	}

	/* Get the list of supported architectures */
	var architectures = []int{}

	// uname.Machine is a fixed-size, NUL-padded C char array; convert it to
	// a Go string byte by byte, stopping at the first NUL.
	uname := syscall.Utsname{}
	if err := syscall.Uname(&uname); err != nil {
		return err
	}

	architectureName := ""
	for _, c := range uname.Machine {
		if c == 0 {
			break
		}
		architectureName += string(byte(c))
	}

	architecture, err := shared.ArchitectureId(architectureName)
	if err != nil {
		return err
	}
	architectures = append(architectures, architecture)

	// Personalities are the additional ABIs the kernel can run (e.g. 32-bit
	// binaries on a 64-bit kernel).
	personalities, err := shared.ArchitecturePersonalities(architecture)
	if err != nil {
		return err
	}
	for _, personality := range personalities {
		architectures = append(architectures, personality)
	}
	d.architectures = architectures

	/* Create required paths */
	d.lxcpath = shared.VarPath("containers")
	err = os.MkdirAll(d.lxcpath, 0755)
	if err != nil {
		return err
	}

	// Create default directories
	if err := os.MkdirAll(shared.VarPath("images"), 0700); err != nil {
		return err
	}
	if err := os.MkdirAll(shared.VarPath("snapshots"), 0700); err != nil {
		return err
	}
	if err := os.MkdirAll(shared.VarPath("devlxd"), 0755); err != nil {
		return err
	}

	/* Detect the filesystem */
	// Non-fatal: only loses filesystem-specific optimizations.
	d.BackingFs, err = filesystemDetect(d.lxcpath)
	if err != nil {
		shared.Log.Error("Error detecting backing fs", log.Ctx{"err": err})
	}

	/* Read the uid/gid allocation */
	// Non-fatal: without an idmap only privileged containers work.
	d.IdmapSet, err = shared.DefaultIdmapSet()
	if err != nil {
		shared.Log.Warn("Error reading idmap", log.Ctx{"err": err.Error()})
		shared.Log.Warn("Only privileged containers will be able to run")
	} else {
		shared.Log.Info("Default uid/gid map:")
		for _, lxcmap := range d.IdmapSet.ToLxcString() {
			shared.Log.Info(strings.TrimRight(" - "+lxcmap, "\n"))
		}
	}

	/* Initialize the database */
	err = initializeDbObject(d, shared.VarPath("lxd.db"))
	if err != nil {
		return err
	}

	/* Prune images */
	// Background pruner: the interval comes from the DB expiry value in
	// days (falling back to 10 on error, clamped to a minimum of 1), and
	// pruneChan triggers an immediate run when the setting changes.
	// NOTE(review): goroutine has no stop condition; it lives for the
	// daemon's whole lifetime.
	d.pruneChan = make(chan bool)
	go func() {
		for {
			expiryStr, err := dbImageExpiryGet(d.db)
			var expiry int
			if err != nil {
				expiry = 10
			} else {
				expiry, err = strconv.Atoi(expiryStr)
				if err != nil {
					expiry = 10
				}
				if expiry <= 0 {
					expiry = 1
				}
			}
			timer := time.NewTimer(time.Duration(expiry) * 24 * time.Hour)
			timeChan := timer.C
			select {
			case <-timeChan:
				d.pruneExpiredImages()
			case <-d.pruneChan:
				d.pruneExpiredImages()
				timer.Stop()
			}
		}
	}()

	/* Setup /dev/lxd */
	d.devlxd, err = createAndBindDevLxd()
	if err != nil {
		return err
	}

	if err := setupSharedMounts(); err != nil {
		return err
	}

	var tlsConfig *tls.Config
	if !d.IsMock {
		err = d.SetupStorageDriver()
		if err != nil {
			return fmt.Errorf("Failed to setup storage: %s", err)
		}

		/* Restart containers */
		// Both calls are synchronous here (unlike later versions which run
		// the restart in a goroutine).
		containersRestart(d)
		containersWatch(d)

		/* Setup the TLS authentication */
		certf, keyf, err := readMyCert()
		if err != nil {
			return err
		}
		d.certf = certf
		d.keyf = keyf
		readSavedClientCAList(d)

		tlsConfig, err = shared.GetTLSConfig(d.certf, d.keyf)
		if err != nil {
			return err
		}
	}

	/* Setup the web server */
	d.mux = mux.NewRouter()

	d.mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		SyncResponse(true, []string{"/1.0"}).Render(w)
	})

	for _, c := range api10 {
		d.createCmd("1.0", c)
	}

	d.mux.NotFoundHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		shared.Log.Debug("Sending top level 404", log.Ctx{"url": r.URL})
		w.Header().Set("Content-Type", "application/json")
		NotFound.Render(w)
	})

	listeners, err := activation.Listeners(false)
	if err != nil {
		return err
	}

	var sockets []Socket

	if len(listeners) > 0 {
		shared.Log.Info("LXD is socket activated")

		// Heuristic: a listener whose address exists as a filesystem path
		// is the Unix socket; anything else gets wrapped in TLS.
		for _, listener := range listeners {
			if shared.PathExists(listener.Addr().String()) {
				sockets = append(sockets, Socket{Socket: listener, CloseOnExit: false})
			} else {
				tlsListener := tls.NewListener(listener, tlsConfig)
				sockets = append(sockets, Socket{Socket: tlsListener, CloseOnExit: false})
			}
		}
	} else {
		shared.Log.Info("LXD isn't socket activated")

		localSocketPath := shared.VarPath("unix.socket")

		// If the socket exists, let's try to connect to it and see if there's
		// a lxd running.
		if shared.PathExists(localSocketPath) {
			c := &lxd.Config{Remotes: map[string]lxd.RemoteConfig{}}
			_, err := lxd.NewClient(c, "")
			if err != nil {
				shared.Log.Debug("Detected stale unix socket, deleting")
				// Connecting failed, so let's delete the socket and
				// listen on it ourselves.
				err = os.Remove(localSocketPath)
				if err != nil {
					return err
				}
			}
			// NOTE(review): when the connect succeeds (another LXD appears
			// to be running) this version falls through without an error;
			// the subsequent ListenUnix on the existing socket will fail
			// instead — presumably intentional at the time, verify.
		}

		unixAddr, err := net.ResolveUnixAddr("unix", localSocketPath)
		if err != nil {
			return fmt.Errorf("cannot resolve unix socket address: %v", err)
		}

		unixl, err := net.ListenUnix("unix", unixAddr)
		if err != nil {
			return fmt.Errorf("cannot listen on unix socket: %v", err)
		}

		// Restrict the socket to root and the configured admin group.
		if err := os.Chmod(localSocketPath, 0660); err != nil {
			return err
		}

		gid, err := shared.GroupId(*group)
		if err != nil {
			return err
		}

		if err := os.Chown(localSocketPath, os.Getuid(), gid); err != nil {
			return err
		}

		sockets = append(sockets, Socket{Socket: unixl, CloseOnExit: true})
	}

	listenAddr, err := d.ConfigValueGet("core.https_address")
	if err != nil {
		return err
	}

	if listenAddr != "" {
		// Append the default port when the configured address has none.
		_, _, err := net.SplitHostPort(listenAddr)
		if err != nil {
			listenAddr = fmt.Sprintf("%s:%s", listenAddr, shared.DefaultPort)
		}

		// A failed HTTPS bind is deliberately non-fatal: the daemon still
		// serves the Unix socket.
		tcpl, err := tls.Listen("tcp", listenAddr, tlsConfig)
		if err != nil {
			shared.Log.Error("cannot listen on https socket, skipping...", log.Ctx{"err": err})
		} else {
			sockets = append(sockets, Socket{Socket: tcpl, CloseOnExit: true})
		}
	}

	// Mock mode keeps the listeners out of d.Sockets so nothing is served.
	if !d.IsMock {
		d.Sockets = sockets
	} else {
		d.Sockets = []Socket{}
	}

	d.tomb.Go(func() error {
		shared.Log.Info("REST API daemon:")
		for _, socket := range d.Sockets {
			shared.Log.Info(" - binding socket", log.Ctx{"socket": socket.Socket.Addr()})
			// Copy the loop variable so each goroutine serves its own
			// socket (closures share the loop variable in this Go version).
			current_socket := socket
			d.tomb.Go(func() error { return http.Serve(current_socket.Socket, d.mux) })
		}

		d.tomb.Go(func() error {
			server := devLxdServer(d)
			return server.Serve(d.devlxd)
		})
		return nil
	})

	return nil
}
func cmdInit() error { var defaultPrivileged int // controls whether we set security.privileged=true var storageBackend string // dir or zfs var storageMode string // existing, loop or device var storageLoopSize int // Size in GB var storageDevice string // Path var storagePool string // pool name var networkAddress string // Address var networkPort int // Port var trustPassword string // Trust password // Detect userns defaultPrivileged = -1 runningInUserns = shared.RunningInUserNS() // Only root should run this if os.Geteuid() != 0 { return fmt.Errorf("This must be run as root") } backendsAvailable := []string{"dir"} backendsSupported := []string{"dir", "zfs"} // Detect zfs out, err := exec.LookPath("zfs") if err == nil && len(out) != 0 { backendsAvailable = append(backendsAvailable, "zfs") } reader := bufio.NewReader(os.Stdin) askBool := func(question string) bool { for { fmt.Printf(question) input, _ := reader.ReadString('\n') input = strings.TrimSuffix(input, "\n") if shared.StringInSlice(strings.ToLower(input), []string{"yes", "y"}) { return true } else if shared.StringInSlice(strings.ToLower(input), []string{"no", "n"}) { return false } fmt.Printf("Invalid input, try again.\n\n") } } askChoice := func(question string, choices []string) string { for { fmt.Printf(question) input, _ := reader.ReadString('\n') input = strings.TrimSuffix(input, "\n") if shared.StringInSlice(input, choices) { return input } fmt.Printf("Invalid input, try again.\n\n") } } askInt := func(question string, min int, max int) int { for { fmt.Printf(question) input, _ := reader.ReadString('\n') input = strings.TrimSuffix(input, "\n") intInput, err := strconv.Atoi(input) if err == nil && (min == -1 || intInput >= min) && (max == -1 || intInput <= max) { return intInput } fmt.Printf("Invalid input, try again.\n\n") } } askString := func(question string) string { for { fmt.Printf(question) input, _ := reader.ReadString('\n') input = strings.TrimSuffix(input, "\n") if len(input) != 0 { 
return input } fmt.Printf("Invalid input, try again.\n\n") } } askPassword := func(question string) string { for { fmt.Printf(question) pwd, _ := terminal.ReadPassword(0) fmt.Printf("\n") inFirst := string(pwd) inFirst = strings.TrimSuffix(inFirst, "\n") fmt.Printf("Again: ") pwd, _ = terminal.ReadPassword(0) fmt.Printf("\n") inSecond := string(pwd) inSecond = strings.TrimSuffix(inSecond, "\n") if inFirst == inSecond { return inFirst } fmt.Printf("Invalid input, try again.\n\n") } } // Confirm that LXD is online c, err := lxd.NewClient(&lxd.DefaultConfig, "local") if err != nil { return fmt.Errorf("Unable to talk to LXD: %s", err) } // Check that we have no containers or images in the store containers, err := c.ListContainers() if err != nil { return fmt.Errorf("Unable to list the LXD containers: %s", err) } images, err := c.ListImages() if err != nil { return fmt.Errorf("Unable to list the LXD images: %s", err) } if len(containers) > 0 || len(images) > 0 { return fmt.Errorf("You have existing containers or images. 
lxd init requires an empty LXD.") } if *argAuto { if *argStorageBackend == "" { *argStorageBackend = "dir" } // Do a bunch of sanity checks if !shared.StringInSlice(*argStorageBackend, backendsSupported) { return fmt.Errorf("The requested backend '%s' isn't supported by lxd init.", *argStorageBackend) } if !shared.StringInSlice(*argStorageBackend, backendsAvailable) { return fmt.Errorf("The requested backend '%s' isn't available on your system (missing tools).", *argStorageBackend) } if *argStorageBackend == "dir" { if *argStorageCreateLoop != -1 || *argStorageCreateDevice != "" || *argStoragePool != "" { return fmt.Errorf("None of --storage-pool, --storage-create-device or --storage-create-pool may be used with the 'dir' backend.") } } if *argStorageBackend == "zfs" { if *argStorageCreateLoop != -1 && *argStorageCreateDevice != "" { return fmt.Errorf("Only one of --storage-create-device or --storage-create-pool can be specified with the 'zfs' backend.") } if *argStoragePool == "" { return fmt.Errorf("--storage-pool must be specified with the 'zfs' backend.") } } if *argNetworkAddress == "" { if *argNetworkPort != -1 { return fmt.Errorf("--network-port cannot be used without --network-address.") } if *argTrustPassword != "" { return fmt.Errorf("--trust-password cannot be used without --network-address.") } } // Set the local variables if *argStorageCreateDevice != "" { storageMode = "device" } else if *argStorageCreateLoop != -1 { storageMode = "loop" } else { storageMode = "existing" } storageBackend = *argStorageBackend storageLoopSize = *argStorageCreateLoop storageDevice = *argStorageCreateDevice storagePool = *argStoragePool networkAddress = *argNetworkAddress networkPort = *argNetworkPort trustPassword = *argTrustPassword } else { if *argStorageBackend != "" || *argStorageCreateDevice != "" || *argStorageCreateLoop != -1 || *argStoragePool != "" || *argNetworkAddress != "" || *argNetworkPort != -1 || *argTrustPassword != "" { return fmt.Errorf("Init 
configuration is only valid with --auto") } storageBackend = askChoice("Name of the storage backend to use (dir or zfs): ", backendsSupported) if !shared.StringInSlice(storageBackend, backendsSupported) { return fmt.Errorf("The requested backend '%s' isn't supported by lxd init.", storageBackend) } if !shared.StringInSlice(storageBackend, backendsAvailable) { return fmt.Errorf("The requested backend '%s' isn't available on your system (missing tools).", storageBackend) } if storageBackend == "zfs" { if askBool("Create a new ZFS pool (yes/no)? ") { storagePool = askString("Name of the new ZFS pool: ") if askBool("Would you like to use an existing block device (yes/no)? ") { storageDevice = askString("Path to the existing block device: ") storageMode = "device" } else { storageLoopSize = askInt("Size in GB of the new loop device (1GB minimum): ", 1, -1) storageMode = "loop" } } else { storagePool = askString("Name of the existing ZFS pool or dataset: ") storageMode = "existing" } } if runningInUserns { fmt.Printf(` We detected that you are running inside an unprivileged container. This means that unless you manually configured your host otherwise, you will not have enough uid and gid to allocate to your containers. LXD can re-use your container's own allocation to avoid the problem. Doing so makes your nested containers slightly less safe as they could in theory attack their parent container and gain more privileges than they otherwise would. `) if askBool("Would you like to have your containers share their parent's allocation (yes/no)? ") { defaultPrivileged = 1 } else { defaultPrivileged = 0 } } if askBool("Would you like LXD to be available over the network (yes/no)? 
") { networkAddress = askString("Address to bind LXD to (not including port): ") networkPort = askInt("Port to bind LXD to (8443 recommended): ", 1, 65535) trustPassword = askPassword("Trust password for new clients: ") } } if !shared.StringInSlice(storageBackend, []string{"dir", "zfs"}) { return fmt.Errorf("Invalid storage backend: %s", storageBackend) } // Unset all storage keys, core.https_address and core.trust_password for _, key := range []string{"storage.zfs_pool_name", "core.https_address", "core.trust_password"} { _, err = c.SetServerConfig(key, "") if err != nil { return err } } // Destroy any existing loop device for _, file := range []string{"zfs.img"} { os.Remove(shared.VarPath(file)) } if storageBackend == "zfs" { _ = exec.Command("modprobe", "zfs").Run() if storageMode == "loop" { storageDevice = shared.VarPath("zfs.img") f, err := os.Create(storageDevice) if err != nil { return fmt.Errorf("Failed to open %s: %s", storageDevice, err) } err = f.Chmod(0600) if err != nil { return fmt.Errorf("Failed to chmod %s: %s", storageDevice, err) } err = f.Truncate(int64(storageLoopSize * 1024 * 1024 * 1024)) if err != nil { return fmt.Errorf("Failed to create sparse file %s: %s", storageDevice, err) } err = f.Close() if err != nil { return fmt.Errorf("Failed to close %s: %s", storageDevice, err) } } if shared.StringInSlice(storageMode, []string{"loop", "device"}) { output, err := exec.Command( "zpool", "create", storagePool, storageDevice, "-f", "-m", "none").CombinedOutput() if err != nil { return fmt.Errorf("Failed to create the ZFS pool: %s", output) } } // Configure LXD to use the pool _, err = c.SetServerConfig("storage.zfs_pool_name", storagePool) if err != nil { return err } } if defaultPrivileged == 0 { err = c.SetProfileConfigItem("default", "security.privileged", "") if err != nil { return err } } else if defaultPrivileged == 1 { err = c.SetProfileConfigItem("default", "security.privileged", "true") if err != nil { } } if networkAddress != "" { _, err 
= c.SetServerConfig("core.https_address", fmt.Sprintf("%s:%d", networkAddress, networkPort)) if err != nil { return err } if trustPassword != "" { _, err = c.SetServerConfig("core.trust_password", trustPassword) if err != nil { return err } } } fmt.Printf("LXD has been successfully configured.\n") return nil }
func cmdInit() error { var defaultPrivileged int // controls whether we set security.privileged=true var storageBackend string // dir or zfs var storageMode string // existing, loop or device var storageLoopSize int64 // Size in GB var storageDevice string // Path var storagePool string // pool name var networkAddress string // Address var networkPort int64 // Port var trustPassword string // Trust password var imagesAutoUpdate bool // controls whether we set images.auto_update_interval to 0 var bridgeName string // Bridge name var bridgeIPv4 string // IPv4 address var bridgeIPv4Nat bool // IPv4 address var bridgeIPv6 string // IPv6 address var bridgeIPv6Nat bool // IPv6 address // Detect userns defaultPrivileged = -1 runningInUserns = shared.RunningInUserNS() imagesAutoUpdate = true // Only root should run this if os.Geteuid() != 0 { return fmt.Errorf("This must be run as root") } backendsAvailable := []string{"dir"} backendsSupported := []string{"dir", "zfs"} // Detect zfs out, err := exec.LookPath("zfs") if err == nil && len(out) != 0 && !runningInUserns { _ = loadModule("zfs") err := shared.RunCommand("zpool", "list") if err == nil { backendsAvailable = append(backendsAvailable, "zfs") } } reader := bufio.NewReader(os.Stdin) askBool := func(question string, default_ string) bool { for { fmt.Printf(question) input, _ := reader.ReadString('\n') input = strings.TrimSuffix(input, "\n") if input == "" { input = default_ } if shared.StringInSlice(strings.ToLower(input), []string{"yes", "y"}) { return true } else if shared.StringInSlice(strings.ToLower(input), []string{"no", "n"}) { return false } fmt.Printf("Invalid input, try again.\n\n") } } askChoice := func(question string, choices []string, default_ string) string { for { fmt.Printf(question) input, _ := reader.ReadString('\n') input = strings.TrimSuffix(input, "\n") if input == "" { input = default_ } if shared.StringInSlice(input, choices) { return input } fmt.Printf("Invalid input, try again.\n\n") } } askInt 
:= func(question string, min int64, max int64, default_ string) int64 { for { fmt.Printf(question) input, _ := reader.ReadString('\n') input = strings.TrimSuffix(input, "\n") if input == "" { input = default_ } intInput, err := strconv.ParseInt(input, 10, 64) if err == nil && (min == -1 || intInput >= min) && (max == -1 || intInput <= max) { return intInput } fmt.Printf("Invalid input, try again.\n\n") } } askString := func(question string, default_ string, validate func(string) error) string { for { fmt.Printf(question) input, _ := reader.ReadString('\n') input = strings.TrimSuffix(input, "\n") if input == "" { input = default_ } if validate != nil { result := validate(input) if result != nil { fmt.Printf("Invalid input: %s\n\n", result) continue } } if len(input) != 0 { return input } fmt.Printf("Invalid input, try again.\n\n") } } askPassword := func(question string) string { for { fmt.Printf(question) pwd, _ := terminal.ReadPassword(0) fmt.Printf("\n") inFirst := string(pwd) inFirst = strings.TrimSuffix(inFirst, "\n") fmt.Printf("Again: ") pwd, _ = terminal.ReadPassword(0) fmt.Printf("\n") inSecond := string(pwd) inSecond = strings.TrimSuffix(inSecond, "\n") if inFirst == inSecond { return inFirst } fmt.Printf("Invalid input, try again.\n\n") } } // Confirm that LXD is online c, err := lxd.NewClient(&lxd.DefaultConfig, "local") if err != nil { return fmt.Errorf("Unable to talk to LXD: %s", err) } // Check that we have no containers or images in the store containers, err := c.ListContainers() if err != nil { return fmt.Errorf("Unable to list the LXD containers: %s", err) } images, err := c.ListImages() if err != nil { return fmt.Errorf("Unable to list the LXD images: %s", err) } if len(containers) > 0 || len(images) > 0 { return fmt.Errorf("You have existing containers or images. 
lxd init requires an empty LXD.") } if *argAuto { if *argStorageBackend == "" { *argStorageBackend = "dir" } // Do a bunch of sanity checks if !shared.StringInSlice(*argStorageBackend, backendsSupported) { return fmt.Errorf("The requested backend '%s' isn't supported by lxd init.", *argStorageBackend) } if !shared.StringInSlice(*argStorageBackend, backendsAvailable) { return fmt.Errorf("The requested backend '%s' isn't available on your system (missing tools).", *argStorageBackend) } if *argStorageBackend == "dir" { if *argStorageCreateLoop != -1 || *argStorageCreateDevice != "" || *argStoragePool != "" { return fmt.Errorf("None of --storage-pool, --storage-create-device or --storage-create-loop may be used with the 'dir' backend.") } } if *argStorageBackend == "zfs" { if *argStorageCreateLoop != -1 && *argStorageCreateDevice != "" { return fmt.Errorf("Only one of --storage-create-device or --storage-create-loop can be specified with the 'zfs' backend.") } if *argStoragePool == "" { return fmt.Errorf("--storage-pool must be specified with the 'zfs' backend.") } } if *argNetworkAddress == "" { if *argNetworkPort != -1 { return fmt.Errorf("--network-port cannot be used without --network-address.") } if *argTrustPassword != "" { return fmt.Errorf("--trust-password cannot be used without --network-address.") } } // Set the local variables if *argStorageCreateDevice != "" { storageMode = "device" } else if *argStorageCreateLoop != -1 { storageMode = "loop" } else { storageMode = "existing" } storageBackend = *argStorageBackend storageLoopSize = *argStorageCreateLoop storageDevice = *argStorageCreateDevice storagePool = *argStoragePool networkAddress = *argNetworkAddress networkPort = *argNetworkPort trustPassword = *argTrustPassword } else { if *argStorageBackend != "" || *argStorageCreateDevice != "" || *argStorageCreateLoop != -1 || *argStoragePool != "" || *argNetworkAddress != "" || *argNetworkPort != -1 || *argTrustPassword != "" { return fmt.Errorf("Init 
configuration is only valid with --auto") } defaultStorage := "dir" if shared.StringInSlice("zfs", backendsAvailable) { defaultStorage = "zfs" } storageBackend = askChoice(fmt.Sprintf("Name of the storage backend to use (dir or zfs) [default=%s]: ", defaultStorage), backendsSupported, defaultStorage) if !shared.StringInSlice(storageBackend, backendsSupported) { return fmt.Errorf("The requested backend '%s' isn't supported by lxd init.", storageBackend) } if !shared.StringInSlice(storageBackend, backendsAvailable) { return fmt.Errorf("The requested backend '%s' isn't available on your system (missing tools).", storageBackend) } if storageBackend == "zfs" { if askBool("Create a new ZFS pool (yes/no) [default=yes]? ", "yes") { storagePool = askString("Name of the new ZFS pool [default=lxd]: ", "lxd", nil) if askBool("Would you like to use an existing block device (yes/no) [default=no]? ", "no") { deviceExists := func(path string) error { if !shared.IsBlockdevPath(path) { return fmt.Errorf("'%s' is not a block device", path) } return nil } storageDevice = askString("Path to the existing block device: ", "", deviceExists) storageMode = "device" } else { st := syscall.Statfs_t{} err := syscall.Statfs(shared.VarPath(), &st) if err != nil { return fmt.Errorf("couldn't statfs %s: %s", shared.VarPath(), err) } /* choose 15 GB < x < 100GB, where x is 20% of the disk size */ def := uint64(st.Frsize) * st.Blocks / (1024 * 1024 * 1024) / 5 if def > 100 { def = 100 } if def < 15 { def = 15 } q := fmt.Sprintf("Size in GB of the new loop device (1GB minimum) [default=%d]: ", def) storageLoopSize = askInt(q, 1, -1, fmt.Sprintf("%d", def)) storageMode = "loop" } } else { storagePool = askString("Name of the existing ZFS pool or dataset: ", "", nil) storageMode = "existing" } } if runningInUserns { fmt.Printf(` We detected that you are running inside an unprivileged container. 
This means that unless you manually configured your host otherwise, you will not have enough uid and gid to allocate to your containers. LXD can re-use your container's own allocation to avoid the problem. Doing so makes your nested containers slightly less safe as they could in theory attack their parent container and gain more privileges than they otherwise would. `) if askBool("Would you like to have your containers share their parent's allocation (yes/no) [default=yes]? ", "yes") { defaultPrivileged = 1 } else { defaultPrivileged = 0 } } if askBool("Would you like LXD to be available over the network (yes/no) [default=no]? ", "no") { isIPAddress := func(s string) error { if s != "all" && net.ParseIP(s) == nil { return fmt.Errorf("'%s' is not an IP address", s) } return nil } networkAddress = askString("Address to bind LXD to (not including port) [default=all]: ", "all", isIPAddress) if networkAddress == "all" { networkAddress = "::" } if net.ParseIP(networkAddress).To4() == nil { networkAddress = fmt.Sprintf("[%s]", networkAddress) } networkPort = askInt("Port to bind LXD to [default=8443]: ", 1, 65535, "8443") trustPassword = askPassword("Trust password for new clients: ") } if !askBool("Would you like stale cached images to be updated automatically (yes/no) [default=yes]? ", "yes") { imagesAutoUpdate = false } if askBool("Would you like to create a new network bridge (yes/no) [default=yes]? ", "yes") { bridgeName = askString("What should the new bridge be called [default=lxdbr0]? ", "lxdbr0", networkValidName) bridgeIPv4 = askString("What IPv4 subnet should be used (CIDR notation, “auto” or “none”) [default=auto]? ", "auto", func(value string) error { if shared.StringInSlice(value, []string{"auto", "none"}) { return nil } return networkValidAddressCIDRV4(value) }) if !shared.StringInSlice(bridgeIPv4, []string{"auto", "none"}) { bridgeIPv4Nat = askBool("Would you like LXD to NAT IPv4 traffic on your bridge? [default=yes]? 
", "yes") } bridgeIPv6 = askString("What IPv6 subnet should be used (CIDR notation, “auto” or “none”) [default=auto]? ", "auto", func(value string) error { if shared.StringInSlice(value, []string{"auto", "none"}) { return nil } return networkValidAddressCIDRV6(value) }) if !shared.StringInSlice(bridgeIPv6, []string{"auto", "none"}) { bridgeIPv6Nat = askBool("Would you like LXD to NAT IPv6 traffic on your bridge? [default=yes]? ", "yes") } } } if !shared.StringInSlice(storageBackend, []string{"dir", "zfs"}) { return fmt.Errorf("Invalid storage backend: %s", storageBackend) } // Unset all storage keys, core.https_address and core.trust_password for _, key := range []string{"storage.zfs_pool_name", "core.https_address", "core.trust_password"} { _, err = c.SetServerConfig(key, "") if err != nil { return err } } // Destroy any existing loop device for _, file := range []string{"zfs.img"} { os.Remove(shared.VarPath(file)) } if storageBackend == "zfs" { if storageMode == "loop" { storageDevice = shared.VarPath("zfs.img") f, err := os.Create(storageDevice) if err != nil { return fmt.Errorf("Failed to open %s: %s", storageDevice, err) } err = f.Chmod(0600) if err != nil { return fmt.Errorf("Failed to chmod %s: %s", storageDevice, err) } err = f.Truncate(int64(storageLoopSize * 1024 * 1024 * 1024)) if err != nil { return fmt.Errorf("Failed to create sparse file %s: %s", storageDevice, err) } err = f.Close() if err != nil { return fmt.Errorf("Failed to close %s: %s", storageDevice, err) } } if shared.StringInSlice(storageMode, []string{"loop", "device"}) { output, err := exec.Command( "zpool", "create", storagePool, storageDevice, "-f", "-m", "none", "-O", "compression=on").CombinedOutput() if err != nil { return fmt.Errorf("Failed to create the ZFS pool: %s", output) } } // Configure LXD to use the pool _, err = c.SetServerConfig("storage.zfs_pool_name", storagePool) if err != nil { return err } } if defaultPrivileged == 0 { err = c.SetProfileConfigItem("default", 
"security.privileged", "") if err != nil { return err } } else if defaultPrivileged == 1 { err = c.SetProfileConfigItem("default", "security.privileged", "true") if err != nil { } } if imagesAutoUpdate { ss, err := c.ServerStatus() if err != nil { return err } if val, ok := ss.Config["images.auto_update_interval"]; ok && val == "0" { _, err = c.SetServerConfig("images.auto_update_interval", "") if err != nil { return err } } } else { _, err = c.SetServerConfig("images.auto_update_interval", "0") if err != nil { return err } } if networkAddress != "" { _, err = c.SetServerConfig("core.https_address", fmt.Sprintf("%s:%d", networkAddress, networkPort)) if err != nil { return err } if trustPassword != "" { _, err = c.SetServerConfig("core.trust_password", trustPassword) if err != nil { return err } } } if bridgeName != "" { bridgeConfig := map[string]string{} bridgeConfig["ipv4.address"] = bridgeIPv4 bridgeConfig["ipv6.address"] = bridgeIPv6 if bridgeIPv4Nat { bridgeConfig["ipv4.nat"] = "true" } if bridgeIPv6Nat { bridgeConfig["ipv6.nat"] = "true" } err = c.NetworkCreate(bridgeName, bridgeConfig) if err != nil { return err } props := []string{"nictype=bridged", fmt.Sprintf("parent=%s", bridgeName)} _, err = c.ProfileDeviceAdd("default", "eth0", "nic", props) if err != nil { return err } } fmt.Printf("LXD has been successfully configured.\n") return nil }
// init prepares the LXContainer for this LXD Container.
// It builds the liblxc container handle and applies, in order: logging,
// architecture, the common include, base settings, profiles, per-container
// config, userns/nesting adjustments, AppArmor/seccomp confinement, devices
// and id mapping. The order of SetConfigItem calls is significant.
// TODO: This gets called on each load of the container,
// we might be able to split this up into c.Start().
func (c *containerLXD) init() error {
	// Template config shipped by the lxc package; overridable for tests
	// or non-standard installs via LXD_LXC_TEMPLATE_CONFIG.
	templateConfBase := "ubuntu"
	templateConfDir := os.Getenv("LXD_LXC_TEMPLATE_CONFIG")
	if templateConfDir == "" {
		templateConfDir = "/usr/share/lxc/config"
	}

	cc, err := lxc.NewContainer(c.NameGet(), c.daemon.lxcpath)
	if err != nil {
		return err
	}
	c.c = cc

	// Make sure the log directory exists before pointing liblxc at it.
	logfile := c.LogFilePathGet()
	if err := os.MkdirAll(filepath.Dir(logfile), 0700); err != nil {
		return err
	}

	if err = c.c.SetLogFile(logfile); err != nil {
		return err
	}

	// Unknown architectures simply skip the personality setting
	// (the error from ArchitecturePersonality is deliberately ignored).
	personality, err := shared.ArchitecturePersonality(c.architecture)
	if err == nil {
		if err := c.c.SetConfigItem("lxc.arch", personality); err != nil {
			return err
		}
	}

	err = c.c.SetConfigItem("lxc.include", fmt.Sprintf("%s/%s.common.conf", templateConfDir, templateConfBase))
	if err != nil {
		return err
	}

	if err := c.c.SetConfigItem("lxc.rootfs", c.RootfsPathGet()); err != nil {
		return err
	}
	if err := c.c.SetConfigItem("lxc.loglevel", "0"); err != nil {
		return err
	}
	if err := c.c.SetConfigItem("lxc.utsname", c.NameGet()); err != nil {
		return err
	}
	if err := c.c.SetConfigItem("lxc.tty", "0"); err != nil {
		return err
	}
	if err := setupDevLxdMount(c.c); err != nil {
		return err
	}

	// Apply profiles first ...
	for _, p := range c.profiles {
		if err := c.applyProfile(p); err != nil {
			return err
		}
	}

	// base per-container config should override profile config, so we apply it second
	if err := c.applyConfig(c.baseConfig); err != nil {
		return err
	}

	// Unprivileged containers (or LXD itself inside a userns) need the
	// extra userns include on top of the common one.
	if !c.IsPrivileged() || shared.RunningInUserNS() {
		err = c.c.SetConfigItem("lxc.include", fmt.Sprintf("%s/%s.userns.conf", templateConfDir, templateConfBase))
		if err != nil {
			return err
		}
	}

	if c.IsNesting() {
		shared.Debugf("Setting up %s for nesting", c.name)
		// Only append cgroup:mixed when no cgroup entry is present yet,
		// so an explicit user setting is not overridden.
		orig := c.c.ConfigItem("lxc.mount.auto")
		auto := ""
		if len(orig) == 1 {
			auto = orig[0]
		}
		if !strings.Contains(auto, "cgroup") {
			auto = fmt.Sprintf("%s %s", auto, "cgroup:mixed")
			err = c.c.SetConfigItem("lxc.mount.auto", auto)
			if err != nil {
				return err
			}
		}
		/*
		 * mount extra /proc and /sys to work around kernel
		 * restrictions on remounting them when covered
		 */
		err = c.c.SetConfigItem("lxc.mount.entry", "proc dev/.lxc/proc proc create=dir,optional")
		if err != nil {
			return err
		}
		err = c.c.SetConfigItem("lxc.mount.entry", "sys dev/.lxc/sys sysfs create=dir,optional")
		if err != nil {
			return err
		}
	}

	/*
	 * Until stacked apparmor profiles are possible, we have to run nested
	 * containers unconfined
	 */
	if aaEnabled {
		if aaConfined() {
			// LXD itself is already confined: reuse the current profile
			// (stripped of the " (enforce)" suffix) for the container.
			curProfile := aaProfile()
			shared.Debugf("Running %s in current profile %s (nested container)", c.name, curProfile)
			curProfile = strings.TrimSuffix(curProfile, " (enforce)")
			if err := c.c.SetConfigItem("lxc.aa_profile", curProfile); err != nil {
				return err
			}
		} else if err := c.c.SetConfigItem("lxc.aa_profile", AAProfileName(c)); err != nil {
			return err
		}
	}

	if err := c.c.SetConfigItem("lxc.seccomp", SeccompProfilePath(c)); err != nil {
		return err
	}

	if err := c.setupMacAddresses(); err != nil {
		return err
	}

	// Allow overwrites of devices
	for k, v := range c.baseDevices {
		c.devices[k] = v
	}

	/* now add the lxc.* entries for the configured devices */
	if err := c.applyDevices(); err != nil {
		return err
	}

	// Unprivileged containers require a uid/gid allocation to map into.
	if !c.IsPrivileged() {
		if c.daemon.IdmapSet == nil {
			return fmt.Errorf("user has no subuids")
		}
		c.idmapset = c.daemon.IdmapSet // TODO - per-tenant idmaps
	}

	if err := c.mountShared(); err != nil {
		return err
	}

	if err := c.applyIdmapSet(); err != nil {
		return err
	}

	if err := c.applyPostDeviceConfig(); err != nil {
		return err
	}

	return nil
}
func run() error { gnuflag.Usage = func() { fmt.Printf("Usage: lxd [command] [options]\n\nOptions:\n") gnuflag.PrintDefaults() fmt.Printf("\nCommands:\n") fmt.Printf(" shutdown\n") fmt.Printf(" Perform a clean shutdown of LXD and all running containers\n") fmt.Printf(" activateifneeded\n") fmt.Printf(" Check if LXD should be started (at boot) and if so, spawn it through socket activation\n") fmt.Printf("\nInternal commands (don't call directly):\n") fmt.Printf(" forkgetfile\n") fmt.Printf(" Grab a file from a running container\n") fmt.Printf(" forkputfile\n") fmt.Printf(" Pushes a file to a running container\n") fmt.Printf(" forkstart\n") fmt.Printf(" Start a container\n") fmt.Printf(" forkmigrate\n") fmt.Printf(" Restore a container after migration\n") } gnuflag.Parse(true) if *help { // The user asked for help via --help, so we shouldn't print to // stderr. gnuflag.SetOut(os.Stdout) gnuflag.Usage() return nil } if *version { fmt.Println(shared.Version) return nil } // Configure logging syslog := "" if *syslogFlag { syslog = "lxd" } err := shared.SetLogger(syslog, *logfile, *verbose, *debug) if err != nil { fmt.Printf("%s", err) return nil } // Process sub-commands if len(os.Args) > 1 { // "forkputfile" and "forkgetfile" are handled specially in copyfile.go switch os.Args[1] { case "forkstart": return startContainer(os.Args[1:]) case "forkmigrate": return MigrateContainer(os.Args[1:]) case "shutdown": return cleanShutdown() case "activateifneeded": return activateIfNeeded() } } if gnuflag.NArg() != 0 { gnuflag.Usage() return fmt.Errorf("Unknown arguments") } if *cpuProfile != "" { f, err := os.Create(*cpuProfile) if err != nil { fmt.Printf("Error opening cpu profile file: %s\n", err) return nil } pprof.StartCPUProfile(f) defer pprof.StopCPUProfile() } if *memProfile != "" { go memProfiler() } neededPrograms := []string{"setfacl", "rsync", "tar", "xz"} for _, p := range neededPrograms { _, err := exec.LookPath(p) if err != nil { return err } } _, err = 
exec.LookPath("apparmor_parser") if err == nil && shared.IsDir("/sys/kernel/security/apparmor") { aaEnabled = true } else { shared.Log.Warn("apparmor_parser binary not found or apparmor " + "fs not mounted. AppArmor disabled.") } if aaEnabled && os.Getenv("LXD_SECURITY_APPARMOR") == "false" { aaEnabled = false shared.Log.Warn("per-container apparmor profiles have been manually disabled") } runningInUserns = shared.RunningInUserNS() if aaEnabled && runningInUserns { aaEnabled = false shared.Log.Warn("per-container apparmor profiles disabled because we are in a user namespace") } /* Can we create devices? */ checkCanMknod() if *printGoroutines > 0 { go func() { for { time.Sleep(time.Duration(*printGoroutines) * time.Second) shared.PrintStack() } }() } d, err := startDaemon() if err != nil { if d != nil && d.db != nil { d.db.Close() } return err } var ret error var wg sync.WaitGroup wg.Add(1) go func() { ch := make(chan os.Signal) signal.Notify(ch, syscall.SIGPWR) sig := <-ch shared.Log.Info( fmt.Sprintf("Received '%s signal', shutting down containers.", sig)) containersShutdown(d) ret = d.Stop() wg.Done() }() go func() { ch := make(chan os.Signal) signal.Notify(ch, syscall.SIGINT) signal.Notify(ch, syscall.SIGQUIT) signal.Notify(ch, syscall.SIGTERM) sig := <-ch shared.Log.Info(fmt.Sprintf("Received '%s signal', exiting.\n", sig)) ret = d.Stop() wg.Done() }() wg.Wait() return ret }