Example #1
func (c *ApplyCommand) Run(args []string) int {
	var destroyForce, refresh bool
	args = c.Meta.process(args, true)

	cmdName := "apply"
	if c.Destroy {
		cmdName = "destroy"
	}

	cmdFlags := c.Meta.flagSet(cmdName)
	if c.Destroy {
		cmdFlags.BoolVar(&destroyForce, "force", false, "force")
	}
	cmdFlags.BoolVar(&refresh, "refresh", true, "refresh")
	cmdFlags.StringVar(&c.Meta.statePath, "state", DefaultStateFilename, "path")
	cmdFlags.StringVar(&c.Meta.stateOutPath, "state-out", "", "path")
	cmdFlags.StringVar(&c.Meta.backupPath, "backup", "", "path")
	cmdFlags.Usage = func() { c.Ui.Error(c.Help()) }
	if err := cmdFlags.Parse(args); err != nil {
		return 1
	}

	pwd, err := os.Getwd()
	if err != nil {
		c.Ui.Error(fmt.Sprintf("Error getting pwd: %s", err))
		return 1
	}

	var configPath string
	maybeInit := true
	args = cmdFlags.Args()
	if len(args) > 1 {
		c.Ui.Error("The apply command expects at most one argument.")
		cmdFlags.Usage()
		return 1
	} else if len(args) == 1 {
		configPath = args[0]
	} else {
		configPath = pwd
		maybeInit = false
	}

	// Prepare the extra hooks to count resources
	countHook := new(CountHook)
	stateHook := new(StateHook)
	c.Meta.extraHooks = []terraform.Hook{countHook, stateHook}

	if !c.Destroy && maybeInit {
		// Do a detect to determine if we need to do an init + apply.
		if detected, err := module.Detect(configPath, pwd); err != nil {
			c.Ui.Error(fmt.Sprintf(
				"Invalid path: %s", err))
			return 1
		} else if !strings.HasPrefix(detected, "file") {
			// If this isn't a file URL then we're doing an init +
			// apply.
			var init InitCommand
			init.Meta = c.Meta
			if code := init.Run([]string{detected}); code != 0 {
				return code
			}

			// Change the config path to be the cwd
			configPath = pwd
		}
	}

	// Build the context based on the arguments given
	ctx, planned, err := c.Context(contextOpts{
		Destroy:   c.Destroy,
		Path:      configPath,
		StatePath: c.Meta.statePath,
	})
	if err != nil {
		c.Ui.Error(err.Error())
		return 1
	}
	if c.Destroy && planned {
		c.Ui.Error(fmt.Sprintf(
			"Destroy can't be called with a plan file."))
		return 1
	}
	if !destroyForce && c.Destroy {
		v, err := c.UIInput().Input(&terraform.InputOpts{
			Id:    "destroy",
			Query: "Do you really want to destroy?",
			Description: "Terraform will delete all your managed infrastructure.\n" +
				"There is no undo. Only 'yes' will be accepted to confirm.",
		})
		if err != nil {
			c.Ui.Error(fmt.Sprintf("Error asking for confirmation: %s", err))
			return 1
		}
		if v != "yes" {
			c.Ui.Output("Destroy cancelled.")
			return 1
		}
	}
	if !planned {
		if err := ctx.Input(c.InputMode()); err != nil {
			c.Ui.Error(fmt.Sprintf("Error configuring: %s", err))
			return 1
		}
	}
	if !validateContext(ctx, c.Ui) {
		return 1
	}

	// Plan if we haven't already
	if !planned {
		if refresh {
			if _, err := ctx.Refresh(); err != nil {
				c.Ui.Error(fmt.Sprintf("Error refreshing state: %s", err))
				return 1
			}
		}

		if _, err := ctx.Plan(); err != nil {
			c.Ui.Error(fmt.Sprintf(
				"Error creating plan: %s", err))
			return 1
		}
	}

	// Set up the state hook for continuous state updates
	{
		state, err := c.State()
		if err != nil {
			c.Ui.Error(fmt.Sprintf(
				"Error reading state: %s", err))
			return 1
		}

		stateHook.State = state
	}

	// Start the apply in a goroutine so that we can be interrupted.
	var state *terraform.State
	var applyErr error
	doneCh := make(chan struct{})
	go func() {
		defer close(doneCh)
		state, applyErr = ctx.Apply()
	}()

	// Wait for the apply to finish or for us to be interrupted so
	// we can handle it properly.
	err = nil
	select {
	case <-c.ShutdownCh:
		c.Ui.Output("Interrupt received. Gracefully shutting down...")

		// Stop execution
		go ctx.Stop()

		// Still wait for the result, since the apply is still running
		select {
		case <-c.ShutdownCh:
			c.Ui.Error(
				"Two interrupts received. Exiting immediately. Note that data\n" +
					"loss may have occurred.")
			return 1
		case <-doneCh:
		}
	case <-doneCh:
	}

	// Persist the state
	if state != nil {
		if err := c.Meta.PersistState(state); err != nil {
			c.Ui.Error(fmt.Sprintf("Failed to save state: %s", err))
			return 1
		}
	}

	if applyErr != nil {
		c.Ui.Error(fmt.Sprintf(
			"Error applying plan:\n\n"+
				"%s\n\n"+
				"Terraform does not automatically rollback in the face of errors.\n"+
				"Instead, your Terraform state file has been partially updated with\n"+
				"any resources that successfully completed. Please address the error\n"+
				"above and apply again to incrementally change your infrastructure.",
			multierror.Flatten(applyErr)))
		return 1
	}

	c.Ui.Output(c.Colorize().Color(fmt.Sprintf(
		"[reset][bold][green]\n"+
			"Apply complete! Resources: %d added, %d changed, %d destroyed.",
		countHook.Added,
		countHook.Changed,
		countHook.Removed)))

	if countHook.Added > 0 || countHook.Changed > 0 {
		c.Ui.Output(c.Colorize().Color(fmt.Sprintf(
			"[reset]\n"+
				"The state of your infrastructure has been saved to the path\n"+
				"below. This state is required to modify and destroy your\n"+
				"infrastructure, so keep it safe. To inspect the complete state\n"+
				"use the `terraform show` command.\n\n"+
				"State path: %s",
			c.Meta.StateOutPath())))
	}

	if !c.Destroy {
		if outputs := outputsAsString(state); outputs != "" {
			c.Ui.Output(c.Colorize().Color(outputs))
		}
	}

	return 0
}
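
Example #1 is the Run method of Terraform's apply/destroy command. module.Detect is used to classify the single positional argument: if the detected source is not a file URL, the command first runs init to fetch the remote configuration and then applies from the current working directory. Below is a minimal, self-contained sketch of that check; the import path and the example source string are assumptions for illustration, not part of the original code.

package main

import (
	"fmt"
	"log"
	"os"
	"strings"

	"github.com/hashicorp/terraform/config/module"
)

func main() {
	pwd, err := os.Getwd()
	if err != nil {
		log.Fatalf("Error getting pwd: %s", err)
	}

	// Detect resolves a raw source string against the working directory and
	// returns a canonical URL; local directories come back as file URLs.
	detected, err := module.Detect("github.com/hashicorp/example", pwd)
	if err != nil {
		log.Fatalf("Invalid path: %s", err)
	}

	if strings.HasPrefix(detected, "file") {
		fmt.Println("local configuration, apply directly:", detected)
	} else {
		fmt.Println("remote source, fetch with init first:", detected)
	}
}
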
Example #2
// compileImports takes a File, loads all the imports, and merges them
// into the File.
func compileImports(
	root *File,
	importOpts *compileImportOpts,
	opts *CompileOpts) error {
	// If we have no imports, short-circuit the whole thing
	if len(root.Imports) == 0 {
		return nil
	}

	// Pull these out into variables so they're easier to reference
	storage := importOpts.Storage
	cache := importOpts.Cache
	cacheLock := importOpts.CacheLock

	// A graph is used to detect cycles among the imports
	var graphLock sync.Mutex
	graph := new(dag.AcyclicGraph)
	graph.Add("root")

	// Since we run the import in parallel, multiple errors can happen
	// at the same time. We use multierror and a lock to keep track of errors.
	var resultErr error
	var resultErrLock sync.Mutex

	// Forward declarations for some nested functions we use. The docs
	// for these functions are above each.
	var importSingle func(parent string, f *File) bool
	var downloadSingle func(string, *sync.WaitGroup, *sync.Mutex, []*File, int)

	// importSingle is responsible for kicking off the imports and merging
	// them for a single file. This will return true on success, false on
	// failure. On failure, it is expected that any errors are appended to
	// resultErr.
	importSingle = func(parent string, f *File) bool {
		var wg sync.WaitGroup

		// Build the list of files we'll merge later
		var mergeLock sync.Mutex
		merge := make([]*File, len(f.Imports))

		// Go through the imports and kick off the download
		for idx, i := range f.Imports {
			source, err := module.Detect(i.Source, filepath.Dir(f.Path))
			if err != nil {
				resultErrLock.Lock()
				defer resultErrLock.Unlock()
				resultErr = multierror.Append(resultErr, fmt.Errorf(
					"Error loading import source: %s", err))
				return false
			}

			// Add this to the graph and check now if there are cycles
			graphLock.Lock()
			graph.Add(source)
			graph.Connect(dag.BasicEdge(parent, source))
			cycles := graph.Cycles()
			graphLock.Unlock()
			if len(cycles) > 0 {
				for _, cycle := range cycles {
					names := make([]string, len(cycle))
					for i, v := range cycle {
						names[i] = dag.VertexName(v)
					}

					resultErrLock.Lock()
					defer resultErrLock.Unlock()
					resultErr = multierror.Append(resultErr, fmt.Errorf(
						"Cycle found: %s", strings.Join(names, ", ")))
					return false
				}
			}

			wg.Add(1)
			go downloadSingle(source, &wg, &mergeLock, merge, idx)
		}

		// Wait for completion
		wg.Wait()

		// Go through the merge list and look for any nil entries, which
		// means that download failed. In that case, return immediately.
		// We assume any errors were put into resultErr.
		for _, importF := range merge {
			if importF == nil {
				return false
			}
		}

		for _, importF := range merge {
			// We need to copy importF here so that we don't poison
			// the cache by modifying the same pointer.
			importFCopy := *importF
			importF = &importFCopy
			source := importF.ID
			importF.ID = ""
			importF.Path = ""

			// Merge it into our file!
			if err := f.Merge(importF); err != nil {
				resultErrLock.Lock()
				defer resultErrLock.Unlock()
				resultErr = multierror.Append(resultErr, fmt.Errorf(
					"Error merging import %s: %s", source, err))
				return false
			}
		}

		return true
	}

	// downloadSingle is used to download a single import and parse the
	// Appfile. This is a separate function because it is generally run
	// in a goroutine so we can parallelize grabbing the imports.
	downloadSingle = func(source string, wg *sync.WaitGroup, l *sync.Mutex, result []*File, idx int) {
		defer wg.Done()

		// Read from the cache if we have it
		cacheLock.Lock()
		cached, ok := cache[source]
		cacheLock.Unlock()
		if ok {
			log.Printf("[DEBUG] cache hit on import: %s", source)
			l.Lock()
			defer l.Unlock()
			result[idx] = cached
			return
		}

		// Call the callback if we have one
		log.Printf("[DEBUG] loading import: %s", source)
		if opts.Callback != nil {
			opts.Callback(&CompileEventImport{
				Source: source,
			})
		}

		// Download the dependency
		if err := storage.Get(source, source, true); err != nil {
			resultErrLock.Lock()
			defer resultErrLock.Unlock()
			resultErr = multierror.Append(resultErr, fmt.Errorf(
				"Error loading import source: %s", err))
			return
		}
		dir, _, err := storage.Dir(source)
		if err != nil {
			resultErrLock.Lock()
			defer resultErrLock.Unlock()
			resultErr = multierror.Append(resultErr, fmt.Errorf(
				"Error loading import source: %s", err))
			return
		}

		// Parse the Appfile
		importF, err := ParseFile(filepath.Join(dir, "Appfile"))
		if err != nil {
			resultErrLock.Lock()
			defer resultErrLock.Unlock()
			resultErr = multierror.Append(resultErr, fmt.Errorf(
				"Error parsing Appfile in %s: %s", source, err))
			return
		}

		// We use the ID to store the source, but we clear it
		// when we actually merge.
		importF.ID = source

		// Recursively resolve the imports of this imported file
		if !importSingle(source, importF) {
			return
		}

		// Once we're done, acquire the lock and write it
		l.Lock()
		result[idx] = importF
		l.Unlock()

		// Write this into the cache.
		cacheLock.Lock()
		cache[source] = importF
		cacheLock.Unlock()
	}

	importSingle("root", root)
	return resultErr
}
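
Example #2 comes from an Appfile compiler: each import source is detected relative to the importing file, checked against a shared cycle-detection graph, and then downloaded in its own goroutine, with every goroutine writing its parsed result into a fixed index of a shared slice. The stripped-down sketch below shows that fan-out pattern using only the standard library; fetch is a hypothetical stand-in for the real storage.Get and ParseFile steps.

package main

import (
	"fmt"
	"sync"
)

// File stands in for a parsed Appfile in this sketch.
type File struct {
	ID string
}

// fetch is a hypothetical placeholder for the real storage.Get + ParseFile
// steps performed by downloadSingle.
func fetch(source string) (*File, error) {
	return &File{ID: source}, nil
}

func main() {
	sources := []string{"a", "b", "c"}

	var wg sync.WaitGroup
	var mergeLock sync.Mutex
	merge := make([]*File, len(sources))

	for idx, source := range sources {
		wg.Add(1)
		go func(idx int, source string) {
			defer wg.Done()

			f, err := fetch(source)
			if err != nil {
				// Leave merge[idx] nil so the caller can tell the
				// download failed, as compileImports does.
				return
			}

			mergeLock.Lock()
			merge[idx] = f
			mergeLock.Unlock()
		}(idx, source)
	}
	wg.Wait()

	for _, f := range merge {
		if f == nil {
			fmt.Println("at least one import failed")
			return
		}
	}
	fmt.Println("all imports loaded:", len(merge))
}

Because every goroutine writes to a distinct index, the slice writes would not race even without the mutex; the lock here mirrors the structure of the original code rather than being strictly required.
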
Example #3
func (c *InitCommand) Run(args []string) int {
	var remoteBackend string
	args = c.Meta.process(args, false)
	remoteConfig := make(map[string]string)
	cmdFlags := flag.NewFlagSet("init", flag.ContinueOnError)
	cmdFlags.StringVar(&remoteBackend, "backend", "", "")
	cmdFlags.Var((*FlagKV)(&remoteConfig), "backend-config", "config")
	cmdFlags.Usage = func() { c.Ui.Error(c.Help()) }
	if err := cmdFlags.Parse(args); err != nil {
		return 1
	}

	var path string
	args = cmdFlags.Args()
	if len(args) > 2 {
		c.Ui.Error("The init command expects at most two arguments.\n")
		cmdFlags.Usage()
		return 1
	} else if len(args) < 1 {
		c.Ui.Error("The init command expects at least one arguments.\n")
		cmdFlags.Usage()
		return 1
	}

	if len(args) == 2 {
		path = args[1]
	} else {
		var err error
		path, err = os.Getwd()
		if err != nil {
			c.Ui.Error(fmt.Sprintf("Error getting pwd: %s", err))
		}
	}

	source := args[0]

	// Get our pwd since we need it
	pwd, err := os.Getwd()
	if err != nil {
		c.Ui.Error(fmt.Sprintf(
			"Error reading working directory: %s", err))
		return 1
	}

	// Verify the directory is empty
	if empty, err := config.IsEmptyDir(path); err != nil {
		c.Ui.Error(fmt.Sprintf(
			"Error checking on destination path: %s", err))
		return 1
	} else if !empty {
		c.Ui.Error(
			"The destination path has Terraform configuration files. The\n" +
				"init command can only be used on a directory without existing Terraform\n" +
				"files.")
		return 1
	}

	// Detect
	source, err = module.Detect(source, pwd)
	if err != nil {
		c.Ui.Error(fmt.Sprintf(
			"Error with module source: %s", err))
		return 1
	}

	// Get it!
	if err := module.GetCopy(path, source); err != nil {
		c.Ui.Error(err.Error())
		return 1
	}

	// Handle remote state if configured
	if remoteBackend != "" {
		var remoteConf terraform.RemoteState
		remoteConf.Type = remoteBackend
		remoteConf.Config = remoteConfig

		state, err := c.State()
		if err != nil {
			c.Ui.Error(fmt.Sprintf("Error checking for state: %s", err))
			return 1
		}
		if state != nil {
			s := state.State()
			if !s.Empty() {
				c.Ui.Error(fmt.Sprintf(
					"State file already exists and is not empty! Please remove this\n" +
						"state file before initializing. Note that removing the state file\n" +
						"may result in a loss of information since Terraform uses this\n" +
						"to track your infrastructure."))
				return 1
			}
			if s.IsRemote() {
				c.Ui.Error(fmt.Sprintf(
					"State file already exists with remote state enabled! Please remove this\n" +
						"state file before initializing. Note that removing the state file\n" +
						"may result in a loss of information since Terraform uses this\n" +
						"to track your infrastructure."))
				return 1
			}
		}

		// Initialize a blank state file with remote enabled
		remoteCmd := &RemoteConfigCommand{
			Meta:       c.Meta,
			remoteConf: remoteConf,
		}
		return remoteCmd.initBlankState()
	}
	return 0
}
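
Example #3 is Terraform's init command: it resolves the SOURCE argument with module.Detect, copies the module into an empty destination directory, and optionally wires up a remote state backend. The sketch below is a rough, standard-library-only stand-in for the "destination must not already contain Terraform files" guard; the real command uses config.IsEmptyDir, so checking only the .tf extension here is an illustrative assumption.

package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"
)

// isEmptyOfTerraformFiles approximates the guard: the directory may exist,
// but it must not already contain configuration files. Looking only at the
// .tf extension is an assumption about what config.IsEmptyDir checks.
func isEmptyOfTerraformFiles(dir string) (bool, error) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return false, err
	}
	for _, e := range entries {
		if filepath.Ext(e.Name()) == ".tf" {
			return false, nil
		}
	}
	return true, nil
}

func main() {
	empty, err := isEmptyOfTerraformFiles(".")
	if err != nil {
		log.Fatalf("Error checking on destination path: %s", err)
	}
	if !empty {
		fmt.Println("destination already has Terraform configuration files")
		return
	}
	fmt.Println("destination is empty, safe to init")
}
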
Example #4
func compileDependencies(
	storage module.Storage,
	importOpts *compileImportOpts,
	graph *dag.AcyclicGraph,
	opts *CompileOpts,
	root *CompiledGraphVertex) error {
	// Make a map to keep track of the dep source to vertex mapping
	vertexMap := make(map[string]*CompiledGraphVertex)

	// Store ourselves in the map
	key, err := module.Detect(".", filepath.Dir(root.File.Path))
	if err != nil {
		return err
	}
	vertexMap[key] = root

	// Make a queue for the other vertices we need to still get
	// dependencies for. We arbitrarily make the cap for this slice
	// 30, since that is a ton of dependencies and we don't expect the
	// average case to have more than this.
	queue := make([]*CompiledGraphVertex, 1, 30)
	queue[0] = root

	// While we still have dependencies to get, continue loading them.
	// TODO: parallelize
	for len(queue) > 0 {
		var current *CompiledGraphVertex
		current, queue = queue[len(queue)-1], queue[:len(queue)-1]

		log.Printf("[DEBUG] compiling dependencies for: %s", current.Name())
		for _, dep := range current.File.Application.Dependencies {
			key, err := module.Detect(dep.Source, filepath.Dir(current.File.Path))
			if err != nil {
				return fmt.Errorf(
					"Error loading source: %s", err)
			}

			vertex := vertexMap[key]
			if vertex == nil {
				log.Printf("[DEBUG] loading dependency: %s", key)

				// Call the callback if we have one
				if opts.Callback != nil {
					opts.Callback(&CompileEventDep{
						Source: key,
					})
				}

				// Download the dependency
				if err := storage.Get(key, key, true); err != nil {
					return err
				}
				dir, _, err := storage.Dir(key)
				if err != nil {
					return err
				}

				// Parse a default
				fDef, err := Default(dir, opts.Detect)
				if err != nil {
					return fmt.Errorf(
						"Error detecting defaults in %s: %s", key, err)
				}

				// Parse the Appfile
				f, err := ParseFile(filepath.Join(dir, "Appfile"))
				if err != nil {
					return fmt.Errorf(
						"Error parsing Appfile in %s: %s", key, err)
				}

				// Set the source
				f.Source = key

				// If it doesn't have an otto ID then we can't do anything
				hasID, err := f.hasID()
				if err != nil {
					return fmt.Errorf(
						"Error checking for ID file for Appfile in %s: %s",
						key, err)
				}
				if !hasID {
					return fmt.Errorf(
						"Dependency '%s' doesn't have an Otto ID yet!\n\n"+
							"An Otto ID is generated on the first compilation of the Appfile.\n"+
							"It is a globally unique ID that is used to track the application\n"+
							"across multiple deploys. It is required for the application to be\n"+
							"used as a dependency. To fix this, check out that application and\n"+
							"compile the Appfile with `otto compile` once. Make sure you commit\n"+
							"the .ottoid file into version control, and then try this command\n"+
							"again.",
						key)
				}

				// Realize all the imports for this file
				if err := compileImports(f, importOpts, opts); err != nil {
					return err
				}

				// Merge the files
				log.Printf("DEF: %#v", fDef)
				log.Printf("WHAT: %#v", f)
				if err := fDef.Merge(f); err != nil {
					return fmt.Errorf(
						"Error merging default Appfile for dependency %s: %s",
						key, err)
				}
				f = fDef

				// We merge the root infrastructure choice upwards to
				// all dependencies.
				f.Infrastructure = root.File.Infrastructure
				f.Project.Infrastructure = root.File.Project.Infrastructure

				// Build the vertex for this
				vertex = &CompiledGraphVertex{
					File:      f,
					Dir:       dir,
					NameValue: f.Application.Name,
				}

				// Add the vertex since it is new, store the mapping, and
				// queue it to be loaded later.
				graph.Add(vertex)
				vertexMap[key] = vertex
				queue = append(queue, vertex)
			}

			// Connect the dependencies
			graph.Connect(dag.BasicEdge(current, vertex))
		}
	}

	return nil
}
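
Example #4 resolves each dependency's source with module.Detect and uses the resulting key to deduplicate work: a vertexMap ensures every dependency is downloaded and compiled only once, a queue drives the iterative walk, and an edge is connected for every reference, including references to already-known vertices. A compact sketch of that queue-and-map pattern follows; the deps helper is hypothetical placeholder data, not the real Appfile parsing.

package main

import "fmt"

// deps is hypothetical placeholder data standing in for reading
// Application.Dependencies out of a parsed Appfile.
func deps(key string) []string {
	graph := map[string][]string{
		"root":  {"app-a", "app-b"},
		"app-a": {"app-b"},
	}
	return graph[key]
}

func main() {
	// vertexMap deduplicates work: each dependency is loaded only once.
	vertexMap := map[string]bool{"root": true}
	queue := []string{"root"}
	var edges [][2]string

	for len(queue) > 0 {
		var current string
		current, queue = queue[len(queue)-1], queue[:len(queue)-1]

		for _, dep := range deps(current) {
			if !vertexMap[dep] {
				// New dependency: record it and queue it so its own
				// dependencies get processed too.
				vertexMap[dep] = true
				queue = append(queue, dep)
			}
			// Always connect an edge, even to an already-known vertex.
			edges = append(edges, [2]string{current, dep})
		}
	}

	fmt.Println("edges:", edges)
}
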