// FilterBasedOnSize reports whether the node has enough free space at the
// persistent disk location for the requested disk. f.data must hold the
// requested size in MB (as an int); the node catalog reports sizes in KB.
func (f Filter) FilterBasedOnSize(c config.Cpi, node rackhdapi.Node) (bool, error) {
	size, ok := f.data.(int)
	if !ok {
		return false, fmt.Errorf("error converting disk size: disk size must be convertible to int")
	}

	catalog, err := rackhdapi.GetNodeCatalog(c, node.ID)
	if err != nil {
		// Preserve the underlying error instead of reporting only the node ID.
		return false, fmt.Errorf("error getting catalog of VM %s: %v", node.ID, err)
	}

	persistentDiskSize := catalog.Data.BlockDevices[rackhdapi.PersistentDiskLocation].Size
	if persistentDiskSize == "" {
		return false, fmt.Errorf("error creating disk for node %s: no disk found at %s", node.ID, rackhdapi.PersistentDiskLocation)
	}
	availableSpaceInKB, err := strconv.Atoi(persistentDiskSize)
	if err != nil {
		return false, fmt.Errorf("error creating disk for node %s: %v", node.ID, err)
	}

	// size is MB; the catalog reports KB, so convert before comparing.
	if availableSpaceInKB < size*1024 {
		return false, fmt.Errorf("error creating disk with size %vMB for node %s: insufficient available disk space", size, node.ID)
	}

	return true, nil
}
// CreateDisk reserves the persistent-disk slot on a RackHD node and returns
// the node ID, which doubles as the disk CID. When vmCID is non-empty the
// disk is placed on that VM's node (which must not already have a persistent
// disk); otherwise a free node is selected from RackHD.
func CreateDisk(c config.Cpi, extInput bosh.MethodArguments) (string, error) {
	diskSizeInMB, vmCID, err := parseCreateDiskInput(extInput)
	if err != nil {
		return "", err
	}

	var node rackhdapi.Node
	if vmCID != "" {
		node, err = rackhdapi.GetNodeByVMCID(c, vmCID)
		if err != nil {
			return "", err
		}

		if node.PersistentDisk.DiskCID != "" {
			return "", fmt.Errorf("error creating disk: VM %s already has a persistent disk", vmCID)
		}
	} else {
		nodeID, err := SelectNodeFromRackHD(c, "")
		if err != nil {
			return "", err
		}
		node.ID = nodeID
	}

	catalog, err := rackhdapi.GetNodeCatalog(c, node.ID)
	if err != nil {
		// Report node.ID (vmCID may be empty on the selection path above)
		// and keep the underlying cause.
		return "", fmt.Errorf("error getting catalog of node %s: %v", node.ID, err)
	}

	// The catalog reports the device size in KB; the requested size is MB.
	// An empty Size (device missing) also fails Atoi and lands in the error
	// branch below.
	availableSpaceInKB, err := strconv.Atoi(catalog.Data.BlockDevices[rackhdapi.PersistentDiskLocation].Size)
	if err != nil {
		return "", fmt.Errorf("error creating disk for node %s: disk not found: %v", node.ID, err)
	}

	if availableSpaceInKB < diskSizeInMB*1024 {
		return "", fmt.Errorf("error creating disk with size %vMB for node %s: insufficient available disk space", diskSizeInMB, node.ID)
	}

	// Persist the disk settings on the node; the node ID is the disk CID.
	container := rackhdapi.PersistentDiskSettingsContainer{
		PersistentDisk: rackhdapi.PersistentDiskSettings{
			DiskCID:    node.ID,
			Location:   fmt.Sprintf("/dev/%s", rackhdapi.PersistentDiskLocation),
			IsAttached: false,
		},
	}

	bodyBytes, err := json.Marshal(container)
	if err != nil {
		return "", fmt.Errorf("error marshalling persistent disk information for node %s: %v", node.ID, err)
	}

	err = rackhdapi.PatchNode(c, node.ID, bodyBytes)
	if err != nil {
		return "", err
	}

	return node.ID, nil
}
// CreateVM reserves a RackHD node, prepares its network and persistent-disk
// metadata, uploads the BOSH agent environment, and runs the provision
// workflow. It returns the new VM CID.
func CreateVM(c config.Cpi, extInput bosh.MethodArguments) (string, error) {
	agentID, stemcellCID, publicKey, boshNetworks, nodeID, err := parseCreateVMInput(extInput)
	if err != nil {
		return "", err
	}

	// Wipe the disk only when the caller did not pin a specific node.
	// This must be captured BEFORE TryReservation overwrites nodeID with the
	// reserved node's ID (non-empty on success); computing it afterwards, as
	// the previous code did, made wipeDisk unconditionally false.
	wipeDisk := nodeID == ""

	nodeID, err = TryReservation(c, nodeID, SelectNodeFromRackHD, ReserveNodeFromRackHD)
	if err != nil {
		return "", err
	}

	// BOSH supplies a map of networks; the last entry iterated wins.
	var netSpec bosh.Network
	var netName string
	for k, v := range boshNetworks {
		netName = k
		netSpec = v
	}

	nodeCatalog, err := rackhdapi.GetNodeCatalog(c, nodeID)
	if err != nil {
		return "", err
	}

	// Manual networks need a MAC address resolved from the node catalog.
	if netSpec.NetworkType == bosh.ManualNetworkType {
		netSpec, err = attachMAC(nodeCatalog.Data.NetworkData.Networks, netSpec)
		if err != nil {
			return "", err
		}
	}

	node, err := rackhdapi.GetNode(c, nodeID)
	if err != nil {
		return "", err
	}

	// Reuse an existing persistent disk CID, or pre-generate one so a later
	// CreateDisk call can adopt it.
	var diskCID string
	if node.PersistentDisk.DiskCID == "" {
		diskCID = fmt.Sprintf("%s-%s", nodeID, c.RequestID)

		container := rackhdapi.PersistentDiskSettingsContainer{
			PersistentDisk: rackhdapi.PersistentDiskSettings{
				PregeneratedDiskCID: diskCID,
			},
		}

		bodyBytes, err := json.Marshal(container)
		if err != nil {
			// Keep the underlying marshal error for diagnosis.
			return "", fmt.Errorf("error marshalling persistent disk information for agent %s: %v", agentID, err)
		}

		err = rackhdapi.PatchNode(c, node.ID, bodyBytes)
		if err != nil {
			return "", err
		}
	} else {
		diskCID = node.PersistentDisk.DiskCID
	}

	// Only advertise the persistent disk to the agent when the sdb device
	// actually exists in the node catalog.
	persistentMetadata := map[string]interface{}{}
	if _, sdbFound := nodeCatalog.Data.BlockDevices["sdb"]; sdbFound {
		persistentMetadata = map[string]interface{}{
			diskCID: map[string]string{
				"path": "/dev/sdb",
			},
		}
	}

	env := bosh.AgentEnv{
		AgentID:   agentID,
		Blobstore: c.Agent.Blobstore,
		Disks: map[string]interface{}{
			"system":     "/dev/sda",
			"persistent": persistentMetadata,
		},
		Mbus:     c.Agent.Mbus,
		Networks: map[string]bosh.Network{netName: netSpec},
		NTP:      c.Agent.Ntp,
		VM: map[string]string{
			"id":   nodeID,
			"name": nodeID,
		},
		PublicKey: publicKey,
	}

	envBytes, err := json.Marshal(env)
	if err != nil {
		return "", fmt.Errorf("error marshalling agent env %s", err)
	}
	envReader := bytes.NewReader(envBytes)
	vmCID, err := rackhdapi.UploadFile(c, nodeID, envReader, int64(len(envBytes)))
	if err != nil {
		return "", err
	}
	// Best-effort cleanup of the uploaded env file when we return.
	defer rackhdapi.DeleteFile(c, nodeID)

	workflowName, err := workflows.PublishProvisionNodeWorkflow(c)
	if err != nil {
		return "", fmt.Errorf("error publishing provision workflow: %s", err)
	}

	err = workflows.RunProvisionNodeWorkflow(c, nodeID, workflowName, vmCID, stemcellCID, wipeDisk)
	if err != nil {
		return "", fmt.Errorf("error running provision workflow: %s", err)
	}

	return vmCID, nil
}
	})

	Describe("Getting catalog", func() {
		It("returns a catalog", func() {
			// Load the fixture catalog and pre-marshal it as the fake
			// server's response body.
			expectedNodeCatalog := helpers.LoadNodeCatalog("../spec_assets/dummy_node_catalog_response.json")
			expectedNodeCatalogData, err := json.Marshal(expectedNodeCatalog)
			testNodeID := "55e79eb14e66816f6152fffb"
			Expect(err).ToNot(HaveOccurred())
			// Stub the RackHD ohai-catalog endpoint for this node.
			server.AppendHandlers(
				ghttp.CombineHandlers(
					ghttp.VerifyRequest("GET", fmt.Sprintf("/api/common/nodes/%s/catalogs/ohai", testNodeID)),
					ghttp.RespondWith(http.StatusOK, expectedNodeCatalogData),
				),
			)

			catalog, err := rackhdapi.GetNodeCatalog(cpiConfig, testNodeID)

			// Exactly one request should have hit the server, and the parsed
			// catalog should round-trip back to the fixture.
			Expect(err).ToNot(HaveOccurred())
			Expect(server.ReceivedRequests()).To(HaveLen(1))
			Expect(catalog).To(Equal(expectedNodeCatalog))
		})
	})

	Describe("blocking nodes", func() {
		It("sends a request to block a node", func() {
			nodes := helpers.LoadNodes("../spec_assets/dummy_two_node_response.json")

			server.AppendHandlers(
				ghttp.CombineHandlers(
					ghttp.VerifyRequest("PATCH", fmt.Sprintf("/api/common/nodes/%s", nodes[0].ID)),
					ghttp.VerifyJSON(fmt.Sprintf(`{"status": "%s", "status_reason": "%s"}`, "blocked", "Node has missing disks")),