From 1798bade566af756d6675f5dcb1257a0ace18578 Mon Sep 17 00:00:00 2001
From: Lucas Hahn
Date: Fri, 17 Apr 2020 16:18:37 +0200
Subject: [PATCH] added support to reboot vm after creation or clone

---
 proxmox/virtual_environment_nodes.go         |  9 +-
 proxmox/virtual_environment_nodes_types.go   |  5 +-
 proxmox/virtual_environment_vm.go            | 37 +++++++-
 proxmox/virtual_environment_vm_types.go      |  3 +
 proxmoxtf/resource_virtual_environment_vm.go | 89 ++++++++++++++------
 5 files changed, 112 insertions(+), 31 deletions(-)

diff --git a/proxmox/virtual_environment_nodes.go b/proxmox/virtual_environment_nodes.go
index dd8468af..b3dd78d6 100644
--- a/proxmox/virtual_environment_nodes.go
+++ b/proxmox/virtual_environment_nodes.go
@@ -186,7 +186,14 @@ func (c *VirtualEnvironmentClient) WaitForNodeTask(nodeName string, upid string,
 		if int64(timeElapsed.Seconds())%timeDelay == 0 {
 			status, err := c.GetNodeTaskStatus(nodeName, upid)
 
-			if err == nil && status.Status != "running" {
+			if err != nil {
+				return err
+			}
+
+			if status.Status != "running" {
+				if status.ExitCode != "OK" {
+					return fmt.Errorf("Task \"%s\" on node \"%s\" failed to complete with error: %s", upid, nodeName, status.ExitCode)
+				}
 				return nil
 			}
 
diff --git a/proxmox/virtual_environment_nodes_types.go b/proxmox/virtual_environment_nodes_types.go
index 262398d2..f8b7d08e 100644
--- a/proxmox/virtual_environment_nodes_types.go
+++ b/proxmox/virtual_environment_nodes_types.go
@@ -36,8 +36,9 @@ type VirtualEnvironmentNodeGetTaskStatusResponseBody struct {
 
 // VirtualEnvironmentNodeGetTaskStatusResponseData contains the data from a node get task status response.
 type VirtualEnvironmentNodeGetTaskStatusResponseData struct {
-	PID    int    `json:"pid,omitempty"`
-	Status string `json:"status,omitempty"`
+	PID      int    `json:"pid,omitempty"`
+	Status   string `json:"status,omitempty"`
+	ExitCode string `json:"exitstatus,omitempty"`
 }
 
 // VirtualEnvironmentNodeListResponseBody contains the body from a node list response.
diff --git a/proxmox/virtual_environment_vm.go b/proxmox/virtual_environment_vm.go
index c35bf5ff..7f45a1df 100644
--- a/proxmox/virtual_environment_vm.go
+++ b/proxmox/virtual_environment_vm.go
@@ -24,8 +24,30 @@ var (
 )
 
 // CloneVM clones a virtual machine.
-func (c *VirtualEnvironmentClient) CloneVM(nodeName string, vmID int, d *VirtualEnvironmentVMCloneRequestBody) error {
-	return c.DoRequest(hmPOST, fmt.Sprintf("nodes/%s/qemu/%d/clone", url.PathEscape(nodeName), vmID), d, nil)
+func (c *VirtualEnvironmentClient) CloneVM(nodeName string, vmID int, retries int, d *VirtualEnvironmentVMCloneRequestBody) error {
+	resBody := &VirtualEnvironmentVMMoveDiskResponseBody{}
+	var err error
+
+	for i := 0; i < retries; i++ {
+		err = c.DoRequest(hmPOST, fmt.Sprintf("nodes/%s/qemu/%d/clone", url.PathEscape(nodeName), vmID), d, resBody)
+
+		if err != nil {
+			return err
+		}
+
+		if resBody.Data == nil {
+			return errors.New("The server did not include a data object in the response")
+		}
+
+		err = c.WaitForNodeTask(nodeName, *resBody.Data, 1800, 5)
+
+		if err == nil {
+			return nil
+		}
+		time.Sleep(10 * time.Second)
+	}
+
+	return err
 }
 
 // CreateVM creates a virtual machine.
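Reviewer note: CloneVM now submits the clone request, waits for the resulting node task (polling for up to 1800 seconds), and repeats the wait up to the new retries count; since the loop body only runs when retries is at least 1, callers need to pass a positive value. The sketch below shows the new call shape only; the package name, helper name, import path, node name, VM ID, and retry count are illustrative assumptions, and the clone request body fields are intentionally left empty.

    package example

    import (
        "github.com/danitso/terraform-provider-proxmox/proxmox"
    )

    // CloneWithRetries illustrates the new CloneVM signature: node name, source
    // VM ID, retry count, then the request body. The request body fields are
    // elided in this sketch; populate them as needed.
    func CloneWithRetries(client *proxmox.VirtualEnvironmentClient) error {
        body := &proxmox.VirtualEnvironmentVMCloneRequestBody{}

        // Clone source VM 100 on node "pve1", allowing up to 3 attempts to wait
        // for the clone task to finish before giving up.
        return client.CloneVM("pve1", 100, 3, body)
    }
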
@@ -203,7 +225,16 @@ func (c *VirtualEnvironmentClient) RebootVMAsync(nodeName string, vmID int, d *V
 
 // ResizeVMDisk resizes a virtual machine disk.
 func (c *VirtualEnvironmentClient) ResizeVMDisk(nodeName string, vmID int, d *VirtualEnvironmentVMResizeDiskRequestBody) error {
-	return c.DoRequest(hmPUT, fmt.Sprintf("nodes/%s/qemu/%d/resize", url.PathEscape(nodeName), vmID), d, nil)
+	var err error
+	for i := 0; i < 5; i++ {
+		err = c.DoRequest(hmPUT, fmt.Sprintf("nodes/%s/qemu/%d/resize", url.PathEscape(nodeName), vmID), d, nil)
+		if err == nil {
+			return nil
+		}
+		log.Printf("[DEBUG] resize disk failed, retry nr: %d", i)
+		time.Sleep(5 * time.Second)
+	}
+	return err
 }
 
 // ShutdownVM shuts down a virtual machine.
diff --git a/proxmox/virtual_environment_vm_types.go b/proxmox/virtual_environment_vm_types.go
index ae2be692..aea3f84c 100644
--- a/proxmox/virtual_environment_vm_types.go
+++ b/proxmox/virtual_environment_vm_types.go
@@ -163,6 +163,9 @@ type CustomStorageDevice struct {
 	Media     *string `json:"media,omitempty" url:"media,omitempty"`
 	Size      *string `json:"size,omitempty" url:"size,omitempty"`
 	Format    *string `json:"format,omitempty" url:"format,omitempty"`
+	Interface *string
+	ID        *string
+	FileId    *string
 }
 
 // CustomStorageDevices handles QEMU SATA device parameters.
diff --git a/proxmoxtf/resource_virtual_environment_vm.go b/proxmoxtf/resource_virtual_environment_vm.go
index 378a1feb..bc87a825 100644
--- a/proxmoxtf/resource_virtual_environment_vm.go
+++ b/proxmoxtf/resource_virtual_environment_vm.go
@@ -7,6 +7,7 @@ package proxmoxtf
 import (
 	"errors"
 	"fmt"
+	"log"
 	"math"
 	"strconv"
 	"strings"
@@ -18,6 +19,7 @@ import (
 )
 
 const (
+	dvResourceVirtualEnvironmentVMRebootAfterCreation = false
 	dvResourceVirtualEnvironmentVMACPI                = true
 	dvResourceVirtualEnvironmentVMAgentEnabled        = false
 	dvResourceVirtualEnvironmentVMAgentTimeout        = "15m"
@@ -32,6 +34,7 @@
 	dvResourceVirtualEnvironmentVMCloneDatastoreID = ""
 	dvResourceVirtualEnvironmentVMCloneNodeName    = ""
 	dvResourceVirtualEnvironmentVMCloneFull        = true
+	dvResourceVirtualEnvironmentVMCloneRetries     = 0
 	dvResourceVirtualEnvironmentVMCPUArchitecture  = "x86_64"
 	dvResourceVirtualEnvironmentVMCPUCores         = 1
 	dvResourceVirtualEnvironmentVMCPUHotplugged    = 0
@@ -83,6 +86,7 @@
 	maxResourceVirtualEnvironmentVMNetworkDevices = 8
 	maxResourceVirtualEnvironmentVMSerialDevices  = 4
 
+	mkResourceVirtualEnvironmentVMRebootAfterCreation = "reboot"
 	mkResourceVirtualEnvironmentVMACPI                = "acpi"
 	mkResourceVirtualEnvironmentVMAgent               = "agent"
 	mkResourceVirtualEnvironmentVMAgentEnabled        = "enabled"
@@ -98,6 +102,7 @@
 	mkResourceVirtualEnvironmentVMCDROMEnabled = "enabled"
 	mkResourceVirtualEnvironmentVMCDROMFileID  = "file_id"
 	mkResourceVirtualEnvironmentVMClone        = "clone"
+	mkResourceVirtualEnvironmentVMCloneRetries = "retries"
 	mkResourceVirtualEnvironmentVMCloneDatastoreID = "datastore_id"
 	mkResourceVirtualEnvironmentVMCloneNodeName    = "node_name"
 	mkResourceVirtualEnvironmentVMCloneVMID        = "vm_id"
@@ -175,6 +180,12 @@ const (
 func resourceVirtualEnvironmentVM() *schema.Resource {
 	return &schema.Resource{
 		Schema: map[string]*schema.Schema{
+			mkResourceVirtualEnvironmentVMRebootAfterCreation: {
+				Type:        schema.TypeBool,
+				Description: "Whether to reboot the VM after creation",
+				Optional:    true,
+				Default:     dvResourceVirtualEnvironmentVMRebootAfterCreation,
+			},
 			mkResourceVirtualEnvironmentVMACPI: {
 				Type:        schema.TypeBool,
 				Description: "Whether to enable ACPI",
@@ -310,6 +321,13 @@ func resourceVirtualEnvironmentVM() *schema.Resource {
 				},
 				Elem: &schema.Resource{
 					Schema: map[string]*schema.Schema{
+						mkResourceVirtualEnvironmentVMCloneRetries: {
+							Type:        schema.TypeInt,
+							Description: "The number of retries to create a clone",
clone", + Optional: true, + ForceNew: true, + Default: dvResourceVirtualEnvironmentVMCloneRetries, + }, mkResourceVirtualEnvironmentVMCloneDatastoreID: { Type: schema.TypeString, Description: "The ID of the target datastore", @@ -987,6 +1005,7 @@ func resourceVirtualEnvironmentVMCreateClone(d *schema.ResourceData, m interface clone := d.Get(mkResourceVirtualEnvironmentVMClone).([]interface{}) cloneBlock := clone[0].(map[string]interface{}) + cloneRetries := cloneBlock[mkResourceVirtualEnvironmentVMCloneRetries].(int) cloneDatastoreID := cloneBlock[mkResourceVirtualEnvironmentVMCloneDatastoreID].(string) cloneNodeName := cloneBlock[mkResourceVirtualEnvironmentVMCloneNodeName].(string) cloneVMID := cloneBlock[mkResourceVirtualEnvironmentVMCloneVMID].(int) @@ -1034,9 +1053,9 @@ func resourceVirtualEnvironmentVMCreateClone(d *schema.ResourceData, m interface if cloneNodeName != "" && cloneNodeName != nodeName { cloneBody.TargetNodeName = &nodeName - err = veClient.CloneVM(cloneNodeName, cloneVMID, cloneBody) + err = veClient.CloneVM(cloneNodeName, cloneVMID, cloneRetries, cloneBody) } else { - err = veClient.CloneVM(nodeName, cloneVMID, cloneBody) + err = veClient.CloneVM(nodeName, cloneVMID, cloneRetries, cloneBody) } if err != nil { @@ -1784,6 +1803,7 @@ func resourceVirtualEnvironmentVMCreateCustomDisks(d *schema.ResourceData, m int func resourceVirtualEnvironmentVMCreateStart(d *schema.ResourceData, m interface{}) error { started := d.Get(mkResourceVirtualEnvironmentVMStarted).(bool) template := d.Get(mkResourceVirtualEnvironmentVMTemplate).(bool) + reboot := d.Get(mkResourceVirtualEnvironmentVMRebootAfterCreation).(bool) if !started || template { return resourceVirtualEnvironmentVMRead(d, m) @@ -1810,6 +1830,18 @@ func resourceVirtualEnvironmentVMCreateStart(d *schema.ResourceData, m interface return err } + if reboot { + rebootTimeout := 300 + + err := veClient.RebootVM(nodeName, vmID, &proxmox.VirtualEnvironmentVMRebootRequestBody{ + Timeout: &rebootTimeout, + }) + + if err != nil { + return err + } + } + return resourceVirtualEnvironmentVMRead(d, m) } @@ -2415,30 +2447,34 @@ func resourceVirtualEnvironmentVMReadCustom(d *schema.ResourceData, m interface{ } // Compare the disks to those stored in the state. 
-	currentDisk := d.Get(mkResourceVirtualEnvironmentVMDisk).([]interface{})
+	currentDisks := d.Get(mkResourceVirtualEnvironmentVMDisk).([]interface{})
 
 	diskList := []interface{}{}
-	diskObjects := []*proxmox.CustomStorageDevice{
-		vmConfig.SCSIDevice0,
-		vmConfig.SCSIDevice1,
-		vmConfig.SCSIDevice2,
-		vmConfig.SCSIDevice3,
-		vmConfig.SCSIDevice4,
-		vmConfig.SCSIDevice5,
-		vmConfig.SCSIDevice6,
-		vmConfig.SCSIDevice7,
-		vmConfig.SCSIDevice8,
-		vmConfig.SCSIDevice9,
-		vmConfig.SCSIDevice10,
-		vmConfig.SCSIDevice11,
-		vmConfig.SCSIDevice12,
-		vmConfig.SCSIDevice13,
+	diskObjects := getDiskInfo(vmConfig)
+
+	currentDiskMap := make(map[string]*proxmox.CustomStorageDevice)
+
+	for _, dd := range currentDisks {
+		var disk proxmox.CustomStorageDevice
+		currentDiskEntry := dd.(map[string]interface{})
+
+		id := currentDiskEntry[mkResourceVirtualEnvironmentVMDiskDatastoreID].(string)
+		diskInterface := currentDiskEntry[mkResourcevirtualEnvironmentVMDiskInterface].(string)
+		format := currentDiskEntry[mkResourceVirtualEnvironmentVMDiskFileFormat].(string)
+		fileId := currentDiskEntry[mkResourceVirtualEnvironmentVMDiskFileID].(string)
+
+		disk.Interface = &diskInterface
+		disk.ID = &id
+		disk.Format = &format
+		disk.FileId = &fileId
+
+		currentDiskMap[diskInterface] = &disk
 	}
 
 	for di, dd := range diskObjects {
 		disk := map[string]interface{}{}
 
-		if dd == nil {
+		if dd == nil || strings.HasPrefix(di, "ide") {
 			continue
 		}
 
@@ -2446,11 +2482,13 @@ func resourceVirtualEnvironmentVMReadCustom(d *schema.ResourceData, m interface{
 
 		disk[mkResourceVirtualEnvironmentVMDiskDatastoreID] = fileIDParts[0]
 
-		if len(currentDisk) > di {
-			currentDiskEntry := currentDisk[di].(map[string]interface{})
+		if val, ok := currentDiskMap[di]; ok {
+			if *val.FileId != "" {
+				disk[mkResourceVirtualEnvironmentVMDiskFileID] = val.FileId
+			}
 
-			disk[mkResourceVirtualEnvironmentVMDiskFileFormat] = currentDiskEntry[mkResourceVirtualEnvironmentVMDiskFileFormat]
-			disk[mkResourceVirtualEnvironmentVMDiskFileID] = currentDiskEntry[mkResourceVirtualEnvironmentVMDiskFileID]
+			disk[mkResourceVirtualEnvironmentVMDiskFileFormat] = val.Format
+			disk[mkResourcevirtualEnvironmentVMDiskInterface] = val.Interface
 		}
 
 		diskSize := 0
@@ -2525,11 +2563,12 @@ func resourceVirtualEnvironmentVMReadCustom(d *schema.ResourceData, m interface{
 		diskList = append(diskList, disk)
 	}
 
+	log.Printf("[DEBUG] NUMBER CURRENT DISKS %d NUMBER READ DISKS %d", len(currentDisks), len(diskList))
 	if len(clone) > 0 {
-		if len(currentDisk) > 0 {
+		if len(currentDisks) > 0 || len(diskList) > 0 {
 			d.Set(mkResourceVirtualEnvironmentVMDisk, diskList)
 		}
-	} else if len(currentDisk) > 0 || len(diskList) > 0 {
+	} else if len(currentDisks) > 0 || len(diskList) > 0 {
 		d.Set(mkResourceVirtualEnvironmentVMDisk, diskList)
 	}
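A companion sketch of what the new reboot argument triggers after the VM has been started: the provider calls RebootVM with a 300 second timeout. The helper below mirrors that call for use outside the provider; the package name, import path, helper name, and parameters are illustrative assumptions.

    package example

    import (
        "github.com/danitso/terraform-provider-proxmox/proxmox"
    )

    // RebootAfterCreation mirrors what resourceVirtualEnvironmentVMCreateStart
    // now does when the "reboot" argument is true: request a guest reboot and
    // allow up to 300 seconds for it to complete.
    func RebootAfterCreation(client *proxmox.VirtualEnvironmentClient, nodeName string, vmID int) error {
        rebootTimeout := 300

        return client.RebootVM(nodeName, vmID, &proxmox.VirtualEnvironmentVMRebootRequestBody{
            Timeout: &rebootTimeout,
        })
    }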