Mirror of https://github.com/bpg/terraform-provider-proxmox.git

fix(vm): fixed startup / shutdown behaviour on HA clusters (#508)

* fix(vm): wait for VMs to actually stop when sending a shutdown command

Due to how a Proxmox cluster reacts to a VM shutdown command when
running in HA mode, the VM might still be running when the shutdown API
call returns. This commit adds a loop that actively waits for the VM's
status to change to "stopped" after the call returns, while also
honouring the shutdown timeout. A simplified sketch of this polling
approach follows these notes.

* chore(refactoring): extracted VM state change wait loop into a separate function

* fix(vm): wait for VMs to actually start after requesting it from the cluster

This commit makes the plugin wait until a VM is actually running after
requesting that it be started. This avoids problems with Proxmox's High
Availability mode, where a start request may not be immediately honoured
by the cluster.

* fix: linter errors

* fix: use `vmAPI.WaitForVMState`
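
For illustration only, the "wait for the VM to reach a state" step described
above can be pictured as a simple polling loop. The sketch below is a minimal,
self-contained Go example and not the provider's code; `waitForState` and its
`getStatus` callback are hypothetical stand-ins for the client's
`WaitForVMState` helper that the diff further down actually uses.

package main

import (
	"context"
	"fmt"
	"time"
)

// waitForState polls a status callback until the VM reports the desired state
// or the timeout (in seconds) elapses. getStatus is a hypothetical stand-in
// for an API call that returns the VM's current status.
func waitForState(
	ctx context.Context,
	getStatus func(context.Context) (string, error),
	want string,
	timeoutSec, delaySec int,
) error {
	deadline := time.Now().Add(time.Duration(timeoutSec) * time.Second)

	for time.Now().Before(deadline) {
		state, err := getStatus(ctx)
		if err != nil {
			return err
		}

		if state == want {
			return nil
		}

		// Wait for the polling delay, but abort early if the context is cancelled.
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(time.Duration(delaySec) * time.Second):
		}
	}

	return fmt.Errorf("timeout while waiting for the VM to reach state %q", want)
}

func main() {
	// Fake status source: reports "running" on the third poll.
	polls := 0
	fake := func(_ context.Context) (string, error) {
		polls++
		if polls >= 3 {
			return "running", nil
		}
		return "stopped", nil
	}

	if err := waitForState(context.Background(), fake, "running", 10, 1); err != nil {
		fmt.Println("error:", err)
		return
	}

	fmt.Println("VM is running")
}

In the change itself, the provider calls `vmAPI.WaitForVMState(ctx, "running", startVMTimeout, 1)` after `StartVM` and `vmAPI.WaitForVMState(ctx, "stopped", shutdownTimeout, 1)` after `ShutdownVM`, as shown in the diff below.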

---------

Co-authored-by: Pavel Boldyrev <627562+bpg@users.noreply.github.com>
Emmanuel Benoît 2023-08-21 02:07:37 +02:00 committed by GitHub
parent 73c1294979
commit 148a9e0c9c


@@ -1600,7 +1600,22 @@ func deleteIdeDrives(ctx context.Context, vmAPI *vms.Client, itf1 string, itf2 s
 	return nil
 }
 
-// Shutdown the VM.
+// Start the VM, then wait for it to actually start; it may not be started immediately if running in HA mode.
+func vmStart(ctx context.Context, vmAPI *vms.Client, d *schema.ResourceData) diag.Diagnostics {
+	tflog.Debug(ctx, "Starting VM")
+
+	startVMTimeout := d.Get(mkResourceVirtualEnvironmentVMTimeoutStartVM).(int)
+
+	e := vmAPI.StartVM(ctx, startVMTimeout)
+	if e != nil {
+		return diag.FromErr(e)
+	}
+
+	return diag.FromErr(vmAPI.WaitForVMState(ctx, "running", startVMTimeout, 1))
+}
+
+// Shutdown the VM, then wait for it to actually shut down (it may not be shut down immediately if
+// running in HA mode).
 func vmShutdown(ctx context.Context, vmAPI *vms.Client, d *schema.ResourceData) diag.Diagnostics {
 	tflog.Debug(ctx, "Shutting down VM")
 
@@ -1615,7 +1630,7 @@ func vmShutdown(ctx context.Context, vmAPI *vms.Client, d *schema.ResourceData)
 		return diag.FromErr(e)
 	}
 
-	return nil
+	return diag.FromErr(vmAPI.WaitForVMState(ctx, "stopped", shutdownTimeout, 1))
 }
 
 func vmCreateClone(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
@@ -1928,6 +1943,7 @@ func vmCreateClone(ctx context.Context, d *schema.ResourceData, m interface{}) d
 		tflog.Trace(ctx, fmt.Sprintf("CloudInit IDE interface is '%s'", initializationInterface))
 
 		const cdromCloudInitEnabled = true
+
 		cdromCloudInitFileID := fmt.Sprintf("%s:cloudinit", initializationDatastoreID)
 		cdromCloudInitMedia := "cdrom"
 		ideDevices[initializationInterface] = vms.CustomStorageDevice{
@@ -2750,10 +2766,8 @@ func vmCreateStart(ctx context.Context, d *schema.ResourceData, m interface{}) d
 	vmAPI := api.Node(nodeName).VM(vmID)
 
 	// Start the virtual machine and wait for it to reach a running state before continuing.
-	startVMTimeout := d.Get(mkResourceVirtualEnvironmentVMTimeoutStartVM).(int)
-	err = vmAPI.StartVM(ctx, startVMTimeout)
-	if err != nil {
-		return diag.FromErr(err)
+	if diags := vmStart(ctx, vmAPI, d); diags != nil {
+		return diags
 	}
 
 	if reboot {
@@ -3499,6 +3513,7 @@ func vmReadCustom(
 	vmStatus *vms.GetStatusResponseData,
 ) diag.Diagnostics {
 	config := m.(proxmoxtf.ProviderConfiguration)
+
 	api, err := config.GetClient()
 	if err != nil {
 		return diag.FromErr(err)
@@ -5444,11 +5459,8 @@ func vmUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.D
 	if (d.HasChange(mkResourceVirtualEnvironmentVMStarted) || stoppedBeforeUpdate) && !bool(template) {
 		started := d.Get(mkResourceVirtualEnvironmentVMStarted).(bool)
 		if started {
-			startVMTimeout := d.Get(mkResourceVirtualEnvironmentVMTimeoutStartVM).(int)
-
-			e = vmAPI.StartVM(ctx, startVMTimeout)
-			if e != nil {
-				return diag.FromErr(e)
+			if diags := vmStart(ctx, vmAPI, d); diags != nil {
+				return diags
 			}
 		} else {
 			if e := vmShutdown(ctx, vmAPI, d); e != nil {
@@ -5605,10 +5617,8 @@ func vmUpdateDiskLocationAndSize(
 	}
 
 	if shutdownForDisksRequired && started && !template {
-		startVMTimeout := d.Get(mkResourceVirtualEnvironmentVMTimeoutStartVM).(int)
-		err = vmAPI.StartVM(ctx, startVMTimeout)
-		if err != nil {
-			return diag.FromErr(err)
+		if diags := vmStart(ctx, vmAPI, d); diags != nil {
+			return diags
 		}
 
 		// This concludes an equivalent of a reboot, avoid doing another.