0
0
mirror of https://github.com/bpg/terraform-provider-proxmox.git synced 2025-07-05 05:24:01 +00:00

fix(vm): adding disks causes VM to be re-created (#1336)

Signed-off-by: Pavel Boldyrev <627562+bpg@users.noreply.github.com>
This commit is contained in:
Pavel Boldyrev 2024-05-29 23:12:05 -04:00 committed by GitHub
parent 2785c40d44
commit d02dc1eb0a
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
7 changed files with 276 additions and 91 deletions

View File

@@ -67,4 +67,6 @@ jobs:
PROXMOX_VE_ACC_NODE_NAME: ${{ matrix.node }} PROXMOX_VE_ACC_NODE_NAME: ${{ matrix.node }}
PROXMOX_VE_ACC_NODE_SSH_ADDRESS: ${{ secrets.PROXMOX_VE_HOST }} PROXMOX_VE_ACC_NODE_SSH_ADDRESS: ${{ secrets.PROXMOX_VE_HOST }}
PROXMOX_VE_ACC_NODE_SSH_PORT: ${{ matrix.port }} PROXMOX_VE_ACC_NODE_SSH_PORT: ${{ matrix.port }}
PROXMOX_VE_ACC_CLOUD_IMAGES_SERVER: ${{ secrets.PROXMOX_VE_ACC_CLOUD_IMAGES_SERVER }}
PROXMOX_VE_ACC_CONTAINER_IMAGES_SERVER: ${{ secrets.PROXMOX_VE_ACC_CONTAINER_IMAGES_SERVER }}
run: make testacc run: make testacc

View File

@@ -57,7 +57,7 @@ resource "proxmox_virtual_environment_download_file" "ubuntu_container_template"
content_type = "vztmpl" content_type = "vztmpl"
datastore_id = "local" datastore_id = "local"
node_name = "{{.NodeName}}" node_name = "{{.NodeName}}"
url = "http://download.proxmox.com/images/system/ubuntu-23.04-standard_23.04-1_amd64.tar.zst" url = "{{.ContainerImagesServer}}/images/system/ubuntu-23.04-standard_23.04-1_amd64.tar.zst"
overwrite_unmanaged = true overwrite_unmanaged = true
} }
resource "proxmox_virtual_environment_container" "test_container" { resource "proxmox_virtual_environment_container" "test_container" {

View File

@@ -7,9 +7,11 @@
package test package test
import ( import (
"regexp"
"testing" "testing"
"github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/helper/resource"
"github.com/hashicorp/terraform-plugin-testing/plancheck"
) )
func TestAccResourceVM(t *testing.T) { func TestAccResourceVM(t *testing.T) {
@@ -295,7 +297,7 @@ func TestAccResourceVMInitialization(t *testing.T) {
content_type = "iso" content_type = "iso"
datastore_id = "local" datastore_id = "local"
node_name = "{{.NodeName}}" node_name = "{{.NodeName}}"
url = "https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img" url = "{{.CloudImagesServer}}/jammy/current/jammy-server-cloudimg-amd64.img"
overwrite_unmanaged = true overwrite_unmanaged = true
}`), }`),
}}}, }}},
@@ -390,7 +392,7 @@ func TestAccResourceVMNetwork(t *testing.T) {
content_type = "iso" content_type = "iso"
datastore_id = "local" datastore_id = "local"
node_name = "{{.NodeName}}" node_name = "{{.NodeName}}"
url = "https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img" url = "{{.CloudImagesServer}}/jammy/current/jammy-server-cloudimg-amd64.img"
overwrite_unmanaged = true overwrite_unmanaged = true
}`), }`),
Check: resource.ComposeTestCheckFunc( Check: resource.ComposeTestCheckFunc(
@@ -545,7 +547,7 @@ func TestAccResourceVMDisks(t *testing.T) {
content_type = "iso" content_type = "iso"
datastore_id = "local" datastore_id = "local"
node_name = "{{.NodeName}}" node_name = "{{.NodeName}}"
url = "https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img" url = "{{.CloudImagesServer}}/jammy/current/jammy-server-cloudimg-amd64.img"
overwrite_unmanaged = true overwrite_unmanaged = true
} }
resource "proxmox_virtual_environment_vm" "test_disk2" { resource "proxmox_virtual_environment_vm" "test_disk2" {
@@ -643,6 +645,109 @@ func TestAccResourceVMDisks(t *testing.T) {
RefreshState: true, RefreshState: true,
}, },
}}, }},
{"adding disks", []resource.TestStep{
{
Config: te.RenderConfig(`
resource "proxmox_virtual_environment_vm" "test_disk" {
node_name = "{{.NodeName}}"
started = false
name = "test-disk"
disk {
file_format = "raw"
datastore_id = "local-lvm"
interface = "scsi0"
size = 8
}
}`),
Check: ResourceAttributes("proxmox_virtual_environment_vm.test_disk", map[string]string{
"disk.0.interface": "scsi0",
"disk.0.path_in_datastore": `vm-\d+-disk-0`,
}),
},
{
Config: te.RenderConfig(`
resource "proxmox_virtual_environment_vm" "test_disk" {
node_name = "{{.NodeName}}"
started = false
name = "test-disk"
disk {
file_format = "raw"
datastore_id = "local-lvm"
interface = "scsi0"
size = 8
}
disk {
file_format = "raw"
datastore_id = "local-lvm"
interface = "scsi1"
size = 8
}
}`),
ConfigPlanChecks: resource.ConfigPlanChecks{
PreApply: []plancheck.PlanCheck{
plancheck.ExpectResourceAction("proxmox_virtual_environment_vm.test_disk", plancheck.ResourceActionUpdate),
},
},
Check: ResourceAttributes("proxmox_virtual_environment_vm.test_disk", map[string]string{
"disk.0.interface": "scsi0",
"disk.0.path_in_datastore": `vm-\d+-disk-0`,
"disk.1.interface": "scsi1",
"disk.1.path_in_datastore": `vm-\d+-disk-1`,
}),
},
{
RefreshState: true,
},
}},
{"removing disks", []resource.TestStep{
{
Config: te.RenderConfig(`
resource "proxmox_virtual_environment_vm" "test_disk" {
node_name = "{{.NodeName}}"
started = false
name = "test-disk"
disk {
file_format = "raw"
datastore_id = "local-lvm"
interface = "scsi0"
size = 8
}
disk {
file_format = "raw"
datastore_id = "local-lvm"
interface = "scsi1"
size = 8
}
}`),
Check: ResourceAttributes("proxmox_virtual_environment_vm.test_disk", map[string]string{
"disk.0.interface": "scsi0",
"disk.0.path_in_datastore": `vm-\d+-disk-0`,
"disk.1.interface": "scsi1",
"disk.1.path_in_datastore": `vm-\d+-disk-1`,
}),
},
{
Config: te.RenderConfig(`
resource "proxmox_virtual_environment_vm" "test_disk" {
node_name = "{{.NodeName}}"
started = false
name = "test-disk"
disk {
file_format = "raw"
datastore_id = "local-lvm"
interface = "scsi0"
size = 8
}
}`),
ExpectError: regexp.MustCompile(`deletion of disks not supported`),
},
}},
{"cdrom", []resource.TestStep{ {"cdrom", []resource.TestStep{
{ {
@@ -833,6 +938,57 @@ func TestAccResourceVMDisks(t *testing.T) {
RefreshState: true, RefreshState: true,
}, },
}}, }},
{"clone with adding disk", []resource.TestStep{
{
SkipFunc: func() (bool, error) {
// this test is failing because of "Attribute 'disk.1.size' expected to be set"
return true, nil
},
Config: te.RenderConfig(`
resource "proxmox_virtual_environment_vm" "test_disk3_template" {
node_name = "{{.NodeName}}"
started = false
name = "test-disk3-template"
template = "true"
disk {
file_format = "raw"
datastore_id = "local-lvm"
interface = "virtio0"
size = 8
}
}
resource "proxmox_virtual_environment_vm" "test_disk3" {
node_name = "{{.NodeName}}"
started = false
name = "test-disk3"
clone {
vm_id = proxmox_virtual_environment_vm.test_disk3_template.id
}
disk {
file_format = "raw"
datastore_id = "local-lvm"
interface = "scsi0"
size = 10
}
}`),
Check: resource.ComposeTestCheckFunc(
ResourceAttributes("proxmox_virtual_environment_vm.test_disk3", map[string]string{
"disk.1.datastore_id": "local-lvm",
"disk.1.interface": "virtio0",
"disk.1.size": "8",
"disk.0.datastore_id": "local-lvm",
"disk.0.interface": "scsi0",
"disk.0.size": "10",
}),
),
},
{
RefreshState: true,
},
}},
} }
for _, tt := range tests { for _, tt := range tests {

View File

@@ -78,12 +78,24 @@ provider "proxmox" {
const datastoreID = "local" const datastoreID = "local"
cloudImagesServer := utils.GetAnyStringEnv("PROXMOX_VE_ACC_CLOUD_IMAGES_SERVER")
if cloudImagesServer == "" {
cloudImagesServer = "https://cloud-images.ubuntu.com"
}
containerImagesServer := utils.GetAnyStringEnv("PROXMOX_VE_ACC_CONTAINER_IMAGES_SERVER")
if containerImagesServer == "" {
containerImagesServer = "http://download.proxmox.com"
}
return &Environment{ return &Environment{
t: t, t: t,
templateVars: map[string]any{ templateVars: map[string]any{
"ProviderConfig": pc, "ProviderConfig": pc,
"NodeName": nodeName, "NodeName": nodeName,
"DatastoreID": datastoreID, "DatastoreID": datastoreID,
"CloudImagesServer": cloudImagesServer,
"ContainerImagesServer": containerImagesServer,
}, },
providerConfig: pc, providerConfig: pc,
NodeName: nodeName, NodeName: nodeName,

View File

@@ -375,75 +375,68 @@ func CreateCustomDisks(
vmID int, vmID int,
storageDevices map[string]vms.CustomStorageDevices, storageDevices map[string]vms.CustomStorageDevices,
) diag.Diagnostics { ) diag.Diagnostics {
// flatten the map of disk devices
var disks []vms.CustomStorageDevice
for _, diskDevice := range storageDevices { for _, diskDevice := range storageDevices {
for _, disk := range diskDevice { for _, disk := range diskDevice {
if disk != nil { if disk != nil && disk.FileID != nil && *disk.FileID != "" {
disks = append(disks, *disk) // only custom disks with defined file ID
err := createCustomDisk(ctx, client, nodeName, vmID, *disk)
if err != nil {
return diag.FromErr(err)
}
} }
} }
} }
commands := make([]string, 0, len(disks)) return nil
resizes := make([]*vms.ResizeDiskRequestBody, 0, len(disks)) }
for _, d := range disks { func createCustomDisk(
if d.FileID == nil || *d.FileID == "" { ctx context.Context,
continue client proxmox.Client,
} nodeName string,
vmID int,
fileFormat := dvDiskFileFormat d vms.CustomStorageDevice,
if d.Format != nil && *d.Format != "" { ) error {
fileFormat = *d.Format fileFormat := dvDiskFileFormat
} if d.Format != nil && *d.Format != "" {
fileFormat = *d.Format
//nolint:lll
commands = append(
commands,
`set -e`,
ssh.TrySudo,
fmt.Sprintf(`file_id="%s"`, *d.FileID),
fmt.Sprintf(`file_format="%s"`, fileFormat),
fmt.Sprintf(`datastore_id_target="%s"`, *d.DatastoreID),
fmt.Sprintf(`vm_id="%d"`, vmID),
fmt.Sprintf(`disk_options="%s"`, d.EncodeOptions()),
fmt.Sprintf(`disk_interface="%s"`, *d.Interface),
`source_image=$(try_sudo "pvesm path $file_id")`,
`imported_disk="$(try_sudo "qm importdisk $vm_id $source_image $datastore_id_target -format $file_format" | grep "unused0" | cut -d ":" -f 3 | cut -d "'" -f 1)"`,
`disk_id="${datastore_id_target}:$imported_disk,${disk_options}"`,
`try_sudo "qm set $vm_id -${disk_interface} $disk_id"`,
)
resizes = append(resizes, &vms.ResizeDiskRequestBody{
Disk: *d.Interface,
Size: *d.Size,
})
} }
// Execute the commands on the node and wait for the result. //nolint:lll
// This is a highly experimental approach to disk imports and is not recommended by Proxmox. commands := []string{
if len(commands) > 0 { `set -e`,
out, err := client.SSH().ExecuteNodeCommands(ctx, nodeName, commands) ssh.TrySudo,
if err != nil { fmt.Sprintf(`file_id="%s"`, *d.FileID),
if matches, e := regexp.Match(`pvesm: .* not found`, out); e == nil && matches { fmt.Sprintf(`file_format="%s"`, fileFormat),
return diag.FromErr(ssh.NewErrUserHasNoPermission(client.SSH().Username())) fmt.Sprintf(`datastore_id_target="%s"`, *d.DatastoreID),
} fmt.Sprintf(`vm_id="%d"`, vmID),
fmt.Sprintf(`disk_options="%s"`, d.EncodeOptions()),
fmt.Sprintf(`disk_interface="%s"`, *d.Interface),
`source_image=$(try_sudo "pvesm path $file_id")`,
`imported_disk="$(try_sudo "qm importdisk $vm_id $source_image $datastore_id_target -format $file_format" | grep "unused0" | cut -d ":" -f 3 | cut -d "'" -f 1)"`,
`disk_id="${datastore_id_target}:$imported_disk,${disk_options}"`,
`try_sudo "qm set $vm_id -${disk_interface} $disk_id"`,
}
return diag.FromErr(err) out, err := client.SSH().ExecuteNodeCommands(ctx, nodeName, commands)
if err != nil {
if matches, e := regexp.Match(`pvesm: .* not found`, out); e == nil && matches {
err = ssh.NewErrUserHasNoPermission(client.SSH().Username())
} }
tflog.Debug(ctx, "vmCreateCustomDisks: commands", map[string]interface{}{ return fmt.Errorf("creating custom disk: %w", err)
"output": string(out), }
})
for _, resize := range resizes { tflog.Debug(ctx, "vmCreateCustomDisks: commands", map[string]interface{}{
err = client.Node(nodeName).VM(vmID).ResizeVMDisk(ctx, resize) "output": string(out),
if err != nil { })
return diag.FromErr(err)
} err = client.Node(nodeName).VM(vmID).ResizeVMDisk(ctx, &vms.ResizeDiskRequestBody{
} Disk: *d.Interface,
Size: *d.Size,
})
if err != nil {
return fmt.Errorf("resizing disk: %w", err)
} }
return nil return nil
@@ -455,7 +448,7 @@ func Read(
d *schema.ResourceData, d *schema.ResourceData,
diskObjects vms.CustomStorageDevices, diskObjects vms.CustomStorageDevices,
vmID int, vmID int,
api proxmox.Client, client proxmox.Client,
nodeName string, nodeName string,
isClone bool, isClone bool,
) diag.Diagnostics { ) diag.Diagnostics {
@@ -492,7 +485,7 @@ func Read(
if datastoreID != "" { if datastoreID != "" {
// disk format may not be returned by config API if it is default for the storage, and that may be different // disk format may not be returned by config API if it is default for the storage, and that may be different
// from the default qcow2, so we need to read it from the storage API to make sure we have the correct value // from the default qcow2, so we need to read it from the storage API to make sure we have the correct value
volume, e := api.Node(nodeName).Storage(datastoreID).GetDatastoreFile(ctx, dd.FileVolume) volume, e := client.Node(nodeName).Storage(datastoreID).GetDatastoreFile(ctx, dd.FileVolume)
if e != nil { if e != nil {
diags = append(diags, diag.FromErr(e)...) diags = append(diags, diag.FromErr(e)...)
continue continue
@@ -638,9 +631,13 @@ func Read(
// Update updates the disk configuration of a VM. // Update updates the disk configuration of a VM.
func Update( func Update(
ctx context.Context,
client proxmox.Client,
nodeName string,
vmID int,
d *schema.ResourceData, d *schema.ResourceData,
planDisks map[string]vms.CustomStorageDevices, planDisks map[string]vms.CustomStorageDevices,
allDiskInfo vms.CustomStorageDevices, currentDisks vms.CustomStorageDevices,
updateBody *vms.UpdateRequestBody, updateBody *vms.UpdateRequestBody,
) (bool, error) { ) (bool, error) {
rebootRequired := false rebootRequired := false
@@ -651,32 +648,52 @@ func Update(
continue continue
} }
for key, value := range diskMap { for key, disk := range diskMap {
if allDiskInfo[key] == nil { var tmp *vms.CustomStorageDevice
switch {
case currentDisks[key] == nil && disk != nil:
if disk.FileID != nil && *disk.FileID != "" {
// only disks with defined file ID are custom image disks that need to be created via import.
err := createCustomDisk(ctx, client, nodeName, vmID, *disk)
if err != nil {
return false, fmt.Errorf("creating custom disk: %w", err)
}
} else {
// otherwise this is a blank disk that can be added directly via update API
tmp = disk
}
case currentDisks[key] != nil:
// update existing disk
tmp = currentDisks[key]
default:
// something went wrong
return false, fmt.Errorf("missing %s device %s", prefix, key) return false, fmt.Errorf("missing %s device %s", prefix, key)
} }
tmp := allDiskInfo[key] if tmp == nil || disk == nil {
continue
if tmp.AIO != value.AIO {
rebootRequired = true
tmp.AIO = value.AIO
} }
tmp.Backup = value.Backup if tmp.AIO != disk.AIO {
tmp.BurstableReadSpeedMbps = value.BurstableReadSpeedMbps rebootRequired = true
tmp.BurstableWriteSpeedMbps = value.BurstableWriteSpeedMbps tmp.AIO = disk.AIO
tmp.Cache = value.Cache }
tmp.Discard = value.Discard
tmp.IOThread = value.IOThread tmp.Backup = disk.Backup
tmp.IopsRead = value.IopsRead tmp.BurstableReadSpeedMbps = disk.BurstableReadSpeedMbps
tmp.IopsWrite = value.IopsWrite tmp.BurstableWriteSpeedMbps = disk.BurstableWriteSpeedMbps
tmp.MaxIopsRead = value.MaxIopsRead tmp.Cache = disk.Cache
tmp.MaxIopsWrite = value.MaxIopsWrite tmp.Discard = disk.Discard
tmp.MaxReadSpeedMbps = value.MaxReadSpeedMbps tmp.IOThread = disk.IOThread
tmp.MaxWriteSpeedMbps = value.MaxWriteSpeedMbps tmp.IopsRead = disk.IopsRead
tmp.Replicate = value.Replicate tmp.IopsWrite = disk.IopsWrite
tmp.SSD = value.SSD tmp.MaxIopsRead = disk.MaxIopsRead
tmp.MaxIopsWrite = disk.MaxIopsWrite
tmp.MaxReadSpeedMbps = disk.MaxReadSpeedMbps
tmp.MaxWriteSpeedMbps = disk.MaxWriteSpeedMbps
tmp.Replicate = disk.Replicate
tmp.SSD = disk.SSD
switch prefix { switch prefix {
case "virtio": case "virtio":

View File

@@ -50,7 +50,6 @@ func Schema() map[string]*schema.Schema {
Type: schema.TypeList, Type: schema.TypeList,
Description: "The disk devices", Description: "The disk devices",
Optional: true, Optional: true,
ForceNew: true,
DefaultFunc: func() (interface{}, error) { DefaultFunc: func() (interface{}, error) {
return []interface{}{ return []interface{}{
map[string]interface{}{ map[string]interface{}{
@@ -97,7 +96,6 @@ func Schema() map[string]*schema.Schema {
Type: schema.TypeString, Type: schema.TypeString,
Description: "The file format", Description: "The file format",
Optional: true, Optional: true,
ForceNew: true,
Computed: true, Computed: true,
ValidateDiagFunc: validators.FileFormat(), ValidateDiagFunc: validators.FileFormat(),
}, },

View File

@@ -5057,7 +5057,7 @@ func vmUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.D
return diag.FromErr(err) return diag.FromErr(err)
} }
rr, err := disk.Update(d, planDisks, allDiskInfo, updateBody) rr, err := disk.Update(ctx, client, nodeName, vmID, d, planDisks, allDiskInfo, updateBody)
if err != nil { if err != nil {
return diag.FromErr(err) return diag.FromErr(err)
} }
@@ -5473,7 +5473,7 @@ func vmUpdateDiskLocationAndSize(
for oldKey, oldDisk := range diskMap { for oldKey, oldDisk := range diskMap {
if _, present := diskNewEntries[prefix][oldKey]; !present { if _, present := diskNewEntries[prefix][oldKey]; !present {
return diag.Errorf( return diag.Errorf(
"deletion of disks not supported. Please delete disk by hand. Old Interface was %s", "deletion of disks not supported. Please delete disk by hand. Old interface was %q",
*oldDisk.Interface, *oldDisk.Interface,
) )
} }