
fix(vm): adding disks causes VM to be re-created (#1336)

Signed-off-by: Pavel Boldyrev <627562+bpg@users.noreply.github.com>
Author: Pavel Boldyrev, 2024-05-29 23:12:05 -04:00 (committed by GitHub)
parent 2785c40d44
commit d02dc1eb0a
7 changed files with 276 additions and 91 deletions

View File

@@ -67,4 +67,6 @@ jobs:
PROXMOX_VE_ACC_NODE_NAME: ${{ matrix.node }}
PROXMOX_VE_ACC_NODE_SSH_ADDRESS: ${{ secrets.PROXMOX_VE_HOST }}
PROXMOX_VE_ACC_NODE_SSH_PORT: ${{ matrix.port }}
PROXMOX_VE_ACC_CLOUD_IMAGES_SERVER: ${{ secrets.PROXMOX_VE_ACC_CLOUD_IMAGES_SERVER }}
PROXMOX_VE_ACC_CONTAINER_IMAGES_SERVER: ${{ secrets.PROXMOX_VE_ACC_CONTAINER_IMAGES_SERVER }}
run: make testacc

View File

@@ -57,7 +57,7 @@ resource "proxmox_virtual_environment_download_file" "ubuntu_container_template"
content_type = "vztmpl"
datastore_id = "local"
node_name = "{{.NodeName}}"
url = "http://download.proxmox.com/images/system/ubuntu-23.04-standard_23.04-1_amd64.tar.zst"
url = "{{.ContainerImagesServer}}/images/system/ubuntu-23.04-standard_23.04-1_amd64.tar.zst"
overwrite_unmanaged = true
}
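The {{.ContainerImagesServer}} and {{.CloudImagesServer}} placeholders are filled in by the test environment's RenderConfig helper from the template variables added later in this commit. A minimal sketch of that substitution, assuming it is backed by Go's text/template (the helper's actual implementation is not part of this diff):

package main

import (
	"os"
	"text/template"
)

func main() {
	// Hypothetical stand-in for te.RenderConfig: parse the HCL snippet as a
	// Go template and fill the placeholders from the same variable names the
	// test environment defines.
	tmpl := template.Must(template.New("config").Parse(
		`url = "{{.ContainerImagesServer}}/images/system/ubuntu-23.04-standard_23.04-1_amd64.tar.zst"`))

	vars := map[string]any{
		"ContainerImagesServer": "http://download.proxmox.com", // the default fallback
	}

	_ = tmpl.Execute(os.Stdout, vars) // url = "http://download.proxmox.com/images/..."
}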
resource "proxmox_virtual_environment_container" "test_container" {

View File

@@ -7,9 +7,11 @@
package test
import (
"regexp"
"testing"
"github.com/hashicorp/terraform-plugin-testing/helper/resource"
"github.com/hashicorp/terraform-plugin-testing/plancheck"
)
func TestAccResourceVM(t *testing.T) {
@@ -295,7 +297,7 @@ func TestAccResourceVMInitialization(t *testing.T) {
content_type = "iso"
datastore_id = "local"
node_name = "{{.NodeName}}"
url = "https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img"
url = "{{.CloudImagesServer}}/jammy/current/jammy-server-cloudimg-amd64.img"
overwrite_unmanaged = true
}`),
}}},
@@ -390,7 +392,7 @@ func TestAccResourceVMNetwork(t *testing.T) {
content_type = "iso"
datastore_id = "local"
node_name = "{{.NodeName}}"
url = "https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img"
url = "{{.CloudImagesServer}}/jammy/current/jammy-server-cloudimg-amd64.img"
overwrite_unmanaged = true
}`),
Check: resource.ComposeTestCheckFunc(
@@ -545,7 +547,7 @@ func TestAccResourceVMDisks(t *testing.T) {
content_type = "iso"
datastore_id = "local"
node_name = "{{.NodeName}}"
url = "https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img"
url = "{{.CloudImagesServer}}/jammy/current/jammy-server-cloudimg-amd64.img"
overwrite_unmanaged = true
}
resource "proxmox_virtual_environment_vm" "test_disk2" {
@@ -643,6 +645,109 @@ func TestAccResourceVMDisks(t *testing.T) {
RefreshState: true,
},
}},
{"adding disks", []resource.TestStep{
{
Config: te.RenderConfig(`
resource "proxmox_virtual_environment_vm" "test_disk" {
node_name = "{{.NodeName}}"
started = false
name = "test-disk"
disk {
file_format = "raw"
datastore_id = "local-lvm"
interface = "scsi0"
size = 8
}
}`),
Check: ResourceAttributes("proxmox_virtual_environment_vm.test_disk", map[string]string{
"disk.0.interface": "scsi0",
"disk.0.path_in_datastore": `vm-\d+-disk-0`,
}),
},
{
Config: te.RenderConfig(`
resource "proxmox_virtual_environment_vm" "test_disk" {
node_name = "{{.NodeName}}"
started = false
name = "test-disk"
disk {
file_format = "raw"
datastore_id = "local-lvm"
interface = "scsi0"
size = 8
}
disk {
file_format = "raw"
datastore_id = "local-lvm"
interface = "scsi1"
size = 8
}
}`),
ConfigPlanChecks: resource.ConfigPlanChecks{
PreApply: []plancheck.PlanCheck{
plancheck.ExpectResourceAction("proxmox_virtual_environment_vm.test_disk", plancheck.ResourceActionUpdate),
},
},
Check: ResourceAttributes("proxmox_virtual_environment_vm.test_disk", map[string]string{
"disk.0.interface": "scsi0",
"disk.0.path_in_datastore": `vm-\d+-disk-0`,
"disk.1.interface": "scsi1",
"disk.1.path_in_datastore": `vm-\d+-disk-1`,
}),
},
{
RefreshState: true,
},
}},
{"removing disks", []resource.TestStep{
{
Config: te.RenderConfig(`
resource "proxmox_virtual_environment_vm" "test_disk" {
node_name = "{{.NodeName}}"
started = false
name = "test-disk"
disk {
file_format = "raw"
datastore_id = "local-lvm"
interface = "scsi0"
size = 8
}
disk {
file_format = "raw"
datastore_id = "local-lvm"
interface = "scsi1"
size = 8
}
}`),
Check: ResourceAttributes("proxmox_virtual_environment_vm.test_disk", map[string]string{
"disk.0.interface": "scsi0",
"disk.0.path_in_datastore": `vm-\d+-disk-0`,
"disk.1.interface": "scsi1",
"disk.1.path_in_datastore": `vm-\d+-disk-1`,
}),
},
{
Config: te.RenderConfig(`
resource "proxmox_virtual_environment_vm" "test_disk" {
node_name = "{{.NodeName}}"
started = false
name = "test-disk"
disk {
file_format = "raw"
datastore_id = "local-lvm"
interface = "scsi0"
size = 8
}
}`),
ExpectError: regexp.MustCompile(`deletion of disks not supported`),
},
}},
{"cdrom", []resource.TestStep{
{
@@ -833,6 +938,57 @@ func TestAccResourceVMDisks(t *testing.T) {
RefreshState: true,
},
}},
{"clone with adding disk", []resource.TestStep{
{
SkipFunc: func() (bool, error) {
// this test is failing because of "Attribute 'disk.1.size' expected to be set"
return true, nil
},
Config: te.RenderConfig(`
resource "proxmox_virtual_environment_vm" "test_disk3_template" {
node_name = "{{.NodeName}}"
started = false
name = "test-disk3-template"
template = "true"
disk {
file_format = "raw"
datastore_id = "local-lvm"
interface = "virtio0"
size = 8
}
}
resource "proxmox_virtual_environment_vm" "test_disk3" {
node_name = "{{.NodeName}}"
started = false
name = "test-disk3"
clone {
vm_id = proxmox_virtual_environment_vm.test_disk3_template.id
}
disk {
file_format = "raw"
datastore_id = "local-lvm"
interface = "scsi0"
size = 10
}
}`),
Check: resource.ComposeTestCheckFunc(
ResourceAttributes("proxmox_virtual_environment_vm.test_disk3", map[string]string{
"disk.1.datastore_id": "local-lvm",
"disk.1.interface": "virtio0",
"disk.1.size": "8",
"disk.0.datastore_id": "local-lvm",
"disk.0.interface": "scsi0",
"disk.0.size": "10",
}),
),
},
{
RefreshState: true,
},
}},
}
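The ConfigPlanChecks step in the "adding disks" case above is the heart of this regression test: plancheck.ExpectResourceAction fails the run unless Terraform plans an in-place update when the second disk block appears. Before this fix, the same change produced a replacement plan; a hypothetical check like the one below (not part of this commit) would have matched the old, buggy behaviour instead:

ConfigPlanChecks: resource.ConfigPlanChecks{
	PreApply: []plancheck.PlanCheck{
		// ResourceActionReplace matches either replacement ordering
		// (destroy-before-create or create-before-destroy).
		plancheck.ExpectResourceAction("proxmox_virtual_environment_vm.test_disk", plancheck.ResourceActionReplace),
	},
},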
for _, tt := range tests {

View File

@@ -78,12 +78,24 @@ provider "proxmox" {
const datastoreID = "local"
cloudImagesServer := utils.GetAnyStringEnv("PROXMOX_VE_ACC_CLOUD_IMAGES_SERVER")
if cloudImagesServer == "" {
cloudImagesServer = "https://cloud-images.ubuntu.com"
}
containerImagesServer := utils.GetAnyStringEnv("PROXMOX_VE_ACC_CONTAINER_IMAGES_SERVER")
if containerImagesServer == "" {
containerImagesServer = "http://download.proxmox.com"
}
return &Environment{
t: t,
templateVars: map[string]any{
"ProviderConfig": pc,
"NodeName": nodeName,
"DatastoreID": datastoreID,
"ProviderConfig": pc,
"NodeName": nodeName,
"DatastoreID": datastoreID,
"CloudImagesServer": cloudImagesServer,
"ContainerImagesServer": containerImagesServer,
},
providerConfig: pc,
NodeName: nodeName,

View File

@@ -375,75 +375,68 @@ func CreateCustomDisks(
vmID int,
storageDevices map[string]vms.CustomStorageDevices,
) diag.Diagnostics {
// flatten the map of disk devices
var disks []vms.CustomStorageDevice
for _, diskDevice := range storageDevices {
for _, disk := range diskDevice {
if disk != nil {
disks = append(disks, *disk)
if disk != nil && disk.FileID != nil && *disk.FileID != "" {
// only custom disks with a defined file ID
err := createCustomDisk(ctx, client, nodeName, vmID, *disk)
if err != nil {
return diag.FromErr(err)
}
}
}
}
commands := make([]string, 0, len(disks))
resizes := make([]*vms.ResizeDiskRequestBody, 0, len(disks))
return nil
}
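The refactor splits the old monolithic loop in two: CreateCustomDisks now only flattens the device map and filters for disks that carry a file ID, delegating the actual import to the new createCustomDisk helper below. A self-contained sketch of that filter condition, with device as a hypothetical stand-in for vms.CustomStorageDevice:

package main

import "fmt"

// device models only the field the filter inspects.
type device struct{ FileID *string }

// needsImport mirrors the condition in CreateCustomDisks: only disks that
// reference a file ID are custom image disks that must be imported over SSH.
func needsImport(d *device) bool {
	return d != nil && d.FileID != nil && *d.FileID != ""
}

func main() {
	id := "local:iso/jammy-server-cloudimg-amd64.img"
	fmt.Println(needsImport(&device{FileID: &id})) // true  -> createCustomDisk
	fmt.Println(needsImport(&device{}))            // false -> handled elsewhere
}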
for _, d := range disks {
if d.FileID == nil || *d.FileID == "" {
continue
}
fileFormat := dvDiskFileFormat
if d.Format != nil && *d.Format != "" {
fileFormat = *d.Format
}
//nolint:lll
commands = append(
commands,
`set -e`,
ssh.TrySudo,
fmt.Sprintf(`file_id="%s"`, *d.FileID),
fmt.Sprintf(`file_format="%s"`, fileFormat),
fmt.Sprintf(`datastore_id_target="%s"`, *d.DatastoreID),
fmt.Sprintf(`vm_id="%d"`, vmID),
fmt.Sprintf(`disk_options="%s"`, d.EncodeOptions()),
fmt.Sprintf(`disk_interface="%s"`, *d.Interface),
`source_image=$(try_sudo "pvesm path $file_id")`,
`imported_disk="$(try_sudo "qm importdisk $vm_id $source_image $datastore_id_target -format $file_format" | grep "unused0" | cut -d ":" -f 3 | cut -d "'" -f 1)"`,
`disk_id="${datastore_id_target}:$imported_disk,${disk_options}"`,
`try_sudo "qm set $vm_id -${disk_interface} $disk_id"`,
)
resizes = append(resizes, &vms.ResizeDiskRequestBody{
Disk: *d.Interface,
Size: *d.Size,
})
func createCustomDisk(
ctx context.Context,
client proxmox.Client,
nodeName string,
vmID int,
d vms.CustomStorageDevice,
) error {
fileFormat := dvDiskFileFormat
if d.Format != nil && *d.Format != "" {
fileFormat = *d.Format
}
// Execute the commands on the node and wait for the result.
// This is a highly experimental approach to disk imports and is not recommended by Proxmox.
if len(commands) > 0 {
out, err := client.SSH().ExecuteNodeCommands(ctx, nodeName, commands)
if err != nil {
if matches, e := regexp.Match(`pvesm: .* not found`, out); e == nil && matches {
return diag.FromErr(ssh.NewErrUserHasNoPermission(client.SSH().Username()))
}
//nolint:lll
commands := []string{
`set -e`,
ssh.TrySudo,
fmt.Sprintf(`file_id="%s"`, *d.FileID),
fmt.Sprintf(`file_format="%s"`, fileFormat),
fmt.Sprintf(`datastore_id_target="%s"`, *d.DatastoreID),
fmt.Sprintf(`vm_id="%d"`, vmID),
fmt.Sprintf(`disk_options="%s"`, d.EncodeOptions()),
fmt.Sprintf(`disk_interface="%s"`, *d.Interface),
`source_image=$(try_sudo "pvesm path $file_id")`,
`imported_disk="$(try_sudo "qm importdisk $vm_id $source_image $datastore_id_target -format $file_format" | grep "unused0" | cut -d ":" -f 3 | cut -d "'" -f 1)"`,
`disk_id="${datastore_id_target}:$imported_disk,${disk_options}"`,
`try_sudo "qm set $vm_id -${disk_interface} $disk_id"`,
}
return diag.FromErr(err)
out, err := client.SSH().ExecuteNodeCommands(ctx, nodeName, commands)
if err != nil {
if matches, e := regexp.Match(`pvesm: .* not found`, out); e == nil && matches {
err = ssh.NewErrUserHasNoPermission(client.SSH().Username())
}
tflog.Debug(ctx, "vmCreateCustomDisks: commands", map[string]interface{}{
"output": string(out),
})
return fmt.Errorf("creating custom disk: %w", err)
}
for _, resize := range resizes {
err = client.Node(nodeName).VM(vmID).ResizeVMDisk(ctx, resize)
if err != nil {
return diag.FromErr(err)
}
}
tflog.Debug(ctx, "vmCreateCustomDisks: commands", map[string]interface{}{
"output": string(out),
})
err = client.Node(nodeName).VM(vmID).ResizeVMDisk(ctx, &vms.ResizeDiskRequestBody{
Disk: *d.Interface,
Size: *d.Size,
})
if err != nil {
return fmt.Errorf("resizing disk: %w", err)
}
return nil
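The shell pipeline above is dense: qm importdisk attaches the imported image to the VM as an "unused0" volume, and the grep/cut chain extracts the bare volume name so it can be reattached on the requested interface via qm set. A worked example of the extraction, using a hypothetical sample of importdisk output (the exact text may vary between Proxmox VE versions):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Assumed shape of the line that grep "unused0" selects.
	out := `unused0: successfully imported disk 'local-lvm:vm-100-disk-1'`

	// cut -d ":" -f 3 -> "vm-100-disk-1'"
	// cut -d "'" -f 1 -> "vm-100-disk-1"
	imported := strings.Split(strings.Split(out, ":")[2], "'")[0]

	// disk_id="${datastore_id_target}:$imported_disk,${disk_options}"
	fmt.Println("local-lvm:" + imported + ",ssd=1") // local-lvm:vm-100-disk-1,ssd=1
}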
@@ -455,7 +448,7 @@ func Read(
d *schema.ResourceData,
diskObjects vms.CustomStorageDevices,
vmID int,
api proxmox.Client,
client proxmox.Client,
nodeName string,
isClone bool,
) diag.Diagnostics {
@@ -492,7 +485,7 @@ func Read(
if datastoreID != "" {
// disk format may not be returned by config API if it is default for the storage, and that may be different
// from the default qcow2, so we need to read it from the storage API to make sure we have the correct value
volume, e := api.Node(nodeName).Storage(datastoreID).GetDatastoreFile(ctx, dd.FileVolume)
volume, e := client.Node(nodeName).Storage(datastoreID).GetDatastoreFile(ctx, dd.FileVolume)
if e != nil {
diags = append(diags, diag.FromErr(e)...)
continue
@@ -638,9 +631,13 @@ func Read(
// Update updates the disk configuration of a VM.
func Update(
ctx context.Context,
client proxmox.Client,
nodeName string,
vmID int,
d *schema.ResourceData,
planDisks map[string]vms.CustomStorageDevices,
allDiskInfo vms.CustomStorageDevices,
currentDisks vms.CustomStorageDevices,
updateBody *vms.UpdateRequestBody,
) (bool, error) {
rebootRequired := false
@@ -651,32 +648,52 @@ func Update(
continue
}
for key, value := range diskMap {
if allDiskInfo[key] == nil {
for key, disk := range diskMap {
var tmp *vms.CustomStorageDevice
switch {
case currentDisks[key] == nil && disk != nil:
if disk.FileID != nil && *disk.FileID != "" {
// only disks with a defined file ID are custom image disks that need to be created via import.
err := createCustomDisk(ctx, client, nodeName, vmID, *disk)
if err != nil {
return false, fmt.Errorf("creating custom disk: %w", err)
}
} else {
// otherwise this is a blank disk that can be added directly via the update API
tmp = disk
}
case currentDisks[key] != nil:
// update existing disk
tmp = currentDisks[key]
default:
// something went wrong
return false, fmt.Errorf("missing %s device %s", prefix, key)
}
tmp := allDiskInfo[key]
if tmp.AIO != value.AIO {
rebootRequired = true
tmp.AIO = value.AIO
if tmp == nil || disk == nil {
continue
}
tmp.Backup = value.Backup
tmp.BurstableReadSpeedMbps = value.BurstableReadSpeedMbps
tmp.BurstableWriteSpeedMbps = value.BurstableWriteSpeedMbps
tmp.Cache = value.Cache
tmp.Discard = value.Discard
tmp.IOThread = value.IOThread
tmp.IopsRead = value.IopsRead
tmp.IopsWrite = value.IopsWrite
tmp.MaxIopsRead = value.MaxIopsRead
tmp.MaxIopsWrite = value.MaxIopsWrite
tmp.MaxReadSpeedMbps = value.MaxReadSpeedMbps
tmp.MaxWriteSpeedMbps = value.MaxWriteSpeedMbps
tmp.Replicate = value.Replicate
tmp.SSD = value.SSD
if tmp.AIO != disk.AIO {
rebootRequired = true
tmp.AIO = disk.AIO
}
tmp.Backup = disk.Backup
tmp.BurstableReadSpeedMbps = disk.BurstableReadSpeedMbps
tmp.BurstableWriteSpeedMbps = disk.BurstableWriteSpeedMbps
tmp.Cache = disk.Cache
tmp.Discard = disk.Discard
tmp.IOThread = disk.IOThread
tmp.IopsRead = disk.IopsRead
tmp.IopsWrite = disk.IopsWrite
tmp.MaxIopsRead = disk.MaxIopsRead
tmp.MaxIopsWrite = disk.MaxIopsWrite
tmp.MaxReadSpeedMbps = disk.MaxReadSpeedMbps
tmp.MaxWriteSpeedMbps = disk.MaxWriteSpeedMbps
tmp.Replicate = disk.Replicate
tmp.SSD = disk.SSD
switch prefix {
case "virtio":

View File

@@ -50,7 +50,6 @@ func Schema() map[string]*schema.Schema {
Type: schema.TypeList,
Description: "The disk devices",
Optional: true,
ForceNew: true,
DefaultFunc: func() (interface{}, error) {
return []interface{}{
map[string]interface{}{
@@ -97,7 +96,6 @@ func Schema() map[string]*schema.Schema {
Type: schema.TypeString,
Description: "The file format",
Optional: true,
ForceNew: true,
Computed: true,
ValidateDiagFunc: validators.FileFormat(),
},
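Dropping ForceNew from the disk list and its file_format attribute is the core of this fix: with ForceNew set, any change to the attribute makes Terraform plan destroy/create, which is exactly the VM re-creation reported in #1336; without it, the change is routed to the resource's update path. A minimal sketch of the flag's effect, assuming the terraform-plugin-sdk/v2 schema package:

package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// diskSchema returns a disk-like list attribute; flipping forceNew is the
// difference between "plan a replacement" and "call Update in place".
func diskSchema(forceNew bool) *schema.Schema {
	return &schema.Schema{
		Type:     schema.TypeList,
		Optional: true,
		ForceNew: forceNew, // this commit removes the equivalent of `true` here
		Elem: &schema.Resource{
			Schema: map[string]*schema.Schema{
				"size": {Type: schema.TypeInt, Optional: true},
			},
		},
	}
}

func main() {
	fmt.Println(diskSchema(false).ForceNew) // false: disk changes become in-place updates
}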

View File

@@ -5057,7 +5057,7 @@ func vmUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
return diag.FromErr(err)
}
rr, err := disk.Update(d, planDisks, allDiskInfo, updateBody)
rr, err := disk.Update(ctx, client, nodeName, vmID, d, planDisks, allDiskInfo, updateBody)
if err != nil {
return diag.FromErr(err)
}
@@ -5473,7 +5473,7 @@ func vmUpdateDiskLocationAndSize(
for oldKey, oldDisk := range diskMap {
if _, present := diskNewEntries[prefix][oldKey]; !present {
return diag.Errorf(
"deletion of disks not supported. Please delete disk by hand. Old Interface was %s",
"deletion of disks not supported. Please delete disk by hand. Old interface was %q",
*oldDisk.Interface,
)
}