mirror of https://github.com/bpg/terraform-provider-proxmox.git synced 2025-07-09 15:25:01 +00:00

feat(vm): Import Disk via API. (#2012)

* feat(vm): Import Disk via API.

Signed-off-by: Marco Attia <54147992+Vaneixus@users.noreply.github.com>

* lint(vm): fix Linter Issues.

Signed-off-by: Marco Attia <54147992+Vaneixus@users.noreply.github.com>

* fix(vm): import_from update issues.

Signed-off-by: Marco Attia <54147992+Vaneixus@users.noreply.github.com>

* fix: store `import_from` in the state, add acc test for `import_from`

Signed-off-by: Pavel Boldyrev <627562+bpg@users.noreply.github.com>

* chore: update examples and docs

Signed-off-by: Pavel Boldyrev <627562+bpg@users.noreply.github.com>

* fix: linter

Signed-off-by: Pavel Boldyrev <627562+bpg@users.noreply.github.com>

* chore: re-gen docs

Signed-off-by: Pavel Boldyrev <627562+bpg@users.noreply.github.com>

---------

Signed-off-by: Marco Attia <54147992+Vaneixus@users.noreply.github.com>
Signed-off-by: Pavel Boldyrev <627562+bpg@users.noreply.github.com>
Co-authored-by: Pavel Boldyrev <627562+bpg@users.noreply.github.com>
Marco Attia 2025-07-06 16:00:38 +00:00 committed by GitHub
parent 6a8f367c46
commit ddc4118b08
GPG Key ID: B5690EEEBB952194
20 changed files with 173 additions and 52 deletions

View File

@@ -112,10 +112,6 @@ Add the following block to your VM config:
For more context, see #1639 and #1770.
### Disk Images Cannot Be Imported by Non-PAM Accounts
Due to limitations in the Proxmox VE API, certain actions need to be performed using SSH. This requires the use of a PAM account (standard Linux account).
### Disk Images from VMware Cannot Be Uploaded or Imported
Proxmox VE does not currently support VMware disk images directly.

View File

@@ -32,7 +32,7 @@ resource "proxmox_virtual_environment_vm" "centos_vm" {
disk {
datastore_id = "local-lvm"
file_id = proxmox_virtual_environment_download_file.centos_cloud_image.id
import_from = proxmox_virtual_environment_download_file.centos_cloud_image.id
interface = "virtio0"
iothread = true
discard = "on"
@@ -41,11 +41,10 @@ resource "proxmox_virtual_environment_vm" "centos_vm" {
}
resource "proxmox_virtual_environment_download_file" "centos_cloud_image" {
content_type = "iso"
content_type = "import"
datastore_id = "local"
node_name = "pve"
url = "https://cloud.centos.org/centos/8-stream/x86_64/images/CentOS-Stream-GenericCloud-8-latest.x86_64.qcow2"
file_name = "centos8.img"
}
```
@@ -69,7 +68,7 @@ resource "proxmox_virtual_environment_vm" "ubuntu_vm" {
disk {
datastore_id = "local-lvm"
file_id = proxmox_virtual_environment_download_file.ubuntu_cloud_image.id
import_from = proxmox_virtual_environment_download_file.ubuntu_cloud_image.id
interface = "virtio0"
iothread = true
discard = "on"
@@ -78,10 +77,12 @@ resource "proxmox_virtual_environment_vm" "ubuntu_vm" {
}
resource "proxmox_virtual_environment_download_file" "ubuntu_cloud_image" {
content_type = "iso"
content_type = "import"
datastore_id = "local"
node_name = "pve"
url = "https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img"
# need to rename the file to *.qcow2 to indicate the actual file format for import
file_name = "jammy-server-cloudimg-amd64.qcow2"
}
```
@@ -110,6 +111,7 @@ resource "proxmox_virtual_environment_vm" "debian_vm" {
disk {
datastore_id = "local-lvm"
# qcow2 image downloaded from https://cloud.debian.org/images/cloud/bookworm/latest/ and renamed to *.img
# the image is not of import type, so provider will use SSH client to import it
file_id = "local:iso/debian-12-genericcloud-amd64.img"
interface = "virtio0"
iothread = true

View File

@@ -40,7 +40,7 @@ resource "proxmox_virtual_environment_vm" "ubuntu_vm" {
disk {
datastore_id = "local-lvm"
file_id = proxmox_virtual_environment_download_file.ubuntu_cloud_image.id
import_from = proxmox_virtual_environment_download_file.ubuntu_cloud_image.id
interface = "virtio0"
iothread = true
discard = "on"
@@ -53,11 +53,12 @@ resource "proxmox_virtual_environment_vm" "ubuntu_vm" {
}
resource "proxmox_virtual_environment_download_file" "ubuntu_cloud_image" {
content_type = "iso"
content_type = "import"
datastore_id = "local"
node_name = "pve"
url = "https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img"
# need to rename the file to *.qcow2 to indicate the actual file format for import
file_name = "jammy-server-cloudimg-amd64.qcow2"
}
```
@@ -130,7 +131,7 @@ resource "proxmox_virtual_environment_vm" "ubuntu_vm" {
disk {
datastore_id = "local-lvm"
file_id = proxmox_virtual_environment_download_file.ubuntu_cloud_image.id
import_from = proxmox_virtual_environment_download_file.ubuntu_cloud_image.id
interface = "virtio0"
iothread = true
discard = "on"
@@ -154,11 +155,12 @@ resource "proxmox_virtual_environment_vm" "ubuntu_vm" {
}
resource "proxmox_virtual_environment_download_file" "ubuntu_cloud_image" {
content_type = "iso"
content_type = "import"
datastore_id = "local"
node_name = "pve"
url = "https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img"
# need to rename the file to *.qcow2 to indicate the actual file format for import
file_name = "jammy-server-cloudimg-amd64.qcow2"
}
output "vm_ipv4_address" {

View File

@@ -296,7 +296,9 @@ terraform plan
The Proxmox provider can connect to a Proxmox node via SSH.
This is used in the `proxmox_virtual_environment_vm` or `proxmox_virtual_environment_file` resource to execute commands on the node to perform actions that are not supported by Proxmox API.
For example, to import VM disks, or to uploading certain type of resources, such as snippets.
For example, to import VM disks in certain cases, or to upload certain types of resources, such as snippets.
~> Note that the SSH connection is not used when a VM disk is imported using the `import_from` attribute. It is also not used to _manage_ VMs or Containers, and is not required for most operations.
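
For reference, a minimal sketch of the API-only import path mentioned in the note above. The resource names, node name, and datastore IDs are placeholders rather than values from this change; only the attribute names follow the documented schema.

```hcl
# No `ssh` block is needed in the provider configuration when disks are imported via `import_from`.
resource "proxmox_virtual_environment_download_file" "example_image" {
  content_type = "import" # stored as an importable disk image
  datastore_id = "local"  # placeholder: a datastore with "import" content enabled
  node_name    = "pve"    # placeholder node name
  url          = "https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img"
  # renamed to *.qcow2 so the actual file format is picked up on import
  file_name    = "jammy-server-cloudimg-amd64.qcow2"
}

resource "proxmox_virtual_environment_vm" "example_vm" {
  node_name = "pve" # placeholder node name

  disk {
    datastore_id = "local-lvm" # placeholder target datastore
    import_from  = proxmox_virtual_environment_download_file.example_image.id
    interface    = "virtio0"
    size         = 20
  }
}
```
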
The SSH connection configuration is provided via the optional `ssh` block in the `provider` block:

View File

@@ -9,8 +9,6 @@ subcategory: Virtual Environment
Manages a virtual machine.
> This resource uses SSH access to the node. You might need to configure the [`ssh` option in the `provider` section](../index.md#node-ip-address-used-for-ssh-connection).
## Example Usage
```hcl
@@ -47,7 +45,7 @@ resource "proxmox_virtual_environment_vm" "ubuntu_vm" {
disk {
datastore_id = "local-lvm"
file_id = proxmox_virtual_environment_download_file.latest_ubuntu_22_jammy_qcow2_img.id
import_from = proxmox_virtual_environment_download_file.latest_ubuntu_22_jammy_qcow2_img.id
interface = "scsi0"
}
@@ -89,10 +87,12 @@ resource "proxmox_virtual_environment_vm" "ubuntu_vm" {
}
resource "proxmox_virtual_environment_download_file" "latest_ubuntu_22_jammy_qcow2_img" {
content_type = "iso"
content_type = "import"
datastore_id = "local"
node_name = "pve"
url = "https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img"
# need to rename the file to *.qcow2 to indicate the actual file format for import
file_name = "jammy-server-cloudimg-amd64.qcow2"
}
resource "random_password" "ubuntu_vm_password" {
@@ -295,6 +295,9 @@ output "ubuntu_vm_public_key" {
- `vmdk` - VMware Disk Image.
- `file_id` - (Optional) The file ID for a disk image when importing a disk into a VM. The ID format is
`<datastore_id>:<content_type>/<file_name>`, for example `local:iso/centos8.img`. Can also be taken from the
`proxmox_virtual_environment_download_file` resource. *Deprecated*, use `import_from` instead.
- `import_from` - (Optional) The file ID for a disk image to import into the VM. The image must be of the `import` content type.
The ID format is `<datastore_id>:import/<file_name>`, for example `local:import/centos8.qcow2`. Can also be taken from the
`proxmox_virtual_environment_download_file` resource (see the sketch below).
- `interface` - (Required) The disk interface for Proxmox, currently `scsi`,
`sata` and `virtio` interfaces are supported. Append the disk index at
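
To make the ID formats above concrete, here is a hedged sketch of a `disk` block that uses `import_from`. The resource name, node name, and datastore IDs are placeholders rather than values from this change; the example file names are the ones already used in the attribute descriptions above.

```hcl
resource "proxmox_virtual_environment_vm" "import_format_example" {
  node_name = "pve" # placeholder node name

  disk {
    datastore_id = "local-lvm" # placeholder target datastore
    interface    = "scsi0"
    size         = 8

    # ID format: <datastore_id>:import/<file_name>; the image must have been
    # downloaded or uploaded with the "import" content type.
    import_from = "local:import/centos8.qcow2"

    # Deprecated equivalent shown for comparison only:
    # file_id = "local:iso/centos8.img"
  }
}
```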

View File

@@ -231,11 +231,18 @@ resource "proxmox_virtual_environment_vm" "data_vm" {
disk {
datastore_id = local.datastore_id
interface = "scsi0"
size = 8
import_from = proxmox_virtual_environment_download_file.latest_debian_12_bookworm_qcow2.id
}
disk {
datastore_id = local.datastore_id
interface = "scsi1"
size = 1
}
disk {
datastore_id = local.datastore_id
interface = "scsi1"
interface = "scsi2"
size = 4
}
}

View File

@@ -15,7 +15,7 @@ resource "proxmox_virtual_environment_vm" "centos_vm" {
disk {
datastore_id = "local-lvm"
file_id = proxmox_virtual_environment_download_file.centos_cloud_image.id
import_from = proxmox_virtual_environment_download_file.centos_cloud_image.id
interface = "virtio0"
iothread = true
discard = "on"
@@ -24,9 +24,8 @@ resource "proxmox_virtual_environment_vm" "centos_vm" {
}
resource "proxmox_virtual_environment_download_file" "centos_cloud_image" {
content_type = "iso"
content_type = "import"
datastore_id = "local"
node_name = "pve"
url = "https://cloud.centos.org/centos/8-stream/x86_64/images/CentOS-Stream-GenericCloud-8-latest.x86_64.qcow2"
file_name = "centos8.img"
}

View File

@@ -10,8 +10,5 @@ terraform {
provider "proxmox" {
endpoint = var.virtual_environment_endpoint
api_token = var.virtual_environment_token
ssh {
agent = true
username = "terraform"
}
# SSH configuration is not required for this example
}

View File

@@ -16,6 +16,7 @@ resource "proxmox_virtual_environment_vm" "debian_vm" {
disk {
datastore_id = "local-lvm"
# qcow2 image downloaded from https://cloud.debian.org/images/cloud/bookworm/latest/ and renamed to *.img
# the image is not of import type, so provider will use SSH client to import it
file_id = "local:iso/debian-12-genericcloud-amd64.img"
interface = "virtio0"
iothread = true

View File

@@ -15,7 +15,7 @@ resource "proxmox_virtual_environment_vm" "ubuntu_vm" {
disk {
datastore_id = "local-lvm"
file_id = proxmox_virtual_environment_download_file.ubuntu_cloud_image.id
import_from = proxmox_virtual_environment_download_file.ubuntu_cloud_image.id
interface = "virtio0"
iothread = true
discard = "on"
@@ -24,8 +24,10 @@ resource "proxmox_virtual_environment_vm" "ubuntu_vm" {
}
resource "proxmox_virtual_environment_download_file" "ubuntu_cloud_image" {
content_type = "iso"
content_type = "import"
datastore_id = "local"
node_name = "pve"
url = "https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img"
# need to rename the file to *.qcow2 to indicate the actual file format for import
file_name = "jammy-server-cloudimg-amd64.qcow2"
}

View File

@@ -10,8 +10,4 @@ terraform {
provider "proxmox" {
endpoint = var.virtual_environment_endpoint
api_token = var.virtual_environment_token
ssh {
agent = true
username = "terraform"
}
}

View File

@@ -16,7 +16,7 @@ resource "proxmox_virtual_environment_vm" "ubuntu_vm" {
disk {
datastore_id = "local-lvm"
file_id = proxmox_virtual_environment_download_file.ubuntu_cloud_image.id
import_from = proxmox_virtual_environment_download_file.ubuntu_cloud_image.id
interface = "virtio0"
iothread = true
discard = "on"
@@ -40,11 +40,12 @@ resource "proxmox_virtual_environment_vm" "ubuntu_vm" {
}
resource "proxmox_virtual_environment_download_file" "ubuntu_cloud_image" {
content_type = "iso"
content_type = "import"
datastore_id = "local"
node_name = "pve"
url = "https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img"
# need to rename the file to *.qcow2 to indicate the actual file format for import
file_name = "jammy-server-cloudimg-amd64.qcow2"
}
output "vm_ipv4_address" {

View File

@@ -25,7 +25,7 @@ resource "proxmox_virtual_environment_vm" "ubuntu_vm" {
disk {
datastore_id = "local-lvm"
file_id = proxmox_virtual_environment_download_file.ubuntu_cloud_image.id
import_from = proxmox_virtual_environment_download_file.ubuntu_cloud_image.id
interface = "virtio0"
iothread = true
discard = "on"
@@ -38,9 +38,10 @@ resource "proxmox_virtual_environment_vm" "ubuntu_vm" {
}
resource "proxmox_virtual_environment_download_file" "ubuntu_cloud_image" {
content_type = "iso"
content_type = "import"
datastore_id = "local"
node_name = "pve"
url = "https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img"
# need to rename the file to *.qcow2 to indicate the actual file format for import
file_name = "jammy-server-cloudimg-amd64.qcow2"
}

View File

@@ -10,8 +10,4 @@ terraform {
provider "proxmox" {
endpoint = var.virtual_environment_endpoint
api_token = var.virtual_environment_token
ssh {
agent = true
username = "terraform"
}
}

View File

@@ -142,6 +142,45 @@ func TestAccResourceVMDisks(t *testing.T) {
}),
),
}}},
{"import disk from an image", []resource.TestStep{{
Config: te.RenderConfig(`
resource "proxmox_virtual_environment_download_file" "test_disk_image" {
content_type = "import"
datastore_id = "local"
node_name = "{{.NodeName}}"
url = "{{.CloudImagesServer}}/jammy/current/jammy-server-cloudimg-amd64.img"
file_name = "test-disk-image.img.raw"
overwrite_unmanaged = true
}
resource "proxmox_virtual_environment_vm" "test_disk" {
node_name = "{{.NodeName}}"
started = false
name = "test-disk"
disk {
datastore_id = "local-lvm"
import_from = proxmox_virtual_environment_download_file.test_disk_image.id
interface = "virtio0"
iothread = true
discard = "on"
serial = "dead_beef"
size = 20
}
}`),
Check: resource.ComposeTestCheckFunc(
ResourceAttributes("proxmox_virtual_environment_vm.test_disk", map[string]string{
"disk.0.cache": "none",
"disk.0.datastore_id": "local-lvm",
"disk.0.discard": "on",
"disk.0.file_format": "raw",
"disk.0.interface": "virtio0",
"disk.0.iothread": "true",
"disk.0.path_in_datastore": `vm-\d+-disk-\d+`,
"disk.0.serial": "dead_beef",
"disk.0.size": "20",
"disk.0.ssd": "false",
}),
),
}}},
{"clone default disk without overrides", []resource.TestStep{
{
Config: te.RenderConfig(`

View File

@@ -36,6 +36,7 @@ type CustomStorageDevice struct {
BurstableWriteSpeedMbps *int `json:"mbps_wr_max,omitempty" url:"mbps_wr_max,omitempty"`
Cache *string `json:"cache,omitempty" url:"cache,omitempty"`
Discard *string `json:"discard,omitempty" url:"discard,omitempty"`
ImportFrom *string `json:"import_from,omitempty" url:"import_from,omitempty"`
Format *string `json:"format,omitempty" url:"format,omitempty"`
IopsRead *int `json:"iops_rd,omitempty" url:"iops_rd,omitempty"`
IopsWrite *int `json:"iops_wr,omitempty" url:"iops_wr,omitempty"`
@@ -203,10 +204,18 @@ func (d *CustomStorageDevice) EncodeOptions() string {
// EncodeValues converts a CustomStorageDevice struct to a URL value.
func (d *CustomStorageDevice) EncodeValues(key string, v *url.Values) error {
if d.ImportFrom != nil && *d.ImportFrom != "" {
d.FileVolume = *d.DatastoreID + ":" + "0"
}
values := []string{
fmt.Sprintf("file=%s", d.FileVolume),
}
if d.ImportFrom != nil && *d.ImportFrom != "" {
values = append(values, fmt.Sprintf("import-from=%s", *d.ImportFrom))
}
if d.Format != nil {
values = append(values, fmt.Sprintf("format=%s", *d.Format))
}
@@ -273,6 +282,9 @@ func (d *CustomStorageDevice) UnmarshalJSON(b []byte) error {
case "file":
d.FileVolume = v[1]
case "import_from":
d.ImportFrom = &v[1]
case "format":
d.Format = &v[1]
@@ -399,6 +411,7 @@ func (d *CustomStorageDevice) MergeWith(m CustomStorageDevice) bool {
updated = ptr.UpdateIfChanged(&d.Replicate, m.Replicate) || updated
updated = ptr.UpdateIfChanged(&d.SSD, m.SSD) || updated
updated = ptr.UpdateIfChanged(&d.Serial, m.Serial) || updated
updated = ptr.UpdateIfChanged(&d.ImportFrom, m.ImportFrom) || updated
return updated
}

View File

@@ -10,6 +10,7 @@ import (
"context"
"errors"
"fmt"
"maps"
"regexp"
"slices"
"strings"
@@ -178,6 +179,7 @@ func GetDiskDeviceObjects(
diskInterface, _ := block[mkDiskInterface].(string)
fileFormat, _ := block[mkDiskFileFormat].(string)
fileID, _ := block[mkDiskFileID].(string)
importFrom, _ := block[mkDiskImportFrom].(string)
ioThread := types.CustomBool(block[mkDiskIOThread].(bool))
replicate := types.CustomBool(block[mkDiskReplicate].(bool))
serial := block[mkDiskSerial].(string)
@@ -212,6 +214,7 @@ func GetDiskDeviceObjects(
diskDevice.DatastoreID = &datastoreID
diskDevice.Discard = &discard
diskDevice.FileID = &fileID
diskDevice.ImportFrom = &importFrom
diskDevice.Replicate = &replicate
diskDevice.Serial = &serial
diskDevice.Size = types.DiskSizeFromGigabytes(int64(size))
@@ -418,6 +421,12 @@ func Read(
disk[mkDiskFileID] = dd.FileID
}
// Note that PVE does not return the 'import-from' attribute for imported disks,
// but we keep it here for consistency; the actual value is set further down.
if dd.ImportFrom != nil {
disk[mkDiskImportFrom] = dd.ImportFrom
}
disk[mkDiskInterface] = di
disk[mkDiskSize] = dd.Size.InGigabytes()
@@ -539,8 +548,20 @@ func Read(
var diskList []interface{}
if len(currentDiskList) > 0 {
interfaces := utils.ListResourcesAttributeValue(currentDiskList, mkDiskInterface)
diskList = utils.OrderedListFromMapByKeyValues(diskMap, interfaces)
currentDiskMap := utils.MapResourcesByAttribute(currentDiskList, mkDiskInterface)
// copy import_from from the current disk if it exists
for k, v := range currentDiskMap {
if disk, ok := v.(map[string]interface{}); ok {
if importFrom, ok := disk[mkDiskImportFrom].(string); ok && importFrom != "" {
if _, exists := diskMap[k]; exists {
diskMap[k].(map[string]interface{})[mkDiskImportFrom] = importFrom
}
}
}
}
diskList = utils.OrderedListFromMapByKeyValues(diskMap,
slices.AppendSeq(make([]string, 0, len(currentDiskMap)), maps.Keys(currentDiskMap)))
} else {
diskList = utils.OrderedListFromMap(diskMap)
}
@@ -598,6 +619,13 @@ func Update(
tmp.AIO = disk.AIO
}
if disk.ImportFrom != nil && *disk.ImportFrom != "" {
rebootRequired = true
tmp.DatastoreID = disk.DatastoreID
tmp.ImportFrom = disk.ImportFrom
tmp.Size = disk.Size
}
tmp.Backup = disk.Backup
tmp.BurstableReadSpeedMbps = disk.BurstableReadSpeedMbps
tmp.BurstableWriteSpeedMbps = disk.BurstableWriteSpeedMbps

View File

@@ -31,6 +31,7 @@ const (
mkDiskDiscard = "discard"
mkDiskFileFormat = "file_format"
mkDiskFileID = "file_id"
mkDiskImportFrom = "import_from"
mkDiskInterface = "interface"
mkDiskIopsRead = "iops_read"
mkDiskIopsReadBurstable = "iops_read_burstable"
@@ -64,6 +65,7 @@ func Schema() map[string]*schema.Schema {
mkDiskCache: dvDiskCache,
mkDiskDatastoreID: dvDiskDatastoreID,
mkDiskDiscard: dvDiskDiscard,
mkDiskImportFrom: "",
mkDiskFileID: "",
mkDiskInterface: dvDiskInterface,
mkDiskIOThread: false,
@@ -133,6 +135,14 @@ func Schema() map[string]*schema.Schema {
Default: "",
ValidateDiagFunc: validators.FileID(),
},
mkDiskImportFrom: {
Type: schema.TypeString,
Description: "The file id of a disk image to import from storage.",
Optional: true,
ForceNew: false,
Default: "",
ValidateDiagFunc: validators.FileID(),
},
mkDiskSerial: {
Type: schema.TypeString,
Description: "The drives reported serial number",

View File

@@ -20,6 +20,7 @@ func TestVMSchema(t *testing.T) {
mkDiskPathInDatastore,
mkDiskFileFormat,
mkDiskFileID,
mkDiskImportFrom,
mkDiskSize,
})
@@ -28,6 +29,7 @@ func TestVMSchema(t *testing.T) {
mkDiskPathInDatastore: schema.TypeString,
mkDiskFileFormat: schema.TypeString,
mkDiskFileID: schema.TypeString,
mkDiskImportFrom: schema.TypeString,
mkDiskSize: schema.TypeInt,
})

View File

@@ -2913,6 +2913,25 @@ func vmCreateCustom(ctx context.Context, d *schema.ResourceData, m interface{})
return diags
}
resizeDisks := diskDeviceObjects.Filter(func(device *vms.CustomStorageDevice) bool {
return device.ImportFrom != nil && *device.ImportFrom != ""
})
if len(resizeDisks) > 0 {
tflog.Info(ctx, "Resizing disks after VM creation")
for idev, device := range resizeDisks {
tflog.Info(ctx, fmt.Sprintf("VM %d: Resizing disk %s", vmID, idev))
err = client.Node(nodeName).VM(vmID).ResizeVMDisk(ctx, &vms.ResizeDiskRequestBody{
Size: *device.Size,
Disk: idev,
})
if err != nil {
return diag.FromErr(err)
}
}
}
return vmCreateStart(ctx, d, m)
}
@@ -5908,6 +5927,11 @@ func vmUpdateDiskLocationAndSize(
}
}
// We need to resize the disk if the import source has changed.
if *oldDisk.ImportFrom != *diskNewEntries[oldIface].ImportFrom {
*oldDisk.Size = 0
}
if *oldDisk.Size != *diskNewEntries[oldIface].Size {
if *oldDisk.Size < *diskNewEntries[oldIface].Size {
if oldDisk.IsOwnedBy(vmID) {