
more refactoring

Signed-off-by: Pavel Boldyrev <627562+bpg@users.noreply.github.com>
Pavel Boldyrev 2024-02-03 10:35:09 -05:00
parent 5bf5754cb3
commit c1374a5c10
GPG Key ID: 02A24794ADAC7455
7 changed files with 325 additions and 185 deletions

View File

@@ -4,6 +4,7 @@
"Burstable",
"cdrom",
"CLRF",
"iface",
"iothread",
"keyctl",
"mbps",
@@ -12,6 +13,7 @@
"qcow",
"rootfs",
"signoff",
"stretchr",
"tflog",
"unmanaged",
"virtio",

View File

@@ -34,7 +34,7 @@ func TestCustomStorageDevice_UnmarshalJSON(t *testing.T) {
Enabled: true,
FileVolume: "local-lvm:vm-2041-disk-0",
IOThread: types.BoolPtr(true),
Size: &ds8gig,
Size: ds8gig,
SSD: types.BoolPtr(true),
},
},
@@ -47,7 +47,7 @@ func TestCustomStorageDevice_UnmarshalJSON(t *testing.T) {
FileVolume: "nfs:2041/vm-2041-disk-0.raw",
Format: types.StrPtr("raw"),
IOThread: types.BoolPtr(true),
Size: &ds8gig,
Size: ds8gig,
SSD: types.BoolPtr(true),
},
},

View File

@@ -38,8 +38,9 @@ func (r DiskSize) InGigabytes() int64 {
}
// DiskSizeFromGigabytes creates a DiskSize from gigabytes.
func DiskSizeFromGigabytes(size int64) DiskSize {
return DiskSize(size * 1024 * 1024 * 1024)
func DiskSizeFromGigabytes(size int64) *DiskSize {
ds := DiskSize(size * 1024 * 1024 * 1024)
return &ds
}
// MarshalJSON marshals a disk size into a Proxmox API `<DiskSize>` string.
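
The helper now returns a pointer, so callers can assign the result straight into pointer-typed fields. A minimal usage sketch (hypothetical, not part of this commit; it only assumes the types shown in this diff):

package main

import (
	"github.com/bpg/terraform-provider-proxmox/proxmox/nodes/vms"
	"github.com/bpg/terraform-provider-proxmox/proxmox/types"
)

func main() {
	// The pointer return value can be assigned directly to Size,
	// without the &temp step the old value-returning version required.
	disk := vms.CustomStorageDevice{
		Enabled: true,
		Size:    types.DiskSizeFromGigabytes(8), // *types.DiskSize for 8 GiB
	}
	_ = disk
}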

View File

@@ -17,6 +17,40 @@ import (
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
"golang.org/x/exp/maps"
)
const (
mkDisk = "disk"
mkDiskInterface = "interface"
mkDiskDatastoreID = "datastore_id"
mkDiskPathInDatastore = "path_in_datastore"
mkDiskFileFormat = "file_format"
mkDiskFileID = "file_id"
mkDiskSize = "size"
mkDiskIOThread = "iothread"
mkDiskSSD = "ssd"
mkDiskDiscard = "discard"
mkDiskCache = "cache"
mkDiskSpeed = "speed"
mkDiskSpeedRead = "read"
mkDiskSpeedReadBurstable = "read_burstable"
mkDiskSpeedWrite = "write"
mkDiskSpeedWriteBurstable = "write_burstable"
dvDiskInterface = "scsi0"
dvDiskDatastoreID = "local-lvm"
dvDiskFileFormat = "qcow2"
dvDiskFileID = ""
dvDiskSize = 8
dvDiskIOThread = false
dvDiskSSD = false
dvDiskDiscard = "ignore"
dvDiskCache = "none"
dvDiskSpeedRead = 0
dvDiskSpeedReadBurstable = 0
dvDiskSpeedWrite = 0
dvDiskSpeedWriteBurstable = 0
)
func diskSchema() *schema.Schema {
@@ -165,26 +199,65 @@ func diskSchema() *schema.Schema {
}
}
func updateDisk1(
func createDisks(
ctx context.Context, vmConfig *vms.GetResponseData, d *schema.ResourceData, vmAPI *vms.Client,
) (map[string]*vms.CustomStorageDevice, error) {
allDiskInfo := getDiskInfo(vmConfig, d)
// this is what the VM has at the moment: a map of interface name (e.g. virtio1) -> disk object
currentStorageDevices := populateFileIDs(mapStorageDevices(vmConfig), d)
diskDeviceObjects, e := vmGetDiskDeviceObjects(d, nil)
// map of interface type (virtio|sata|scsi|...) -> map of interface name (virtio1) -> disk object
planStorageDevices, e := getStorageDevicesFromResource(d)
if e != nil {
return nil, e
}
// create disks that are not present in the current configuration
for prefix, disks := range planStorageDevices {
for diskInterface, disk := range disks {
if currentStorageDevices[diskInterface] == nil {
diskUpdateBody := &vms.UpdateRequestBody{}
switch prefix {
case "virtio":
if diskUpdateBody.VirtualIODevices == nil {
diskUpdateBody.VirtualIODevices = vms.CustomStorageDevices{}
}
diskUpdateBody.VirtualIODevices[diskInterface] = disk
case "sata":
if diskUpdateBody.SATADevices == nil {
diskUpdateBody.SATADevices = vms.CustomStorageDevices{}
}
diskUpdateBody.SATADevices[diskInterface] = disk
case "scsi":
if diskUpdateBody.SCSIDevices == nil {
diskUpdateBody.SCSIDevices = vms.CustomStorageDevices{}
}
diskUpdateBody.SCSIDevices[diskInterface] = disk
}
e = vmAPI.UpdateVM(ctx, diskUpdateBody)
if e != nil {
return nil, e
}
}
}
}
disk := d.Get(mkDisk).([]interface{})
for i := range disk {
diskBlock := disk[i].(map[string]interface{})
diskInterface := diskBlock[mkDiskInterface].(string)
dataStoreID := diskBlock[mkDiskDatastoreID].(string)
diskSize := int64(diskBlock[mkDiskSize].(int))
prefix := diskDigitPrefix(diskInterface)
currentDiskInfo := allDiskInfo[diskInterface]
configuredDiskInfo := diskDeviceObjects[prefix][diskInterface]
currentDiskInfo := currentStorageDevices[diskInterface]
configuredDiskInfo := planStorageDevices[prefix][diskInterface]
if currentDiskInfo == nil {
diskUpdateBody := &vms.UpdateRequestBody{}
@@ -235,7 +308,7 @@ func updateDisk1(
diskResizeBody := &vms.ResizeDiskRequestBody{
Disk: diskInterface,
Size: types.DiskSizeFromGigabytes(diskSize),
Size: *types.DiskSizeFromGigabytes(diskSize),
}
moveDisk := false
@@ -243,8 +316,8 @@ func updateDisk1(
if dataStoreID != "" {
moveDisk = true
if allDiskInfo[diskInterface] != nil {
fileIDParts := strings.Split(allDiskInfo[diskInterface].FileVolume, ":")
if currentStorageDevices[diskInterface] != nil {
fileIDParts := strings.Split(currentStorageDevices[diskInterface].FileVolume, ":")
moveDisk = dataStoreID != fileIDParts[0]
}
}
@@ -266,7 +339,7 @@ func updateDisk1(
}
}
return allDiskInfo, nil
return currentStorageDevices, nil
}
func vmCreateCustomDisks(ctx context.Context, d *schema.ResourceData, m interface{}) error {
@@ -430,43 +503,38 @@ func vmCreateCustomDisks(ctx context.Context, d *schema.ResourceData, m interfac
return nil
}
func vmGetDiskDeviceObjects(
func getStorageDevicesFromResource(d *schema.ResourceData) (map[string]map[string]vms.CustomStorageDevice, error) {
return getDiskDeviceObjects1(d, d.Get(mkDisk).([]interface{}))
}
func getDiskDeviceObjects1(
d *schema.ResourceData,
disks []interface{},
) (map[string]map[string]vms.CustomStorageDevice, error) {
var diskDevice []interface{}
if disks != nil {
diskDevice = disks
} else {
diskDevice = d.Get(mkDisk).([]interface{})
}
diskDeviceObjects := map[string]map[string]vms.CustomStorageDevice{}
resource := VM()
for _, diskEntry := range diskDevice {
for _, diskEntry := range disks {
diskDevice := vms.CustomStorageDevice{
Enabled: true,
}
block := diskEntry.(map[string]interface{})
diskInterface, _ := block[mkDiskInterface].(string)
datastoreID, _ := block[mkDiskDatastoreID].(string)
pathInDatastore := ""
if untyped, hasPathInDatastore := block[mkDiskPathInDatastore]; hasPathInDatastore {
pathInDatastore = untyped.(string)
}
size, _ := block[mkDiskSize].(int)
fileFormat, _ := block[mkDiskFileFormat].(string)
fileID, _ := block[mkDiskFileID].(string)
size, _ := block[mkDiskSize].(int)
diskInterface, _ := block[mkDiskInterface].(string)
ioThread := types.CustomBool(block[mkDiskIOThread].(bool))
ssd := types.CustomBool(block[mkDiskSSD].(bool))
discard := block[mkDiskDiscard].(string)
cache := block[mkDiskCache].(string)
pathInDatastore := ""
if untyped, hasPathInDatastore := block[mkDiskPathInDatastore]; hasPathInDatastore {
pathInDatastore = untyped.(string)
}
speedBlock, err := structure.GetSchemaBlock(
resource,
d,
@@ -501,8 +569,7 @@ func vmGetDiskDeviceObjects(
diskDevice.Interface = &diskInterface
diskDevice.Format = &fileFormat
diskDevice.FileID = &fileID
diskSize := types.DiskSizeFromGigabytes(int64(size))
diskDevice.Size = &diskSize
diskDevice.Size = types.DiskSizeFromGigabytes(int64(size))
diskDevice.IOThread = &ioThread
diskDevice.Discard = &discard
diskDevice.Cache = &cache
@@ -561,7 +628,7 @@ func readDisk1(ctx context.Context, d *schema.ResourceData,
) diag.Diagnostics {
currentDiskList := d.Get(mkDisk).([]interface{})
diskMap := map[string]interface{}{}
diskObjects := getDiskInfo(vmConfig, d)
diskObjects := populateFileIDs(mapStorageDevices(vmConfig), d)
var diags diag.Diagnostics
@@ -689,12 +756,12 @@ func updateDisk(d *schema.ResourceData, vmConfig *vms.GetResponseData, updateBod
return nil
}
diskDeviceObjects, err := vmGetDiskDeviceObjects(d, nil)
diskDeviceObjects, err := getStorageDevicesFromResource(d)
if err != nil {
return err
}
diskDeviceInfo := getDiskInfo(vmConfig, d)
diskDeviceInfo := populateFileIDs(mapStorageDevices(vmConfig), d)
for prefix, diskMap := range diskDeviceObjects {
if diskMap == nil {
@@ -751,3 +818,146 @@ func updateDisk(d *schema.ResourceData, vmConfig *vms.GetResponseData, updateBod
return nil
}
// mapStorageDevices maps the current VM storage devices by their interface names.
func mapStorageDevices(resp *vms.GetResponseData) map[string]*vms.CustomStorageDevice {
storageDevices := map[string]*vms.CustomStorageDevice{}
fillMap := func(iface string, dev *vms.CustomStorageDevice) {
if dev != nil {
d := *dev
if d.Size == nil {
d.Size = new(types.DiskSize)
}
d.Interface = &iface
storageDevices[iface] = &d
}
}
fillMap("ide0", resp.IDEDevice0)
fillMap("ide1", resp.IDEDevice1)
fillMap("ide2", resp.IDEDevice2)
fillMap("ide3", resp.IDEDevice3)
fillMap("sata0", resp.SATADevice0)
fillMap("sata1", resp.SATADevice1)
fillMap("sata2", resp.SATADevice2)
fillMap("sata3", resp.SATADevice3)
fillMap("sata4", resp.SATADevice4)
fillMap("sata5", resp.SATADevice5)
fillMap("scsi0", resp.SCSIDevice0)
fillMap("scsi1", resp.SCSIDevice1)
fillMap("scsi2", resp.SCSIDevice2)
fillMap("scsi3", resp.SCSIDevice3)
fillMap("scsi4", resp.SCSIDevice4)
fillMap("scsi5", resp.SCSIDevice5)
fillMap("scsi6", resp.SCSIDevice6)
fillMap("scsi7", resp.SCSIDevice7)
fillMap("scsi8", resp.SCSIDevice8)
fillMap("scsi9", resp.SCSIDevice9)
fillMap("scsi10", resp.SCSIDevice10)
fillMap("scsi11", resp.SCSIDevice11)
fillMap("scsi12", resp.SCSIDevice12)
fillMap("scsi13", resp.SCSIDevice13)
fillMap("virtio0", resp.VirtualIODevice0)
fillMap("virtio1", resp.VirtualIODevice1)
fillMap("virtio2", resp.VirtualIODevice2)
fillMap("virtio3", resp.VirtualIODevice3)
fillMap("virtio4", resp.VirtualIODevice4)
fillMap("virtio5", resp.VirtualIODevice5)
fillMap("virtio6", resp.VirtualIODevice6)
fillMap("virtio7", resp.VirtualIODevice7)
fillMap("virtio8", resp.VirtualIODevice8)
fillMap("virtio9", resp.VirtualIODevice9)
fillMap("virtio10", resp.VirtualIODevice10)
fillMap("virtio11", resp.VirtualIODevice11)
fillMap("virtio12", resp.VirtualIODevice12)
fillMap("virtio13", resp.VirtualIODevice13)
fillMap("virtio14", resp.VirtualIODevice14)
fillMap("virtio15", resp.VirtualIODevice15)
return storageDevices
}
// populateFileIDs sets the file IDs of the given storage devices from the matching disk definitions in the resource data.
func populateFileIDs(devices map[string]*vms.CustomStorageDevice, d *schema.ResourceData) map[string]*vms.CustomStorageDevice {
planDisk := d.Get(mkDisk)
planDiskList := planDisk.([]interface{})
planDiskMap := map[string]map[string]interface{}{}
for _, v := range planDiskList {
dm := v.(map[string]interface{})
iface := dm[mkDiskInterface].(string)
planDiskMap[iface] = dm
}
for k, v := range devices {
if v != nil && planDiskMap[k] != nil {
if planDiskMap[k][mkDiskFileID] != nil {
fileID := planDiskMap[k][mkDiskFileID].(string)
v.FileID = &fileID
}
}
}
return devices
}
// getDiskDatastores returns a list of the used datastores in a VM.
func getDiskDatastores(vm *vms.GetResponseData, d *schema.ResourceData) []string {
storageDevices := populateFileIDs(mapStorageDevices(vm), d)
datastoresSet := map[string]int{}
for _, diskInfo := range storageDevices {
// Ignore empty storage devices and devices (such as IDE) that may not have any media mounted
if diskInfo == nil || diskInfo.FileVolume == "none" {
continue
}
fileIDParts := strings.Split(diskInfo.FileVolume, ":")
datastoresSet[fileIDParts[0]] = 1
}
if vm.EFIDisk != nil {
fileIDParts := strings.Split(vm.EFIDisk.FileVolume, ":")
datastoresSet[fileIDParts[0]] = 1
}
if vm.TPMState != nil {
fileIDParts := strings.Split(vm.TPMState.FileVolume, ":")
datastoresSet[fileIDParts[0]] = 1
}
datastores := []string{}
for datastore := range datastoresSet {
datastores = append(datastores, datastore)
}
return datastores
}
type customStorageDeviceMap struct {
// map of interface type (virtio|sata|scsi|...) -> map of interface name (virtio1) -> disk object
devices map[string]map[string]vms.CustomStorageDevice
}
func (c *customStorageDeviceMap) byInterfaceType(interfaceType string) []vms.CustomStorageDevice {
return maps.Values[map[string]vms.CustomStorageDevice](c.devices[interfaceType])
}
func (c *customStorageDeviceMap) byInterfaceName(interfaceName string) (*vms.CustomStorageDevice, bool) {
for _, devices := range c.devices {
if device, ok := devices[interfaceName]; ok {
return &device, true
}
}
return nil, false
}
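
For context, a hedged sketch of how the new customStorageDeviceMap accessors could be called from within this package (exampleDeviceLookup is hypothetical; only the struct and methods above come from the diff):

package vm

import "github.com/bpg/terraform-provider-proxmox/proxmox/nodes/vms"

func exampleDeviceLookup() {
	devMap := customStorageDeviceMap{
		devices: map[string]map[string]vms.CustomStorageDevice{
			"scsi": {"scsi0": {Enabled: true}},
		},
	}

	// All devices on a given bus; maps.Values returns them in unspecified order.
	scsiDevices := devMap.byInterfaceType("scsi")

	// A single device looked up by its full interface name.
	dev, found := devMap.byInterfaceName("scsi0")

	_, _, _ = scsiDevices, dev, found
}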

View File

@@ -0,0 +1,72 @@
package vm
import (
"testing"
"github.com/bpg/terraform-provider-proxmox/proxmox/nodes/vms"
"github.com/bpg/terraform-provider-proxmox/proxmox/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestMapStorageDevices(t *testing.T) {
devices := &vms.GetResponseData{
VirtualIODevice0: &vms.CustomStorageDevice{
Interface: types.StrPtr("virtio0"),
},
VirtualIODevice1: &vms.CustomStorageDevice{
Interface: types.StrPtr("virtio1"),
Size: types.DiskSizeFromGigabytes(10),
},
}
expected := map[string]*vms.CustomStorageDevice{
"virtio0": {
Interface: types.StrPtr("virtio0"),
Size: new(types.DiskSize),
},
"virtio1": {
Interface: types.StrPtr("virtio1"),
Size: types.DiskSizeFromGigabytes(10),
},
}
result := mapStorageDevices(devices)
assert.Equal(t, expected, result)
}
func TestPopulateFileID(t *testing.T) {
devicesMap := map[string]*vms.CustomStorageDevice{
"virtio0": {},
"virtio1": {},
}
disk := []map[string]interface{}{
{
mkDiskInterface: "virtio0",
mkDiskFileID: "local:100/vm-100-disk-1.qcow2",
},
{
mkDiskInterface: "virtio1",
mkDiskFileID: "local:100/vm-100-disk-2.qcow2",
},
}
d := VM().TestResourceData()
err := d.Set("disk", disk)
require.NoError(t, err)
expected := map[string]*vms.CustomStorageDevice{
"virtio0": {
FileID: types.StrPtr("local:100/vm-100-disk-1.qcow2"),
},
"virtio1": {
FileID: types.StrPtr("local:100/vm-100-disk-2.qcow2"),
},
}
result := populateFileIDs(devicesMap, d)
assert.Equal(t, expected, result)
}
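
The two helpers tested above are composed in the resource code as populateFileIDs(mapStorageDevices(vmConfig), d) (see readDisk1 and updateDisk). A condensed, hypothetical sketch of that round trip, reusing the same fixtures as the tests:

package vm

import "github.com/bpg/terraform-provider-proxmox/proxmox/nodes/vms"

func exampleReadDevices() map[string]*vms.CustomStorageDevice {
	vmConfig := &vms.GetResponseData{
		VirtualIODevice0: &vms.CustomStorageDevice{FileVolume: "local-lvm:vm-100-disk-0"},
	}

	d := VM().TestResourceData()
	_ = d.Set("disk", []map[string]interface{}{
		{mkDiskInterface: "virtio0", mkDiskFileID: "local:100/vm-100-disk-1.qcow2"},
	})

	// The result is keyed by interface name ("virtio0"), with FileID taken from the plan.
	return populateFileIDs(mapStorageDevices(vmConfig), d)
}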

View File

@@ -41,19 +41,6 @@ const (
dvCPUType = "qemu64"
dvCPUUnits = 1024
dvDescription = ""
dvDiskInterface = "scsi0"
dvDiskDatastoreID = "local-lvm"
dvDiskFileFormat = "qcow2"
dvDiskFileID = ""
dvDiskSize = 8
dvDiskIOThread = false
dvDiskSSD = false
dvDiskDiscard = "ignore"
dvDiskCache = "none"
dvDiskSpeedRead = 0
dvDiskSpeedReadBurstable = 0
dvDiskSpeedWrite = 0
dvDiskSpeedWriteBurstable = 0
dvEFIDiskDatastoreID = "local-lvm"
dvEFIDiskFileFormat = "qcow2"
dvEFIDiskType = "2m"
@@ -161,22 +148,6 @@ const (
mkCPUType = "type"
mkCPUUnits = "units"
mkDescription = "description"
mkDisk = "disk"
mkDiskInterface = "interface"
mkDiskDatastoreID = "datastore_id"
mkDiskPathInDatastore = "path_in_datastore"
mkDiskFileFormat = "file_format"
mkDiskFileID = "file_id"
mkDiskSize = "size"
mkDiskIOThread = "iothread"
mkDiskSSD = "ssd"
mkDiskDiscard = "discard"
mkDiskCache = "cache"
mkDiskSpeed = "speed"
mkDiskSpeedRead = "read"
mkDiskSpeedReadBurstable = "read_burstable"
mkDiskSpeedWrite = "write"
mkDiskSpeedWriteBurstable = "write_burstable"
mkEFIDisk = "efi_disk"
mkEFIDiskDatastoreID = "datastore_id"
mkEFIDiskFileFormat = "file_format"

View File

@@ -594,9 +594,7 @@ func vmCreateClone(ctx context.Context, d *schema.ResourceData, m interface{}) d
return diag.FromErr(e)
}
// from the cloned VM
// from the resource config
allDiskInfo, err := updateDisk1(ctx, vmConfig, d, vmAPI)
allDiskInfo, err := createDisks(ctx, vmConfig, d, vmAPI)
if err != nil {
return diag.FromErr(err)
}
@@ -786,7 +784,7 @@ func vmCreateCustom(ctx context.Context, d *schema.ResourceData, m interface{})
description := d.Get(mkDescription).(string)
diskDeviceObjects, err := vmGetDiskDeviceObjects(d, nil)
diskDeviceObjects, err := getStorageDevicesFromResource(d)
if err != nil {
return diag.FromErr(err)
}
@@ -3825,7 +3823,7 @@ func vmUpdateDiskLocationAndSize(
if d.HasChange(mkDisk) {
diskOld, diskNew := d.GetChange(mkDisk)
diskOldEntries, err := vmGetDiskDeviceObjects(
diskOldEntries, err := getDiskDeviceObjects1(
d,
diskOld.([]interface{}),
)
@@ -3833,7 +3831,7 @@ func vmUpdateDiskLocationAndSize(
return diag.FromErr(err)
}
diskNewEntries, err := vmGetDiskDeviceObjects(
diskNewEntries, err := getDiskDeviceObjects1(
d,
diskNew.([]interface{}),
)
@@ -4087,120 +4085,6 @@ func diskDigitPrefix(s string) string {
return s
}
func getDiskInfo(resp *vms.GetResponseData, d *schema.ResourceData) map[string]*vms.CustomStorageDevice {
currentDisk := d.Get(mkDisk)
currentDiskList := currentDisk.([]interface{})
currentDiskMap := map[string]map[string]interface{}{}
for _, v := range currentDiskList {
diskMap := v.(map[string]interface{})
diskInterface := diskMap[mkDiskInterface].(string)
currentDiskMap[diskInterface] = diskMap
}
storageDevices := map[string]*vms.CustomStorageDevice{}
storageDevices["ide0"] = resp.IDEDevice0
storageDevices["ide1"] = resp.IDEDevice1
storageDevices["ide2"] = resp.IDEDevice2
storageDevices["ide3"] = resp.IDEDevice3
storageDevices["sata0"] = resp.SATADevice0
storageDevices["sata1"] = resp.SATADevice1
storageDevices["sata2"] = resp.SATADevice2
storageDevices["sata3"] = resp.SATADevice3
storageDevices["sata4"] = resp.SATADevice4
storageDevices["sata5"] = resp.SATADevice5
storageDevices["scsi0"] = resp.SCSIDevice0
storageDevices["scsi1"] = resp.SCSIDevice1
storageDevices["scsi2"] = resp.SCSIDevice2
storageDevices["scsi3"] = resp.SCSIDevice3
storageDevices["scsi4"] = resp.SCSIDevice4
storageDevices["scsi5"] = resp.SCSIDevice5
storageDevices["scsi6"] = resp.SCSIDevice6
storageDevices["scsi7"] = resp.SCSIDevice7
storageDevices["scsi8"] = resp.SCSIDevice8
storageDevices["scsi9"] = resp.SCSIDevice9
storageDevices["scsi10"] = resp.SCSIDevice10
storageDevices["scsi11"] = resp.SCSIDevice11
storageDevices["scsi12"] = resp.SCSIDevice12
storageDevices["scsi13"] = resp.SCSIDevice13
storageDevices["virtio0"] = resp.VirtualIODevice0
storageDevices["virtio1"] = resp.VirtualIODevice1
storageDevices["virtio2"] = resp.VirtualIODevice2
storageDevices["virtio3"] = resp.VirtualIODevice3
storageDevices["virtio4"] = resp.VirtualIODevice4
storageDevices["virtio5"] = resp.VirtualIODevice5
storageDevices["virtio6"] = resp.VirtualIODevice6
storageDevices["virtio7"] = resp.VirtualIODevice7
storageDevices["virtio8"] = resp.VirtualIODevice8
storageDevices["virtio9"] = resp.VirtualIODevice9
storageDevices["virtio10"] = resp.VirtualIODevice10
storageDevices["virtio11"] = resp.VirtualIODevice11
storageDevices["virtio12"] = resp.VirtualIODevice12
storageDevices["virtio13"] = resp.VirtualIODevice13
storageDevices["virtio14"] = resp.VirtualIODevice14
storageDevices["virtio15"] = resp.VirtualIODevice15
for k, v := range storageDevices {
if v != nil {
if currentDiskMap[k] != nil {
if currentDiskMap[k][mkDiskFileID] != nil {
fileID := currentDiskMap[k][mkDiskFileID].(string)
v.FileID = &fileID
}
}
if v.Size == nil {
v.Size = new(types.DiskSize)
}
// defensive copy of the loop variable
iface := k
v.Interface = &iface
}
}
return storageDevices
}
// getDiskDatastores returns a list of the used datastores in a VM.
func getDiskDatastores(vm *vms.GetResponseData, d *schema.ResourceData) []string {
storageDevices := getDiskInfo(vm, d)
datastoresSet := map[string]int{}
for _, diskInfo := range storageDevices {
// Ignore empty storage devices and storage devices (like ide) which may not have any media mounted
if diskInfo == nil || diskInfo.FileVolume == "none" {
continue
}
fileIDParts := strings.Split(diskInfo.FileVolume, ":")
datastoresSet[fileIDParts[0]] = 1
}
if vm.EFIDisk != nil {
fileIDParts := strings.Split(vm.EFIDisk.FileVolume, ":")
datastoresSet[fileIDParts[0]] = 1
}
if vm.TPMState != nil {
fileIDParts := strings.Split(vm.TPMState.FileVolume, ":")
datastoresSet[fileIDParts[0]] = 1
}
datastores := []string{}
for datastore := range datastoresSet {
datastores = append(datastores, datastore)
}
return datastores
}
func getPCIInfo(resp *vms.GetResponseData, _ *schema.ResourceData) map[string]*vms.CustomPCIDevice {
pciDevices := map[string]*vms.CustomPCIDevice{}