Mirror of https://github.com/bpg/terraform-provider-proxmox.git (synced 2025-07-03 03:52:58 +00:00)
feat: add import support for a lot of resources (#390)
* add import support for a lot of resources
* fix lints
* set user_id after importing (cherry picked from commit c3d09ed00f6e1d7b0bb3ab01a1fc5c81510aa2e1)
* add tests, fix empty ID
* add import docs
* fix lint
Parent: feac6b0128
Commit: 4147ff6a29
@@ -43,3 +43,15 @@ resource "proxmox_virtual_environment_cluster_firewall" "example" {
 ## Attribute Reference
 
 There are no additional attributes available for this resource.
+
+## Important Notes
+
+Be careful not to use this resource multiple times for the same node.
+
+## Import
+
+Instances can be imported without an ID, but you still need to pass one, e.g.,
+
+```bash
+$ terraform import proxmox_virtual_environment_cluster_firewall.example example
+```
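Most of the simpler resources in this change (cluster firewall, security group, group, pool, role, user) are wired to the SDK's pass-through importer, which is why the documentation above says an ID must still be passed even where it is not meaningful: whatever value follows `terraform import` is stored verbatim as the resource ID. A minimal sketch of that pattern, using an illustrative resource and schema rather than the provider's actual code:

```go
package example

import (
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// passthroughResource is a hypothetical resource wired up like the simpler
// resources in this commit: the import ID supplied on the command line is
// written to state as-is by ImportStatePassthroughContext.
func passthroughResource() *schema.Resource {
	return &schema.Resource{
		Schema: map[string]*schema.Schema{
			"name": {Type: schema.TypeString, Required: true},
		},
		Importer: &schema.ResourceImporter{
			StateContext: schema.ImportStatePassthroughContext,
		},
	}
}
```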
@@ -86,3 +86,11 @@ resource "proxmox_virtual_environment_cluster_firewall_security_group" "webserve
 - `pos` - Position of the rule in the list.
 
 There are no attribute references available for this resource.
+
+## Import
+
+Instances can be imported using the `name`, e.g.,
+
+```bash
+$ terraform import proxmox_virtual_environment_cluster_firewall_security_group.webserver webserver
+```
@@ -181,3 +181,11 @@ output "ubuntu_container_public_key" {
 ## Attribute Reference
 
 There are no additional attributes available for this resource.
+
+## Import
+
+Instances can be imported using the `node_name` and the `vm_id`, e.g.,
+
+```bash
+$ terraform import proxmox_virtual_environment_container.ubuntu_container first-node/1234
+```
@@ -42,3 +42,11 @@ There are no additional attributes available for this resource.
 ## Important Notes
 
 Be careful not to use this resource multiple times for the same node.
+
+## Import
+
+Instances can be imported using the `node_name`, e.g.,
+
+```bash
+$ terraform import proxmox_virtual_environment_dns.first_node first-node
+```
@@ -70,7 +70,7 @@ EOF
 - `file_name` - (Optional) The file name to use instead of the source file
   name.
 - `insecure` - (Optional) Whether to skip the TLS verification step for
-  HTTPSsources (defaults to `false`).
+  HTTPS sources (defaults to `false`).
 - `path` - (Required) A path to a local file or a URL.
 - `source_raw` - (Optional) The raw source (conflicts with `source_file`).
   - `data` - (Required) The raw data.
@@ -93,3 +93,11 @@ file locally before uploading it.
 You must ensure that you have at least `Size-in-MB * 2 + 1` MB of storage space
 available (twice the size plus overhead because a multipart payload needs to be
 created as another temporary file).
+
+## Import
+
+Instances can be imported using the `node_name`, `datastore_id`, `content_type` and the `file_name`, e.g.,
+
+```bash
+$ terraform import proxmox_virtual_environment_file.cloud_config pve/local/snippets/example.cloud-config.yaml
+```
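As a quick illustration of the `Size-in-MB * 2 + 1` rule quoted in the hunk above (a standalone sketch, not provider code):

```go
package main

import "fmt"

// requiredSpaceMB applies the documented rule: uploading a file needs at
// least twice its size plus 1 MB of free space, because a multipart payload
// is staged as another temporary file before the upload.
func requiredSpaceMB(fileSizeMB int) int {
	return fileSizeMB*2 + 1
}

func main() {
	fmt.Println(requiredSpaceMB(100)) // a 100 MB file needs at least 201 MB free
}
```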
@@ -32,3 +32,11 @@ resource "proxmox_virtual_environment_group" "operations_team" {
 ## Attribute Reference
 
 - `members` - The group members as a list of `username@realm` entries
+
+## Import
+
+Instances can be imported using the `group_id`, e.g.,
+
+```bash
+$ terraform import proxmox_virtual_environment_group.operations_team operations-team
+```
@@ -42,3 +42,15 @@ resource "proxmox_virtual_environment_hosts" "first_node_host_entries" {
 - `entries` - The host entries (conversion of `addresses` and `hostnames` into
   objects).
 - `hostnames` - The hostnames associated with each of the IP addresses.
+
+## Important Notes
+
+Be careful not to use this resource multiple times for the same node.
+
+## Import
+
+Instances can be imported using the `node_name`, e.g.,
+
+```bash
+$ terraform import proxmox_virtual_environment_hosts.first_node_host_entries first-node
+```
@@ -33,3 +33,11 @@ resource "proxmox_virtual_environment_pool" "operations_pool" {
 - `node_name` - The node name.
 - `type` - The member type.
 - `vm_id` - The virtual machine identifier.
+
+## Import
+
+Instances can be imported using the `pool_id`, e.g.,
+
+```bash
+$ terraform import proxmox_virtual_environment_pool.operations_pool operations-pool
+```
@@ -31,3 +31,11 @@ resource "proxmox_virtual_environment_role" "operations_monitoring" {
 ## Attribute Reference
 
 There are no additional attributes available for this resource.
+
+## Import
+
+Instances can be imported using the `role_id`, e.g.,
+
+```bash
+$ terraform import proxmox_virtual_environment_role.operations_monitoring operations-monitoring
+```
@@ -29,3 +29,11 @@ resource "proxmox_virtual_environment_time" "first_node_time" {
 
 - `local_time` - The node's local time.
 - `utc_time` - The node's local time formatted as UTC.
+
+## Import
+
+Instances can be imported using the `node_name`, e.g.,
+
+```bash
+$ terraform import proxmox_virtual_environment_time.first_node_time first-node
+```
@@ -55,3 +55,11 @@ resource "proxmox_virtual_environment_role" "operations_monitoring" {
 ## Attribute Reference
 
 There are no additional attributes available for this resource.
+
+## Import
+
+Instances can be imported using the `user_id`, e.g.,
+
+```bash
+$ terraform import proxmox_virtual_environment_user.operations_automation operations-automation@pve
+```
@@ -462,3 +462,11 @@ to force the migration step to migrate all disks to a specific datastore on the
 target node. If you need certain disks to be on specific datastores, set
 the `datastore_id` argument of the disks in the `disks` block to move the disks
 to the correct datastore after the cloning and migrating succeeded.
+
+## Import
+
+Instances can be imported using the `node_name` and the `vm_id`, e.g.,
+
+```bash
+$ terraform import proxmox_virtual_environment_vm.ubuntu_vm first-node/4321
+```
@@ -107,6 +107,9 @@ func Firewall() *schema.Resource {
 		ReadContext:   selectFirewallAPI(firewallRead),
 		UpdateContext: selectFirewallAPI(firewallUpdate),
 		DeleteContext: selectFirewallAPI(firewallDelete),
+		Importer: &schema.ResourceImporter{
+			StateContext: schema.ImportStatePassthroughContext,
+		},
 	}
 }
 
@@ -50,6 +50,9 @@ func SecurityGroup() *schema.Resource {
 		ReadContext:   selectFirewallAPI(SecurityGroupRead),
 		UpdateContext: selectFirewallAPI(SecurityGroupUpdate),
 		DeleteContext: selectFirewallAPI(SecurityGroupDelete),
+		Importer: &schema.ResourceImporter{
+			StateContext: schema.ImportStatePassthroughContext,
+		},
 	}
 }
 
@@ -643,6 +643,22 @@ func Container() *schema.Resource {
 		ReadContext:   containerRead,
 		UpdateContext: containerUpdate,
 		DeleteContext: containerDelete,
+		Importer: &schema.ResourceImporter{
+			StateContext: func(ctx context.Context, d *schema.ResourceData, i interface{}) ([]*schema.ResourceData, error) {
+				node, id, err := parseImportIDWithNodeName(d.Id())
+				if err != nil {
+					return nil, err
+				}
+
+				d.SetId(id)
+				err = d.Set(mkResourceVirtualEnvironmentContainerNodeName, node)
+				if err != nil {
+					return nil, fmt.Errorf("failed setting state during import: %w", err)
+				}
+
+				return []*schema.ResourceData{d}, nil
+			},
+		},
 	}
 }
 
@@ -54,6 +54,20 @@ func DNS() *schema.Resource {
 		ReadContext:   dnsRead,
 		UpdateContext: dnsUpdate,
 		DeleteContext: dnsDelete,
+		Importer: &schema.ResourceImporter{
+			StateContext: func(ctx context.Context, d *schema.ResourceData, i interface{}) ([]*schema.ResourceData, error) {
+				nodeName := d.Id()
+
+				err := d.Set(mkResourceVirtualEnvironmentDNSNodeName, nodeName)
+				if err != nil {
+					return nil, fmt.Errorf("failed setting state during import: %w", err)
+				}
+
+				d.SetId(fmt.Sprintf("%s_dns", nodeName))
+
+				return []*schema.ResourceData{d}, nil
+			},
+		},
 	}
 }
 
@@ -193,9 +193,41 @@ func File() *schema.Resource {
 		CreateContext: fileCreate,
 		ReadContext:   fileRead,
 		DeleteContext: fileDelete,
+		Importer: &schema.ResourceImporter{
+			StateContext: func(ctx context.Context, d *schema.ResourceData, i interface{}) ([]*schema.ResourceData, error) {
+				node, datastore, volumeID, err := fileParseImportID(d.Id())
+				if err != nil {
+					return nil, err
+				}
+
+				d.SetId(volumeID)
+
+				err = d.Set(mkResourceVirtualEnvironmentFileNodeName, node)
+				if err != nil {
+					return nil, fmt.Errorf("failed setting state during import: %w", err)
+				}
+
+				err = d.Set(mkResourceVirtualEnvironmentFileDatastoreID, datastore)
+				if err != nil {
+					return nil, fmt.Errorf("failed setting state during import: %w", err)
+				}
+
+				return []*schema.ResourceData{d}, nil
+			},
+		},
 	}
 }
 
+func fileParseImportID(id string) (string, string, string, error) {
+	parts := strings.SplitN(id, "/", 4)
+
+	if len(parts) != 4 || parts[0] == "" || parts[1] == "" || parts[2] == "" || parts[3] == "" {
+		return "", "", "", fmt.Errorf("unexpected format of ID (%s), expected node/datastore_id/content_type/file_name", id)
+	}
+
+	return parts[0], parts[1], strings.Join(parts[2:], "/"), nil
+}
+
 func fileCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
 	var diags diag.Diagnostics
 
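For reference, this is how the new import ID format for files plays out on the documented example ID; the helper below simply mirrors `fileParseImportID` under an illustrative name and is not the provider's code:

```go
package main

import (
	"fmt"
	"strings"
)

// splitFileImportID mirrors fileParseImportID above: the ID is split on the
// first three slashes, and everything after the datastore (content type plus
// file name) is kept together as the volume ID.
func splitFileImportID(id string) (node, datastore, volumeID string, err error) {
	parts := strings.SplitN(id, "/", 4)
	if len(parts) != 4 || parts[0] == "" || parts[1] == "" || parts[2] == "" || parts[3] == "" {
		return "", "", "", fmt.Errorf("unexpected format of ID (%s), expected node/datastore_id/content_type/file_name", id)
	}
	return parts[0], parts[1], strings.Join(parts[2:], "/"), nil
}

func main() {
	node, datastore, volumeID, _ := splitFileImportID("pve/local/snippets/example.cloud-config.yaml")
	fmt.Println(node, datastore, volumeID) // pve local snippets/example.cloud-config.yaml
}
```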
@@ -10,6 +10,7 @@ import (
 	"testing"
 
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+	"github.com/stretchr/testify/require"
 
 	"github.com/bpg/terraform-provider-proxmox/proxmoxtf/test"
 )
@@ -98,3 +99,41 @@ func TestFileSchema(t *testing.T) {
 		mkResourceVirtualEnvironmentFileSourceRawResize: schema.TypeInt,
 	})
 }
+
+func Test_fileParseImportID(t *testing.T) {
+	t.Parallel()
+
+	tests := []struct {
+		name                string
+		value               string
+		valid               bool
+		expectedNodeName    string
+		expectedDatastoreID string
+		expectedVolumeID    string
+	}{
+		{"empty", "", false, "", "", ""},
+		{"missing slash", "invalid", false, "", "", ""},
+		{"missing parts", "invalid/invalid/invalid", false, "", "", ""},
+		{"valid", "node/datastore_id/content_type/file_name", true, "node", "datastore_id", "content_type/file_name"},
+	}
+
+	for _, tt := range tests {
+		tt := tt
+		t.Run(tt.name, func(t *testing.T) {
+			t.Parallel()
+			require := require.New(t)
+
+			nodeName, datastoreID, volumeID, err := fileParseImportID(tt.value)
+
+			if !tt.valid {
+				require.Error(err)
+				return
+			}
+
+			require.Nil(err)
+			require.Equal(tt.expectedNodeName, nodeName)
+			require.Equal(tt.expectedDatastoreID, datastoreID)
+			require.Equal(tt.expectedVolumeID, volumeID)
+		})
+	}
+}
@@ -85,6 +85,9 @@ func Group() *schema.Resource {
 		ReadContext:   groupRead,
 		UpdateContext: groupUpdate,
 		DeleteContext: groupDelete,
+		Importer: &schema.ResourceImporter{
+			StateContext: schema.ImportStatePassthroughContext,
+		},
 	}
 }
 
@@ -107,6 +107,20 @@ func Hosts() *schema.Resource {
 		ReadContext:   hostsRead,
 		UpdateContext: hostsUpdate,
 		DeleteContext: hostsDelete,
+		Importer: &schema.ResourceImporter{
+			StateContext: func(ctx context.Context, d *schema.ResourceData, i interface{}) ([]*schema.ResourceData, error) {
+				nodeName := d.Id()
+
+				err := d.Set(mkResourceVirtualEnvironmentHostsNodeName, nodeName)
+				if err != nil {
+					return nil, fmt.Errorf("failed setting state during import: %w", err)
+				}
+
+				d.SetId(fmt.Sprintf("%s_hosts", nodeName))
+
+				return []*schema.ResourceData{d}, nil
+			},
+		},
 	}
 }
 
@@ -85,6 +85,9 @@ func Pool() *schema.Resource {
 		ReadContext:   poolRead,
 		UpdateContext: poolUpdate,
 		DeleteContext: poolDelete,
+		Importer: &schema.ResourceImporter{
+			StateContext: schema.ImportStatePassthroughContext,
+		},
 	}
 }
 
@@ -44,6 +44,9 @@ func Role() *schema.Resource {
 		ReadContext:   roleRead,
 		UpdateContext: roleUpdate,
 		DeleteContext: roleDelete,
+		Importer: &schema.ResourceImporter{
+			StateContext: schema.ImportStatePassthroughContext,
+		},
 	}
 }
 
@@ -54,6 +54,20 @@ func Time() *schema.Resource {
 		ReadContext:   timeRead,
 		UpdateContext: timeUpdate,
 		DeleteContext: timeDelete,
+		Importer: &schema.ResourceImporter{
+			StateContext: func(ctx context.Context, d *schema.ResourceData, i interface{}) ([]*schema.ResourceData, error) {
+				nodeName := d.Id()
+
+				err := d.Set(mkResourceVirtualEnvironmentTimeNodeName, nodeName)
+				if err != nil {
+					return nil, fmt.Errorf("failed setting state during import: %w", err)
+				}
+
+				d.SetId(fmt.Sprintf("%s_time", nodeName))
+
+				return []*schema.ResourceData{d}, nil
+			},
+		},
 	}
 }
 
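The dns, hosts, and time importers above repeat the same logic: the import ID is the node name, which is stored into a node-name attribute, and a synthetic resource ID (`<node>_dns`, `<node>_hosts`, `<node>_time`) is derived from it. A sketch of a helper they could share; the name and signature are hypothetical, since the commit inlines this logic in each resource:

```go
package example

import (
	"context"
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// nodeNameImporter builds an importer for per-node singleton resources: the
// import ID is treated as the node name, written to the given attribute, and
// the resource ID becomes "<node>_<suffix>".
func nodeNameImporter(attr, suffix string) *schema.ResourceImporter {
	return &schema.ResourceImporter{
		StateContext: func(_ context.Context, d *schema.ResourceData, _ interface{}) ([]*schema.ResourceData, error) {
			nodeName := d.Id()

			if err := d.Set(attr, nodeName); err != nil {
				return nil, fmt.Errorf("failed setting state during import: %w", err)
			}

			d.SetId(fmt.Sprintf("%s_%s", nodeName, suffix))

			return []*schema.ResourceData{d}, nil
		},
	}
}
```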
@@ -144,6 +144,9 @@ func User() *schema.Resource {
 		ReadContext:   userRead,
 		UpdateContext: userUpdate,
 		DeleteContext: userDelete,
+		Importer: &schema.ResourceImporter{
+			StateContext: schema.ImportStatePassthroughContext,
+		},
 	}
 }
 
@@ -248,6 +251,11 @@ func userRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.D
 		return diag.FromErr(err)
 	}
 
+	var diags diag.Diagnostics
+
+	err = d.Set(mkResourceVirtualEnvironmentUserUserID, userID)
+	diags = append(diags, diag.FromErr(err)...)
+
 	var aclParsed []interface{}
 
 	for _, v := range acl {
@@ -268,8 +276,6 @@ func userRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.D
 		}
 	}
 
-	var diags diag.Diagnostics
-
 	err = d.Set(mkResourceVirtualEnvironmentUserACL, aclParsed)
 	diags = append(diags, diag.FromErr(err)...)
 
@@ -7,7 +7,6 @@
 package resource
 
 import (
-	"errors"
 	"fmt"
 	"reflect"
 	"regexp"
@@ -16,9 +15,6 @@ import (
 	"time"
 	"unicode"
 
-	"github.com/hashicorp/go-multierror"
-	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
-
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
 
@@ -561,22 +557,12 @@ func getCloudInitTypeValidator() schema.SchemaValidateDiagFunc {
 	}, false))
 }
 
-type ErrorDiags diag.Diagnostics
-
-func (diags ErrorDiags) Errors() []error {
-	var es []error
-	for i := range diags {
-		if diags[i].Severity == diag.Error {
-			s := fmt.Sprintf("Error: %s", diags[i].Summary)
-			if diags[i].Detail != "" {
-				s = fmt.Sprintf("%s: %s", s, diags[i].Detail)
-			}
-			es = append(es, errors.New(s))
-		}
-	}
-	return es
-}
-
-func (diags ErrorDiags) Error() string {
-	return multierror.ListFormatFunc(diags.Errors())
-}
+func parseImportIDWithNodeName(id string) (string, string, error) {
+	nodeName, id, found := strings.Cut(id, "/")
+
+	if !found {
+		return "", "", fmt.Errorf("unexpected format of ID (%s), expected node/id", id)
+	}
+
+	return nodeName, id, nil
+}
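To make the new helper's behavior concrete, here is how the `node/id` import IDs used by the container and VM resources split; this is a standalone sketch with an illustrative function name, not the provider's code:

```go
package main

import (
	"fmt"
	"strings"
)

// splitNodeImportID mirrors parseImportIDWithNodeName above: everything before
// the first slash is the node name, and the remainder becomes the resource ID.
func splitNodeImportID(id string) (string, string, error) {
	nodeName, rest, found := strings.Cut(id, "/")
	if !found {
		return "", "", fmt.Errorf("unexpected format of ID (%s), expected node/id", id)
	}
	return nodeName, rest, nil
}

func main() {
	node, vmID, _ := splitNodeImportID("first-node/4321")
	fmt.Println(node, vmID) // first-node 4321
}
```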
@@ -8,6 +8,8 @@ package resource
 
 import (
 	"testing"
+
+	"github.com/stretchr/testify/require"
 )
 
 func Test_getCPUTypeValidator(t *testing.T) {
@@ -29,11 +31,51 @@ func Test_getCPUTypeValidator(t *testing.T) {
 		tt := tt
 		t.Run(tt.name, func(t *testing.T) {
 			t.Parallel()
+			require := require.New(t)
+
 			f := getCPUTypeValidator()
 			res := f(tt.value, nil)
-			if !res.HasError() != tt.valid {
-				t.Errorf("validate: '%s', want %v got %v", tt.value, tt.valid, res)
+			if tt.valid {
+				require.Empty(res, "validate: '%s'", tt.value)
+			} else {
+				require.NotEmpty(res, "validate: '%s'", tt.value)
 			}
 		})
 	}
 }
+
+func Test_parseImportIDWIthNodeName(t *testing.T) {
+	t.Parallel()
+
+	tests := []struct {
+		name             string
+		value            string
+		valid            bool
+		expectedNodeName string
+		expectedID       string
+	}{
+		{"empty", "", false, "", ""},
+		{"missing slash", "invalid", false, "", ""},
+		{"valid", "host/id", true, "host", "id"},
+	}
+
+	for _, tt := range tests {
+		tt := tt
+		t.Run(tt.name, func(t *testing.T) {
+			t.Parallel()
+			require := require.New(t)
+
+			nodeName, id, err := parseImportIDWithNodeName(tt.value)
+
+			if !tt.valid {
+				require.Error(err)
+				return
+			}
+
+			require.Nil(err)
+			require.Equal(tt.expectedNodeName, nodeName)
+			require.Equal(tt.expectedID, id)
+		})
+	}
+}
@@ -1344,6 +1344,22 @@ func VM() *schema.Resource {
 				},
 			),
 		),
+		Importer: &schema.ResourceImporter{
+			StateContext: func(ctx context.Context, d *schema.ResourceData, i interface{}) ([]*schema.ResourceData, error) {
+				node, id, err := parseImportIDWithNodeName(d.Id())
+				if err != nil {
+					return nil, err
+				}
+
+				d.SetId(id)
+				err = d.Set(mkResourceVirtualEnvironmentVMNodeName, node)
+				if err != nil {
+					return nil, fmt.Errorf("failed setting state during import: %w", err)
+				}
+
+				return []*schema.ResourceData{d}, nil
+			},
+		},
 	}
 }
 