mirror of https://github.com/bpg/terraform-provider-proxmox.git

chore: remove unused code (#1236)

Signed-off-by: Pavel Boldyrev <627562+bpg@users.noreply.github.com>
Pavel Boldyrev 2024-04-24 20:52:41 -04:00 committed by GitHub
parent dd7bcc700d
commit efd0d7b466
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
20 changed files with 83 additions and 271 deletions

View File

@ -14,7 +14,7 @@ jobs:
pull-requests: write
checks: write
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
ref: ${{ github.event.pull_request.head.sha }} # to check out the actual pull request commit, not the merge commit
fetch-depth: 0 # a full history is required for pull request analysis

View File

@ -15,7 +15,7 @@ This repository is a fork of <https://github.com/danitso/terraform-provider-prox
## Compatibility promise
This provider is compatible with the latest version of Proxmox VE (currently 8.1).
This provider is compatible with the latest version of Proxmox VE (currently 8.2).
While it may work with older 7.x versions, it is not guaranteed to do so.
While provider is on version 0.x, it is not guaranteed to be backwards compatible with all previous minor versions.

View File

@ -518,7 +518,7 @@ func (r *apiResolver) Resolve(ctx context.Context, nodeName string) (ssh.Proxmox
tflog.Debug(ctx, fmt.Sprintf("Attempting a DNS lookup of node %q.", nc.NodeName))
ips, err := net.LookupIP(nodeName)
if err != nil {
if err == nil {
for _, ip := range ips {
if ipv4 := ip.To4(); ipv4 != nil {
nodeAddress = ipv4.String()
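
Note: the hunk above flips the guard from 'if err != nil' to 'if err == nil', so the address loop only runs when the DNS lookup actually succeeded. Below is a minimal, self-contained sketch of that corrected fallback pattern; the helper name and example host are assumptions for illustration, not code from the provider.

package main

import (
	"fmt"
	"net"
)

// resolveNodeAddress is an illustrative helper (not part of the provider).
// It mirrors the corrected guard above: iterate the returned addresses only
// when the DNS lookup succeeded, and prefer the first IPv4 result; otherwise
// keep the previously known address.
func resolveNodeAddress(nodeName, fallback string) string {
	nodeAddress := fallback

	ips, err := net.LookupIP(nodeName)
	if err == nil {
		for _, ip := range ips {
			if ipv4 := ip.To4(); ipv4 != nil {
				nodeAddress = ipv4.String()
				break
			}
		}
	}

	return nodeAddress
}

func main() {
	// Example host and fallback address are made up for the sketch.
	fmt.Println(resolveNodeAddress("pve-node-1.example.com", "10.0.0.10"))
}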

View File

@ -19,8 +19,7 @@ import (
)
const (
accTestContainerName = "proxmox_virtual_environment_container.test_container"
accTestContainerCloneName = "proxmox_virtual_environment_container.test_container_clone"
accTestContainerName = "proxmox_virtual_environment_container.test_container"
)
//nolint:gochecknoglobals

View File

@ -39,12 +39,3 @@ func CopyInt(i *int) *int {
return IntPtr(*i)
}
// Int64PtrToIntPtr converts an int64 pointer to an int pointer.
func Int64PtrToIntPtr(i *int64) *int {
if i == nil {
return nil
}
return IntPtr(int(*i))
}
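
Note: Int64PtrToIntPtr was a one-off pointer converter with no remaining callers. Purely for illustration (not part of this commit or the repository), a single generic helper can cover such conversions, which is one reason dedicated converters like this tend to end up as dead code.

package main

import "fmt"

// ConvertPtr is an illustrative generic helper (requires Go 1.18+), not code
// from the provider. It nil-checks once and applies conv to the pointed-to
// value, covering cases like int64 -> int without a dedicated function.
func ConvertPtr[A, B any](p *A, conv func(A) B) *B {
	if p == nil {
		return nil
	}

	v := conv(*p)

	return &v
}

func main() {
	i64 := int64(42)
	ip := ConvertPtr(&i64, func(v int64) int { return int(v) })
	fmt.Println(*ip) // 42
}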

View File

@ -1,65 +0,0 @@
/*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/.
*/
package cluster
import (
"context"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
fw "github.com/bpg/terraform-provider-proxmox/proxmox/firewall"
"github.com/bpg/terraform-provider-proxmox/proxmoxtf"
"github.com/bpg/terraform-provider-proxmox/proxmoxtf/datasource/firewall"
)
// FirewallAlias returns a resource that represents a single firewall alias.
func FirewallAlias() *schema.Resource {
return &schema.Resource{
Schema: firewall.AliasSchema(),
ReadContext: invokeFirewallAPI(firewall.AliasRead),
}
}
// FirewallAliases returns a resource that represents firewall aliases.
func FirewallAliases() *schema.Resource {
return &schema.Resource{
Schema: firewall.AliasesSchema(),
ReadContext: invokeFirewallAPI(firewall.AliasesRead),
}
}
// FirewallIPSet returns a resource that represents a single firewall IP set.
func FirewallIPSet() *schema.Resource {
return &schema.Resource{
Schema: firewall.IPSetSchema(),
ReadContext: invokeFirewallAPI(firewall.IPSetRead),
}
}
// FirewallIPSets returns a resource that represents firewall IP sets.
func FirewallIPSets() *schema.Resource {
return &schema.Resource{
Schema: firewall.IPSetsSchema(),
ReadContext: invokeFirewallAPI(firewall.IPSetsRead),
}
}
func invokeFirewallAPI(
f func(context.Context, fw.API, *schema.ResourceData) diag.Diagnostics,
) func(context.Context, *schema.ResourceData, interface{}) diag.Diagnostics {
return func(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
config := m.(proxmoxtf.ProviderConfiguration)
api, err := config.GetClient()
if err != nil {
return diag.FromErr(err)
}
return f(ctx, api.Cluster().Firewall(), d)
}
}
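
Note: an SDKv2 data source is only reachable when its constructor is registered in the provider's DataSourcesMap; since this commit drops these constructors as unused, presumably nothing registered them. The sketch below is illustrative only — the map key and provider wiring are assumptions, not code from the repository.

package cluster

import (
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// exampleProvider is illustrative only: it shows the kind of registration a
// data source constructor such as FirewallAlias (from the deleted file above)
// would have needed in order to be used. The map key is an assumed name.
func exampleProvider() *schema.Provider {
	return &schema.Provider{
		DataSourcesMap: map[string]*schema.Resource{
			"proxmox_virtual_environment_cluster_firewall_alias": FirewallAlias(),
		},
	}
}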

View File

@ -7,12 +7,7 @@
package firewall
import (
"context"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/bpg/terraform-provider-proxmox/proxmox/cluster/firewall"
)
const (
@ -42,81 +37,3 @@ func SecurityGroupSchema() map[string]*schema.Schema {
},
}
}
// SecurityGroupRead reads the security group.
func SecurityGroupRead(ctx context.Context, api firewall.SecurityGroup, d *schema.ResourceData) diag.Diagnostics {
var diags diag.Diagnostics
name := d.Get(mkSecurityGroupName).(string)
allGroups, err := api.ListGroups(ctx)
if err != nil {
return diag.FromErr(err)
}
for _, v := range allGroups {
if v.Group == name {
err = d.Set(mkSecurityGroupName, v.Group)
diags = append(diags, diag.FromErr(err)...)
err = d.Set(mkSecurityGroupComment, v.Comment)
diags = append(diags, diag.FromErr(err)...)
break
}
}
// rules := d.Get(mkRules).([]interface{})
// ruleIDs, err := fw.ListGroupRules(ctx, name)
// if err != nil {
// if strings.Contains(err.Error(), "no such security group") {
// d.SetId("")
// return nil
// }
// return diag.FromErr(err)
// }
// for _, id := range ruleIDs {
// ruleMap := map[string]interface{}{}
// err = readGroupRule(ctx, fw, name, id.Pos, ruleMap)
// if err != nil {
// diags = append(diags, diag.FromErr(err)...)
// } else {
// rules = append(rules, ruleMap)
// }
// }
// if diags.HasError() {
// return diags
// }
// err = d.Set(mkRules, rules)
// diags = append(diags, diag.FromErr(err)...)
d.SetId(name)
return diags
}
// func readGroupRule(
// ctx context.Context,
// fw firewall.API,
// group string,
// pos int,
// ruleMap map[string]interface{},
// ) error {
// rule, err := fw.GetGroupRule(ctx, group, pos)
// if err != nil {
// if strings.Contains(err.Error(), "no such security group") {
// return nil
// }
// return fmt.Errorf("error reading rule %d for group %s: %w", pos, group, err)
// }
//
// baseRuleToMap(&rule.BaseRule, ruleMap)
//
// // pos in the map should be int!
// ruleMap[mkRulePos] = pos
// ruleMap[mkRuleAction] = rule.Action
// ruleMap[mkRuleType] = rule.Type
//
// return nil
// }

View File

@ -7,13 +7,7 @@
package firewall
import (
"context"
"github.com/google/uuid"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/bpg/terraform-provider-proxmox/proxmox/cluster/firewall"
)
const (
@ -31,23 +25,3 @@ func SecurityGroupsSchema() map[string]*schema.Schema {
},
}
}
// SecurityGroupsRead reads the security groups.
func SecurityGroupsRead(ctx context.Context, api firewall.SecurityGroup, d *schema.ResourceData) diag.Diagnostics {
groups, err := api.ListGroups(ctx)
if err != nil {
return diag.FromErr(err)
}
groupNames := make([]interface{}, len(groups))
for i, v := range groups {
groupNames[i] = v.Group
}
d.SetId(uuid.New().String())
err = d.Set(mkSecurityGroupsSecurityGroupNames, groupNames)
return diag.FromErr(err)
}

View File

@ -9,9 +9,10 @@ package datasource
import (
"context"
"github.com/bpg/terraform-provider-proxmox/proxmoxtf"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/bpg/terraform-provider-proxmox/proxmoxtf"
)
const (

View File

@ -15,7 +15,6 @@ import (
)
const (
dvProviderOTP = ""
mkProviderEndpoint = "endpoint"
mkProviderInsecure = "insecure"
mkProviderMinTLS = "min_tls"

View File

@ -934,7 +934,7 @@ func containerCreateClone(ctx context.Context, d *schema.ResourceData, m interfa
config := m.(proxmoxtf.ProviderConfiguration)
api, err := config.GetClient()
client, err := config.GetClient()
if err != nil {
return diag.FromErr(err)
}
@ -961,7 +961,7 @@ func containerCreateClone(ctx context.Context, d *schema.ResourceData, m interfa
vmID := d.Get(mkVMID).(int)
if vmID == -1 {
vmIDNew, e := api.Cluster().GetVMID(ctx)
vmIDNew, e := client.Cluster().GetVMID(ctx)
if e != nil {
return diag.FromErr(e)
}
@ -995,9 +995,9 @@ func containerCreateClone(ctx context.Context, d *schema.ResourceData, m interfa
if cloneNodeName != "" && cloneNodeName != nodeName {
cloneBody.TargetNodeName = &nodeName
err = api.Node(cloneNodeName).Container(cloneVMID).CloneContainer(ctx, cloneBody)
err = client.Node(cloneNodeName).Container(cloneVMID).CloneContainer(ctx, cloneBody)
} else {
err = api.Node(nodeName).Container(cloneVMID).CloneContainer(ctx, cloneBody)
err = client.Node(nodeName).Container(cloneVMID).CloneContainer(ctx, cloneBody)
}
if err != nil {
@ -1006,7 +1006,7 @@ func containerCreateClone(ctx context.Context, d *schema.ResourceData, m interfa
d.SetId(strconv.Itoa(vmID))
containerAPI := api.Node(nodeName).Container(vmID)
containerAPI := client.Node(nodeName).Container(vmID)
// Wait for the container to be created and its configuration lock to be released.
err = containerAPI.WaitForContainerConfigUnlock(ctx, true)
@ -1312,16 +1312,16 @@ func containerCreateCustom(ctx context.Context, d *schema.ResourceData, m interf
config := m.(proxmoxtf.ProviderConfiguration)
api, err := config.GetClient()
client, err := config.GetClient()
if err != nil {
return diag.FromErr(err)
}
nodeName := d.Get(mkNodeName).(string)
resource := Container()
container := Container()
consoleBlock, err := structure.GetSchemaBlock(
resource,
container,
d,
[]string{mkConsole},
0,
@ -1338,7 +1338,7 @@ func containerCreateCustom(ctx context.Context, d *schema.ResourceData, m interf
consoleTTYCount := consoleBlock[mkConsoleTTYCount].(int)
cpuBlock, err := structure.GetSchemaBlock(
resource,
container,
d,
[]string{mkCPU},
0,
@ -1355,7 +1355,7 @@ func containerCreateCustom(ctx context.Context, d *schema.ResourceData, m interf
description := d.Get(mkDescription).(string)
diskBlock, err := structure.GetSchemaBlock(
resource,
container,
d,
[]string{mkDisk},
0,
@ -1378,7 +1378,7 @@ func containerCreateCustom(ctx context.Context, d *schema.ResourceData, m interf
}
}
features, err := containerGetFeatures(resource, d)
features, err := containerGetFeatures(container, d)
if err != nil {
return diag.FromErr(err)
}
@ -1489,7 +1489,7 @@ func containerCreateCustom(ctx context.Context, d *schema.ResourceData, m interf
}
memoryBlock, err := structure.GetSchemaBlock(
resource,
container,
d,
[]string{mkMemory},
0,
@ -1664,7 +1664,7 @@ func containerCreateCustom(ctx context.Context, d *schema.ResourceData, m interf
vmID := d.Get(mkVMID).(int)
if vmID == -1 {
vmIDNew, e := api.Cluster().GetVMID(ctx)
vmIDNew, e := client.Cluster().GetVMID(ctx)
if e != nil {
return diag.FromErr(e)
}
@ -1672,7 +1672,7 @@ func containerCreateCustom(ctx context.Context, d *schema.ResourceData, m interf
vmID = *vmIDNew
}
// Attempt to create the resource using the retrieved values.
// Attempt to create the container using the retrieved values.
createBody := containers.CreateRequestBody{
ConsoleEnabled: &consoleEnabled,
ConsoleMode: &consoleMode,
@ -1734,7 +1734,7 @@ func containerCreateCustom(ctx context.Context, d *schema.ResourceData, m interf
createBody.Tags = &tagsString
}
err = api.Node(nodeName).Container(0).CreateContainer(ctx, &createBody)
err = client.Node(nodeName).Container(0).CreateContainer(ctx, &createBody)
if err != nil {
return diag.FromErr(err)
}
@ -1742,7 +1742,7 @@ func containerCreateCustom(ctx context.Context, d *schema.ResourceData, m interf
d.SetId(strconv.Itoa(vmID))
// Wait for the container's lock to be released.
err = api.Node(nodeName).Container(vmID).WaitForContainerConfigUnlock(ctx, true)
err = client.Node(nodeName).Container(vmID).WaitForContainerConfigUnlock(ctx, true)
if err != nil {
return diag.FromErr(err)
}
@ -1760,7 +1760,7 @@ func containerCreateStart(ctx context.Context, d *schema.ResourceData, m interfa
config := m.(proxmoxtf.ProviderConfiguration)
api, err := config.GetClient()
client, err := config.GetClient()
if err != nil {
return diag.FromErr(err)
}
@ -1772,7 +1772,7 @@ func containerCreateStart(ctx context.Context, d *schema.ResourceData, m interfa
return diag.FromErr(err)
}
containerAPI := api.Node(nodeName).Container(vmID)
containerAPI := client.Node(nodeName).Container(vmID)
// Start the container and wait for it to reach a running state before continuing.
err = containerAPI.StartContainer(ctx)
@ -2580,7 +2580,7 @@ func containerUpdate(ctx context.Context, d *schema.ResourceData, m interface{})
config := m.(proxmoxtf.ProviderConfiguration)
api, e := config.GetClient()
client, e := config.GetClient()
if e != nil {
return diag.FromErr(e)
}
@ -2592,7 +2592,7 @@ func containerUpdate(ctx context.Context, d *schema.ResourceData, m interface{})
return diag.FromErr(e)
}
containerAPI := api.Node(nodeName).Container(vmID)
containerAPI := client.Node(nodeName).Container(vmID)
// Prepare the new request object.
updateBody := containers.UpdateRequestBody{
@ -2600,7 +2600,7 @@ func containerUpdate(ctx context.Context, d *schema.ResourceData, m interface{})
}
rebootRequired := false
resource := Container()
container := Container()
// Retrieve the clone argument as the update logic varies for clones.
clone := d.Get(mkClone).([]interface{})
@ -2618,7 +2618,7 @@ func containerUpdate(ctx context.Context, d *schema.ResourceData, m interface{})
// Prepare the new console configuration.
if d.HasChange(mkConsole) {
consoleBlock, err := structure.GetSchemaBlock(
resource,
container,
d,
[]string{mkConsole},
0,
@ -2644,7 +2644,7 @@ func containerUpdate(ctx context.Context, d *schema.ResourceData, m interface{})
// Prepare the new CPU configuration.
if d.HasChange(mkCPU) {
cpuBlock, err := structure.GetSchemaBlock(
resource,
container,
d,
[]string{mkCPU},
0,
@ -2666,7 +2666,7 @@ func containerUpdate(ctx context.Context, d *schema.ResourceData, m interface{})
}
if d.HasChange(mkFeatures) {
features, err := containerGetFeatures(resource, d)
features, err := containerGetFeatures(container, d)
if err != nil {
return diag.FromErr(err)
}
@ -2775,7 +2775,7 @@ func containerUpdate(ctx context.Context, d *schema.ResourceData, m interface{})
// Prepare the new memory configuration.
if d.HasChange(mkMemory) {
memoryBlock, err := structure.GetSchemaBlock(
resource,
container,
d,
[]string{mkMemory},
0,
@ -2946,7 +2946,7 @@ func containerUpdate(ctx context.Context, d *schema.ResourceData, m interface{})
// Prepare the new operating system configuration.
if d.HasChange(mkOperatingSystem) {
operatingSystem, err := structure.GetSchemaBlock(
resource,
container,
d,
[]string{mkOperatingSystem},
0,

View File

@ -35,7 +35,6 @@ import (
)
const (
dvResourceVirtualEnvironmentFileContentType = ""
dvResourceVirtualEnvironmentFileSourceFileChanged = false
dvResourceVirtualEnvironmentFileSourceFileChecksum = ""
dvResourceVirtualEnvironmentFileSourceFileFileName = ""

View File

@ -94,7 +94,7 @@ func Group() *schema.Resource {
func groupCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
config := m.(proxmoxtf.ProviderConfiguration)
api, err := config.GetClient()
client, err := config.GetClient()
if err != nil {
return diag.FromErr(err)
}
@ -107,7 +107,7 @@ func groupCreate(ctx context.Context, d *schema.ResourceData, m interface{}) dia
ID: groupID,
}
err = api.Access().CreateGroup(ctx, body)
err = client.Access().CreateGroup(ctx, body)
if err != nil {
return diag.FromErr(err)
}
@ -131,7 +131,7 @@ func groupCreate(ctx context.Context, d *schema.ResourceData, m interface{}) dia
Roles: []string{aclEntry[mkResourceVirtualEnvironmentGroupACLRoleID].(string)},
}
err := api.Access().UpdateACL(ctx, aclBody)
err := client.Access().UpdateACL(ctx, aclBody)
if err != nil {
return diag.FromErr(err)
}
@ -203,7 +203,7 @@ func groupRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.
func groupUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
config := m.(proxmoxtf.ProviderConfiguration)
api, err := config.GetClient()
client, err := config.GetClient()
if err != nil {
return diag.FromErr(err)
}
@ -215,7 +215,7 @@ func groupUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) dia
Comment: &comment,
}
err = api.Access().UpdateGroup(ctx, groupID, body)
err = client.Access().UpdateGroup(ctx, groupID, body)
if err != nil {
return diag.FromErr(err)
}
@ -238,7 +238,7 @@ func groupUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) dia
Roles: []string{aclEntry[mkResourceVirtualEnvironmentGroupACLRoleID].(string)},
}
err := api.Access().UpdateACL(ctx, aclBody)
err := client.Access().UpdateACL(ctx, aclBody)
if err != nil {
return diag.FromErr(err)
}
@ -261,7 +261,7 @@ func groupUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) dia
Roles: []string{aclEntry[mkResourceVirtualEnvironmentGroupACLRoleID].(string)},
}
err := api.Access().UpdateACL(ctx, aclBody)
err := client.Access().UpdateACL(ctx, aclBody)
if err != nil {
return diag.FromErr(err)
}

View File

@ -103,7 +103,7 @@ func Pool() *schema.Resource {
func poolCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
config := m.(proxmoxtf.ProviderConfiguration)
api, err := config.GetClient()
client, err := config.GetClient()
if err != nil {
return diag.FromErr(err)
}
@ -116,7 +116,7 @@ func poolCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag
ID: poolID,
}
err = api.Pool().CreatePool(ctx, body)
err = client.Pool().CreatePool(ctx, body)
if err != nil {
return diag.FromErr(err)
}
@ -185,7 +185,7 @@ func poolRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.D
func poolUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
config := m.(proxmoxtf.ProviderConfiguration)
api, err := config.GetClient()
client, err := config.GetClient()
if err != nil {
return diag.FromErr(err)
}
@ -197,7 +197,7 @@ func poolUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag
Comment: &comment,
}
err = api.Pool().UpdatePool(ctx, poolID, body)
err = client.Pool().UpdatePool(ctx, poolID, body)
if err != nil {
return diag.FromErr(err)
}

View File

@ -53,7 +53,7 @@ func Role() *schema.Resource {
func roleCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
config := m.(proxmoxtf.ProviderConfiguration)
api, err := config.GetClient()
client, err := config.GetClient()
if err != nil {
return diag.FromErr(err)
}
@ -71,7 +71,7 @@ func roleCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag
Privileges: customPrivileges,
}
err = api.Access().CreateRole(ctx, body)
err = client.Access().CreateRole(ctx, body)
if err != nil {
return diag.FromErr(err)
}
@ -113,7 +113,7 @@ func roleRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.D
func roleUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
config := m.(proxmoxtf.ProviderConfiguration)
api, err := config.GetClient()
client, err := config.GetClient()
if err != nil {
return diag.FromErr(err)
}
@ -130,7 +130,7 @@ func roleUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag
Privileges: customPrivileges,
}
err = api.Access().UpdateRole(ctx, roleID, body)
err = client.Access().UpdateRole(ctx, roleID, body)
if err != nil {
return diag.FromErr(err)
}

View File

@ -154,7 +154,7 @@ func User() *schema.Resource {
func userCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
config := m.(proxmoxtf.ProviderConfiguration)
api, err := config.GetClient()
client, err := config.GetClient()
if err != nil {
return diag.FromErr(err)
}
@ -197,7 +197,7 @@ func userCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag
Password: password,
}
err = api.Access().CreateUser(ctx, body)
err = client.Access().CreateUser(ctx, body)
if err != nil {
return diag.FromErr(err)
}
@ -221,7 +221,7 @@ func userCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag
Users: []string{userID},
}
err := api.Access().UpdateACL(ctx, aclBody)
err := client.Access().UpdateACL(ctx, aclBody)
if err != nil {
return diag.FromErr(err)
}
@ -349,7 +349,7 @@ func userRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.D
func userUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
config := m.(proxmoxtf.ProviderConfiguration)
api, err := config.GetClient()
client, err := config.GetClient()
if err != nil {
return diag.FromErr(err)
}
@ -389,14 +389,14 @@ func userUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag
}
userID := d.Id()
err = api.Access().UpdateUser(ctx, userID, body)
err = client.Access().UpdateUser(ctx, userID, body)
if err != nil {
return diag.FromErr(err)
}
if d.HasChange(mkResourceVirtualEnvironmentUserPassword) {
password := d.Get(mkResourceVirtualEnvironmentUserPassword).(string)
err = api.Access().ChangeUserPassword(ctx, userID, password)
err = client.Access().ChangeUserPassword(ctx, userID, password)
if err != nil {
return diag.FromErr(err)
}
@ -420,7 +420,7 @@ func userUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag
Users: []string{userID},
}
err := api.Access().UpdateACL(ctx, aclBody)
err := client.Access().UpdateACL(ctx, aclBody)
if err != nil {
return diag.FromErr(err)
}
@ -443,7 +443,7 @@ func userUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag
Users: []string{userID},
}
err := api.Access().UpdateACL(ctx, aclBody)
err := client.Access().UpdateACL(ctx, aclBody)
if err != nil {
return diag.FromErr(err)
}

View File

@ -13,10 +13,7 @@ import (
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
)
var (
rateExpression = regexp.MustCompile(`[1-9][0-9]*/(second|minute|hour|day)`)
ifaceExpression = regexp.MustCompile(`net\d+`)
)
var rateExpression = regexp.MustCompile(`[1-9][0-9]*/(second|minute|hour|day)`)
// FirewallRate returns a schema validation function for a firewall rate.
func FirewallRate() schema.SchemaValidateDiagFunc {
@ -26,14 +23,6 @@ func FirewallRate() schema.SchemaValidateDiagFunc {
))
}
// FirewallIFace returns a schema validation function for a firewall iface.
func FirewallIFace() schema.SchemaValidateDiagFunc {
return validation.ToDiagFunc(validation.StringMatch(
ifaceExpression,
"Must be a valid VM/Container iface key, e.g. 'net0'",
))
}
// FirewallPolicy returns a schema validation function for a firewall policy.
func FirewallPolicy() schema.SchemaValidateDiagFunc {
return validation.ToDiagFunc(validation.StringInSlice(
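
Note: the unused FirewallIFace validator is removed while FirewallRate stays. Below is a sketch of how the remaining validator is typically attached to a schema attribute; the attribute layout and the package placement are assumptions for illustration, not code from the provider.

package validators // package name assumed for this sketch

import (
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// exampleRateLimitSchema is illustrative only. It attaches FirewallRate
// (defined above) as a ValidateDiagFunc, so values like "10/second" are
// accepted while "0/second" or "10/week" are rejected.
func exampleRateLimitSchema() *schema.Schema {
	return &schema.Schema{
		Type:             schema.TypeString,
		Optional:         true,
		Description:      "Rate limit for the rule, e.g. '10/second'",
		ValidateDiagFunc: FirewallRate(),
	}
}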

View File

@ -21,7 +21,6 @@ const (
dvNetworkDeviceModel = "virtio"
dvNetworkDeviceQueues = 0
dvNetworkDeviceRateLimit = 0
dvNetworkDeviceTrunks = ""
dvNetworkDeviceVLANID = 0
mkIPv4Addresses = "ipv4_addresses"

View File

@ -2335,7 +2335,7 @@ func vmCreateCustom(ctx context.Context, d *schema.ResourceData, m interface{})
config := m.(proxmoxtf.ProviderConfiguration)
api, err := config.GetClient()
client, err := config.GetClient()
if err != nil {
return diag.FromErr(err)
}
@ -2553,7 +2553,7 @@ func vmCreateCustom(ctx context.Context, d *schema.ResourceData, m interface{})
vmID := vmIDUntyped.(int)
if !hasVMID {
vmIDNew, e := api.Cluster().GetVMID(ctx)
vmIDNew, e := client.Cluster().GetVMID(ctx)
if e != nil {
return diag.FromErr(e)
}
@ -2685,7 +2685,7 @@ func vmCreateCustom(ctx context.Context, d *schema.ResourceData, m interface{})
}
// Only the root account is allowed to change the CPU architecture, which makes this check necessary.
if api.API().IsRootTicket() ||
if client.API().IsRootTicket() ||
cpuArchitecture != dvCPUArchitecture {
createBody.CPUArchitecture = &cpuArchitecture
}
@ -2740,7 +2740,7 @@ func vmCreateCustom(ctx context.Context, d *schema.ResourceData, m interface{})
createBody.HookScript = &hookScript
}
err = api.Node(nodeName).VM(0).CreateVM(ctx, createBody)
err = client.Node(nodeName).VM(0).CreateVM(ctx, createBody)
if err != nil {
return diag.FromErr(err)
}
@ -2766,7 +2766,7 @@ func vmCreateStart(ctx context.Context, d *schema.ResourceData, m interface{}) d
config := m.(proxmoxtf.ProviderConfiguration)
api, err := config.GetClient()
client, err := config.GetClient()
if err != nil {
return diag.FromErr(err)
}
@ -2778,7 +2778,7 @@ func vmCreateStart(ctx context.Context, d *schema.ResourceData, m interface{}) d
return diag.FromErr(err)
}
vmAPI := api.Node(nodeName).VM(vmID)
vmAPI := client.Node(nodeName).VM(vmID)
// Start the virtual machine and wait for it to reach a running state before continuing.
if diags := vmStart(ctx, vmAPI, d); diags != nil {
@ -3415,7 +3415,7 @@ func vmReadCustom(
) diag.Diagnostics {
config := m.(proxmoxtf.ProviderConfiguration)
api, e := config.GetClient()
client, e := config.GetClient()
if e != nil {
return diag.FromErr(e)
}
@ -3593,7 +3593,7 @@ func vmReadCustom(
} else {
// Default value of "arch" is "" according to the API documentation.
// However, assume the provider's default value as a workaround when the root account is not being used.
if !api.API().IsRootTicket() {
if !client.API().IsRootTicket() {
cpu[mkCPUArchitecture] = dvCPUArchitecture
} else {
cpu[mkCPUArchitecture] = ""
@ -3728,7 +3728,7 @@ func vmReadCustom(
allDiskInfo := disk.GetInfo(vmConfig, d)
diags = append(diags, disk.Read(ctx, d, allDiskInfo, vmID, api, nodeName, len(clone) > 0)...)
diags = append(diags, disk.Read(ctx, d, allDiskInfo, vmID, client, nodeName, len(clone) > 0)...)
if vmConfig.EFIDisk != nil {
efiDisk := map[string]interface{}{}
@ -3742,7 +3742,7 @@ func vmReadCustom(
} else {
// disk format may not be returned by config API if it is default for the storage, and that may be different
// from the default qcow2, so we need to read it from the storage API to make sure we have the correct value
volume, err := api.Node(nodeName).Storage(fileIDParts[0]).GetDatastoreFile(ctx, vmConfig.EFIDisk.FileVolume)
volume, err := client.Node(nodeName).Storage(fileIDParts[0]).GetDatastoreFile(ctx, vmConfig.EFIDisk.FileVolume)
if err != nil {
diags = append(diags, diag.FromErr(e)...)
} else {
@ -4467,7 +4467,7 @@ func vmReadCustom(
}
}
vmAPI := api.Node(nodeName).VM(vmID)
vmAPI := client.Node(nodeName).VM(vmID)
started := d.Get(mkStarted).(bool)
agentTimeout, e := getAgentTimeout(d)
@ -4713,7 +4713,7 @@ func vmUpdatePool(
func vmUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
config := m.(proxmoxtf.ProviderConfiguration)
api, e := config.GetClient()
client, e := config.GetClient()
if e != nil {
return diag.FromErr(e)
}
@ -4726,7 +4726,7 @@ func vmUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.D
return diag.FromErr(e)
}
e = vmUpdatePool(ctx, d, api.Pool(), vmID)
e = vmUpdatePool(ctx, d, client.Pool(), vmID)
if e != nil {
return diag.FromErr(e)
}
@ -4740,7 +4740,7 @@ func vmUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.D
oldNodeNameValue, _ := d.GetChange(mkNodeName)
oldNodeName := oldNodeNameValue.(string)
vmAPI := api.Node(oldNodeName).VM(vmID)
vmAPI := client.Node(oldNodeName).VM(vmID)
trueValue := types.CustomBool(true)
migrateBody := &vms.MigrateRequestBody{
@ -4755,7 +4755,7 @@ func vmUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.D
}
}
vmAPI := api.Node(nodeName).VM(vmID)
vmAPI := client.Node(nodeName).VM(vmID)
updateBody := &vms.UpdateRequestBody{
IDEDevices: vms.CustomStorageDevices{
@ -4994,7 +4994,7 @@ func vmUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.D
cpuAffinity := cpuBlock[mkCPUAffinity].(string)
// Only the root account is allowed to change the CPU architecture, which makes this check necessary.
if api.API().IsRootTicket() ||
if client.API().IsRootTicket() ||
cpuArchitecture != dvCPUArchitecture {
updateBody.CPUArchitecture = &cpuArchitecture
}
@ -5359,7 +5359,7 @@ func vmUpdateDiskLocationAndSize(
) diag.Diagnostics {
config := m.(proxmoxtf.ProviderConfiguration)
api, err := config.GetClient()
client, err := config.GetClient()
if err != nil {
return diag.FromErr(err)
}
@ -5373,7 +5373,7 @@ func vmUpdateDiskLocationAndSize(
return diag.FromErr(err)
}
vmAPI := api.Node(nodeName).VM(vmID)
vmAPI := client.Node(nodeName).VM(vmID)
// Determine if any of the disks are changing location and/or size, and initiate the necessary actions.
//nolint: nestif

qodana.yaml (new file, 9 additions)
View File

@ -0,0 +1,9 @@
version: "1.0"
linter: jetbrains/qodana-go:2023.3
include:
- name: CheckDependencyLicenses
exclude:
- name: All
paths:
- CONTRIBUTORS.md
- README.md