fix(vm,lxc,file): improve timeouts handling (#1222)
Signed-off-by: Pavel Boldyrev <627562+bpg@users.noreply.github.com>
Parent: 7e4710d3f0
Commit: 0c9c2066fd
@@ -215,7 +215,9 @@ output "ubuntu_container_public_key" {
   meta-argument to ignore changes to this attribute.
 - `template` - (Optional) Whether to create a template (defaults to `false`).
 - `timeout_create` - (Optional) Timeout for creating a container in seconds (defaults to 1800).
-- `timeout_start` - (Optional) Timeout for starting a container in seconds (defaults to 300).
+- `timeout_clone` - (Optional) Timeout for cloning a container in seconds (defaults to 1800).
+- `timeout_delete` - (Optional) Timeout for deleting a container in seconds (defaults to 60).
+- `timeout_update` - (Optional) Timeout for updating a container in seconds (defaults to 1800).
 - `unprivileged` - (Optional) Whether the container runs as unprivileged on
   the host (defaults to `false`).
 - `vm_id` - (Optional) The container identifier
@@ -515,8 +515,6 @@ output "ubuntu_vm_public_key" {
   1800).
 - `timeout_create` - (Optional) Timeout for creating a VM in seconds (defaults to
   1800).
-- `timeout_move_disk` - (Optional) Timeout for moving the disk of a VM in
-  seconds (defaults to 1800).
 - `timeout_migrate` - (Optional) Timeout for migrating the VM (defaults to
   1800).
 - `timeout_reboot` - (Optional) Timeout for rebooting a VM in seconds (defaults
@@ -8,10 +8,12 @@ package fwprovider
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"regexp"
 	"strconv"
 	"strings"
+	"time"
 
 	"github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator"
 	"github.com/hashicorp/terraform-plugin-framework/path"
@@ -26,6 +28,7 @@ import (
 	"github.com/hashicorp/terraform-plugin-framework/types"
 
 	"github.com/bpg/terraform-provider-proxmox/fwprovider/structure"
+	"github.com/bpg/terraform-provider-proxmox/proxmox/api"
 
 	"github.com/bpg/terraform-provider-proxmox/proxmox"
 	"github.com/bpg/terraform-provider-proxmox/proxmox/nodes"
@@ -361,6 +364,11 @@ func (r *downloadFileResource) Create(
 		return
 	}
 
+	timeout := time.Duration(plan.UploadTimeout.ValueInt64()) * time.Second
+
+	ctx, cancel := context.WithTimeout(ctx, timeout)
+	defer cancel()
+
 	fileMetadata, err := r.getURLMetadata(
 		ctx,
 		&plan,
@@ -394,27 +402,20 @@ func (r *downloadFileResource) Create(
 	}
 
 	storageClient := nodesClient.Storage(plan.Storage.ValueString())
-	err = storageClient.DownloadFileByURL(
-		ctx,
-		&downloadFileReq,
-		plan.UploadTimeout.ValueInt64(),
-	)
+	err = storageClient.DownloadFileByURL(ctx, &downloadFileReq)
 
 	if isErrFileAlreadyExists(err) && plan.OverwriteUnmanaged.ValueBool() {
 		fileID := plan.Content.ValueString() + "/" + plan.FileName.ValueString()
 
 		err = storageClient.DeleteDatastoreFile(ctx, fileID)
-		if err != nil {
+		if err != nil && !errors.Is(err, api.ErrResourceDoesNotExist) {
 			resp.Diagnostics.AddError("Error deleting file from datastore",
 				fmt.Sprintf("Could not delete file '%s', unexpected error: %s", fileID, err.Error()),
 			)
 		}
 
-		err = storageClient.DownloadFileByURL(
-			ctx,
-			&downloadFileReq,
-			plan.UploadTimeout.ValueInt64(),
-		)
+		err = storageClient.DownloadFileByURL(ctx, &downloadFileReq)
 	}
 
 	if err != nil {
@@ -610,7 +611,7 @@ func (r *downloadFileResource) Delete(
 		ctx,
 		state.ID.ValueString(),
 	)
-	if err != nil {
+	if err != nil && !errors.Is(err, api.ErrResourceDoesNotExist) {
 		if strings.Contains(err.Error(), "unable to parse") {
 			resp.Diagnostics.AddWarning(
 				"Datastore file does not exists",
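The Create path above shows the new timeout plumbing end to end: the planned UploadTimeout value is converted into a context deadline once, and every downstream client call inherits it instead of taking its own timeout argument. A condensed sketch of that flow, abridged from the hunks above (surrounding error handling omitted):

    // One deadline now covers the metadata lookup, the download task and its retries.
    timeout := time.Duration(plan.UploadTimeout.ValueInt64()) * time.Second

    ctx, cancel := context.WithTimeout(ctx, timeout)
    defer cancel()

    err := storageClient.DownloadFileByURL(ctx, &downloadFileReq)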
@@ -13,6 +13,8 @@ import (
 )
 
 func TestAccDatasourceNode(t *testing.T) {
+	t.Parallel()
+
 	te := initTestEnvironment(t)
 
 	tests := []struct {
@@ -11,6 +11,7 @@ import (
 	"fmt"
 	"math/rand"
 	"testing"
+	"time"
 
 	"github.com/hashicorp/terraform-plugin-testing/helper/resource"
 	"github.com/hashicorp/terraform-plugin-testing/terraform"
@@ -104,7 +105,10 @@ func testAccResourceContainerCreateCheck(te *testEnvironment) resource.TestCheck
 	return resource.ComposeTestCheckFunc(
 		resource.TestCheckResourceAttr(accTestContainerName, "description", "my\ndescription\nvalue\n"),
 		func(*terraform.State) error {
-			err := te.nodeClient().Container(accTestContainerID).WaitForContainerStatus(context.Background(), "running", 10, 1)
+			ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+			defer cancel()
+
+			err := te.nodeClient().Container(accTestContainerID).WaitForContainerStatus(ctx, "running")
 			require.NoError(te.t, err, "container did not start")
 
 			return nil
@@ -137,7 +141,10 @@ func testAccResourceContainerCreateCloneCheck(te *testEnvironment) resource.Test
 
 	return resource.ComposeTestCheckFunc(
 		func(*terraform.State) error {
-			err := te.nodeClient().Container(accCloneContainerID).WaitForContainerStatus(context.Background(), "running", 10, 1)
+			ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+			defer cancel()
+
+			err := te.nodeClient().Container(accCloneContainerID).WaitForContainerStatus(ctx, "running")
 			require.NoError(te.t, err, "container did not start")
 
 			return nil
@@ -9,6 +9,7 @@ package tests
 import (
 	"context"
 	"testing"
+	"time"
 
 	"github.com/hashicorp/terraform-plugin-testing/helper/resource"
 	"github.com/stretchr/testify/require"
@@ -126,17 +127,23 @@ func TestAccResourceDownloadFile(t *testing.T) {
 		}},
 		{"override unmanaged file", []resource.TestStep{{
 			PreConfig: func() {
-				err := te.nodeStorageClient().DownloadFileByURL(context.Background(), &storage.DownloadURLPostRequestBody{
+				ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
+				defer cancel()
+
+				_ = te.nodeStorageClient().DeleteDatastoreFile(ctx, "iso/fake_file.iso") //nolint: errcheck
+
+				err := te.nodeStorageClient().DownloadFileByURL(ctx, &storage.DownloadURLPostRequestBody{
 					Content:  types.StrPtr("iso"),
 					FileName: types.StrPtr("fake_file.iso"),
 					Node:     types.StrPtr(te.nodeName),
 					Storage:  types.StrPtr(te.datastoreID),
 					URL:      types.StrPtr(fakeFileISO),
-				}, 600)
+				})
 				require.NoError(t, err)
 
 				t.Cleanup(func() {
-					err := te.nodeStorageClient().DeleteDatastoreFile(context.Background(), "iso/fake_file.iso")
-					require.NoError(t, err)
+					e := te.nodeStorageClient().DeleteDatastoreFile(context.Background(), "iso/fake_file.iso")
+					require.NoError(t, e)
 				})
 			},
 			Config: te.renderConfig(`
@@ -311,6 +311,11 @@ func (c *client) HTTP() *http.Client {
 // validateResponseCode ensures that a response is valid.
 func validateResponseCode(res *http.Response) error {
 	if res.StatusCode < 200 || res.StatusCode >= 300 {
+		if res.StatusCode == http.StatusNotFound ||
+			(res.StatusCode == http.StatusInternalServerError && strings.Contains(res.Status, "does not exist")) {
+			return ErrResourceDoesNotExist
+		}
+
 		msg := strings.TrimPrefix(res.Status, fmt.Sprintf("%d ", res.StatusCode))
 
 		errRes := &ErrorResponseBody{}
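With the sentinel mapped in validateResponseCode, callers can detect a missing resource with errors.Is instead of matching on status text. A minimal sketch of a consuming function (hypothetical caller, not part of this commit):

    err := client.DoRequest(ctx, http.MethodGet, path, nil, resBody)
    if errors.Is(err, api.ErrResourceDoesNotExist) {
        // 404, or the PVE-style 500 "... does not exist" response: treat the resource as already gone
        return nil
    }
    if err != nil {
        return fmt.Errorf("unexpected API error: %w", err)
    }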
proxmox/api/client_test.go (new file, 105 lines)
@@ -0,0 +1,105 @@
+package api
+
+import (
+	"context"
+	"errors"
+	"net/http"
+	"reflect"
+	"strconv"
+	"strings"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+// RoundTripFunc .
+type RoundTripFunc func(req *http.Request) *http.Response
+
+// RoundTrip .
+func (f RoundTripFunc) RoundTrip(req *http.Request) (*http.Response, error) {
+	return f(req), nil
+}
+
+// NewTestClient returns *http.Client with Transport replaced to avoid making real calls.
+func newTestClient(fn RoundTripFunc) *http.Client {
+	return &http.Client{
+		Transport: fn,
+	}
+}
+
+type dummyAuthenticator struct{}
+
+func (dummyAuthenticator) IsRoot() bool {
+	return false
+}
+
+func (dummyAuthenticator) IsRootTicket() bool {
+	return false
+}
+
+func (dummyAuthenticator) AuthenticateRequest(_ context.Context, _ *http.Request) error {
+	return nil
+}
+
+func TestClientDoRequest(t *testing.T) {
+	t.Parallel()
+
+	tests := []struct {
+		name    string
+		status  string
+		wantErr error
+	}{
+		{name: "no error", status: "200 OK", wantErr: nil},
+		{name: "not exists - 404 status", status: "404 missing", wantErr: ErrResourceDoesNotExist},
+		{name: "not exists - 500 status", status: "500 This thing does not exist", wantErr: ErrResourceDoesNotExist},
+		{name: "500 status", status: "500 Internal Server Error", wantErr: &HTTPError{
+			Code:    500,
+			Message: "Internal Server Error",
+		}},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			t.Parallel()
+
+			c := client{
+				conn: &Connection{
+					endpoint: "http://localhost",
+					httpClient: newTestClient(func(_ *http.Request) *http.Response {
+						sc, err := strconv.Atoi(strings.Fields(tt.status)[0])
+						require.NoError(t, err)
+
+						return &http.Response{
+							Status:     tt.status,
+							StatusCode: sc,
+							Body:       nil,
+						}
+					}),
+				},
+				auth: dummyAuthenticator{},
+			}
+
+			err := c.DoRequest(context.Background(), "POST", "any", nil, nil)
+			fail := false
+
+			switch {
+			case err == nil && tt.wantErr == nil:
+				return
+			case err != nil && tt.wantErr == nil:
+				fallthrough
+			case err == nil && tt.wantErr != nil:
+				fail = true
+			default:
+				var he, we *HTTPError
+				if errors.As(err, &he) && errors.As(tt.wantErr, &we) {
+					fail = !reflect.DeepEqual(he, we)
+				} else {
+					fail = !errors.Is(err, tt.wantErr)
+				}
+			}
+
+			if fail {
+				t.Errorf("DoRequest() error = %v, wantErr %v", err, tt.wantErr)
+			}
+		})
+	}
+}
@@ -6,7 +6,9 @@
 
 package api
 
-import "fmt"
+import (
+	"fmt"
+)
 
 // Error is a sentinel error type for API errors.
 type Error string
@@ -18,12 +20,15 @@ func (err Error) Error() string {
 // ErrNoDataObjectInResponse is returned when the server does not include a data object in the response.
 const ErrNoDataObjectInResponse Error = "the server did not include a data object in the response"
 
+// ErrResourceDoesNotExist is returned when the requested resource does not exist.
+const ErrResourceDoesNotExist Error = "the requested resource does not exist"
+
 // HTTPError is a generic error type for HTTP errors.
 type HTTPError struct {
 	Code    int
 	Message string
 }
 
-func (err *HTTPError) Error() string {
+func (err HTTPError) Error() string {
 	return fmt.Sprintf("received an HTTP %d response - Reason: %s", err.Code, err.Message)
 }
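Error is a plain string type, so two values built from the same text compare equal and errors.Is works without any wrapping; the receiver change on HTTPError additionally lets both HTTPError and *HTTPError values satisfy the error interface. A small illustration of standard Go semantics (not taken from the commit):

    var err error = api.Error("the requested resource does not exist")
    fmt.Println(errors.Is(err, api.ErrResourceDoesNotExist)) // true: identical string values compare equal

    var httpErr error = &api.HTTPError{Code: 500, Message: "boom"}
    var target *api.HTTPError
    fmt.Println(errors.As(httpErr, &target)) // true: target now points at the HTTPError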
@@ -8,11 +8,14 @@ package containers
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"net/http"
 	"strings"
 	"time"
 
+	"github.com/avast/retry-go/v4"
+
 	"github.com/bpg/terraform-provider-proxmox/proxmox/api"
 )
 
@@ -27,13 +30,13 @@ func (c *Client) CloneContainer(ctx context.Context, d *CloneRequestBody) error
 }
 
 // CreateContainer creates a container.
-func (c *Client) CreateContainer(ctx context.Context, d *CreateRequestBody, timeout int) error {
+func (c *Client) CreateContainer(ctx context.Context, d *CreateRequestBody) error {
 	taskID, err := c.CreateContainerAsync(ctx, d)
 	if err != nil {
 		return err
 	}
 
-	err = c.Tasks().WaitForTask(ctx, *taskID, timeout, 5)
+	err = c.Tasks().WaitForTask(ctx, *taskID)
 	if err != nil {
 		return fmt.Errorf("error waiting for container created: %w", err)
 	}
@@ -120,7 +123,7 @@ func (c *Client) ShutdownContainer(ctx context.Context, d *ShutdownRequestBody)
 }
 
 // StartContainer starts a container if is not already running.
-func (c *Client) StartContainer(ctx context.Context, timeout int) error {
+func (c *Client) StartContainer(ctx context.Context) error {
 	status, err := c.GetContainerStatus(ctx)
 	if err != nil {
 		return fmt.Errorf("error retrieving container status: %w", err)
@@ -135,13 +138,13 @@ func (c *Client) StartContainer(ctx context.Context, timeout int) error {
 		return fmt.Errorf("error starting container: %w", err)
 	}
 
-	err = c.Tasks().WaitForTask(ctx, *taskID, timeout, 5)
+	err = c.Tasks().WaitForTask(ctx, *taskID)
 	if err != nil {
 		return fmt.Errorf("error waiting for container start: %w", err)
 	}
 
 	// the timeout here should probably be configurable
-	err = c.WaitForContainerStatus(ctx, "running", timeout*2, 5)
+	err = c.WaitForContainerStatus(ctx, "running")
 	if err != nil {
 		return fmt.Errorf("error waiting for container start: %w", err)
 	}
@@ -186,74 +189,77 @@ func (c *Client) UpdateContainer(ctx context.Context, d *UpdateRequestBody) erro
 }
 
 // WaitForContainerStatus waits for a container to reach a specific state.
-func (c *Client) WaitForContainerStatus(ctx context.Context, status string, timeout int, delay int) error {
-	status = strings.ToLower(status)
-
-	timeDelay := int64(delay)
-	timeMax := float64(timeout)
-	timeStart := time.Now()
-	timeElapsed := timeStart.Sub(timeStart)
-
-	for timeElapsed.Seconds() < timeMax {
-		if int64(timeElapsed.Seconds())%timeDelay == 0 {
-			data, err := c.GetContainerStatus(ctx)
-			if err != nil {
-				return fmt.Errorf("error retrieving container status: %w", err)
-			}
-
-			if data.Status == status {
-				return nil
-			}
-
-			time.Sleep(1 * time.Second)
-		}
-
-		time.Sleep(200 * time.Millisecond)
-
-		timeElapsed = time.Since(timeStart)
-
-		if ctx.Err() != nil {
-			return fmt.Errorf("context error: %w", ctx.Err())
-		}
-	}
-
-	return fmt.Errorf(
-		"timeout while waiting for container \"%d\" to enter the status \"%s\"",
-		c.VMID,
-		status,
-	)
-}
-
-// WaitForContainerLock waits for a container lock to be released.
-func (c *Client) WaitForContainerLock(ctx context.Context, timeout int, delay int, ignoreErrorResponse bool) error {
-	timeDelay := int64(delay)
-	timeMax := float64(timeout)
-	timeStart := time.Now()
-	timeElapsed := timeStart.Sub(timeStart)
-
-	for timeElapsed.Seconds() < timeMax {
-		if int64(timeElapsed.Seconds())%timeDelay == 0 {
-			data, err := c.GetContainerStatus(ctx)
-
-			if err != nil {
-				if !ignoreErrorResponse {
-					return fmt.Errorf("error retrieving container status: %w", err)
-				}
-			} else if data.Lock == nil || *data.Lock == "" {
-				return nil
-			}
-
-			time.Sleep(1 * time.Second)
-		}
-
-		time.Sleep(200 * time.Millisecond)
-
-		timeElapsed = time.Since(timeStart)
-
-		if ctx.Err() != nil {
-			return fmt.Errorf("context error: %w", ctx.Err())
-		}
-	}
-
-	return fmt.Errorf("timeout while waiting for container \"%d\" to become unlocked", c.VMID)
+func (c *Client) WaitForContainerStatus(ctx context.Context, status string) error {
+	status = strings.ToLower(status)
+
+	unexpectedStatus := fmt.Errorf("unexpected status %q", status)
+
+	err := retry.Do(
+		func() error {
+			data, err := c.GetContainerStatus(ctx)
+			if err != nil {
+				return err
+			}
+
+			if data.Status != status {
+				return unexpectedStatus
+			}
+
+			return nil
+		},
+		retry.Context(ctx),
+		retry.RetryIf(func(err error) bool {
+			return errors.Is(err, unexpectedStatus)
+		}),
+		retry.Attempts(0), // retry until context deadline
+		retry.Delay(1*time.Second),
+		retry.LastErrorOnly(true),
+	)
+
+	if errors.Is(err, context.DeadlineExceeded) {
+		return fmt.Errorf("timeout while waiting for container %d to enter the status %q", c.VMID, status)
+	}
+
+	if err != nil {
+		return fmt.Errorf("error waiting for container %d to enter the status %q: %w", c.VMID, status, err)
+	}
+
+	return nil
+}
+
+// WaitForContainerConfigUnlock waits for a container lock to be released.
+func (c *Client) WaitForContainerConfigUnlock(ctx context.Context, ignoreErrorResponse bool) error {
+	stillLocked := errors.New("still locked")
+
+	err := retry.Do(
+		func() error {
+			data, err := c.GetContainerStatus(ctx)
+			if err != nil {
+				return err
+			}
+
+			if data.Lock != nil && *data.Lock != "" {
+				return stillLocked
+			}
+
+			return nil
+		},
+		retry.Context(ctx),
+		retry.RetryIf(func(err error) bool {
+			return errors.Is(err, stillLocked) || ignoreErrorResponse
+		}),
+		retry.Attempts(0), // retry until context deadline
+		retry.Delay(1*time.Second),
+		retry.LastErrorOnly(true),
+	)
+
+	if errors.Is(err, context.DeadlineExceeded) {
+		return fmt.Errorf("timeout while waiting for container %d configuration to become unlocked", c.VMID)
+	}
+
+	if err != nil && !ignoreErrorResponse {
+		return fmt.Errorf("error waiting for container %d configuration to become unlocked: %w", c.VMID, err)
+	}
+
+	return nil
 }
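Because the wait helpers no longer accept timeout/delay parameters, callers bound them with a context deadline, as the updated acceptance tests above do. Sketch of the calling pattern (the 10-second figure mirrors the tests; the containerClient variable name is illustrative):

    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()

    // Polls once per second until the status matches; if the deadline expires first,
    // the helper returns its "timeout while waiting ..." error.
    err := containerClient.WaitForContainerStatus(ctx, "running")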
@@ -22,7 +22,7 @@ import (
 )
 
 const (
-	networkReloadTimeoutSec = 5
+	networkReloadTimeout = 10 * time.Second
 )
 
 // reloadLock is used to prevent concurrent network reloads.
@@ -66,6 +66,9 @@ func (c *Client) CreateNetworkInterface(ctx context.Context, d *NetworkInterface
 
 // ReloadNetworkConfiguration reloads the network configuration for a specific node.
 func (c *Client) ReloadNetworkConfiguration(ctx context.Context) error {
+	ctx, cancel := context.WithTimeout(ctx, networkReloadTimeout)
+	defer cancel()
+
 	reloadLock.Lock()
 	defer reloadLock.Unlock()
 
@@ -82,8 +85,9 @@ func (c *Client) ReloadNetworkConfiguration(ctx context.Context) error {
 			return api.ErrNoDataObjectInResponse
 		}
 
-		return c.Tasks().WaitForTask(ctx, *resBody.Data, networkReloadTimeoutSec, 1)
+		return c.Tasks().WaitForTask(ctx, *resBody.Data)
 	},
+		retry.Context(ctx),
 		retry.Delay(1*time.Second),
 		retry.Attempts(3),
 		retry.RetryIf(func(err error) bool {
@@ -8,11 +8,14 @@ package storage
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"net/http"
 	"net/url"
 	"sort"
 
+	"github.com/avast/retry-go/v4"
+
 	"github.com/bpg/terraform-provider-proxmox/proxmox/api"
 )
 
@@ -21,17 +24,17 @@ func (c *Client) DeleteDatastoreFile(
 	ctx context.Context,
 	volumeID string,
 ) error {
-	err := c.DoRequest(
-		ctx,
-		http.MethodDelete,
-		c.ExpandPath(
-			fmt.Sprintf(
-				"content/%s",
-				url.PathEscape(volumeID),
-			),
-		),
-		nil,
-		nil,
+	path := c.ExpandPath(fmt.Sprintf("content/%s", url.PathEscape(volumeID)))
+
+	err := retry.Do(
+		func() error {
+			return c.DoRequest(ctx, http.MethodDelete, path, nil, nil)
+		},
+		retry.Context(ctx),
+		retry.RetryIf(func(err error) bool {
+			return !errors.Is(err, api.ErrResourceDoesNotExist)
+		}),
+		retry.LastErrorOnly(true),
 	)
 	if err != nil {
 		return fmt.Errorf("error deleting file %s from datastore %s: %w", volumeID, c.StorageName, err)
@@ -18,7 +18,6 @@ import (
 func (c *Client) DownloadFileByURL(
 	ctx context.Context,
 	d *DownloadURLPostRequestBody,
-	uploadTimeout int64,
 ) error {
 	resBody := &DownloadURLResponseBody{}
 
@@ -31,7 +30,7 @@ func (c *Client) DownloadFileByURL(
 		return api.ErrNoDataObjectInResponse
 	}
 
-	taskErr := c.Tasks().WaitForTask(ctx, *resBody.TaskID, int(uploadTimeout), 5)
+	taskErr := c.Tasks().WaitForTask(ctx, *resBody.TaskID)
 	if taskErr != nil {
 		err = fmt.Errorf(
 			"error download file to datastore %s: failed waiting for url download: %w",
@@ -17,7 +17,6 @@ import (
 func (c *Client) APIUpload(
 	ctx context.Context,
 	d *api.FileUploadRequest,
-	uploadTimeout int,
 	tempDir string,
 ) (*DatastoreUploadResponseBody, error) {
 	tflog.Debug(ctx, "uploading file to datastore using PVE API", map[string]interface{}{
@@ -149,7 +148,7 @@ func (c *Client) APIUpload(
 		return nil, fmt.Errorf("error uploading file to datastore %s: no uploadID", c.StorageName)
 	}
 
-	err = c.Tasks().WaitForTask(ctx, *resBody.UploadID, uploadTimeout, 5)
+	err = c.Tasks().WaitForTask(ctx, *resBody.UploadID)
 	if err != nil {
 		return nil, fmt.Errorf("error uploading file to datastore %s: failed waiting for upload - %w", c.StorageName, err)
 	}
@@ -13,6 +13,8 @@ import (
 	"net/http"
 	"time"
 
+	"github.com/avast/retry-go/v4"
+
 	"github.com/bpg/terraform-provider-proxmox/proxmox/api"
 )
 
@@ -25,13 +27,7 @@ func (c *Client) GetTaskStatus(ctx context.Context, upid string) (*GetTaskStatus
 		return nil, fmt.Errorf("error building path for task status: %w", err)
 	}
 
-	err = c.DoRequest(
-		ctx,
-		http.MethodGet,
-		path,
-		nil,
-		resBody,
-	)
+	err = c.DoRequest(ctx, http.MethodGet, path, nil, resBody)
 	if err != nil {
 		return nil, fmt.Errorf("error retrieving task status: %w", err)
 	}
@@ -55,13 +51,7 @@ func (c *Client) GetTaskLog(ctx context.Context, upid string) ([]string, error)
 		return lines, fmt.Errorf("error building path for task status: %w", err)
 	}
 
-	err = c.DoRequest(
-		ctx,
-		http.MethodGet,
-		path,
-		nil,
-		resBody,
-	)
+	err = c.DoRequest(ctx, http.MethodGet, path, nil, resBody)
 	if err != nil {
 		return lines, fmt.Errorf("error retrieving task status: %w", err)
 	}
@@ -84,14 +74,12 @@ func (c *Client) DeleteTask(ctx context.Context, upid string) error {
 		return fmt.Errorf("error creating task path: %w", err)
 	}
 
-	err = c.DoRequest(
-		ctx,
-		http.MethodDelete,
-		path,
-		nil,
-		nil,
-	)
+	err = c.DoRequest(ctx, http.MethodDelete, path, nil, nil)
 	if err != nil {
+		if errors.Is(err, api.ErrResourceDoesNotExist) {
+			return nil
+		}
+
 		return fmt.Errorf("error deleting task: %w", err)
 	}
 
@@ -99,62 +87,53 @@ func (c *Client) DeleteTask(ctx context.Context, upid string) error {
 }
 
 // WaitForTask waits for a specific task to complete.
-func (c *Client) WaitForTask(ctx context.Context, upid string, timeoutSec, delaySec int) error {
-	timeDelay := int64(delaySec)
-	timeMax := float64(timeoutSec)
-	timeStart := time.Now()
-	timeElapsed := timeStart.Sub(timeStart)
-
-	isCriticalError := func(err error) bool {
-		var target *api.HTTPError
-		if errors.As(err, &target) {
-			if target.Code != http.StatusBadRequest {
-				// this is a special case to account for eventual consistency
-				// when creating a task -- the task may not be available via status API
-				// immediately after creation
-				return true
-			}
-		}
-
-		return err != nil
-	}
-
-	for timeElapsed.Seconds() < timeMax {
-		if int64(timeElapsed.Seconds())%timeDelay == 0 {
-			status, err := c.GetTaskStatus(ctx, upid)
-			if isCriticalError(err) {
-				return err
-			}
-
-			if status.Status != "running" {
-				if status.ExitCode != "OK" {
-					return fmt.Errorf(
-						"task \"%s\" failed to complete with exit code: %s",
-						upid,
-						status.ExitCode,
-					)
-				}
-
-				return nil
-			}
-
-			time.Sleep(1 * time.Second)
-		}
-
-		time.Sleep(200 * time.Millisecond)
-
-		timeElapsed = time.Since(timeStart)
-
-		if ctx.Err() != nil {
-			return fmt.Errorf(
-				"context error while waiting for task \"%s\" to complete: %w",
-				upid, ctx.Err(),
-			)
-		}
-	}
-
-	return fmt.Errorf(
-		"timeout while waiting for task \"%s\" to complete",
-		upid,
-	)
+func (c *Client) WaitForTask(ctx context.Context, upid string) error {
+	errStillRunning := errors.New("still running")
+
+	status, err := retry.DoWithData(
+		func() (*GetTaskStatusResponseData, error) {
+			status, err := c.GetTaskStatus(ctx, upid)
+			if err != nil {
+				return nil, err
+			}
+
+			if status.Status == "running" {
+				return nil, errStillRunning
+			}
+
+			return status, err
+		},
+		retry.Context(ctx),
+		retry.RetryIf(func(err error) bool {
+			var target *api.HTTPError
+			if errors.As(err, &target) {
+				if target.Code == http.StatusBadRequest {
+					// this is a special case to account for eventual consistency
+					// when creating a task -- the task may not be available via status API
+					// immediately after creation
+					return true
+				}
+			}
+
+			return errors.Is(err, errStillRunning)
+		}),
+		retry.LastErrorOnly(true),
+		retry.Attempts(0), // retry until context deadline
+		retry.DelayType(retry.FixedDelay),
+		retry.Delay(time.Second),
+	)
+
+	if errors.Is(err, context.DeadlineExceeded) {
+		return fmt.Errorf("timeout while waiting for task %q to complete", upid)
+	}
+
+	if err != nil {
+		return fmt.Errorf("error while waiting for task %q to complete: %w", upid, err)
+	}
+
+	if status.ExitCode != "OK" {
+		return fmt.Errorf("task %q failed to complete with exit code: %s", upid, status.ExitCode)
+	}
+
+	return nil
 }
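The retry options above encode the new polling contract: retry.Attempts(0) removes the attempt cap, so retry.Do keeps polling at the fixed one-second delay until retry.Context(ctx) observes cancellation or a deadline. A compact sketch of the same avast/retry-go/v4 idiom in caller code (probeOnce is a hypothetical probe, not from this commit):

    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    defer cancel()

    err := retry.Do(
        func() error { return probeOnce(ctx) }, // hypothetical; returns nil once the condition holds
        retry.Context(ctx),      // stop as soon as ctx is cancelled or times out
        retry.Attempts(0),       // unlimited attempts, bounded only by ctx
        retry.Delay(time.Second),
        retry.LastErrorOnly(true),
    )
    if errors.Is(err, context.DeadlineExceeded) {
        // map the deadline to a domain-specific timeout error, as WaitForTask does above
    }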
@@ -8,6 +8,7 @@ package vms
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"net"
 	"net/http"
@@ -23,7 +24,7 @@ import (
 )
 
 // CloneVM clones a virtual machine.
-func (c *Client) CloneVM(ctx context.Context, retries int, d *CloneRequestBody, timeout int) error {
+func (c *Client) CloneVM(ctx context.Context, retries int, d *CloneRequestBody) error {
 	var err error
 
 	resBody := &MoveDiskResponseBody{}
@@ -43,7 +44,7 @@ func (c *Client) CloneVM(ctx context.Context, retries int, d *CloneRequestBody,
 			return api.ErrNoDataObjectInResponse
 		}
 
-		return c.Tasks().WaitForTask(ctx, *resBody.Data, timeout, 5)
+		return c.Tasks().WaitForTask(ctx, *resBody.Data)
 	}, retry.Attempts(uint(retries)), retry.Delay(10*time.Second))
 	if err != nil {
 		return fmt.Errorf("error waiting for VM clone: %w", err)
@@ -53,13 +54,13 @@ func (c *Client) CloneVM(ctx context.Context, retries int, d *CloneRequestBody,
 }
 
 // CreateVM creates a virtual machine.
-func (c *Client) CreateVM(ctx context.Context, d *CreateRequestBody, timeout int) error {
+func (c *Client) CreateVM(ctx context.Context, d *CreateRequestBody) error {
 	taskID, err := c.CreateVMAsync(ctx, d)
 	if err != nil {
 		return err
 	}
 
-	err = c.Tasks().WaitForTask(ctx, *taskID, timeout, 1)
+	err = c.Tasks().WaitForTask(ctx, *taskID)
 	if err != nil {
 		return fmt.Errorf("error waiting for VM creation: %w", err)
 	}
@@ -67,7 +68,7 @@ func (c *Client) CreateVM(ctx context.Context, d *CreateRequestBody, timeout int
 	return nil
 }
 
-// CreateVMAsync creates a virtual machine asynchronously.
+// CreateVMAsync creates a virtual machine asynchronously. Returns ID of the started task.
 func (c *Client) CreateVMAsync(ctx context.Context, d *CreateRequestBody) (*string, error) {
 	resBody := &CreateResponseBody{}
 
@@ -76,30 +77,57 @@ func (c *Client) CreateVMAsync(ctx context.Context, d *CreateRequestBody) (*stri
 		return nil, fmt.Errorf("error creating VM: %w", err)
 	}
 
-	if resBody.Data == nil {
+	if resBody.TaskID == nil {
 		return nil, api.ErrNoDataObjectInResponse
 	}
 
-	return resBody.Data, nil
+	return resBody.TaskID, nil
 }
 
-// DeleteVM deletes a virtual machine.
+// DeleteVM creates a virtual machine.
 func (c *Client) DeleteVM(ctx context.Context) error {
-	err := c.DoRequest(ctx, http.MethodDelete, c.ExpandPath("?destroy-unreferenced-disks=1&purge=1"), nil, nil)
+	taskID, err := c.DeleteVMAsync(ctx)
 	if err != nil {
-		return fmt.Errorf("error deleting VM: %w", err)
+		return err
+	}
+
+	err = c.Tasks().WaitForTask(ctx, *taskID)
+	if err != nil {
+		return fmt.Errorf("error waiting for VM deletion: %w", err)
 	}
 
 	return nil
 }
 
+// DeleteVMAsync deletes a virtual machine asynchronously. Returns ID of the started task.
+func (c *Client) DeleteVMAsync(ctx context.Context) (*string, error) {
+	// PVE may return a 500 error "got no worker upid - start worker failed", so we retry few times.
+	resBody := &DeleteResponseBody{}
+
+	err := retry.Do(
+		func() error {
+			return c.DoRequest(ctx, http.MethodDelete, c.ExpandPath("?destroy-unreferenced-disks=1&purge=1"), nil, resBody)
+		},
+		retry.Context(ctx),
+		retry.RetryIf(func(err error) bool {
+			return !errors.Is(err, api.ErrResourceDoesNotExist)
+		}),
+		retry.LastErrorOnly(true),
+	)
+	if err != nil {
+		return nil, fmt.Errorf("error deleting VM: %w", err)
+	}
+
+	return resBody.TaskID, nil
+}
+
 // GetVM retrieves a virtual machine.
 func (c *Client) GetVM(ctx context.Context) (*GetResponseData, error) {
 	resBody := &GetResponseBody{}
 
 	err := c.DoRequest(ctx, http.MethodGet, c.ExpandPath("config"), nil, resBody)
 	if err != nil {
-		return nil, fmt.Errorf("error retrieving VM: %w", err)
+		return nil, fmt.Errorf("error retrieving VM %d: %w", c.VMID, err)
 	}
 
 	if resBody.Data == nil {
@@ -142,13 +170,13 @@ func (c *Client) GetVMStatus(ctx context.Context) (*GetStatusResponseData, error
 }
 
 // MigrateVM migrates a virtual machine.
-func (c *Client) MigrateVM(ctx context.Context, d *MigrateRequestBody, timeout int) error {
+func (c *Client) MigrateVM(ctx context.Context, d *MigrateRequestBody) error {
 	taskID, err := c.MigrateVMAsync(ctx, d)
 	if err != nil {
 		return err
 	}
 
-	err = c.Tasks().WaitForTask(ctx, *taskID, timeout, 5)
+	err = c.Tasks().WaitForTask(ctx, *taskID)
 	if err != nil {
 		return fmt.Errorf("error waiting for VM migration: %w", err)
 	}
@@ -173,7 +201,7 @@ func (c *Client) MigrateVMAsync(ctx context.Context, d *MigrateRequestBody) (*st
 }
 
 // MoveVMDisk moves a virtual machine disk.
-func (c *Client) MoveVMDisk(ctx context.Context, d *MoveDiskRequestBody, timeout int) error {
+func (c *Client) MoveVMDisk(ctx context.Context, d *MoveDiskRequestBody) error {
 	taskID, err := c.MoveVMDiskAsync(ctx, d)
 	if err != nil {
 		if strings.Contains(err.Error(), "you can't move to the same storage with same format") {
@@ -184,7 +212,7 @@ func (c *Client) MoveVMDisk(ctx context.Context, d *MoveDiskRequestBody, timeout
 		return err
 	}
 
-	err = c.Tasks().WaitForTask(ctx, *taskID, timeout, 5)
+	err = c.Tasks().WaitForTask(ctx, *taskID)
 	if err != nil {
 		return fmt.Errorf("error waiting for VM disk move: %w", err)
 	}
@@ -225,13 +253,13 @@ func (c *Client) ListVMs(ctx context.Context) ([]*ListResponseData, error) {
 }
 
 // RebootVM reboots a virtual machine.
-func (c *Client) RebootVM(ctx context.Context, d *RebootRequestBody, timeout int) error {
+func (c *Client) RebootVM(ctx context.Context, d *RebootRequestBody) error {
 	taskID, err := c.RebootVMAsync(ctx, d)
 	if err != nil {
 		return err
 	}
 
-	err = c.Tasks().WaitForTask(ctx, *taskID, timeout, 5)
+	err = c.Tasks().WaitForTask(ctx, *taskID)
 	if err != nil {
 		return fmt.Errorf("error waiting for VM reboot: %w", err)
 	}
@@ -256,15 +284,17 @@ func (c *Client) RebootVMAsync(ctx context.Context, d *RebootRequestBody) (*stri
 }
 
 // ResizeVMDisk resizes a virtual machine disk.
-func (c *Client) ResizeVMDisk(ctx context.Context, d *ResizeDiskRequestBody, timeout int) error {
-	err := retry.Do(func() error {
-		taskID, err := c.ResizeVMDiskAsync(ctx, d)
-		if err != nil {
-			return err
-		}
-
-		return c.Tasks().WaitForTask(ctx, *taskID, timeout, 5)
+func (c *Client) ResizeVMDisk(ctx context.Context, d *ResizeDiskRequestBody) error {
+	err := retry.Do(
+		func() error {
+			taskID, err := c.ResizeVMDiskAsync(ctx, d)
+			if err != nil {
+				return err
+			}
+
+			return c.Tasks().WaitForTask(ctx, *taskID)
 		},
+		retry.Context(ctx),
 		retry.Attempts(3),
 		retry.Delay(1*time.Second),
 		retry.LastErrorOnly(false),
@@ -296,13 +326,13 @@ func (c *Client) ResizeVMDiskAsync(ctx context.Context, d *ResizeDiskRequestBody
 }
 
 // ShutdownVM shuts down a virtual machine.
-func (c *Client) ShutdownVM(ctx context.Context, d *ShutdownRequestBody, timeout int) error {
+func (c *Client) ShutdownVM(ctx context.Context, d *ShutdownRequestBody) error {
 	taskID, err := c.ShutdownVMAsync(ctx, d)
 	if err != nil {
 		return err
 	}
 
-	err = c.Tasks().WaitForTask(ctx, *taskID, timeout, 5)
+	err = c.Tasks().WaitForTask(ctx, *taskID)
 	if err != nil {
 		return fmt.Errorf("error waiting for VM shutdown: %w", err)
 	}
@@ -328,13 +358,13 @@ func (c *Client) ShutdownVMAsync(ctx context.Context, d *ShutdownRequestBody) (*
 
 // StartVM starts a virtual machine.
 // Returns the task log if the VM had warnings at startup, or fails to start.
-func (c *Client) StartVM(ctx context.Context, timeout int) ([]string, error) {
-	taskID, err := c.StartVMAsync(ctx, timeout)
+func (c *Client) StartVM(ctx context.Context, timeoutSec int) ([]string, error) {
+	taskID, err := c.StartVMAsync(ctx, timeoutSec)
 	if err != nil {
 		return nil, err
 	}
 
-	err = c.Tasks().WaitForTask(ctx, *taskID, timeout, 5)
+	err = c.Tasks().WaitForTask(ctx, *taskID)
 	if err != nil {
 		log, e := c.Tasks().GetTaskLog(ctx, *taskID)
 		if e != nil {
@@ -357,9 +387,9 @@ func (c *Client) StartVM(ctx context.Context, timeout int) ([]string, error) {
 }
 
 // StartVMAsync starts a virtual machine asynchronously.
-func (c *Client) StartVMAsync(ctx context.Context, timeout int) (*string, error) {
+func (c *Client) StartVMAsync(ctx context.Context, timeoutSec int) (*string, error) {
 	reqBody := &StartRequestBody{
-		TimeoutSeconds: &timeout,
+		TimeoutSeconds: &timeoutSec,
 	}
 	resBody := &StartResponseBody{}
 
@@ -376,13 +406,13 @@ func (c *Client) StartVMAsync(ctx context.Context, timeout int) (*string, error)
 }
 
 // StopVM stops a virtual machine.
-func (c *Client) StopVM(ctx context.Context, timeout int) error {
+func (c *Client) StopVM(ctx context.Context) error {
 	taskID, err := c.StopVMAsync(ctx)
 	if err != nil {
 		return err
 	}
 
-	err = c.Tasks().WaitForTask(ctx, *taskID, timeout, 5)
+	err = c.Tasks().WaitForTask(ctx, *taskID)
 	if err != nil {
 		return fmt.Errorf("error waiting for VM stop: %w", err)
 	}
@@ -437,7 +467,7 @@ func (c *Client) WaitForNetworkInterfacesFromVMAgent(
 	ctx context.Context,
 	timeout int, // time in seconds to wait until giving up
 	delay int, // the delay in seconds between requests to the agent
-	waitForIP bool, // whether or not to block until an IP is found, or just block until the interfaces are published
+	waitForIP bool, // whether to block until an IP is found, or just block until the interfaces are published
 ) (*GetQEMUNetworkInterfacesResponseData, error) {
 	delaySeconds := int64(delay)
 	timeMaxSeconds := float64(timeout)
@@ -525,103 +555,78 @@ func (c *Client) WaitForNetworkInterfacesFromVMAgent(
 	)
 }
 
-// WaitForNoNetworkInterfacesFromVMAgent waits for a virtual machine's QEMU agent to unpublish the network interfaces.
-func (c *Client) WaitForNoNetworkInterfacesFromVMAgent(ctx context.Context, timeout int, delay int) error {
-	timeDelay := int64(delay)
-	timeMax := float64(timeout)
-	timeStart := time.Now()
-	timeElapsed := timeStart.Sub(timeStart)
-
-	for timeElapsed.Seconds() < timeMax {
-		if int64(timeElapsed.Seconds())%timeDelay == 0 {
-			_, err := c.GetVMNetworkInterfacesFromAgent(ctx)
-			if err == nil {
-				return nil
-			}
-
-			time.Sleep(1 * time.Second)
-		}
-
-		time.Sleep(200 * time.Millisecond)
-
-		timeElapsed = time.Since(timeStart)
-
-		if ctx.Err() != nil {
-			return fmt.Errorf("error waiting for VM network interfaces: %w", ctx.Err())
-		}
-	}
-
-	return fmt.Errorf(
-		"timeout while waiting for the QEMU agent on VM \"%d\" to unpublish the network interfaces",
-		c.VMID,
-	)
-}
-
 // WaitForVMConfigUnlock waits for a virtual machine configuration to become unlocked.
-func (c *Client) WaitForVMConfigUnlock(ctx context.Context, timeout int, delay int, ignoreErrorResponse bool) error {
-	timeDelay := int64(delay)
-	timeMax := float64(timeout)
-	timeStart := time.Now()
-	timeElapsed := timeStart.Sub(timeStart)
-
-	for timeElapsed.Seconds() < timeMax {
-		if int64(timeElapsed.Seconds())%timeDelay == 0 {
-			data, err := c.GetVMStatus(ctx)
-
-			if err != nil {
-				if !ignoreErrorResponse {
-					return err
-				}
-			} else if data.Lock == nil || *data.Lock == "" {
-				return nil
-			}
-
-			time.Sleep(1 * time.Second)
-		}
-
-		time.Sleep(200 * time.Millisecond)
-
-		timeElapsed = time.Since(timeStart)
-
-		if ctx.Err() != nil {
-			return fmt.Errorf("error waiting for VM configuration to become unlocked: %w", ctx.Err())
-		}
-	}
-
-	return fmt.Errorf("timeout while waiting for VM \"%d\" configuration to become unlocked", c.VMID)
-}
-
-// WaitForVMStatus waits for a virtual machine to reach a specific status.
-func (c *Client) WaitForVMStatus(ctx context.Context, state string, timeout int, delay int) error {
-	state = strings.ToLower(state)
-
-	timeDelay := int64(delay)
-	timeMax := float64(timeout)
-	timeStart := time.Now()
-	timeElapsed := timeStart.Sub(timeStart)
-
-	for timeElapsed.Seconds() < timeMax {
-		if int64(timeElapsed.Seconds())%timeDelay == 0 {
-			data, err := c.GetVMStatus(ctx)
-			if err != nil {
-				return err
-			}
-
-			if data.Status == state {
-				return nil
-			}
-
-			time.Sleep(1 * time.Second)
-		}
-
-		time.Sleep(200 * time.Millisecond)
-
-		timeElapsed = time.Since(timeStart)
-
-		if ctx.Err() != nil {
-			return fmt.Errorf("error waiting for VM state: %w", ctx.Err())
-		}
-	}
-
-	return fmt.Errorf("timeout while waiting for VM \"%d\" to enter the state \"%s\"", c.VMID, state)
+func (c *Client) WaitForVMConfigUnlock(ctx context.Context, ignoreErrorResponse bool) error {
+	stillLocked := errors.New("still locked")
+
+	err := retry.Do(
+		func() error {
+			data, err := c.GetVMStatus(ctx)
+			if err != nil {
+				return err
+			}
+
+			if data.Lock != nil && *data.Lock != "" {
+				return stillLocked
+			}
+
+			return nil
+		},
+		retry.Context(ctx),
+		retry.RetryIf(func(err error) bool {
+			return errors.Is(err, stillLocked) || ignoreErrorResponse
+		}),
+		retry.Attempts(0), // retry until context deadline
+		retry.Delay(1*time.Second),
+		retry.LastErrorOnly(true),
+	)
+
+	if errors.Is(err, context.DeadlineExceeded) {
+		return fmt.Errorf("timeout while waiting for VM %d configuration to become unlocked", c.VMID)
+	}
+
+	if err != nil && !ignoreErrorResponse {
+		return fmt.Errorf("error waiting for VM %d configuration to become unlocked: %w", c.VMID, err)
+	}
+
+	return nil
+}
+
+// WaitForVMStatus waits for a virtual machine to reach a specific status.
+func (c *Client) WaitForVMStatus(ctx context.Context, status string) error {
+	status = strings.ToLower(status)
+
+	unexpectedStatus := fmt.Errorf("unexpected status %q", status)
+
+	err := retry.Do(
+		func() error {
+			data, err := c.GetVMStatus(ctx)
+			if err != nil {
+				return err
+			}
+
+			if data.Status != status {
+				return unexpectedStatus
+			}
+
+			return nil
+		},
+		retry.Context(ctx),
+		retry.RetryIf(func(err error) bool {
+			return errors.Is(err, unexpectedStatus)
+		}),
+		retry.Attempts(0), // retry until context deadline
+		retry.Delay(1*time.Second),
+		retry.LastErrorOnly(true),
+	)
+
+	if errors.Is(err, context.DeadlineExceeded) {
+		return fmt.Errorf("timeout while waiting for VM %d to enter the status %q", c.VMID, status)
+	}
+
+	if err != nil {
+		return fmt.Errorf("error waiting for VM %d to enter the status %q: %w", c.VMID, status, err)
+	}
+
+	return nil
 }
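DeleteVM now follows the same async-task composition used throughout the client: start the operation, then block on the task with the caller's context. A trimmed sketch of the composed call, mirroring the hunk above:

    taskID, err := c.DeleteVMAsync(ctx) // retries the DELETE unless the VM is already gone
    if err != nil {
        return err
    }

    // WaitForTask polls the task status until it leaves "running" or ctx expires.
    err = c.Tasks().WaitForTask(ctx, *taskID)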
@ -296,7 +296,12 @@ type CreateRequestBody struct {

 // CreateResponseBody contains the body from a create response.
 type CreateResponseBody struct {
-	Data *string `json:"data,omitempty"`
+	TaskID *string `json:"data,omitempty"`
+}
+
+// DeleteResponseBody contains the body from a delete response.
+type DeleteResponseBody struct {
+	TaskID *string `json:"data,omitempty"`
 }

 // GetQEMUNetworkInterfacesResponseBody contains the body from a QEMU get network interfaces response.
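Renaming `Data` to `TaskID` makes explicit that, for asynchronous operations, the `data` field of a Proxmox create or delete response carries the task identifier (UPID) rather than a payload. A small illustrative sketch of decoding such a response into the struct above; the UPID in the sample JSON is invented:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// CreateResponseBody mirrors the struct above: the "data" field holds the task ID.
type CreateResponseBody struct {
	TaskID *string `json:"data,omitempty"`
}

func main() {
	// Example payload; the UPID value is made up for illustration only.
	raw := []byte(`{"data":"UPID:pve1:0001234A:00ABCDEF:65F00000:vzcreate:100:root@pam:"}`)

	var resp CreateResponseBody
	if err := json.Unmarshal(raw, &resp); err != nil {
		panic(err)
	}

	if resp.TaskID != nil {
		fmt.Println("task started:", *resp.TaskID) // the caller can now wait on this task
	}
}
```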
@ -8,6 +8,7 @@ package datasource
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"errors"
|
||||||
"sort"
|
"sort"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
@ -15,6 +16,7 @@ import (
|
|||||||
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
|
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
|
||||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||||
|
|
||||||
|
"github.com/bpg/terraform-provider-proxmox/proxmox/api"
|
||||||
"github.com/bpg/terraform-provider-proxmox/proxmoxtf"
|
"github.com/bpg/terraform-provider-proxmox/proxmoxtf"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -61,7 +63,7 @@ func vmRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Dia
|
|||||||
|
|
||||||
config := m.(proxmoxtf.ProviderConfiguration)
|
config := m.(proxmoxtf.ProviderConfiguration)
|
||||||
|
|
||||||
api, err := config.GetClient()
|
client, err := config.GetClient()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return diag.FromErr(err)
|
return diag.FromErr(err)
|
||||||
}
|
}
|
||||||
@ -69,10 +71,9 @@ func vmRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Dia
|
|||||||
nodeName := d.Get(mkDataSourceVirtualEnvironmentVMNodeName).(string)
|
nodeName := d.Get(mkDataSourceVirtualEnvironmentVMNodeName).(string)
|
||||||
vmID := d.Get(mkDataSourceVirtualEnvironmentVMVMID).(int)
|
vmID := d.Get(mkDataSourceVirtualEnvironmentVMVMID).(int)
|
||||||
|
|
||||||
vmStatus, err := api.Node(nodeName).VM(vmID).GetVMStatus(ctx)
|
vmStatus, err := client.Node(nodeName).VM(vmID).GetVMStatus(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if strings.Contains(err.Error(), "HTTP 404") ||
|
if errors.Is(err, api.ErrNoDataObjectInResponse) {
|
||||||
(strings.Contains(err.Error(), "HTTP 500") && strings.Contains(err.Error(), "does not exist")) {
|
|
||||||
d.SetId("")
|
d.SetId("")
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
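Throughout this commit, string matching on "HTTP 404" (and "HTTP 500 ... does not exist") is replaced by sentinel errors from the `api` package checked with `errors.Is`. The sketch below shows the shape of that pattern with assumed names; the real sentinels (`api.ErrResourceDoesNotExist`, `api.ErrNoDataObjectInResponse`) live in the provider's `proxmox/api` package and may be defined differently. On reads a missing resource just clears the state ID; on deletes it is treated as success.

```go
package main

import (
	"errors"
	"fmt"
)

// Assumed sentinel, analogous to api.ErrResourceDoesNotExist.
var errResourceDoesNotExist = errors.New("the requested resource does not exist")

// getResource stands in for an API call that wraps the sentinel on 404-style failures.
func getResource(id int) (string, error) {
	return "", fmt.Errorf("GET /nodes/pve1/qemu/%d: %w", id, errResourceDoesNotExist)
}

func main() {
	_, err := getResource(100)

	// On read: a missing resource is not a failure, the resource is simply dropped from state.
	if errors.Is(err, errResourceDoesNotExist) {
		fmt.Println("resource is gone; clearing it from state")
		return
	}

	if err != nil {
		fmt.Println("real error:", err)
	}
}
```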
|
@ -8,15 +8,18 @@ package resource
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"sort"
|
"sort"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
|
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
|
||||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
|
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
|
||||||
|
|
||||||
|
"github.com/bpg/terraform-provider-proxmox/proxmox/api"
|
||||||
"github.com/bpg/terraform-provider-proxmox/proxmox/nodes/containers"
|
"github.com/bpg/terraform-provider-proxmox/proxmox/nodes/containers"
|
||||||
"github.com/bpg/terraform-provider-proxmox/proxmox/types"
|
"github.com/bpg/terraform-provider-proxmox/proxmox/types"
|
||||||
"github.com/bpg/terraform-provider-proxmox/proxmoxtf"
|
"github.com/bpg/terraform-provider-proxmox/proxmoxtf"
|
||||||
@ -76,7 +79,9 @@ const (
|
|||||||
dvStartOnBoot = true
|
dvStartOnBoot = true
|
||||||
dvTemplate = false
|
dvTemplate = false
|
||||||
dvTimeoutCreate = 1800
|
dvTimeoutCreate = 1800
|
||||||
dvTimeoutStart = 300
|
dvTimeoutClone = 1800
|
||||||
|
dvTimeoutUpdate = 1800
|
||||||
|
dvTimeoutDelete = 60
|
||||||
dvUnprivileged = false
|
dvUnprivileged = false
|
||||||
dvVMID = -1
|
dvVMID = -1
|
||||||
|
|
||||||
@ -157,7 +162,9 @@ const (
|
|||||||
mkTags = "tags"
|
mkTags = "tags"
|
||||||
mkTemplate = "template"
|
mkTemplate = "template"
|
||||||
mkTimeoutCreate = "timeout_create"
|
mkTimeoutCreate = "timeout_create"
|
||||||
mkTimeoutStart = "timeout_start"
|
mkTimeoutClone = "timeout_clone"
|
||||||
|
mkTimeoutUpdate = "timeout_update"
|
||||||
|
mkTimeoutDelete = "timeout_delete"
|
||||||
mkUnprivileged = "unprivileged"
|
mkUnprivileged = "unprivileged"
|
||||||
mkVMID = "vm_id"
|
mkVMID = "vm_id"
|
||||||
)
|
)
|
||||||
@ -844,11 +851,31 @@ func Container() *schema.Resource {
|
|||||||
Optional: true,
|
Optional: true,
|
||||||
Default: dvTimeoutCreate,
|
Default: dvTimeoutCreate,
|
||||||
},
|
},
|
||||||
mkTimeoutStart: {
|
mkTimeoutClone: {
|
||||||
|
Type: schema.TypeInt,
|
||||||
|
Description: "Clone container timeout",
|
||||||
|
Optional: true,
|
||||||
|
Default: dvTimeoutClone,
|
||||||
|
},
|
||||||
|
mkTimeoutUpdate: {
|
||||||
|
Type: schema.TypeInt,
|
||||||
|
Description: "Update container timeout",
|
||||||
|
Optional: true,
|
||||||
|
Default: dvTimeoutUpdate,
|
||||||
|
},
|
||||||
|
mkTimeoutDelete: {
|
||||||
|
Type: schema.TypeInt,
|
||||||
|
Description: "Delete container timeout",
|
||||||
|
Optional: true,
|
||||||
|
Default: dvTimeoutDelete,
|
||||||
|
},
|
||||||
|
"timeout_start": {
|
||||||
Type: schema.TypeInt,
|
Type: schema.TypeInt,
|
||||||
Description: "Start container timeout",
|
Description: "Start container timeout",
|
||||||
Optional: true,
|
Optional: true,
|
||||||
Default: dvTimeoutStart,
|
Default: 300,
|
||||||
|
Deprecated: "This field is deprecated and will be removed in a future release. " +
|
||||||
|
"An overall operation timeout (`timeout_create` / `timeout_clone`) is used instead.",
|
||||||
},
|
},
|
||||||
mkUnprivileged: {
|
mkUnprivileged: {
|
||||||
Type: schema.TypeBool,
|
Type: schema.TypeBool,
|
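`timeout_start` is kept in the container schema but marked deprecated, so existing configurations continue to validate while users are pointed at the new overall timeouts. A sketch of a deprecated integer attribute in terraform-plugin-sdk v2; the attribute shown is only an example, not the provider's exact declaration:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// exampleTimeoutAttribute keeps a default so old configurations still work,
// but emits a deprecation warning pointing at the replacement attributes.
func exampleTimeoutAttribute() *schema.Schema {
	return &schema.Schema{
		Type:        schema.TypeInt,
		Description: "Start container timeout",
		Optional:    true,
		Default:     300,
		Deprecated: "This field is deprecated and will be removed in a future release. " +
			"An overall operation timeout (`timeout_create` / `timeout_clone`) is used instead.",
	}
}

func main() {
	fmt.Println(exampleTimeoutAttribute().Deprecated)
}
```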
||||||
@ -900,6 +927,11 @@ func containerCreate(ctx context.Context, d *schema.ResourceData, m interface{})
|
|||||||
}
|
}
|
||||||
|
|
||||||
func containerCreateClone(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
|
func containerCreateClone(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
|
||||||
|
cloneTimeoutSec := d.Get(mkTimeoutClone).(int)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(ctx, time.Duration(cloneTimeoutSec)*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
config := m.(proxmoxtf.ProviderConfiguration)
|
config := m.(proxmoxtf.ProviderConfiguration)
|
||||||
|
|
||||||
api, err := config.GetClient()
|
api, err := config.GetClient()
|
||||||
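Each CRUD function now derives one deadline from its schema timeout attribute and wraps the incoming context with it, so every API call and wait inside the operation shares the same budget instead of threading per-call timeouts around. A small sketch of that shape; `doCloneAndWait` stands in for the real chain of API calls:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// doCloneAndWait stands in for the API calls and waits performed inside an operation.
func doCloneAndWait(ctx context.Context) error {
	select {
	case <-time.After(2 * time.Second): // pretend the clone takes two seconds
		return nil
	case <-ctx.Done():
		return ctx.Err() // the shared deadline cancels everything downstream
	}
}

func createClone(ctx context.Context, cloneTimeoutSec int) error {
	// One deadline for the whole operation, taken from the resource's timeout attribute.
	ctx, cancel := context.WithTimeout(ctx, time.Duration(cloneTimeoutSec)*time.Second)
	defer cancel()

	return doCloneAndWait(ctx)
}

func main() {
	fmt.Println(createClone(context.Background(), 1)) // context deadline exceeded
	fmt.Println(createClone(context.Background(), 5)) // <nil>
}
```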
@ -977,7 +1009,7 @@ func containerCreateClone(ctx context.Context, d *schema.ResourceData, m interfa
|
|||||||
containerAPI := api.Node(nodeName).Container(vmID)
|
containerAPI := api.Node(nodeName).Container(vmID)
|
||||||
|
|
||||||
// Wait for the container to be created and its configuration lock to be released.
|
// Wait for the container to be created and its configuration lock to be released.
|
||||||
err = containerAPI.WaitForContainerLock(ctx, 600, 5, true)
|
err = containerAPI.WaitForContainerConfigUnlock(ctx, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return diag.FromErr(err)
|
return diag.FromErr(err)
|
||||||
}
|
}
|
||||||
@ -1264,7 +1296,7 @@ func containerCreateClone(ctx context.Context, d *schema.ResourceData, m interfa
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Wait for the container's lock to be released.
|
// Wait for the container's lock to be released.
|
||||||
err = containerAPI.WaitForContainerLock(ctx, 600, 5, true)
|
err = containerAPI.WaitForContainerConfigUnlock(ctx, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return diag.FromErr(err)
|
return diag.FromErr(err)
|
||||||
}
|
}
|
||||||
@ -1273,6 +1305,11 @@ func containerCreateClone(ctx context.Context, d *schema.ResourceData, m interfa
|
|||||||
}
|
}
|
||||||
|
|
||||||
func containerCreateCustom(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
|
func containerCreateCustom(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
|
||||||
|
createTimeoutSec := d.Get(mkTimeoutCreate).(int)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(ctx, time.Duration(createTimeoutSec)*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
config := m.(proxmoxtf.ProviderConfiguration)
|
config := m.(proxmoxtf.ProviderConfiguration)
|
||||||
|
|
||||||
api, err := config.GetClient()
|
api, err := config.GetClient()
|
||||||
@ -1625,7 +1662,6 @@ func containerCreateCustom(ctx context.Context, d *schema.ResourceData, m interf
|
|||||||
template := types.CustomBool(d.Get(mkTemplate).(bool))
|
template := types.CustomBool(d.Get(mkTemplate).(bool))
|
||||||
unprivileged := types.CustomBool(d.Get(mkUnprivileged).(bool))
|
unprivileged := types.CustomBool(d.Get(mkUnprivileged).(bool))
|
||||||
vmID := d.Get(mkVMID).(int)
|
vmID := d.Get(mkVMID).(int)
|
||||||
createTimeout := d.Get(mkTimeoutCreate).(int)
|
|
||||||
|
|
||||||
if vmID == -1 {
|
if vmID == -1 {
|
||||||
vmIDNew, e := api.Cluster().GetVMID(ctx)
|
vmIDNew, e := api.Cluster().GetVMID(ctx)
|
||||||
@ -1698,7 +1734,7 @@ func containerCreateCustom(ctx context.Context, d *schema.ResourceData, m interf
|
|||||||
createBody.Tags = &tagsString
|
createBody.Tags = &tagsString
|
||||||
}
|
}
|
||||||
|
|
||||||
err = api.Node(nodeName).Container(0).CreateContainer(ctx, &createBody, createTimeout)
|
err = api.Node(nodeName).Container(0).CreateContainer(ctx, &createBody)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return diag.FromErr(err)
|
return diag.FromErr(err)
|
||||||
}
|
}
|
||||||
@ -1706,7 +1742,7 @@ func containerCreateCustom(ctx context.Context, d *schema.ResourceData, m interf
|
|||||||
d.SetId(strconv.Itoa(vmID))
|
d.SetId(strconv.Itoa(vmID))
|
||||||
|
|
||||||
// Wait for the container's lock to be released.
|
// Wait for the container's lock to be released.
|
||||||
err = api.Node(nodeName).Container(vmID).WaitForContainerLock(ctx, 600, 5, true)
|
err = api.Node(nodeName).Container(vmID).WaitForContainerConfigUnlock(ctx, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return diag.FromErr(err)
|
return diag.FromErr(err)
|
||||||
}
|
}
|
||||||
@ -1738,10 +1774,8 @@ func containerCreateStart(ctx context.Context, d *schema.ResourceData, m interfa
|
|||||||
|
|
||||||
containerAPI := api.Node(nodeName).Container(vmID)
|
containerAPI := api.Node(nodeName).Container(vmID)
|
||||||
|
|
||||||
startTimeout := d.Get(mkTimeoutStart).(int)
|
|
||||||
|
|
||||||
// Start the container and wait for it to reach a running state before continuing.
|
// Start the container and wait for it to reach a running state before continuing.
|
||||||
err = containerAPI.StartContainer(ctx, startTimeout)
|
err = containerAPI.StartContainer(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return diag.FromErr(err)
|
return diag.FromErr(err)
|
||||||
}
|
}
|
||||||
@ -1919,7 +1953,7 @@ func containerRead(ctx context.Context, d *schema.ResourceData, m interface{}) d
|
|||||||
|
|
||||||
config := m.(proxmoxtf.ProviderConfiguration)
|
config := m.(proxmoxtf.ProviderConfiguration)
|
||||||
|
|
||||||
api, e := config.GetClient()
|
client, e := config.GetClient()
|
||||||
if e != nil {
|
if e != nil {
|
||||||
return diag.FromErr(e)
|
return diag.FromErr(e)
|
||||||
}
|
}
|
||||||
@ -1931,13 +1965,12 @@ func containerRead(ctx context.Context, d *schema.ResourceData, m interface{}) d
|
|||||||
return diag.FromErr(e)
|
return diag.FromErr(e)
|
||||||
}
|
}
|
||||||
|
|
||||||
containerAPI := api.Node(nodeName).Container(vmID)
|
containerAPI := client.Node(nodeName).Container(vmID)
|
||||||
|
|
||||||
// Retrieve the entire configuration in order to compare it to the state.
|
// Retrieve the entire configuration in order to compare it to the state.
|
||||||
containerConfig, e := containerAPI.GetContainer(ctx)
|
containerConfig, e := containerAPI.GetContainer(ctx)
|
||||||
if e != nil {
|
if e != nil {
|
||||||
if strings.Contains(e.Error(), "HTTP 404") ||
|
if errors.Is(e, api.ErrResourceDoesNotExist) {
|
||||||
(strings.Contains(e.Error(), "HTTP 500") && strings.Contains(e.Error(), "does not exist")) {
|
|
||||||
d.SetId("")
|
d.SetId("")
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
@ -2540,6 +2573,11 @@ func containerRead(ctx context.Context, d *schema.ResourceData, m interface{}) d
|
|||||||
}
|
}
|
||||||
|
|
||||||
func containerUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
|
func containerUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
|
||||||
|
updateTimeoutSec := d.Get(mkTimeoutUpdate).(int)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(ctx, time.Duration(updateTimeoutSec)*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
config := m.(proxmoxtf.ProviderConfiguration)
|
config := m.(proxmoxtf.ProviderConfiguration)
|
||||||
|
|
||||||
api, e := config.GetClient()
|
api, e := config.GetClient()
|
||||||
@ -2941,7 +2979,7 @@ func containerUpdate(ctx context.Context, d *schema.ResourceData, m interface{})
|
|||||||
|
|
||||||
if d.HasChange(mkStarted) && !bool(template) {
|
if d.HasChange(mkStarted) && !bool(template) {
|
||||||
if started {
|
if started {
|
||||||
e = containerAPI.StartContainer(ctx, 60)
|
e = containerAPI.StartContainer(ctx)
|
||||||
if e != nil {
|
if e != nil {
|
||||||
return diag.FromErr(e)
|
return diag.FromErr(e)
|
||||||
}
|
}
|
||||||
@ -2957,7 +2995,7 @@ func containerUpdate(ctx context.Context, d *schema.ResourceData, m interface{})
|
|||||||
return diag.FromErr(e)
|
return diag.FromErr(e)
|
||||||
}
|
}
|
||||||
|
|
||||||
e = containerAPI.WaitForContainerStatus(ctx, "stopped", 300, 5)
|
e = containerAPI.WaitForContainerStatus(ctx, "stopped")
|
||||||
if e != nil {
|
if e != nil {
|
||||||
return diag.FromErr(e)
|
return diag.FromErr(e)
|
||||||
}
|
}
|
||||||
@ -2985,9 +3023,14 @@ func containerUpdate(ctx context.Context, d *schema.ResourceData, m interface{})
|
|||||||
}
|
}
|
||||||
|
|
||||||
func containerDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
|
func containerDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
|
||||||
|
deleteTimeoutSec := d.Get(mkTimeoutDelete).(int)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(ctx, time.Duration(deleteTimeoutSec)*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
config := m.(proxmoxtf.ProviderConfiguration)
|
config := m.(proxmoxtf.ProviderConfiguration)
|
||||||
|
|
||||||
api, err := config.GetClient()
|
client, err := config.GetClient()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return diag.FromErr(err)
|
return diag.FromErr(err)
|
||||||
}
|
}
|
||||||
@ -2999,7 +3042,7 @@ func containerDelete(ctx context.Context, d *schema.ResourceData, m interface{})
|
|||||||
return diag.FromErr(err)
|
return diag.FromErr(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
containerAPI := api.Node(nodeName).Container(vmID)
|
containerAPI := client.Node(nodeName).Container(vmID)
|
||||||
|
|
||||||
// Shut down the container before deleting it.
|
// Shut down the container before deleting it.
|
||||||
status, err := containerAPI.GetContainerStatus(ctx)
|
status, err := containerAPI.GetContainerStatus(ctx)
|
||||||
@ -3009,20 +3052,19 @@ func containerDelete(ctx context.Context, d *schema.ResourceData, m interface{})
|
|||||||
|
|
||||||
if status.Status != "stopped" {
|
if status.Status != "stopped" {
|
||||||
forceStop := types.CustomBool(true)
|
forceStop := types.CustomBool(true)
|
||||||
shutdownTimeout := 300
|
|
||||||
|
|
||||||
err = containerAPI.ShutdownContainer(
|
err = containerAPI.ShutdownContainer(
|
||||||
ctx,
|
ctx,
|
||||||
&containers.ShutdownRequestBody{
|
&containers.ShutdownRequestBody{
|
||||||
ForceStop: &forceStop,
|
ForceStop: &forceStop,
|
||||||
Timeout: &shutdownTimeout,
|
Timeout: &deleteTimeoutSec,
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return diag.FromErr(err)
|
return diag.FromErr(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
err = containerAPI.WaitForContainerStatus(ctx, "stopped", 30, 5)
|
err = containerAPI.WaitForContainerStatus(ctx, "stopped")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return diag.FromErr(err)
|
return diag.FromErr(err)
|
||||||
}
|
}
|
||||||
@ -3030,7 +3072,7 @@ func containerDelete(ctx context.Context, d *schema.ResourceData, m interface{})
|
|||||||
|
|
||||||
err = containerAPI.DeleteContainer(ctx)
|
err = containerAPI.DeleteContainer(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if strings.Contains(err.Error(), "HTTP 404") {
|
if errors.Is(err, api.ErrResourceDoesNotExist) {
|
||||||
d.SetId("")
|
d.SetId("")
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
@ -3040,7 +3082,7 @@ func containerDelete(ctx context.Context, d *schema.ResourceData, m interface{})
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Wait for the state to become unavailable as that clearly indicates the destruction of the container.
|
// Wait for the state to become unavailable as that clearly indicates the destruction of the container.
|
||||||
err = containerAPI.WaitForContainerStatus(ctx, "", 60, 2)
|
err = containerAPI.WaitForContainerStatus(ctx, "")
|
||||||
if err == nil {
|
if err == nil {
|
||||||
return diag.Errorf("failed to delete container \"%d\"", vmID)
|
return diag.Errorf("failed to delete container \"%d\"", vmID)
|
||||||
}
|
}
|
||||||
|
@ -11,6 +11,7 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"crypto/sha256"
|
"crypto/sha256"
|
||||||
"crypto/tls"
|
"crypto/tls"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"net/http"
|
"net/http"
|
||||||
@ -307,6 +308,11 @@ func fileParseImportID(id string) (string, fileVolumeID, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func fileCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
|
func fileCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
|
||||||
|
uploadTimeout := d.Get(mkResourceVirtualEnvironmentFileTimeoutUpload).(int)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(ctx, time.Duration(uploadTimeout)*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
var diags diag.Diagnostics
|
var diags diag.Diagnostics
|
||||||
|
|
||||||
contentType, dg := fileGetContentType(d)
|
contentType, dg := fileGetContentType(d)
|
||||||
@ -537,9 +543,9 @@ func fileCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag
|
|||||||
|
|
||||||
switch *contentType {
|
switch *contentType {
|
||||||
case "iso", "vztmpl":
|
case "iso", "vztmpl":
|
||||||
uploadTimeout := d.Get(mkResourceVirtualEnvironmentFileTimeoutUpload).(int)
|
_, err = capi.Node(nodeName).Storage(datastoreID).APIUpload(
|
||||||
|
ctx, request, config.TempDir(),
|
||||||
_, err = capi.Node(nodeName).Storage(datastoreID).APIUpload(ctx, request, uploadTimeout, config.TempDir())
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
diags = append(diags, diag.FromErr(err)...)
|
diags = append(diags, diag.FromErr(err)...)
|
||||||
return diags
|
return diags
|
||||||
@ -905,13 +911,7 @@ func fileDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag
|
|||||||
nodeName := d.Get(mkResourceVirtualEnvironmentFileNodeName).(string)
|
nodeName := d.Get(mkResourceVirtualEnvironmentFileNodeName).(string)
|
||||||
|
|
||||||
err = capi.Node(nodeName).Storage(datastoreID).DeleteDatastoreFile(ctx, d.Id())
|
err = capi.Node(nodeName).Storage(datastoreID).DeleteDatastoreFile(ctx, d.Id())
|
||||||
|
if err != nil && !errors.Is(err, api.ErrResourceDoesNotExist) {
|
||||||
if err != nil {
|
|
||||||
if strings.Contains(err.Error(), "HTTP 404") {
|
|
||||||
d.SetId("")
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return diag.FromErr(err)
|
return diag.FromErr(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -8,12 +8,13 @@ package resource
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"strings"
|
"errors"
|
||||||
|
|
||||||
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
|
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
|
||||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||||
|
|
||||||
"github.com/bpg/terraform-provider-proxmox/proxmox/access"
|
"github.com/bpg/terraform-provider-proxmox/proxmox/access"
|
||||||
|
"github.com/bpg/terraform-provider-proxmox/proxmox/api"
|
||||||
"github.com/bpg/terraform-provider-proxmox/proxmox/types"
|
"github.com/bpg/terraform-provider-proxmox/proxmox/types"
|
||||||
"github.com/bpg/terraform-provider-proxmox/proxmoxtf"
|
"github.com/bpg/terraform-provider-proxmox/proxmoxtf"
|
||||||
)
|
)
|
||||||
@ -143,15 +144,15 @@ func groupRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.
|
|||||||
var diags diag.Diagnostics
|
var diags diag.Diagnostics
|
||||||
|
|
||||||
config := m.(proxmoxtf.ProviderConfiguration)
|
config := m.(proxmoxtf.ProviderConfiguration)
|
||||||
api, err := config.GetClient()
|
client, err := config.GetClient()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return diag.FromErr(err)
|
return diag.FromErr(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
groupID := d.Id()
|
groupID := d.Id()
|
||||||
group, err := api.Access().GetGroup(ctx, groupID)
|
group, err := client.Access().GetGroup(ctx, groupID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if strings.Contains(err.Error(), "HTTP 404") {
|
if errors.Is(err, api.ErrResourceDoesNotExist) {
|
||||||
d.SetId("")
|
d.SetId("")
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
@ -159,7 +160,7 @@ func groupRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.
|
|||||||
return diag.FromErr(err)
|
return diag.FromErr(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
acl, err := api.Access().GetACL(ctx)
|
acl, err := client.Access().GetACL(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return diag.FromErr(err)
|
return diag.FromErr(err)
|
||||||
}
|
}
|
||||||
@ -271,7 +272,7 @@ func groupUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) dia
|
|||||||
|
|
||||||
func groupDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
|
func groupDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
|
||||||
config := m.(proxmoxtf.ProviderConfiguration)
|
config := m.(proxmoxtf.ProviderConfiguration)
|
||||||
api, err := config.GetClient()
|
client, err := config.GetClient()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return diag.FromErr(err)
|
return diag.FromErr(err)
|
||||||
}
|
}
|
||||||
@ -294,20 +295,15 @@ func groupDelete(ctx context.Context, d *schema.ResourceData, m interface{}) dia
|
|||||||
Roles: []string{aclEntry[mkResourceVirtualEnvironmentGroupACLRoleID].(string)},
|
Roles: []string{aclEntry[mkResourceVirtualEnvironmentGroupACLRoleID].(string)},
|
||||||
}
|
}
|
||||||
|
|
||||||
err = api.Access().UpdateACL(ctx, aclBody)
|
err = client.Access().UpdateACL(ctx, aclBody)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return diag.FromErr(err)
|
return diag.FromErr(err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
err = api.Access().DeleteGroup(ctx, groupID)
|
err = client.Access().DeleteGroup(ctx, groupID)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil && !errors.Is(err, api.ErrResourceDoesNotExist) {
|
||||||
if strings.Contains(err.Error(), "HTTP 404") {
|
|
||||||
d.SetId("")
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return diag.FromErr(err)
|
return diag.FromErr(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -8,12 +8,13 @@ package resource
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
|
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
|
||||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||||
|
|
||||||
|
"github.com/bpg/terraform-provider-proxmox/proxmox/api"
|
||||||
"github.com/bpg/terraform-provider-proxmox/proxmox/pools"
|
"github.com/bpg/terraform-provider-proxmox/proxmox/pools"
|
||||||
"github.com/bpg/terraform-provider-proxmox/proxmoxtf"
|
"github.com/bpg/terraform-provider-proxmox/proxmoxtf"
|
||||||
)
|
)
|
||||||
@ -129,15 +130,15 @@ func poolRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.D
|
|||||||
var diags diag.Diagnostics
|
var diags diag.Diagnostics
|
||||||
|
|
||||||
config := m.(proxmoxtf.ProviderConfiguration)
|
config := m.(proxmoxtf.ProviderConfiguration)
|
||||||
api, err := config.GetClient()
|
client, err := config.GetClient()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return diag.FromErr(err)
|
return diag.FromErr(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
poolID := d.Id()
|
poolID := d.Id()
|
||||||
pool, err := api.Pool().GetPool(ctx, poolID)
|
pool, err := client.Pool().GetPool(ctx, poolID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if strings.Contains(err.Error(), "HTTP 404") {
|
if errors.Is(err, api.ErrResourceDoesNotExist) {
|
||||||
d.SetId("")
|
d.SetId("")
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@ -206,21 +207,15 @@ func poolUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag
|
|||||||
|
|
||||||
func poolDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
|
func poolDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
|
||||||
config := m.(proxmoxtf.ProviderConfiguration)
|
config := m.(proxmoxtf.ProviderConfiguration)
|
||||||
api, err := config.GetClient()
|
client, err := config.GetClient()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return diag.FromErr(err)
|
return diag.FromErr(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
poolID := d.Id()
|
poolID := d.Id()
|
||||||
err = api.Pool().DeletePool(ctx, poolID)
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
if strings.Contains(err.Error(), "HTTP 404") {
|
|
||||||
d.SetId("")
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
|
err = client.Pool().DeletePool(ctx, poolID)
|
||||||
|
if err != nil && !errors.Is(err, api.ErrResourceDoesNotExist) {
|
||||||
return diag.FromErr(err)
|
return diag.FromErr(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -8,12 +8,13 @@ package resource
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"strings"
|
"errors"
|
||||||
|
|
||||||
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
|
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
|
||||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||||
|
|
||||||
"github.com/bpg/terraform-provider-proxmox/proxmox/access"
|
"github.com/bpg/terraform-provider-proxmox/proxmox/access"
|
||||||
|
"github.com/bpg/terraform-provider-proxmox/proxmox/api"
|
||||||
"github.com/bpg/terraform-provider-proxmox/proxmox/types"
|
"github.com/bpg/terraform-provider-proxmox/proxmox/types"
|
||||||
"github.com/bpg/terraform-provider-proxmox/proxmoxtf"
|
"github.com/bpg/terraform-provider-proxmox/proxmoxtf"
|
||||||
)
|
)
|
||||||
@ -82,15 +83,15 @@ func roleCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag
|
|||||||
|
|
||||||
func roleRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
|
func roleRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
|
||||||
config := m.(proxmoxtf.ProviderConfiguration)
|
config := m.(proxmoxtf.ProviderConfiguration)
|
||||||
api, err := config.GetClient()
|
client, err := config.GetClient()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return diag.FromErr(err)
|
return diag.FromErr(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
roleID := d.Id()
|
roleID := d.Id()
|
||||||
role, err := api.Access().GetRole(ctx, roleID)
|
role, err := client.Access().GetRole(ctx, roleID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if strings.Contains(err.Error(), "HTTP 404") {
|
if errors.Is(err, api.ErrResourceDoesNotExist) {
|
||||||
d.SetId("")
|
d.SetId("")
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
@ -139,20 +140,15 @@ func roleUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag
|
|||||||
|
|
||||||
func roleDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
|
func roleDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
|
||||||
config := m.(proxmoxtf.ProviderConfiguration)
|
config := m.(proxmoxtf.ProviderConfiguration)
|
||||||
api, err := config.GetClient()
|
client, err := config.GetClient()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return diag.FromErr(err)
|
return diag.FromErr(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
roleID := d.Id()
|
roleID := d.Id()
|
||||||
err = api.Access().DeleteRole(ctx, roleID)
|
|
||||||
|
|
||||||
if err != nil {
|
err = client.Access().DeleteRole(ctx, roleID)
|
||||||
if strings.Contains(err.Error(), "HTTP 404") {
|
if err != nil && !errors.Is(err, api.ErrResourceDoesNotExist) {
|
||||||
d.SetId("")
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return diag.FromErr(err)
|
return diag.FromErr(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -8,7 +8,7 @@ package resource
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"strings"
|
"errors"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
|
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
|
||||||
@ -16,6 +16,7 @@ import (
|
|||||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
|
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
|
||||||
|
|
||||||
"github.com/bpg/terraform-provider-proxmox/proxmox/access"
|
"github.com/bpg/terraform-provider-proxmox/proxmox/access"
|
||||||
|
"github.com/bpg/terraform-provider-proxmox/proxmox/api"
|
||||||
"github.com/bpg/terraform-provider-proxmox/proxmox/types"
|
"github.com/bpg/terraform-provider-proxmox/proxmox/types"
|
||||||
"github.com/bpg/terraform-provider-proxmox/proxmoxtf"
|
"github.com/bpg/terraform-provider-proxmox/proxmoxtf"
|
||||||
)
|
)
|
||||||
@ -231,15 +232,15 @@ func userCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag
|
|||||||
|
|
||||||
func userRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
|
func userRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
|
||||||
config := m.(proxmoxtf.ProviderConfiguration)
|
config := m.(proxmoxtf.ProviderConfiguration)
|
||||||
api, err := config.GetClient()
|
client, err := config.GetClient()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return diag.FromErr(err)
|
return diag.FromErr(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
userID := d.Id()
|
userID := d.Id()
|
||||||
user, err := api.Access().GetUser(ctx, userID)
|
user, err := client.Access().GetUser(ctx, userID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if strings.Contains(err.Error(), "HTTP 404") {
|
if errors.Is(err, api.ErrResourceDoesNotExist) {
|
||||||
d.SetId("")
|
d.SetId("")
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
@ -247,7 +248,7 @@ func userRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.D
|
|||||||
return diag.FromErr(err)
|
return diag.FromErr(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
acl, err := api.Access().GetACL(ctx)
|
acl, err := client.Access().GetACL(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return diag.FromErr(err)
|
return diag.FromErr(err)
|
||||||
}
|
}
|
||||||
@ -453,7 +454,7 @@ func userUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag
|
|||||||
|
|
||||||
func userDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
|
func userDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
|
||||||
config := m.(proxmoxtf.ProviderConfiguration)
|
config := m.(proxmoxtf.ProviderConfiguration)
|
||||||
api, err := config.GetClient()
|
client, err := config.GetClient()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return diag.FromErr(err)
|
return diag.FromErr(err)
|
||||||
}
|
}
|
||||||
@ -476,20 +477,14 @@ func userDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag
|
|||||||
Users: []string{userID},
|
Users: []string{userID},
|
||||||
}
|
}
|
||||||
|
|
||||||
err = api.Access().UpdateACL(ctx, aclBody)
|
err = client.Access().UpdateACL(ctx, aclBody)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return diag.FromErr(err)
|
return diag.FromErr(err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
err = api.Access().DeleteUser(ctx, userID)
|
err = client.Access().DeleteUser(ctx, userID)
|
||||||
|
if err != nil && !errors.Is(err, api.ErrResourceDoesNotExist) {
|
||||||
if err != nil {
|
|
||||||
if strings.Contains(err.Error(), "HTTP 404") {
|
|
||||||
d.SetId("")
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return diag.FromErr(err)
|
return diag.FromErr(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -178,17 +178,15 @@ func CreateClone(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
timeout := d.Get(MkTimeoutMoveDisk).(int)
|
|
||||||
|
|
||||||
if moveDisk {
|
if moveDisk {
|
||||||
err := vmAPI.MoveVMDisk(ctx, diskMoveBody, timeout)
|
err := vmAPI.MoveVMDisk(ctx, diskMoveBody)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("disk move fails: %w", err)
|
return fmt.Errorf("disk move fails: %w", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if diskSize > currentDiskInfo.Size.InGigabytes() {
|
if diskSize > currentDiskInfo.Size.InGigabytes() {
|
||||||
err := vmAPI.ResizeVMDisk(ctx, diskResizeBody, timeout)
|
err := vmAPI.ResizeVMDisk(ctx, diskResizeBody)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("disk resize fails: %w", err)
|
return fmt.Errorf("disk resize fails: %w", err)
|
||||||
}
|
}
|
||||||
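`MoveVMDisk` and `ResizeVMDisk` no longer take an explicit timeout argument; whatever polling budget the underlying task needs can be derived from the context's deadline instead. The helper below is one illustrative way to do that, not the provider's actual implementation:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// secondsLeft returns the whole seconds remaining before the context deadline,
// falling back to a default when no deadline is set. Purely illustrative.
func secondsLeft(ctx context.Context, fallback int) int {
	deadline, ok := ctx.Deadline()
	if !ok {
		return fallback
	}

	remaining := int(time.Until(deadline).Seconds())
	if remaining < 1 {
		return 1
	}

	return remaining
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 90*time.Second)
	defer cancel()

	fmt.Println(secondsLeft(ctx, 1800))                  // roughly 90
	fmt.Println(secondsLeft(context.Background(), 1800)) // 1800
}
```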
|
@ -41,9 +41,6 @@ const (
|
|||||||
mkDiskSpeedWrite = "write"
|
mkDiskSpeedWrite = "write"
|
||||||
mkDiskSpeedWriteBurstable = "write_burstable"
|
mkDiskSpeedWriteBurstable = "write_burstable"
|
||||||
mkDiskSSD = "ssd"
|
mkDiskSSD = "ssd"
|
||||||
|
|
||||||
// MkTimeoutMoveDisk is the name of the timeout_move_disk attribute.
|
|
||||||
MkTimeoutMoveDisk = "timeout_move_disk"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Schema returns the schema for the disk resource.
|
// Schema returns the schema for the disk resource.
|
||||||
|
@ -31,6 +31,7 @@ import (
|
|||||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
|
||||||
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
|
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
|
||||||
|
|
||||||
|
"github.com/bpg/terraform-provider-proxmox/proxmox/api"
|
||||||
"github.com/bpg/terraform-provider-proxmox/proxmox/cluster"
|
"github.com/bpg/terraform-provider-proxmox/proxmox/cluster"
|
||||||
"github.com/bpg/terraform-provider-proxmox/proxmox/nodes/vms"
|
"github.com/bpg/terraform-provider-proxmox/proxmox/nodes/vms"
|
||||||
"github.com/bpg/terraform-provider-proxmox/proxmox/pools"
|
"github.com/bpg/terraform-provider-proxmox/proxmox/pools"
|
||||||
@ -120,7 +121,6 @@ const (
|
|||||||
dvTemplate = false
|
dvTemplate = false
|
||||||
dvTimeoutClone = 1800
|
dvTimeoutClone = 1800
|
||||||
dvTimeoutCreate = 1800
|
dvTimeoutCreate = 1800
|
||||||
dvTimeoutMoveDisk = 1800
|
|
||||||
dvTimeoutMigrate = 1800
|
dvTimeoutMigrate = 1800
|
||||||
dvTimeoutReboot = 1800
|
dvTimeoutReboot = 1800
|
||||||
dvTimeoutShutdownVM = 1800
|
dvTimeoutShutdownVM = 1800
|
||||||
@ -263,7 +263,7 @@ const (
|
|||||||
mkTemplate = "template"
|
mkTemplate = "template"
|
||||||
mkTimeoutClone = "timeout_clone"
|
mkTimeoutClone = "timeout_clone"
|
||||||
mkTimeoutCreate = "timeout_create"
|
mkTimeoutCreate = "timeout_create"
|
||||||
mkTimeoutMigrate = "timeout_migrate"
|
mkTimeoutMigrate = "timeout_migrate" // this is essentially a "timeout_update"; needs to be refactored
|
||||||
mkTimeoutReboot = "timeout_reboot"
|
mkTimeoutReboot = "timeout_reboot"
|
||||||
mkTimeoutShutdownVM = "timeout_shutdown_vm"
|
mkTimeoutShutdownVM = "timeout_shutdown_vm"
|
||||||
mkTimeoutStartVM = "timeout_start_vm"
|
mkTimeoutStartVM = "timeout_start_vm"
|
||||||
@ -1344,11 +1344,13 @@ func VM() *schema.Resource {
|
|||||||
Optional: true,
|
Optional: true,
|
||||||
Default: dvTimeoutCreate,
|
Default: dvTimeoutCreate,
|
||||||
},
|
},
|
||||||
disk.MkTimeoutMoveDisk: {
|
"timeout_move_disk": {
|
||||||
Type: schema.TypeInt,
|
Type: schema.TypeInt,
|
||||||
Description: "MoveDisk timeout",
|
Description: "MoveDisk timeout",
|
||||||
Optional: true,
|
Optional: true,
|
||||||
Default: dvTimeoutMoveDisk,
|
Default: 1800,
|
||||||
|
Deprecated: "This field is deprecated and will be removed in a future release. " +
|
||||||
|
"An overall operation timeout (timeout_create / timeout_clone / timeout_migrate) is used instead.",
|
||||||
},
|
},
|
||||||
mkTimeoutMigrate: {
|
mkTimeoutMigrate: {
|
||||||
Type: schema.TypeInt,
|
Type: schema.TypeInt,
|
||||||
@ -1653,15 +1655,18 @@ func deleteIdeDrives(ctx context.Context, vmAPI *vms.Client, itf1 string, itf2 s
|
|||||||
|
|
||||||
// Start the VM, then wait for it to actually start; it may not be started immediately if running in HA mode.
|
// Start the VM, then wait for it to actually start; it may not be started immediately if running in HA mode.
|
||||||
func vmStart(ctx context.Context, vmAPI *vms.Client, d *schema.ResourceData) diag.Diagnostics {
|
func vmStart(ctx context.Context, vmAPI *vms.Client, d *schema.ResourceData) diag.Diagnostics {
|
||||||
var diags diag.Diagnostics
|
|
||||||
|
|
||||||
tflog.Debug(ctx, "Starting VM")
|
tflog.Debug(ctx, "Starting VM")
|
||||||
|
|
||||||
startVMTimeout := d.Get(mkTimeoutStartVM).(int)
|
startTimeoutSec := d.Get(mkTimeoutStartVM).(int)
|
||||||
|
|
||||||
log, e := vmAPI.StartVM(ctx, startVMTimeout)
|
ctx, cancel := context.WithTimeout(ctx, time.Duration(startTimeoutSec)*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
var diags diag.Diagnostics
|
||||||
|
|
||||||
|
log, e := vmAPI.StartVM(ctx, startTimeoutSec)
|
||||||
if e != nil {
|
if e != nil {
|
||||||
return append(diags, diag.FromErr(e)...)
|
return diag.FromErr(e)
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(log) > 0 {
|
if len(log) > 0 {
|
||||||
@ -1672,7 +1677,7 @@ func vmStart(ctx context.Context, vmAPI *vms.Client, d *schema.ResourceData) dia
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
return append(diags, diag.FromErr(vmAPI.WaitForVMStatus(ctx, "running", startVMTimeout, 1))...)
|
return append(diags, diag.FromErr(vmAPI.WaitForVMStatus(ctx, "running"))...)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Shutdown the VM, then wait for it to actually shut down (it may not be shut down immediately if
|
// Shutdown the VM, then wait for it to actually shut down (it may not be shut down immediately if
|
||||||
@ -1681,17 +1686,20 @@ func vmShutdown(ctx context.Context, vmAPI *vms.Client, d *schema.ResourceData)
|
|||||||
tflog.Debug(ctx, "Shutting down VM")
|
tflog.Debug(ctx, "Shutting down VM")
|
||||||
|
|
||||||
forceStop := types.CustomBool(true)
|
forceStop := types.CustomBool(true)
|
||||||
shutdownTimeout := d.Get(mkTimeoutShutdownVM).(int)
|
shutdownTimeoutSec := d.Get(mkTimeoutShutdownVM).(int)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(ctx, time.Duration(shutdownTimeoutSec)*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
e := vmAPI.ShutdownVM(ctx, &vms.ShutdownRequestBody{
|
e := vmAPI.ShutdownVM(ctx, &vms.ShutdownRequestBody{
|
||||||
ForceStop: &forceStop,
|
ForceStop: &forceStop,
|
||||||
Timeout: &shutdownTimeout,
|
Timeout: &shutdownTimeoutSec,
|
||||||
}, shutdownTimeout+30)
|
})
|
||||||
if e != nil {
|
if e != nil {
|
||||||
return diag.FromErr(e)
|
return diag.FromErr(e)
|
||||||
}
|
}
|
||||||
|
|
||||||
return diag.FromErr(vmAPI.WaitForVMStatus(ctx, "stopped", shutdownTimeout, 1))
|
return diag.FromErr(vmAPI.WaitForVMStatus(ctx, "stopped"))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Forcefully stop the VM, then wait for it to actually stop.
|
// Forcefully stop the VM, then wait for it to actually stop.
|
||||||
@ -1700,18 +1708,26 @@ func vmStop(ctx context.Context, vmAPI *vms.Client, d *schema.ResourceData) diag
|
|||||||
|
|
||||||
stopTimeout := d.Get(mkTimeoutStopVM).(int)
|
stopTimeout := d.Get(mkTimeoutStopVM).(int)
|
||||||
|
|
||||||
e := vmAPI.StopVM(ctx, stopTimeout+30)
|
ctx, cancel := context.WithTimeout(ctx, time.Duration(stopTimeout)*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
e := vmAPI.StopVM(ctx)
|
||||||
if e != nil {
|
if e != nil {
|
||||||
return diag.FromErr(e)
|
return diag.FromErr(e)
|
||||||
}
|
}
|
||||||
|
|
||||||
return diag.FromErr(vmAPI.WaitForVMStatus(ctx, "stopped", stopTimeout, 1))
|
return diag.FromErr(vmAPI.WaitForVMStatus(ctx, "stopped"))
|
||||||
}
|
}
|
||||||
|
|
||||||
func vmCreateClone(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
|
func vmCreateClone(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
|
||||||
|
cloneTimeoutSec := d.Get(mkTimeoutClone).(int)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(ctx, time.Duration(cloneTimeoutSec)*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
config := m.(proxmoxtf.ProviderConfiguration)
|
config := m.(proxmoxtf.ProviderConfiguration)
|
||||||
|
|
||||||
api, e := config.GetClient()
|
client, e := config.GetClient()
|
||||||
if e != nil {
|
if e != nil {
|
||||||
return diag.FromErr(e)
|
return diag.FromErr(e)
|
||||||
}
|
}
|
||||||
@ -1733,7 +1749,7 @@ func vmCreateClone(ctx context.Context, d *schema.ResourceData, m interface{}) d
|
|||||||
vmID := vmIDUntyped.(int)
|
vmID := vmIDUntyped.(int)
|
||||||
|
|
||||||
if !hasVMID {
|
if !hasVMID {
|
||||||
vmIDNew, err := api.Cluster().GetVMID(ctx)
|
vmIDNew, err := client.Cluster().GetVMID(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return diag.FromErr(err)
|
return diag.FromErr(err)
|
||||||
}
|
}
|
||||||
@ -1769,11 +1785,9 @@ func vmCreateClone(ctx context.Context, d *schema.ResourceData, m interface{}) d
|
|||||||
cloneBody.PoolID = &poolID
|
cloneBody.PoolID = &poolID
|
||||||
}
|
}
|
||||||
|
|
||||||
cloneTimeout := d.Get(mkTimeoutClone).(int)
|
|
||||||
|
|
||||||
if cloneNodeName != "" && cloneNodeName != nodeName {
|
if cloneNodeName != "" && cloneNodeName != nodeName {
|
||||||
// Check if any used datastores of the source VM are not shared
|
// Check if any used datastores of the source VM are not shared
|
||||||
vmConfig, err := api.Node(cloneNodeName).VM(cloneVMID).GetVM(ctx)
|
vmConfig, err := client.Node(cloneNodeName).VM(cloneVMID).GetVM(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return diag.FromErr(err)
|
return diag.FromErr(err)
|
||||||
}
|
}
|
||||||
@ -1783,7 +1797,7 @@ func vmCreateClone(ctx context.Context, d *schema.ResourceData, m interface{}) d
|
|||||||
onlySharedDatastores := true
|
onlySharedDatastores := true
|
||||||
|
|
||||||
for _, datastore := range datastores {
|
for _, datastore := range datastores {
|
||||||
datastoreStatus, err2 := api.Node(cloneNodeName).Storage(datastore).GetDatastoreStatus(ctx)
|
datastoreStatus, err2 := client.Node(cloneNodeName).Storage(datastore).GetDatastoreStatus(ctx)
|
||||||
if err2 != nil {
|
if err2 != nil {
|
||||||
return diag.FromErr(err2)
|
return diag.FromErr(err2)
|
||||||
}
|
}
|
||||||
@ -1800,12 +1814,7 @@ func vmCreateClone(ctx context.Context, d *schema.ResourceData, m interface{}) d
|
|||||||
// on a different node is currently not supported by proxmox.
|
// on a different node is currently not supported by proxmox.
|
||||||
cloneBody.TargetNodeName = &nodeName
|
cloneBody.TargetNodeName = &nodeName
|
||||||
|
|
||||||
err = api.Node(cloneNodeName).VM(cloneVMID).CloneVM(
|
err = client.Node(cloneNodeName).VM(cloneVMID).CloneVM(ctx, cloneRetries, cloneBody)
|
||||||
ctx,
|
|
||||||
cloneRetries,
|
|
||||||
cloneBody,
|
|
||||||
cloneTimeout,
|
|
||||||
)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return diag.FromErr(err)
|
return diag.FromErr(err)
|
||||||
}
|
}
|
||||||
@ -1816,14 +1825,14 @@ func vmCreateClone(ctx context.Context, d *schema.ResourceData, m interface{}) d
|
|||||||
// https://forum.proxmox.com/threads/500-cant-clone-to-non-shared-storage-local.49078/#post-229727
|
// https://forum.proxmox.com/threads/500-cant-clone-to-non-shared-storage-local.49078/#post-229727
|
||||||
|
|
||||||
// Temporarily clone to local node
|
// Temporarily clone to local node
|
||||||
err = api.Node(cloneNodeName).VM(cloneVMID).CloneVM(ctx, cloneRetries, cloneBody, cloneTimeout)
|
err = client.Node(cloneNodeName).VM(cloneVMID).CloneVM(ctx, cloneRetries, cloneBody)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return diag.FromErr(err)
|
return diag.FromErr(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Wait for the virtual machine to be created and its configuration lock to be released before migrating.
|
// Wait for the virtual machine to be created and its configuration lock to be released before migrating.
|
||||||
|
|
||||||
err = api.Node(cloneNodeName).VM(vmID).WaitForVMConfigUnlock(ctx, 600, 5, true)
|
err = client.Node(cloneNodeName).VM(vmID).WaitForVMConfigUnlock(ctx, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return diag.FromErr(err)
|
return diag.FromErr(err)
|
||||||
}
|
}
|
||||||
@ -1839,13 +1848,13 @@ func vmCreateClone(ctx context.Context, d *schema.ResourceData, m interface{}) d
|
|||||||
migrateBody.TargetStorage = &cloneDatastoreID
|
migrateBody.TargetStorage = &cloneDatastoreID
|
||||||
}
|
}
|
||||||
|
|
||||||
err = api.Node(cloneNodeName).VM(vmID).MigrateVM(ctx, migrateBody, cloneTimeout)
|
err = client.Node(cloneNodeName).VM(vmID).MigrateVM(ctx, migrateBody)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return diag.FromErr(err)
|
return diag.FromErr(err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
e = api.Node(nodeName).VM(cloneVMID).CloneVM(ctx, cloneRetries, cloneBody, cloneTimeout)
|
e = client.Node(nodeName).VM(cloneVMID).CloneVM(ctx, cloneRetries, cloneBody)
|
||||||
}
|
}
|
||||||
|
|
||||||
if e != nil {
|
if e != nil {
|
||||||
@ -1854,10 +1863,10 @@ func vmCreateClone(ctx context.Context, d *schema.ResourceData, m interface{}) d
|
|||||||
|
|
||||||
d.SetId(strconv.Itoa(vmID))
|
d.SetId(strconv.Itoa(vmID))
|
||||||
|
|
||||||
vmAPI := api.Node(nodeName).VM(vmID)
|
vmAPI := client.Node(nodeName).VM(vmID)
|
||||||
|
|
||||||
// Wait for the virtual machine to be created and its configuration lock to be released.
|
// Wait for the virtual machine to be created and its configuration lock to be released.
|
||||||
e = vmAPI.WaitForVMConfigUnlock(ctx, 600, 5, true)
|
e = vmAPI.WaitForVMConfigUnlock(ctx, true)
|
||||||
if e != nil {
|
if e != nil {
|
||||||
return diag.FromErr(e)
|
return diag.FromErr(e)
|
||||||
}
|
}
|
||||||
@ -1985,7 +1994,7 @@ func vmCreateClone(ctx context.Context, d *schema.ResourceData, m interface{}) d
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Only the root account is allowed to change the CPU architecture, which makes this check necessary.
|
// Only the root account is allowed to change the CPU architecture, which makes this check necessary.
|
||||||
if api.API().IsRootTicket() ||
|
if client.API().IsRootTicket() ||
|
||||||
cpuArchitecture != dvCPUArchitecture {
|
cpuArchitecture != dvCPUArchitecture {
|
||||||
updateBody.CPUArchitecture = &cpuArchitecture
|
updateBody.CPUArchitecture = &cpuArchitecture
|
||||||
}
|
}
|
||||||
@ -2184,10 +2193,8 @@ func vmCreateClone(ctx context.Context, d *schema.ResourceData, m interface{}) d
|
|||||||
|
|
||||||
vmConfig, e = vmAPI.GetVM(ctx)
|
vmConfig, e = vmAPI.GetVM(ctx)
|
||||||
if e != nil {
|
if e != nil {
|
||||||
if strings.Contains(e.Error(), "HTTP 404") ||
|
if errors.Is(e, api.ErrResourceDoesNotExist) {
|
||||||
(strings.Contains(e.Error(), "HTTP 500") && strings.Contains(e.Error(), "does not exist")) {
|
|
||||||
d.SetId("")
|
d.SetId("")
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2259,9 +2266,7 @@ func vmCreateClone(ctx context.Context, d *schema.ResourceData, m interface{}) d
|
|||||||
}
|
}
|
||||||
|
|
||||||
if moveDisk {
|
if moveDisk {
|
||||||
moveDiskTimeout := d.Get(disk.MkTimeoutMoveDisk).(int)
|
e = vmAPI.MoveVMDisk(ctx, diskMoveBody)
|
||||||
|
|
||||||
e = vmAPI.MoveVMDisk(ctx, diskMoveBody, moveDiskTimeout)
|
|
||||||
if e != nil {
|
if e != nil {
|
||||||
return diag.FromErr(e)
|
return diag.FromErr(e)
|
||||||
}
|
}
|
||||||
@ -2312,9 +2317,7 @@ func vmCreateClone(ctx context.Context, d *schema.ResourceData, m interface{}) d
|
|||||||
}
|
}
|
||||||
|
|
||||||
if moveDisk {
|
if moveDisk {
|
||||||
moveDiskTimeout := d.Get(disk.MkTimeoutMoveDisk).(int)
|
e = vmAPI.MoveVMDisk(ctx, diskMoveBody)
|
||||||
|
|
||||||
e = vmAPI.MoveVMDisk(ctx, diskMoveBody, moveDiskTimeout)
|
|
||||||
if e != nil {
|
if e != nil {
|
||||||
return diag.FromErr(e)
|
return diag.FromErr(e)
|
||||||
}
|
}
|
||||||
@ -2325,6 +2328,11 @@ func vmCreateClone(ctx context.Context, d *schema.ResourceData, m interface{}) d
|
|||||||
}
|
}
|
||||||
|
|
||||||
func vmCreateCustom(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
|
func vmCreateCustom(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
|
||||||
|
createTimeoutSec := d.Get(mkTimeoutCreate).(int)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(ctx, time.Duration(createTimeoutSec)*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
config := m.(proxmoxtf.ProviderConfiguration)
|
config := m.(proxmoxtf.ProviderConfiguration)
|
||||||
|
|
||||||
api, err := config.GetClient()
|
api, err := config.GetClient()
|
||||||
@ -2732,9 +2740,7 @@ func vmCreateCustom(ctx context.Context, d *schema.ResourceData, m interface{})
|
|||||||
createBody.HookScript = &hookScript
|
createBody.HookScript = &hookScript
|
||||||
}
|
}
|
||||||
|
|
||||||
createTimeout := d.Get(mkTimeoutClone).(int)
|
err = api.Node(nodeName).VM(0).CreateVM(ctx, createBody)
|
||||||
|
|
||||||
err = api.Node(nodeName).VM(0).CreateVM(ctx, createBody, createTimeout)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return diag.FromErr(err)
|
return diag.FromErr(err)
|
||||||
}
|
}
|
||||||
@ -2780,14 +2786,13 @@ func vmCreateStart(ctx context.Context, d *schema.ResourceData, m interface{}) d
|
|||||||
}
|
}
|
||||||
|
|
||||||
if reboot {
|
if reboot {
|
||||||
rebootTimeout := d.Get(mkTimeoutReboot).(int)
|
rebootTimeoutSec := d.Get(mkTimeoutReboot).(int)
|
||||||
|
|
||||||
err := vmAPI.RebootVM(
|
err := vmAPI.RebootVM(
|
||||||
ctx,
|
ctx,
|
||||||
&vms.RebootRequestBody{
|
&vms.RebootRequestBody{
|
||||||
Timeout: &rebootTimeout,
|
Timeout: &rebootTimeoutSec,
|
||||||
},
|
},
|
||||||
rebootTimeout+30,
|
|
||||||
)
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return diag.FromErr(err)
|
return diag.FromErr(err)
|
||||||
@ -3349,7 +3354,7 @@ func vmGetVGADeviceObject(d *schema.ResourceData) (*vms.CustomVGADevice, error)
|
|||||||
func vmRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
|
func vmRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
|
||||||
config := m.(proxmoxtf.ProviderConfiguration)
|
config := m.(proxmoxtf.ProviderConfiguration)
|
||||||
|
|
||||||
api, err := config.GetClient()
|
client, err := config.GetClient()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return diag.FromErr(err)
|
return diag.FromErr(err)
|
||||||
}
|
}
|
||||||
@ -3359,7 +3364,7 @@ func vmRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Dia
|
|||||||
return diag.FromErr(err)
|
return diag.FromErr(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
vmNodeName, err := api.Cluster().GetVMNodeName(ctx, vmID)
|
vmNodeName, err := client.Cluster().GetVMNodeName(ctx, vmID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if errors.Is(err, cluster.ErrVMDoesNotExist) {
|
if errors.Is(err, cluster.ErrVMDoesNotExist) {
|
||||||
d.SetId("")
|
d.SetId("")
|
||||||
@ -3379,15 +3384,13 @@ func vmRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Dia
|
|||||||
|
|
||||||
nodeName := d.Get(mkNodeName).(string)
|
nodeName := d.Get(mkNodeName).(string)
|
||||||
|
|
||||||
vmAPI := api.Node(nodeName).VM(vmID)
|
vmAPI := client.Node(nodeName).VM(vmID)
|
||||||
|
|
||||||
// Retrieve the entire configuration in order to compare it to the state.
|
// Retrieve the entire configuration in order to compare it to the state.
|
||||||
vmConfig, err := vmAPI.GetVM(ctx)
|
vmConfig, err := vmAPI.GetVM(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if strings.Contains(err.Error(), "HTTP 404") ||
|
if errors.Is(err, api.ErrResourceDoesNotExist) {
|
||||||
(strings.Contains(err.Error(), "HTTP 500") && strings.Contains(err.Error(), "does not exist")) {
|
|
||||||
d.SetId("")
|
d.SetId("")
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -4730,11 +4733,15 @@ func vmUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.D
|
|||||||
|
|
||||||
// If the node name has changed we need to migrate the VM to the new node before we do anything else.
|
// If the node name has changed we need to migrate the VM to the new node before we do anything else.
|
||||||
if d.HasChange(mkNodeName) {
|
if d.HasChange(mkNodeName) {
|
||||||
|
migrateTimeoutSec := d.Get(mkTimeoutMigrate).(int)
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(ctx, time.Duration(migrateTimeoutSec)*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
oldNodeNameValue, _ := d.GetChange(mkNodeName)
|
oldNodeNameValue, _ := d.GetChange(mkNodeName)
|
||||||
oldNodeName := oldNodeNameValue.(string)
|
oldNodeName := oldNodeNameValue.(string)
|
||||||
vmAPI := api.Node(oldNodeName).VM(vmID)
|
vmAPI := api.Node(oldNodeName).VM(vmID)
|
||||||
|
|
||||||
migrateTimeout := d.Get(mkTimeoutMigrate).(int)
|
|
||||||
trueValue := types.CustomBool(true)
|
trueValue := types.CustomBool(true)
|
||||||
migrateBody := &vms.MigrateRequestBody{
|
migrateBody := &vms.MigrateRequestBody{
|
||||||
TargetNode: nodeName,
|
TargetNode: nodeName,
|
||||||
@ -4742,7 +4749,7 @@ func vmUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.D
|
|||||||
OnlineMigration: &trueValue,
|
OnlineMigration: &trueValue,
|
||||||
}
|
}
|
||||||
|
|
||||||
err := vmAPI.MigrateVM(ctx, migrateBody, migrateTimeout)
|
err := vmAPI.MigrateVM(ctx, migrateBody)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return diag.FromErr(err)
|
return diag.FromErr(err)
|
||||||
}
|
}
|
||||||
@ -5516,17 +5523,15 @@ func vmUpdateDiskLocationAndSize(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
timeout := d.Get(disk.MkTimeoutMoveDisk).(int)
|
|
||||||
|
|
||||||
for _, reqBody := range diskMoveBodies {
|
for _, reqBody := range diskMoveBodies {
|
||||||
err = vmAPI.MoveVMDisk(ctx, reqBody, timeout)
|
err = vmAPI.MoveVMDisk(ctx, reqBody)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return diag.FromErr(err)
|
return diag.FromErr(err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, reqBody := range diskResizeBodies {
|
for _, reqBody := range diskResizeBodies {
|
||||||
err = vmAPI.ResizeVMDisk(ctx, reqBody, timeout)
|
err = vmAPI.ResizeVMDisk(ctx, reqBody)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return diag.FromErr(err)
|
return diag.FromErr(err)
|
||||||
}
|
}
|
||||||
@ -5550,14 +5555,13 @@ func vmUpdateDiskLocationAndSize(
|
|||||||
}
|
}
|
||||||
|
|
||||||
if vmStatus.Status != "stopped" {
|
if vmStatus.Status != "stopped" {
|
||||||
rebootTimeout := d.Get(mkTimeoutReboot).(int)
|
rebootTimeoutSec := d.Get(mkTimeoutReboot).(int)
|
||||||
|
|
||||||
err := vmAPI.RebootVM(
|
err := vmAPI.RebootVM(
|
||||||
ctx,
|
ctx,
|
||||||
&vms.RebootRequestBody{
|
&vms.RebootRequestBody{
|
||||||
Timeout: &rebootTimeout,
|
Timeout: &rebootTimeoutSec,
|
||||||
},
|
},
|
||||||
rebootTimeout+30,
|
|
||||||
)
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return diag.FromErr(err)
|
return diag.FromErr(err)
|
||||||
@ -5569,9 +5573,19 @@ func vmUpdateDiskLocationAndSize(
|
|||||||
}
|
}
|
||||||
|
|
||||||
func vmDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
|
func vmDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
|
||||||
|
timeout := d.Get(mkTimeoutStopVM).(int)
|
||||||
|
shutdownTimeout := d.Get(mkTimeoutShutdownVM).(int)
|
||||||
|
|
||||||
|
if shutdownTimeout > timeout {
|
||||||
|
timeout = shutdownTimeout
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(ctx, time.Duration(timeout)*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
config := m.(proxmoxtf.ProviderConfiguration)
|
config := m.(proxmoxtf.ProviderConfiguration)
|
||||||
|
|
||||||
api, err := config.GetClient()
|
client, err := config.GetClient()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return diag.FromErr(err)
|
return diag.FromErr(err)
|
||||||
}
|
}
|
||||||
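`vmDelete` may have to perform either a forced stop or a graceful shutdown before the delete itself, so it sizes its deadline from whichever of the two timeouts is larger. A tiny sketch of that choice:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// deleteDeadline picks the larger of the stop and shutdown timeouts for the delete operation.
func deleteDeadline(stopTimeoutSec, shutdownTimeoutSec int) time.Duration {
	timeout := stopTimeoutSec
	if shutdownTimeoutSec > timeout {
		timeout = shutdownTimeoutSec
	}

	return time.Duration(timeout) * time.Second
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), deleteDeadline(300, 1800))
	defer cancel()

	deadline, _ := ctx.Deadline()
	fmt.Println("delete must finish by:", deadline)
}
```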
@ -5583,7 +5597,7 @@ func vmDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.D
|
|||||||
return diag.FromErr(err)
|
return diag.FromErr(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
vmAPI := api.Node(nodeName).VM(vmID)
|
vmAPI := client.Node(nodeName).VM(vmID)
|
||||||
|
|
||||||
// Stop or shut down the virtual machine before deleting it.
|
// Stop or shut down the virtual machine before deleting it.
|
||||||
status, err := vmAPI.GetVMStatus(ctx)
|
status, err := vmAPI.GetVMStatus(ctx)
|
||||||
@ -5608,10 +5622,8 @@ func vmDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.D
|
|||||||
|
|
||||||
err = vmAPI.DeleteVM(ctx)
|
err = vmAPI.DeleteVM(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if strings.Contains(err.Error(), "HTTP 404") ||
|
if errors.Is(err, api.ErrResourceDoesNotExist) {
|
||||||
(strings.Contains(err.Error(), "HTTP 500") && strings.Contains(err.Error(), "does not exist")) {
|
|
||||||
d.SetId("")
|
d.SetId("")
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -5619,7 +5631,7 @@ func vmDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.D
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Wait for the state to become unavailable as that clearly indicates the destruction of the VM.
|
// Wait for the state to become unavailable as that clearly indicates the destruction of the VM.
|
||||||
err = vmAPI.WaitForVMStatus(ctx, "", 60, 2)
|
err = vmAPI.WaitForVMStatus(ctx, "")
|
||||||
if err == nil {
|
if err == nil {
|
||||||
return diag.Errorf("failed to delete VM \"%d\"", vmID)
|
return diag.Errorf("failed to delete VM \"%d\"", vmID)
|
||||||
}
|
}
|
||||||
|