mirror of https://github.com/bpg/terraform-provider-proxmox.git
synced 2025-06-30 10:33:46 +00:00
feat(provider): use sudo to execute commands over SSH (#950)

* feat(provider): use `sudo` to execute commands over SSH
* fix: simplify everything, use sudo per command
* feat: add documentation
* minor doc fix
* chore: cleanup docs

---------

Signed-off-by: Pavel Boldyrev <627562+bpg@users.noreply.github.com>
This commit is contained in:
parent 8722121002
commit 9d764e5889
@@ -1,4 +1,5 @@
{
  "MD007": false,
  "MD013": false,
  "MD025": false,
  "MD041": false
@@ -115,6 +115,69 @@ Instead, it uses the SSH protocol directly, and supports the `SSH_AUTH_SOCK` env
This allows the provider to use the SSH agent configured by the user, and to support multiple SSH agents running on the same machine.
You can find more details on the SSH Agent [here](https://www.digitalocean.com/community/tutorials/ssh-essentials-working-with-ssh-servers-clients-and-keys#adding-your-ssh-keys-to-an-ssh-agent-to-avoid-typing-the-passphrase).
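
For example — a minimal sketch, not part of this change, and the key path is a placeholder — an agent can be started and loaded with a key before Terraform runs:

```sh
# Start an agent for the current shell; this exports SSH_AUTH_SOCK for child processes.
eval "$(ssh-agent -s)"
# Load the private key that the Proxmox node accepts (the path is an assumption).
ssh-add ~/.ssh/id_ed25519
```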

### SSH User

By default, the provider will use the same username for the SSH connection as the one used for the Proxmox API connection (when using PAM authentication).
This can be overridden by specifying the `username` argument in the `ssh` block (or alternatively a username in the `PROXMOX_VE_SSH_USERNAME` environment variable):

```terraform
provider "proxmox" {
  ...

  ssh {
    agent    = true
    username = "terraform"
  }
}
```
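
The environment-variable form mentioned above can be used instead of the `username` argument — a minimal sketch, assuming Terraform is run from the same shell:

```sh
# Overrides the SSH username without touching the provider block.
export PROXMOX_VE_SSH_USERNAME="terraform"
terraform plan
```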

-> When using API Token or non-PAM authentication for the Proxmox API, the `username` field in the `ssh` block (or alternatively a username in the `PROXMOX_VE_USERNAME` or `PROXMOX_VE_SSH_USERNAME` environment variable) is **required**.
This is because the provider needs to know which PAM user to use for the SSH connection.
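
For example — a hedged sketch, not from this commit; the endpoint, token value, and usernames are placeholders — a token-authenticated provider block still carries a PAM username for SSH:

```terraform
provider "proxmox" {
  endpoint  = "https://10.0.0.2:8006/"
  api_token = "terraform@pve!provider=00000000-0000-0000-0000-000000000000"

  ssh {
    agent    = true
    username = "terraform" # PAM user used only for the SSH connection
  }
}
```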

When using a non-root user for the SSH connection, the user **must** have the `sudo` privilege on the target node without requiring a password.

You can configure the `sudo` privilege for the user via the command line on the Proxmox host. In the example below, we create a user `terraform` and assign the `sudo` privilege to it:

- Create a new system user:

  ```sh
  sudo useradd -m terraform
  ```

- Add the user to the `sudo` group:

  ```sh
  sudo usermod -aG sudo terraform
  ```

- Configure the `sudo` privilege for the user (a `sudoers.d` alternative is sketched after these steps):

  ```sh
  sudo visudo
  ```

  Add the following line to the end of the file:

  ```sh
  terraform ALL=(ALL) NOPASSWD:ALL
  ```

  Save the file and exit.

- Copy your SSH public key to the new user on the target node:

  ```sh
  ssh-copy-id terraform@<target-node>
  ```

- Test the SSH connection and password-less `sudo`:

  ```sh
  ssh terraform@<target-node> sudo ls -la /root
  ```

You should be able to connect to the target node and see the content of the `/root` folder without being prompted for a password.
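
As an alternative to editing the main sudoers file with `visudo` — a sketch based on common sudo practice, not taken from this change — the same rule can live in a drop-in file:

```sh
# Create a dedicated sudoers drop-in for the terraform user and restrict its permissions.
echo 'terraform ALL=(ALL) NOPASSWD:ALL' | sudo tee /etc/sudoers.d/terraform
sudo chmod 0440 /etc/sudoers.d/terraform
# Validate the sudoers configuration before closing the session.
sudo visudo -c
```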

### Node IP address used for SSH connection

In order to make the SSH connection, the provider needs to be able to resolve the target node name to an IP.
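
Depending on the provider version, the `ssh` block may also accept per-node address overrides — a hedged sketch; the block layout, node name, and IP are assumptions to verify against the provider documentation:

```terraform
provider "proxmox" {
  # ...

  ssh {
    agent = true

    node {
      name    = "pve-node-1"
      address = "192.168.10.21"
    }
  }
}
```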

@@ -194,21 +257,18 @@ provider "proxmox" {
  insecure = true
  ssh {
    agent    = true
    username = "root"
    username = "terraform"
  }
}
```

-> Token authentication takes precedence over password authentication.

-> The `username` field in the `ssh` block (or alternatively a username in the `PROXMOX_VE_USERNAME` or `PROXMOX_VE_SSH_USERNAME` environment variable) is **required** when using API Token authentication.
This is because the provider needs to know which user to use for the SSH connection.

-> Not all Proxmox API operations are supported via API Token.
You may see errors like `error creating container: received an HTTP 403 response - Reason: Permission check failed (changing feature flags for privileged container is only allowed for root@pam)` or `error creating VM: received an HTTP 500 response - Reason: only root can set 'arch' config` when using API Token authentication, even when the `Administrator` role or the `root@pam` user is used with the token.
The workaround is to use password authentication for those operations.

-> You can also configure additional users and roles using [`virtual_environment_user`](https://registry.terraform.io/providers/bpg/proxmox/latest/docs/data-sources/virtual_environment_user) and [`virtual_environment_role`](https://registry.terraform.io/providers/bpg/proxmox/latest/docs/data-sources/virtual_environment_role) resources of the provider.
-> You can also configure additional Proxmox users and roles using [`virtual_environment_user`](https://registry.terraform.io/providers/bpg/proxmox/latest/docs/data-sources/virtual_environment_user) and [`virtual_environment_role`](https://registry.terraform.io/providers/bpg/proxmox/latest/docs/data-sources/virtual_environment_role) resources of the provider.
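
For completeness — a hedged sketch using the provider's role and user resources; the privilege list and names are placeholders, not a vetted minimal set:

```terraform
resource "proxmox_virtual_environment_role" "operations" {
  role_id    = "operations"
  privileges = ["VM.Allocate", "VM.Config.Disk", "Datastore.AllocateSpace"]
}

resource "proxmox_virtual_environment_user" "operations" {
  user_id = "operations@pve"
  comment = "Managed by Terraform"
}
```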

## Temporary Directory
@@ -30,7 +30,7 @@ import (
// Client is an interface for performing SSH requests against the Proxmox Nodes.
type Client interface {
    // ExecuteNodeCommands executes a command on a node.
    ExecuteNodeCommands(ctx context.Context, nodeName string, commands []string) error
    ExecuteNodeCommands(ctx context.Context, nodeName string, commands []string) ([]byte, error)

    // NodeUpload uploads a file to a node.
    NodeUpload(ctx context.Context, nodeName string,
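
To illustrate the new signature — a hypothetical caller sketch, not code from this commit; the `ssh` package import path, helper name, and node name are assumptions:

```go
package example

import (
    "context"
    "fmt"

    "github.com/hashicorp/terraform-plugin-log/tflog"

    "github.com/bpg/terraform-provider-proxmox/proxmox/ssh" // assumed import path
)

// runAndLog runs a command on a node and logs the combined output that the
// updated ExecuteNodeCommands now returns instead of discarding.
func runAndLog(ctx context.Context, c ssh.Client, nodeName string) error {
    out, err := c.ExecuteNodeCommands(ctx, nodeName, []string{"sudo pvesm status"})
    if err != nil {
        return fmt.Errorf("remote command failed: %w", err)
    }

    tflog.Debug(ctx, "remote command output", map[string]interface{}{
        "output": string(out),
    })

    return nil
}
```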

@@ -42,16 +42,15 @@ type client struct {
    password     string
    agent        bool
    agentSocket  string
    nodeLookup   NodeResolver
    nodeResolver NodeResolver
}

// NewClient creates a new SSH client.
func NewClient(
    username string, password string,
    agent bool, agentSocket string,
    nodeLookup NodeResolver,
    nodeResolver NodeResolver,
) (Client, error) {
    //goland:noinspection GoBoolExpressions
    if agent && runtime.GOOS != "linux" && runtime.GOOS != "darwin" && runtime.GOOS != "freebsd" {
        return nil, errors.New(
            "the ssh agent flag is only supported on POSIX systems, please set it to 'false'" +
@@ -59,8 +58,8 @@ func NewClient(
        )
    }

    if nodeLookup == nil {
        return nil, errors.New("node lookup is required")
    if nodeResolver == nil {
        return nil, errors.New("node resolver is required")
    }

    return &client{
@@ -68,15 +67,15 @@ func NewClient(
        password:     password,
        agent:        agent,
        agentSocket:  agentSocket,
        nodeLookup:   nodeLookup,
        nodeResolver: nodeResolver,
    }, nil
}

// ExecuteNodeCommands executes commands on a given node.
func (c *client) ExecuteNodeCommands(ctx context.Context, nodeName string, commands []string) error {
    node, err := c.nodeLookup.Resolve(ctx, nodeName)
func (c *client) ExecuteNodeCommands(ctx context.Context, nodeName string, commands []string) ([]byte, error) {
    node, err := c.nodeResolver.Resolve(ctx, nodeName)
    if err != nil {
        return fmt.Errorf("failed to find node endpoint: %w", err)
        return nil, fmt.Errorf("failed to find node endpoint: %w", err)
    }

    tflog.Debug(ctx, "executing commands on the node using SSH", map[string]interface{}{
@@ -89,31 +88,24 @@ func (c *client) ExecuteNodeCommands(ctx context.Context, nodeName string, comma

    sshClient, err := c.openNodeShell(ctx, node)
    if err != nil {
        return err
        return nil, err
    }

    defer closeOrLogError(sshClient)

    sshSession, err := sshClient.NewSession()
    if err != nil {
        return fmt.Errorf("failed to create SSH session: %w", err)
        return nil, fmt.Errorf("failed to create SSH session: %w", err)
    }

    defer closeOrLogError(sshSession)

    script := strings.Join(commands, " && \\\n")

    output, err := sshSession.CombinedOutput(
        fmt.Sprintf(
            "/bin/bash -c '%s'",
            strings.ReplaceAll(script, "'", "'\"'\"'"),
        ),
    )
    output, err := sshSession.CombinedOutput(strings.Join(commands, "; "))
    if err != nil {
        return errors.New(string(output))
        return nil, errors.New(string(output))
    }

    return nil
    return output, nil
}

func (c *client) NodeUpload(
@@ -122,7 +114,7 @@ func (c *client) NodeUpload(
    remoteFileDir string,
    d *api.FileUploadRequest,
) error {
    ip, err := c.nodeLookup.Resolve(ctx, nodeName)
    ip, err := c.nodeResolver.Resolve(ctx, nodeName)
    if err != nil {
        return fmt.Errorf("failed to find node endpoint: %w", err)
    }
@@ -2982,11 +2982,11 @@ func vmCreateCustomDisks(ctx context.Context, d *schema.ResourceData, m interfac
            fmt.Sprintf(`disk_interface="%s"`, diskInterface),
            fmt.Sprintf(`file_path_tmp="%s"`, filePathTmp),
            fmt.Sprintf(`vm_id="%d"`, vmID),
            `source_image=$(pvesm path "$file_id")`,
            `imported_disk="$(qm importdisk "$vm_id" "$source_image" "$datastore_id_target" -format $file_format | grep "unused0" | cut -d ":" -f 3 | cut -d "'" -f 1)"`,
            `source_image=$(sudo pvesm path "$file_id")`,
            `imported_disk="$(sudo qm importdisk "$vm_id" "$source_image" "$datastore_id_target" -format $file_format | grep "unused0" | cut -d ":" -f 3 | cut -d "'" -f 1)"`,
            `disk_id="${datastore_id_target}:$imported_disk${disk_options}"`,
            `qm set "$vm_id" "-${disk_interface}" "$disk_id"`,
            `qm resize "$vm_id" "${disk_interface}" "${disk_size}G"`,
            `sudo qm set "$vm_id" "-${disk_interface}" "$disk_id"`,
            `sudo qm resize "$vm_id" "${disk_interface}" "${disk_size}G"`,
        )

        importedDiskCount++
@@ -3004,10 +3004,14 @@ func vmCreateCustomDisks(ctx context.Context, d *schema.ResourceData, m interfac

        nodeName := d.Get(mkResourceVirtualEnvironmentVMNodeName).(string)

        err = api.SSH().ExecuteNodeCommands(ctx, nodeName, commands)
        out, err := api.SSH().ExecuteNodeCommands(ctx, nodeName, commands)
        if err != nil {
            return diag.FromErr(err)
        }

        tflog.Debug(ctx, "vmCreateCustomDisks", map[string]interface{}{
            "output": string(out),
        })
    }

    return vmCreateStart(ctx, d, m)