Compare commits

...

39 Commits

SHA1 Message Date
ef0e5bf8f2 change: bridge vmbr0 and vlan tag by vlan66 2025-09-12 06:46:26 +00:00
89be2412ef feat: variabilize disk size 2025-09-10 08:15:25 +00:00
2f60387d70 change: disk size from 4 to 10 2025-09-04 13:23:46 +00:00
b73837c028 fix: cloud-init config file is now named with the VM hostname 2025-07-16 08:44:43 +00:00
3a991010d5 feat: introduce vm roles 2025-07-07 07:06:01 +00:00
4d92a926be fix: declare providers at project level 2025-07-04 13:53:51 +00:00
c524868cf5 feat: rework terraform pve_vm module 2025-07-02 07:15:55 +00:00
a78860c3b8 docs: update README 2025-05-28 08:20:16 +00:00
e9f084443d fix: add lifecycle section for idempotency 2025-05-28 07:45:21 +00:00
e17e6be329 docs: add comments to describe the code 2025-05-28 07:45:11 +00:00
54317c0a1c restore simple-vm without module 2025-05-28 07:44:57 +00:00
12807ca973 add update workspace path 2025-04-11 20:53:10 +00:00
fa52fdeed9 variabilize vm environment 2025-04-09 20:17:28 +00:00
bfd5db44a0 rearrange variables and add parallelism 2025-04-09 19:42:12 +00:00
b13a0b4ed9 change hostnaming convention 2025-04-07 19:46:46 +00:00
e45eb9de69 add env next to tags 2025-04-06 20:08:02 +00:00
db31372abb add tag to the VMs based on a variable 2025-04-06 19:16:11 +00:00
bc64652310 create dynamic ansible group 2025-04-04 22:01:03 +00:00
e6d453fce3 add vars for mono node deployment 2025-04-04 21:14:21 +00:00
99c3183cb4 remove defaults from role 2025-04-04 21:06:54 +00:00
62f3ac22b2 define vm_attr at play level 2025-04-04 21:00:03 +00:00
af91955f47 use complex default variables in the terraform task 2025-04-04 20:46:57 +00:00
bc19a7638b the terransible project can deploy on one or multiple nodes 2025-03-28 22:46:13 +00:00
c3db663e9e create role terraform_vm 2025-03-28 21:16:42 +00:00
995e99c1e6 create ansible folder 2025-03-26 22:37:13 +00:00
de457e8e38 add terransible project 2025-03-26 21:47:02 +00:00
8c793d3d59 use nested map to deploy 2 VM per node 2025-03-24 14:52:18 +00:00
285917f104 replace static set with data source for node names 2025-03-24 10:31:38 +00:00
a5e79eaba7 replace count by for_each with declared set 2025-03-24 10:28:05 +00:00
3e5d65773c create multiple-vm project 2025-03-23 22:55:30 +00:00
0513f40af0 add lifecycle section for idempotency 2025-03-23 22:03:34 +00:00
fdaa406a94 create pve_vm module 2025-03-23 20:26:37 +00:00
d24c18ebcb format code 2025-03-22 22:35:22 +00:00
fd2cd3441b add node_name lookup for the clone 2025-03-22 21:32:52 +00:00
8e980d859c add variables related to the VM 2025-03-21 22:47:25 +00:00
753d5c38e4 add package upgrades and output of VM IP 2025-03-21 21:57:26 +00:00
a39d0ceacb use env file for cloud-init 2025-03-21 21:09:50 +00:00
65fdcc8b00 update ssh keys 2025-03-21 19:29:26 +00:00
631afcafba tweaking configuration to at least provision a VM 2025-02-03 20:40:44 +00:00
26 changed files with 713 additions and 84 deletions

View File

@@ -1,3 +1,20 @@
# Homelab
# 🧪 Homelab
Hello world !
> ⚠️ **Work in Progress** This repository is actively evolving as I automate and expand my homelab.
Welcome to my homelab repository! This is where I manage and document the infrastructure powering my personal lab environment using modern DevOps tools and best practices.
## 🚀 Goals
- Automate VM and infrastructure deployment with **Terraform**
- Configure systems and services using **Ansible**
- Deploy and manage Kubernetes with **Flux CD** using a **GitOps** approach
- Keep everything **declarative**, **reproducible**, and **version-controlled**
## 📌 Notes
This repository is intended for **educational and experimental purposes**. Feel free to explore, fork, and adapt ideas for your own homelab setup.
---
Stay tuned — more coming soon! 🚧

View File

@@ -0,0 +1,5 @@
---
plugin: cloud.terraform.terraform_provider
project_path:
- /home/vez/homelab/terraform/projects/terransible
state_file: terraform.tfstate.d/lab/terraform.tfstate

View File

@@ -0,0 +1,31 @@
---
- name: Deploy a Terraform infrastructure
  hosts: localhost
  gather_facts: false
  tasks:
    - ansible.builtin.import_role:
        name: terraform_vm
      vars:
        terraform_vm_state: "{{ state | default(omit) }}"
        terraform_vm_project_path: /home/vez/homelab/terraform/projects/terransible
        terraform_vars_vm_env: "{{ env | default('lab') }}"
        terraform_vars_vm_tags:
          - "{{ env | default('lab') }}"
        terraform_vars_multi_node_deployment: "{{ multi_node_deployment | default(true) }}"
        terraform_vars_target_node: "{{ target_node | default(omit) }}"
        terraform_vars_vm_attr: { "master": { "ram": 2048, "cpu": 2, "vlan": 66 }, "worker": { "ram": 1024, "cpu": 1, "vlan": 66 } }

- name: Ping
  hosts: all
  tasks:
    - name: Ping all the hosts
      ansible.builtin.ping:
      delegate_to:
    - name: Ping the master
      ansible.builtin.ping:
      when: inventory_hostname in groups['master']
    - name: Ping the worker
      ansible.builtin.ping:
      when: inventory_hostname in groups['worker']
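Since `terraform_vm_state` is forwarded to the `state` argument of `cloud.terraform.terraform` (which also accepts `absent`), the same stack can be torn down either by running the play above with `-e state=absent` or with a dedicated play. A minimal sketch, assuming the same role and project path:

---
- name: Destroy the Terraform infrastructure
  hosts: localhost
  gather_facts: false
  tasks:
    - ansible.builtin.import_role:
        name: terraform_vm
      vars:
        terraform_vm_state: absent   # overrides the role default of "present"
        terraform_vm_project_path: /home/vez/homelab/terraform/projects/terransible
        terraform_vars_vm_env: lab   # selects the matching Terraform workspace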

View File

@@ -0,0 +1,3 @@
---
terraform_ansible_inventory: /home/vez/homelab/ansible/inventories/terraform.yml
terraform_vm_state: present

View File

@@ -0,0 +1,25 @@
---
- name: Deploy a Terraform infrastructure
  delegate_to: localhost
  cloud.terraform.terraform:
    project_path: "{{ terraform_vm_project_path }}"
    state: "{{ terraform_vm_state }}"
    workspace: "{{ terraform_vars_vm_env | default(omit) }}"
    parallelism: 3
    complex_vars: true
    variables:
      multi_node_deployment: "{{ terraform_vars_multi_node_deployment | default(omit) }}"
      target_node: "{{ terraform_vars_target_node | default(omit) }}"
      vm_attr: "{{ terraform_vars_vm_attr | default(omit) }}"
      vm_env: "{{ terraform_vars_vm_env | default(omit) }}"
      vm_tags: "{{ terraform_vars_vm_tags | default(omit) }}"

- name: Update Terraform workspace path
  ansible.builtin.lineinfile:
    path: "{{ terraform_ansible_inventory }}"
    regexp: '^state_file:.*tfstate$'
    line: "state_file: terraform.tfstate.d/{{ terraform_vars_vm_env }}/terraform.tfstate"
  when: terraform_vars_vm_env is defined

- name: Refresh inventory
  ansible.builtin.meta: refresh_inventory

View File

@@ -1,64 +0,0 @@
data "proxmox_virtual_environment_vms" "template" {
filter {
name = "name"
values = ["ubuntu-cloud"]
}
}
resource "proxmox_virtual_environment_vm" "simple_vm" {
name = "simple-vm"
node_name = "zenith"
tags = ["terraform", "test"]
agent {
enabled = false
}
# if agent is not enabled, the VM may not be able to shutdown properly, and may need to be forced off
stop_on_destroy = true
clone {
vm_id = data.proxmox_virtual_environment_vms.template.vms[0].vm_id
}
cpu {
cores = 2
type = "x86-64-v2-AES" # recommended for modern CPUs
}
memory {
dedicated = 2048
floating = 2048 # set equal to dedicated to enable ballooning
}
disk {
datastore_id = "ceph-workload"
#file_id = proxmox_virtual_environment_download_file.latest_ubuntu_22_jammy_qcow2_img.id
interface = "scsi0"
}
initialization {
ip_config {
ipv4 {
address = "dhcp"
}
}
#user_account {
# keys = [trimspace(tls_private_key.ubuntu_vm_key.public_key_openssh)]
# password = random_password.ubuntu_vm_password.result
# username = "ubuntu"
#}
#user_data_file_id = proxmox_virtual_environment_file.cloud_config.id
}
network_device {
bridge = "vmbr0"
}
operating_system {
type = "l26"
}
}

View File

@@ -1,17 +0,0 @@
terraform {
  required_providers {
    proxmox = {
      source = "bpg/proxmox"
    }
  }
}

provider "proxmox" {
  endpoint  = var.proxmox_endpoint
  api_token = var.proxmox_api_token
  insecure  = false
  ssh {
    agent    = true
    username = "root"
  }
}

View File

@@ -0,0 +1,108 @@
# Retrieve VM templates available in Proxmox that match the specified name
data "proxmox_virtual_environment_vms" "template" {
  filter {
    name   = "name"
    values = ["${var.vm_template}"] # The name of the template to clone from
  }
}

# Create a cloud-init configuration file as a Proxmox snippet
resource "proxmox_virtual_environment_file" "cloud_config" {
  content_type = "snippets"    # Cloud-init files are stored as snippets in Proxmox
  datastore_id = "local"       # Local datastore used to store the snippet
  node_name    = var.node_name # The Proxmox node where the file will be uploaded
  source_raw {
    file_name = "${var.vm_name}.cloud-config.yaml" # The name of the snippet file
    data      = <<-EOF
      #cloud-config
      hostname: ${var.vm_name}
      package_update: true
      package_upgrade: true
      packages:
        - qemu-guest-agent # Ensures the guest agent is installed
      users:
        - default
        - name: ${var.vm_user}
          groups: sudo
          shell: /bin/bash
          ssh-authorized-keys:
            - "${var.vm_user_sshkey}" # Inject user's SSH key
          sudo: ALL=(ALL) NOPASSWD:ALL
      runcmd:
        - systemctl enable qemu-guest-agent
        - reboot # Reboot the VM after provisioning
    EOF
  }
}

# Define and provision a new VM by cloning the template and applying initialization
resource "proxmox_virtual_environment_vm" "vm" {
  name      = var.vm_name   # VM name
  node_name = var.node_name # Proxmox node to deploy the VM
  tags      = var.vm_tags   # Optional VM tags for categorization
  agent {
    enabled = true # Enable the QEMU guest agent
  }
  stop_on_destroy = true # Ensure VM is stopped gracefully when destroyed
  clone {
    vm_id     = data.proxmox_virtual_environment_vms.template.vms[0].vm_id     # ID of the source template
    node_name = data.proxmox_virtual_environment_vms.template.vms[0].node_name # Node of the source template
  }
  bios    = var.vm_bios    # BIOS type (e.g., seabios or ovmf)
  machine = var.vm_machine # Machine type (e.g., q35)
  cpu {
    cores = var.vm_cpu # Number of CPU cores
    type  = "host"     # Use host CPU type for best compatibility/performance
  }
  memory {
    dedicated = var.vm_ram # RAM in MB
  }
  disk {
    datastore_id = var.node_datastore # Datastore to hold the disk
    interface    = "scsi0"            # Primary disk interface
    size         = var.vm_disk_size   # Disk size in GB
  }
  initialization {
    user_data_file_id = proxmox_virtual_environment_file.cloud_config.id # Link the cloud-init file
    datastore_id      = var.node_datastore
    interface         = "scsi1" # Separate interface for cloud-init
    ip_config {
      ipv4 {
        address = "dhcp" # Get IP via DHCP
      }
    }
  }
  network_device {
    bridge = "vlan${var.vm_vlan}" # VNet used with VLAN ID
  }
  operating_system {
    type = "l26" # Linux 2.6+ kernel
  }
  vga {
    type = "std" # Standard VGA type
  }
  lifecycle {
    ignore_changes = [ # Ignore the initialization section after the first deployment for idempotency
      initialization
    ]
  }
}

# Output the assigned IP address of the VM after provisioning
output "vm_ip" {
  value       = proxmox_virtual_environment_vm.vm.ipv4_addresses[1][0] # Second network interface's first IP
  description = "VM IP"
}

View File

@@ -0,0 +1,7 @@
terraform {
  required_providers {
    proxmox = {
      source = "bpg/proxmox"
    }
  }
}

View File

@@ -0,0 +1,75 @@
variable "node_name" {
description = "Proxmox host for the VM"
type = string
}
variable "node_datastore" {
description = "Datastore used for VM storage"
type = string
default = "ceph-workload"
}
variable "vm_template" {
description = "Template of the VM"
type = string
default = "ubuntu-cloud"
}
variable "vm_name" {
description = "Hostname of the VM"
type = string
}
variable "vm_user" {
description = "Admin user of the VM"
type = string
default = "vez"
}
variable "vm_user_sshkey" {
description = "Admin user SSH key of the VM"
type = string
default = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAID62LmYRu1rDUha3timAIcA39LtcIOny1iAgFLnxoBxm vez@bastion"
}
variable "vm_cpu" {
description = "Number of CPU cores of the VM"
type = number
default = 1
}
variable "vm_ram" {
description = "Number of RAM (MB) of the VM"
type = number
default = 2048
}
variable "vm_disk_size" {
description = "Size of the disk (GB) of the VM"
type = number
default = 10
}
variable "vm_bios" {
description = "Type of BIOS used for the VM"
type = string
default = "ovmf"
}
variable "vm_machine" {
description = "Type of machine used for the VM"
type = string
default = "q35"
}
variable "vm_vlan" {
description = "VLAN of the VM"
type = number
default = 66
}
variable "vm_tags" {
description = "Tags for the VM"
type = list(any)
default = ["test"]
}

View File

@@ -0,0 +1,36 @@
module "pve_vm" {
source = "../../modules/pve_vm"
for_each = local.vm_list
node_name = each.value.node_name
vm_name = each.value.vm_name
vm_cpu = each.value.vm_cpu
vm_ram = each.value.vm_ram
vm_vlan = each.value.vm_vlan
}
locals {
vm_attr = {
"master" = { ram = 2048, cpu = 2, vlan = 66 }
"worker" = { ram = 1024, cpu = 1, vlan = 66 }
}
vm_list = {
for vm in flatten([
for node in data.proxmox_virtual_environment_nodes.pve_nodes.names : [
for role, config in local.vm_attr : {
node_name = node
vm_name = "${node}-${role}"
vm_cpu = config.cpu
vm_ram = config.ram
vm_vlan = config.vlan
}
]
]) : vm.vm_name => vm
}
}
data "proxmox_virtual_environment_nodes" "pve_nodes" {}
output "vm_ip" {
value = { for k, v in module.pve_vm : k => v.vm_ip }
}
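To illustrate what the flatten/for_each pattern above produces: with the two roles defined in local.vm_attr and a node list that, purely as an assumption for this sketch, resolves to ["apex", "vertex"], local.vm_list expands to a map keyed by VM name along these lines:

# Illustration only; node names "apex" and "vertex" are hypothetical placeholders.
locals {
  vm_list_example = {
    "apex-master"   = { node_name = "apex",   vm_name = "apex-master",   vm_cpu = 2, vm_ram = 2048, vm_vlan = 66 }
    "apex-worker"   = { node_name = "apex",   vm_name = "apex-worker",   vm_cpu = 1, vm_ram = 1024, vm_vlan = 66 }
    "vertex-master" = { node_name = "vertex", vm_name = "vertex-master", vm_cpu = 2, vm_ram = 2048, vm_vlan = 66 }
    "vertex-worker" = { node_name = "vertex", vm_name = "vertex-worker", vm_cpu = 1, vm_ram = 1024, vm_vlan = 66 }
  }
}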

View File

@@ -0,0 +1,18 @@
terraform {
  required_providers {
    proxmox = {
      source = "bpg/proxmox"
    }
  }
}

provider "proxmox" {
  endpoint  = var.proxmox_endpoint
  api_token = var.proxmox_api_token
  insecure  = false
  ssh {
    agent       = false
    private_key = file("~/.ssh/id_ed25519")
    username    = "root"
  }
}

View File

@@ -0,0 +1,10 @@
variable "proxmox_endpoint" {
description = "Proxmox URL endpoint"
type = string
}
variable "proxmox_api_token" {
description = "Proxmox API token"
type = string
sensitive = true
}

View File

@@ -0,0 +1,12 @@
module "pve_vm" {
source = "../../modules/pve_vm"
node_name = "zenith"
vm_name = "zenith-vm"
vm_cpu = 2
vm_ram = 2048
vm_vlan = 66
}
output "vm_ip" {
value = module.pve_vm.vm_ip
}

View File

@@ -0,0 +1,18 @@
terraform {
  required_providers {
    proxmox = {
      source = "bpg/proxmox"
    }
  }
}

provider "proxmox" {
  endpoint  = var.proxmox_endpoint
  api_token = var.proxmox_api_token
  insecure  = false
  ssh {
    agent       = false
    private_key = file("~/.ssh/id_ed25519")
    username    = "root"
  }
}

View File

@@ -6,5 +6,5 @@ variable "proxmox_endpoint" {
variable "proxmox_api_token" {
description = "Proxmox API token"
type = string
sensitive = true
sensitive = true
}

View File

@@ -0,0 +1,108 @@
# Retrieve VM templates available in Proxmox that match the specified name
data "proxmox_virtual_environment_vms" "template" {
  filter {
    name   = "name"
    values = ["${var.vm_template}"] # The name of the template to clone from
  }
}

# Create a cloud-init configuration file as a Proxmox snippet
resource "proxmox_virtual_environment_file" "cloud_config" {
  content_type = "snippets"    # Cloud-init files are stored as snippets in Proxmox
  datastore_id = "local"       # Local datastore used to store the snippet
  node_name    = var.node_name # The Proxmox node where the file will be uploaded
  source_raw {
    file_name = "vm.cloud-config.yaml" # The name of the snippet file
    data      = <<-EOF
      #cloud-config
      hostname: ${var.vm_name}
      package_update: true
      package_upgrade: true
      packages:
        - qemu-guest-agent # Ensures the guest agent is installed
      users:
        - default
        - name: ${var.vm_user}
          groups: sudo
          shell: /bin/bash
          ssh-authorized-keys:
            - "${var.vm_user_sshkey}" # Inject user's SSH key
          sudo: ALL=(ALL) NOPASSWD:ALL
      runcmd:
        - systemctl enable qemu-guest-agent
        - reboot # Reboot the VM after provisioning
    EOF
  }
}

# Define and provision a new VM by cloning the template and applying initialization
resource "proxmox_virtual_environment_vm" "vm" {
  name      = var.vm_name   # VM name
  node_name = var.node_name # Proxmox node to deploy the VM
  tags      = var.vm_tags   # Optional VM tags for categorization
  agent {
    enabled = true # Enable the QEMU guest agent
  }
  stop_on_destroy = true # Ensure VM is stopped gracefully when destroyed
  clone {
    vm_id     = data.proxmox_virtual_environment_vms.template.vms[0].vm_id     # ID of the source template
    node_name = data.proxmox_virtual_environment_vms.template.vms[0].node_name # Node of the source template
  }
  bios    = var.vm_bios    # BIOS type (e.g., seabios or ovmf)
  machine = var.vm_machine # Machine type (e.g., q35)
  cpu {
    cores = var.vm_cpu # Number of CPU cores
    type  = "host"     # Use host CPU type for best compatibility/performance
  }
  memory {
    dedicated = var.vm_ram # RAM in MB
  }
  disk {
    datastore_id = var.node_datastore # Datastore to hold the disk
    interface    = "scsi0"            # Primary disk interface
    size         = 4                  # Disk size in GB
  }
  initialization {
    user_data_file_id = proxmox_virtual_environment_file.cloud_config.id # Link the cloud-init file
    datastore_id      = var.node_datastore
    interface         = "scsi1" # Separate interface for cloud-init
    ip_config {
      ipv4 {
        address = "dhcp" # Get IP via DHCP
      }
    }
  }
  network_device {
    bridge = "vlan${var.vm_vlan}" # VNet used with VLAN ID
  }
  operating_system {
    type = "l26" # Linux 2.6+ kernel
  }
  vga {
    type = "std" # Standard VGA type
  }
  lifecycle {
    ignore_changes = [ # Ignore the initialization section after the first deployment for idempotency
      initialization
    ]
  }
}

# Output the assigned IP address of the VM after provisioning
output "vm_ip" {
  value       = proxmox_virtual_environment_vm.vm.ipv4_addresses[1][0] # Second network interface's first IP
  description = "VM IP"
}

View File

@@ -0,0 +1,22 @@
# Define the required Terraform provider block
terraform {
  required_providers {
    proxmox = {
      source = "bpg/proxmox" # Use the community Proxmox provider from the bpg namespace
    }
  }
}

# Configure the Proxmox provider with API and SSH access
provider "proxmox" {
  endpoint  = var.proxmox_endpoint  # Proxmox API URL (e.g., https://proxmox.local:8006/api2/json)
  api_token = var.proxmox_api_token # API token for authentication (should have appropriate permissions)
  insecure  = false                 # Reject self-signed or invalid TLS certificates (set to true only in trusted/test environments)
  # Optional SSH settings used for VM customization via SSH
  ssh {
    agent       = false                     # Do not use the local SSH agent; use key file instead
    private_key = file("~/.ssh/id_ed25519") # Load SSH private key from the local file system
    username    = "root"                    # SSH username for connecting to the Proxmox host
  }
}

View File

@@ -0,0 +1,5 @@
node_name = "zenith" # Name of the Proxmox node where the VM will be deployed
vm_name = "zenith-vm" # Desired name for the new virtual machine
vm_cpu = 2 # Number of CPU cores to allocate to the VM
vm_ram = 2048 # Amount of RAM in MB (2 GB)
vm_vlan = 66 # VLAN ID for network segmentation

View File

@@ -0,0 +1,80 @@
variable "proxmox_endpoint" {
description = "Proxmox URL endpoint"
type = string
}
variable "proxmox_api_token" {
description = "Proxmox API token"
type = string
sensitive = true
}
variable "node_name" {
description = "Proxmox host for the VM"
type = string
}
variable "node_datastore" {
description = "Datastore used for VM storage"
type = string
default = "ceph-workload"
}
variable "vm_template" {
description = "Template of the VM"
type = string
default = "ubuntu-cloud"
}
variable "vm_name" {
description = "Hostname of the VM"
type = string
}
variable "vm_user" {
description = "Admin user of the VM"
type = string
default = "vez"
}
variable "vm_user_sshkey" {
description = "Admin user SSH key of the VM"
type = string
default = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAID62LmYRu1rDUha3timAIcA39LtcIOny1iAgFLnxoBxm vez@bastion"
}
variable "vm_cpu" {
description = "Number of CPU cores of the VM"
type = number
default = 1
}
variable "vm_ram" {
description = "Number of RAM (MB) of the VM"
type = number
default = 2048
}
variable "vm_bios" {
description = "Type of BIOS used for the VM"
type = string
default = "ovmf"
}
variable "vm_machine" {
description = "Type of machine used for the VM"
type = string
default = "q35"
}
variable "vm_vlan" {
description = "VLAN of the VM"
type = number
default = 66
}
variable "vm_tags" {
description = "Tags for the VM"
type = list(any)
default = ["test"]
}

View File

@@ -0,0 +1,63 @@
module "pve_vm" {
source = "../../modules/pve_vm"
for_each = local.vm_list
node_name = each.value.node_name
vm_name = each.value.vm_name
vm_cpu = each.value.vm_cpu
vm_ram = each.value.vm_ram
vm_vlan = each.value.vm_vlan
vm_tags = var.vm_tags
}
locals {
all_nodes = data.proxmox_virtual_environment_nodes.pve_nodes.names
selected_nodes = var.multi_node_deployment == false ? [var.target_node] : local.all_nodes
env_digit_map = {
"test" = 1
"lab" = 2
"dev" = 3
"val" = 4
"prod" = 5
}
env_digit = lookup(local.env_digit_map, var.vm_env, 0)
vm_list = {
for vm in flatten([
for node in local.selected_nodes : [
for role, config in var.vm_attr : {
node_name = node
vm_name = "${role}-${var.vm_env}-${node}"
vm_name = "kub-${substr(role, 0, 1)}${local.env_digit}${substr(node, 0, 1)}"
vm_cpu = config.cpu
vm_ram = config.ram
vm_vlan = config.vlan
vm_role = role
}
]
]) : vm.vm_name => vm
}
roles = toset([for vm in local.vm_list : vm.vm_role])
}
data "proxmox_virtual_environment_nodes" "pve_nodes" {}
output "vm_ip" {
value = { for k, v in module.pve_vm : k => v.vm_ip }
}
resource "ansible_group" "vm_groups" {
for_each = local.roles
name = each.key
}
resource "ansible_host" "vm_hosts" {
for_each = module.pve_vm
name = each.key
variables = {
ansible_host = each.value.vm_ip
}
groups = [local.vm_list[each.key].vm_role]
}
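For readability, here is how the kub- naming convention above resolves; the node name "zenith" is taken from elsewhere in this compare, and the roles come from the default vm_attr keys:

# Illustration only: vm_env = "lab" maps to env_digit = 2.
#   role "master" on node "zenith"  ->  vm_name = "kub-m2z"
#   role "worker" on node "zenith"  ->  vm_name = "kub-w2z"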

View File

@@ -0,0 +1,21 @@
terraform {
  required_providers {
    proxmox = {
      source = "bpg/proxmox"
    }
    ansible = {
      source = "ansible/ansible"
    }
  }
}

provider "proxmox" {
  endpoint  = var.proxmox_endpoint
  api_token = var.proxmox_api_token
  insecure  = false
  ssh {
    agent       = false
    private_key = file("~/.ssh/id_ed25519")
    username    = "root"
  }
}

View File

@@ -0,0 +1,46 @@
variable "proxmox_endpoint" {
description = "Proxmox URL endpoint"
type = string
}
variable "proxmox_api_token" {
description = "Proxmox API token"
type = string
sensitive = true
}
variable "multi_node_deployment" {
description = "true : deploy VMs on each node, false : deploy only on a given node"
type = bool
default = true
}
variable "target_node" {
description = "Node which host the VM if multi_node_deployment = false"
type = string
default = ""
}
variable "vm_attr" {
description = "VM attributes"
type = map(object({
ram = number
cpu = number
vlan = number
}))
default = {
"vm" = { ram = 2048, cpu = 2, vlan = 66 }
}
}
variable "vm_env" {
description = "VM environment"
type = string
default = "test"
}
variable "vm_tags" {
description = "Tags for the VM"
type = list(any)
default = ["test"]
}