Автоматизация развёртывания Kubernetes-кластера

This commit is contained in:
admin 2026-03-10 17:22:58 +03:00
commit 533f678ab7
19 changed files with 532 additions and 0 deletions

2
.gitignore vendored Normal file
View File

@ -0,0 +1,2 @@
ssh

16
.terraform.lock.hcl generated Normal file
View File

@ -0,0 +1,16 @@
# This file is maintained automatically by "tofu init".
# Manual edits may be lost in future updates.
# NOTE(review): only one platform hash (h1:, linux_amd64) is recorded per
# provider; teams on other platforms should extend the lock with
# `tofu providers lock` — confirm against OpenTofu docs.

provider "registry.opentofu.org/bpg/proxmox" {
  version = "0.98.1"
  hashes = [
    "h1:/3n9NevrIwRAI/0HOEbeO4uQfCCivjspG8pDgzhqOSU=",
  ]
}

provider "registry.opentofu.org/hashicorp/local" {
  version = "2.7.0"
  hashes = [
    "h1:uWL9nhlxLY2xW3GMh8IFQv0S1UP3HMlN/B+2Nr7fsZE=",
  ]
}

View File

@ -0,0 +1 @@
{"Modules":[{"Key":"","Source":"","Dir":"."},{"Key":"cluster","Source":"./modules/k8s-node","Dir":"modules/k8s-node"}]}

View File

@ -0,0 +1 @@
/home/andy/.terraform.d/plugins/registry.opentofu.org/bpg/proxmox/0.98.1/linux_amd64

View File

@ -0,0 +1 @@
/home/andy/.terraform.d/plugins/registry.opentofu.org/hashicorp/local/2.7.0/linux_amd64

51
README.md Normal file
View File

@ -0,0 +1,51 @@
Что содержит проект
Модуль OpenTofu для создания Kubernetes-нод в Proxmox.
Генерацию cloud-init конфигураций.
Параметризованную выдачу VMID, IP, hostname.
Готовый root-конфиг, использующий модуль k8s-node.
Возможность добавлять и удалять конкретные ноды без count.
Подготовку виртуальных машин к установке Kubernetes.
Структура репозитория
├── README.md
├── locals.tf
├── main.tf
├── modules
│ └── k8s-node
│ ├── cloud-config
│ ├── locals.tf
│ ├── main.tf
│ ├── outputs.tf
│ └── variables.tf
├── outputs.tf
├── providers.tf
├── terraform.tfstate
├── terraform.tfstate.backup
├── terraform.tfvars
└── variables.tf
Быстрый старт
1. Инициализация проекта
tofu init
2. Проверка плана
tofu plan
3. Создание инфраструктуры
tofu apply
После выполнения этой команды Proxmox создаст виртуальные машины, сгенерирует userdata для cloud-init и развернёт требуемые ноды.
Управление нодами
Модуль принимает объект вида:
nodes = {
master1 = { role = "master", cpu = 2, memory = 4096 }
worker1 = { role = "worker", cpu = 2, memory = 4096 }
worker2 = { role = "worker", cpu = 2, memory = 4096 }
}
Вы можете:
Добавить новую ноду, просто вписав её в map.
Удалить ноду, удалив её ключ из map.
Иметь несколько кластеров, копируя модуль в разные окружения.

43
locals.tf Normal file
View File

@ -0,0 +1,43 @@
# Root-level locals: the declarative inventory of cluster nodes.
locals {
  # Public SSH key injected into every VM via cloud-init.
  # NOTE(review): main.tf reads the same file again through a
  # data "local_file" source and passes that copy to the module — this
  # local looks unused; confirm and consolidate on one mechanism.
  ssh_public_key = trimspace(file("./ssh/id_terraform.pub"))

  # Map of cluster nodes keyed by a stable name. Adding or removing a
  # key creates or destroys the corresponding VM (no count indexing).
  nodes = {
    master1 = {
      role      = "master"
      cpu       = var.master_cpu
      memory    = var.master_memory
      disk      = var.master_disk
      datastore = var.master_datastore
      ip_offset = var.master_ip_offset
    }
    worker1 = {
      role      = "worker"
      cpu       = var.worker_cpu
      memory    = var.worker_memory
      disk      = var.worker_disk
      datastore = var.worker_datastore
      ip_offset = var.worker_ip_offset
    }
    # worker2 is kept commented out so it can be re-enabled quickly:
    # worker2 = {
    #   role      = "worker"
    #   cpu       = var.worker_cpu
    #   memory    = var.worker_memory
    #   disk      = var.worker_disk
    #   datastore = var.worker_datastore
    #   ip_offset = var.worker_ip_offset
    # }
    worker3 = {
      role      = "worker"
      cpu       = var.worker_cpu
      memory    = var.worker_memory
      disk      = var.worker_disk
      datastore = var.worker_datastore
      ip_offset = var.worker_ip_offset
    }
  }
}

27
main.tf Normal file
View File

@ -0,0 +1,27 @@
# Root configuration: instantiates the k8s-node module with the node
# inventory declared in locals.tf.
#
# FIX: the SSH public key was previously read twice — once in locals.tf
# via file() and once here via a data "local_file" source, both wrapped
# in trimspace(). The redundant data source is removed and the single
# local.ssh_public_key value is passed to the module instead; the
# resulting string is identical.
module "cluster" {
  source = "./modules/k8s-node"

  # Node inventory and credentials.
  nodes   = local.nodes
  ssh_key = local.ssh_public_key

  # Naming and addressing scheme.
  hostname_prefix   = var.hostname_prefix
  cluster_ip_start  = var.cluster_ip_start
  master_vmid_start = var.master_vmid_start
  worker_vmid_start = var.worker_vmid_start

  # Proxmox placement and storage.
  cloudinit_datastore = var.cloudinit_datastore
  proxmox_node        = var.proxmox_node
  node_bridge         = var.node_bridge
  image_datastore     = var.image_datastore
  image_file          = var.image_file
  disk_interface      = var.disk_interface

  # Network configuration.
  network_base    = var.network_base
  network_cidr    = var.network_cidr
  cluster_gateway = var.cluster_gateway
}

View File

@ -0,0 +1,20 @@
#cloud-config
# Base cloud-init template shared by every cluster node.
# Rendered by templatefile() with two variables:
#   ssh_key  - public key authorized for the "ubuntu" user
#   hostname - resolved node hostname (e.g. k8s-master-1)
timezone: Europe/Moscow
users:
  - default
  - name: ubuntu
    groups: [sudo]
    shell: /bin/bash
    ssh_authorized_keys:
      - ${ssh_key}
# Refresh the package index before installing packages.
package_update: true
packages:
  # Required so Proxmox (agent { enabled = true } on the VM) can query
  # the guest for its state and IP addresses.
  - qemu-guest-agent
runcmd:
  - systemctl enable --now qemu-guest-agent
  # Hostname is applied on first boot; no reboot needed.
  - hostnamectl set-hostname ${hostname}

View File

@ -0,0 +1,58 @@
# Module-internal derived values: role split, per-role indexing, and
# the IP / VMID / hostname maps consumed by main.tf.
locals {
  # The SSH key is supplied by the caller; the module reads no files.
  ssh_public_key = var.ssh_key

  # Split the incoming node map by role.
  masters = {
    for name, node in var.nodes :
    name => node if node.role == "master"
  }
  workers = {
    for name, node in var.nodes :
    name => node if node.role == "worker"
  }

  # Give each node a 1-based index within its role (master1, master2,
  # worker1, ...). The index comes from the sorted key list so it is
  # stable for a fixed set of keys.
  # NOTE(review): removing a node shifts the index of later nodes in
  # the same role, which renames and re-addresses them — confirm this
  # churn is acceptable before deleting keys from the map.
  indexed_masters = {
    for name, node in local.masters :
    name => merge(node, {
      index = index(sort(keys(local.masters)), name) + 1
    })
  }
  indexed_workers = {
    for name, node in local.workers :
    name => merge(node, {
      index = index(sort(keys(local.workers)), name) + 1
    })
  }

  # Combined node map driving all resources.
  # NOTE(review): nodes whose role is neither "master" nor "worker"
  # are silently dropped by the merge above.
  nodes = merge(local.indexed_masters, local.indexed_workers)

  # Final host octet: cluster base + per-role offset + in-role index.
  ip_map = {
    for name, node in local.nodes :
    name => var.cluster_ip_start + node.ip_offset + node.index
  }

  # VMIDs: masters and workers draw from separate ranges.
  vmid_map = {
    for name, node in local.nodes :
    name => (
      node.role == "master"
      ? var.master_vmid_start + node.index
      : var.worker_vmid_start + node.index
    )
  }

  # Hostname: <prefix>-<role>-<index>, e.g. k8s-master-1, k8s-worker-2.
  hostname_map = {
    for name, node in local.nodes :
    name => "${var.hostname_prefix}-${node.role}-${node.index}"
  }
}

71
modules/k8s-node/main.tf Normal file
View File

@ -0,0 +1,71 @@
terraform {
  required_providers {
    proxmox = {
      source = "registry.opentofu.org/bpg/proxmox"
    }
  }
}

# One cloud-init snippet per node, rendered from the shared template
# with that node's hostname and the cluster SSH key.
resource "proxmox_virtual_environment_file" "cloudinit" {
  for_each = local.nodes

  content_type = "snippets"
  datastore_id = var.cloudinit_datastore
  node_name    = var.proxmox_node

  source_raw {
    file_name = "${each.key}.yml"
    data = templatefile(
      "${path.module}/cloud-config/node-base.yml",
      {
        hostname = local.hostname_map[each.key]
        ssh_key  = local.ssh_public_key
      }
    )
  }
}

# One VM per node in local.nodes; name, VMID and IP come from the
# derived maps in locals.tf.
resource "proxmox_virtual_environment_vm" "nodes" {
  for_each = local.nodes

  name      = local.hostname_map[each.key]
  node_name = var.proxmox_node
  vm_id     = local.vmid_map[each.key]

  # NOTE(review): assumes qemu-guest-agent is running inside the guest
  # (the cloud-init template installs it); without it the provider may
  # block waiting for agent data — confirm against bpg/proxmox docs.
  agent {
    enabled = true
  }

  cpu {
    cores = each.value.cpu
  }

  memory {
    dedicated = each.value.memory
  }

  network_device {
    bridge = var.node_bridge
  }

  # Root disk is imported from a pre-uploaded cloud image.
  disk {
    datastore_id = each.value.datastore
    import_from  = "${var.image_datastore}:${var.image_file}"
    interface    = var.disk_interface
    size         = each.value.disk
  }

  # Cloud-init drive: per-node user-data snippet plus static IPv4.
  initialization {
    datastore_id      = each.value.datastore
    user_data_file_id = proxmox_virtual_environment_file.cloudinit[each.key].id
    ip_config {
      ipv4 {
        address = "${var.network_base}.${local.ip_map[each.key]}/${var.network_cidr}"
        gateway = var.cluster_gateway
      }
    }
  }
}

View File

@ -0,0 +1,17 @@
output "ip_addresses" {
  description = "IP addresses of all created nodes"
  # NOTE(review): ipv4_addresses[1][0] depends on the guest agent
  # reporting addresses and on interface ordering (index 0 is commonly
  # loopback); this is fragile if the guest gains extra interfaces —
  # confirm, or consider deriving the address from ip_map instead.
  value = {
    for name, _ in local.nodes :
    name => proxmox_virtual_environment_vm.nodes[name].ipv4_addresses[1][0]
  }
}

output "hostnames" {
  description = "Hostnames of all created nodes"
  value       = local.hostname_map
}

output "vmids" {
  description = "VMIDs of all created nodes"
  value       = local.vmid_map
}

View File

@ -0,0 +1,66 @@
# Module inputs. All values are provided by the root configuration.

variable "ssh_key" {
  type        = string
  description = "Public SSH key authorized on every node via cloud-init."
}

variable "nodes" {
  type = map(object({
    role      = string
    cpu       = number
    memory    = number
    disk      = number
    datastore = string
    ip_offset = number
  }))
  description = "Node inventory keyed by a stable name; role must be \"master\" or \"worker\"."

  # FIX: locals.tf silently drops nodes with any other role; fail fast
  # at plan time instead.
  validation {
    condition = alltrue([
      for node in var.nodes : contains(["master", "worker"], node.role)
    ])
    error_message = "Each node role must be either \"master\" or \"worker\"."
  }
}

variable "hostname_prefix" {
  type        = string
  description = "Prefix used to build hostnames: <prefix>-<role>-<index>."
}

variable "cluster_ip_start" {
  type        = number
  description = "Base host octet; final octet = start + role offset + index."
}

variable "master_vmid_start" {
  type        = number
  description = "First VMID of the master range (master N gets start + N)."
}

variable "worker_vmid_start" {
  type        = number
  description = "First VMID of the worker range (worker N gets start + N)."
}

variable "cloudinit_datastore" {
  type        = string
  description = "Proxmox datastore holding the cloud-init snippets."
}

variable "proxmox_node" {
  type        = string
  description = "Proxmox node name that hosts the VMs."
}

variable "node_bridge" {
  type        = string
  description = "Proxmox network bridge for the VM network device."
}

variable "image_datastore" {
  type        = string
  description = "Datastore containing the cloud image to import."
}

variable "image_file" {
  type        = string
  description = "Image file path within image_datastore (e.g. import/ubuntu-24.qcow2)."
}

variable "disk_interface" {
  type        = string
  description = "Disk interface of the root disk (e.g. virtio0)."
}

variable "network_base" {
  type        = string
  description = "First three octets of the node network (e.g. \"10.10.10\")."
}

variable "network_cidr" {
  type        = number
  description = "Prefix length of the node network (e.g. 24)."
}

variable "cluster_gateway" {
  type        = string
  description = "Default gateway configured on every node."
}

11
outputs.tf Normal file
View File

@ -0,0 +1,11 @@
# Root outputs: re-export the module's maps so `tofu output` shows the
# cluster inventory after apply.

output "nodes_ipv4" {
  description = "IPv4 address of every node, keyed by node name."
  value       = module.cluster.ip_addresses
}

output "nodes_hostnames" {
  description = "Hostname of every node, keyed by node name."
  value       = module.cluster.hostnames
}

output "nodes_vmid" {
  description = "Proxmox VMID of every node, keyed by node name."
  value       = module.cluster.vmids
}

20
providers.tf Normal file
View File

@ -0,0 +1,20 @@
terraform {
  required_providers {
    proxmox = {
      source = "registry.opentofu.org/bpg/proxmox"
      # FIX: pin to the series recorded in .terraform.lock.hcl (0.98.1)
      # so provider upgrades are deliberate rather than whatever
      # "tofu init -upgrade" happens to fetch.
      version = "~> 0.98"
    }
  }
}

provider "proxmox" {
  endpoint  = var.proxmox_endpoint
  api_token = "${var.proxmox_token_id}=${var.proxmox_token_secret}"
  # SECURITY: TLS certificate verification is disabled. Acceptable only
  # for a lab with a self-signed certificate; prefer trusting the PVE
  # CA and removing this flag.
  insecure = true

  # SSH access used by the provider for operations the HTTP API cannot
  # perform (e.g. uploading snippet files).
  ssh {
    agent    = true
    username = "root"
    # NOTE(review): both agent and an explicit private_key are set;
    # per bpg/proxmox docs one of the two should suffice — confirm
    # which is actually used and drop the other.
    private_key = file("./ssh/id_terraform")
  }
}

24
terraform.tfvars Normal file
View File

@ -0,0 +1,24 @@
# Environment-specific values for the root module.
#
# SECURITY NOTE(review): this file carries the live API endpoint and a
# slot for the token secret, and it is committed to the repository.
# Prefer supplying the secret via TF_VAR_proxmox_token_secret or an
# untracked *.auto.tfvars file.
proxmox_endpoint     = "https://185.78.29.7:8006/api2/json"
proxmox_token_id     = "terraform@pve!tf"
proxmox_token_secret = "API_TOKEN"

# VMID ranges: masters and workers must not overlap.
master_vmid_start = 4000
worker_vmid_start = 4010

# Master sizing.
master_cpu       = 2
master_memory    = 2048
master_disk      = 20
master_datastore = "local"

# Worker sizing.
worker_cpu       = 2
worker_memory    = 4096
worker_disk      = 30
worker_datastore = "local"

# Base cloud image to import for every node's root disk.
image_datastore = "local"
image_file      = "import/ubuntu-24.qcow2"

# Network: nodes get <network_base>.<cluster_ip_start + offset + index>.
cluster_gateway = "10.10.10.1"
network_base    = "10.10.10"
# NOTE(review): written as a string although the module expects a
# number; Terraform coerces it — consider writing 24 unquoted.
network_cidr     = "24"
cluster_ip_start = 40

103
variables.tf Normal file
View File

@ -0,0 +1,103 @@
# Root module inputs.
# FIX: every variable now declares an explicit type (several were
# untyped), the token secret is marked sensitive, and network_cidr is a
# number to match the module's `type = number` (the old string default
# "24" relied on implicit coercion).

variable "proxmox_endpoint" {
  type        = string
  description = "Proxmox API endpoint, e.g. https://host:8006/api2/json."
}

variable "proxmox_token_id" {
  type        = string
  description = "API token id in the form user@realm!tokenname."
}

variable "proxmox_token_secret" {
  type        = string
  description = "API token secret; supply via TF_VAR_proxmox_token_secret."
  sensitive   = true
}

variable "proxmox_node" {
  type    = string
  default = "px"
}

variable "cloudinit_datastore" {
  type    = string
  default = "local"
}

variable "disk_interface" {
  type    = string
  default = "virtio0"
}

variable "image_datastore" {
  type    = string
  default = "local"
}

variable "image_file" {
  type    = string
  default = "import/ubuntu-24.qcow2"
}

variable "hostname_prefix" {
  type    = string
  default = "k8s"
}

# Per-role sizing defaults; overridden in terraform.tfvars.
variable "master_cpu" {
  type    = number
  default = 2
}

variable "worker_cpu" {
  type    = number
  default = 2
}

variable "master_memory" {
  type    = number
  default = 4096
}

variable "worker_memory" {
  type    = number
  default = 4096
}

variable "master_disk" {
  type    = number
  default = 20
}

variable "worker_disk" {
  type    = number
  default = 20
}

# Network layout.
variable "network_base" {
  type    = string
  default = "10.10.10"
}

variable "network_cidr" {
  type    = number
  default = 24
}

variable "cluster_gateway" {
  type    = string
  default = "10.10.10.1"
}

variable "cluster_ip_start" {
  type    = number
  default = 40
}

variable "master_ip_offset" {
  type    = number
  default = 0
}

variable "worker_ip_offset" {
  type    = number
  default = 5
}

variable "node_bridge" {
  type    = string
  default = "vmbr1"
}

variable "master_datastore" {
  type    = string
  default = "local"
}

variable "worker_datastore" {
  type    = string
  default = "local"
}

variable "master_vmid_start" {
  type    = number
  default = 2000
}

variable "worker_vmid_start" {
  type    = number
  default = 2010
}