Added alpine
This commit is contained in:
46
ubuntu/Vagrantfile
vendored
Normal file
46
ubuntu/Vagrantfile
vendored
Normal file
@@ -0,0 +1,46 @@
|
||||
# -*- mode: ruby -*-
# vi: set ft=ruby :

# --- Global configuration ---
# Number of worker VMs created alongside the control plane.
NUM_WORKER_NODES = 2
# Private-network subnet prefix used for eth1 on every node.
IP_NW = "10.0.3." # Use the 10.0.3.x subnet for eth1
# Fixed eth1 address of the control plane; workers join the cluster via this IP.
IP_CONTROLPLANE = "10.0.3.15" # Control plane IP for eth1

Vagrant.configure("2") do |config|

  # --- Control-plane node ---
  config.vm.define "controlplane" do |controlplane|
    controlplane.vm.box = "bento/ubuntu-24.04"
    controlplane.vm.network "private_network", ip: IP_CONTROLPLANE # eth1 IP
    controlplane.vm.hostname = "controlplane"
    # One-way rsync of this directory into the guest for convenience.
    controlplane.vm.synced_folder ".", "/home/vagrant/sync", type: "rsync"
    controlplane.vm.provision :shell, path: "k3s_control.sh"

    # Forward the ports needed to reach cluster services from the host.
    controlplane.vm.network :forwarded_port, guest: 80, host: 80
    controlplane.vm.network :forwarded_port, guest: 8080, host: 8080
    controlplane.vm.network :forwarded_port, guest: 6443, host: 6443     # Kubernetes API
    controlplane.vm.network :forwarded_port, guest: 8472, host: 8472     # Flannel
    controlplane.vm.network :forwarded_port, guest: 10250, host: 10250   # Kubelet
    controlplane.vm.network :forwarded_port, guest: 10256, host: 10256   # Kube Proxy

    # Forward the whole Kubernetes NodePort range (30000-32767).
    (30000..32767).each do |port|
      controlplane.vm.network :forwarded_port, guest: port, host: port
    end

    controlplane.vm.provider "virtualbox" do |vb|
      vb.memory = 2048 # RAM assigned to the control plane
      vb.cpus = 2      # Number of CPUs assigned
    end
  end

  # --- Worker node configuration ---
  (1..NUM_WORKER_NODES).each do |i|
    config.vm.define "node0#{i}" do |node|
      node.vm.box = "bento/ubuntu-24.04"
      # eth1 IPs: 10.0.3.16, 10.0.3.17, ... (the control plane owns .15)
      node.vm.network "private_network", ip: IP_NW + (15 + i).to_s
      node.vm.hostname = "worker-node0#{i}"
      node.vm.provider "virtualbox" do |vb|
        vb.memory = 1024 # 1 GB per worker
        vb.cpus = 1
      end
    end
  end
end
|
||||
14
ubuntu/k3s_control.sh
Normal file
14
ubuntu/k3s_control.sh
Normal file
@@ -0,0 +1,14 @@
|
||||
#!/usr/bin/env bash
# Provision script for the K3s control-plane (master) node.
# Installs the K3s server pinned to the eth1 private-network IP so the
# workers on 10.0.3.x can reach the API server, then smoke-checks the
# cluster and installs kustomize.
set -euo pipefail

echo "🚀 Instal·lant K3s Server (Master)..."
# --node-ip pins the node to the private-network (eth1) address.
curl -sfL https://get.k3s.io | sh -s - --node-ip 10.0.3.15
#curl -sfL https://get.k3s.io | sh -

# Let the vagrant user read the kubeconfig generated by the installer.
sudo chown vagrant:vagrant /etc/rancher/k3s/k3s.yaml
export KUBECONFIG=/etc/rancher/k3s/k3s.yaml

# Smoke-check the freshly installed cluster.
kubectl version
kubectl get nodes
kubectl get pods -A

# Install kustomize and move it onto the PATH.
curl -s "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh" | bash
sudo mv kustomize /usr/local/bin

echo "✅ Master llest!"
|
||||
30
ubuntu/k3s_worker.sh
Normal file
30
ubuntu/k3s_worker.sh
Normal file
@@ -0,0 +1,30 @@
|
||||
#!/usr/bin/env bash
# Provision helper for K3s worker (agent) nodes.
# Run this once the agent node is already provisioned and the control
# plane is up.
#
# Fetch the join token on the master:
#   sudo cat /var/lib/rancher/k3s/server/node-token
# It prints a string shaped like: some_random_string::server:second_random_string
#
# NOTE: run the following commands on the nodes you want to be workers:
#   K3S_TOKEN=some_random_string::server:second_random_string
#   e.g.: K3S_TOKEN=K108085cf047fe22375b5c2ac96fec70afffd8a36fa66e8066f0218233e97c8ce6b::server:f3e19f5b6ed1c93e4c1bf0992bb5dc68
# Remember that 10.0.3.15 is the control-plane IP.
set -euo pipefail

# Fail fast with a clear message if the join token was not exported first.
if [ -z "${K3S_TOKEN:-}" ]; then
  echo "K3S_TOKEN is not set; export the node token from the control plane first." >&2
  exit 1
fi

echo "🚀 Instal·lant K3s Agent (Worker) connectant a 10.0.3.15..."

# K3S_URL switches the installer into agent mode; the token is quoted so
# the '::server:' form survives word splitting.
curl -sfL https://get.k3s.io | K3S_URL=https://10.0.3.15:6443 K3S_TOKEN="$K3S_TOKEN" sh -

echo "✅ Worker unit al clúster!"


# Run this on the control plane to give the nodes the worker role:
#kubectl label node worker-node01 node-role.kubernetes.io/worker=worker
#kubectl label node worker-node02 node-role.kubernetes.io/worker=worker
# Prevent pods from being scheduled on the control-plane node:
#kubectl taint nodes controlplane node-role.kubernetes.io/master=true:NoSchedule

# Pinning the node IPs (stable addressing):
# On the control plane:
# curl -sfL https://get.k3s.io | sh -s - --node-ip 10.0.3.15 --flannel-iface eth1
# On worker01:
# curl -sfL https://get.k3s.io | K3S_URL=https://10.0.3.15:6443 K3S_TOKEN=$K3S_TOKEN sh -s - --node-ip 10.0.3.16
# On worker02:
# curl -sfL https://get.k3s.io | K3S_URL=https://10.0.3.15:6443 K3S_TOKEN=$K3S_TOKEN sh -s - --node-ip 10.0.3.17
||||
41
ubuntu/libvirtVagrantfile
Normal file
41
ubuntu/libvirtVagrantfile
Normal file
@@ -0,0 +1,41 @@
|
||||
# Vagrantfile variant for use with the libvirt provider.
# -*- mode: ruby -*-
# vi: set ft=ruby :

# --- Global configuration ---
# Number of worker VMs created alongside the master.
NUM_WORKER_NODES = 2
# Private-network subnet prefix for all nodes.
IP_NW = "192.168.3."
# Fixed master IP so the workers can reference it when joining.
IP_MASTER = "192.168.3.10"

Vagrant.configure("2") do |config|
  # --- Master node ---
  config.vm.define "master" do |master|
    master.vm.box = "bento/ubuntu-24.04"
    # Use the IP_MASTER constant instead of repeating the literal address.
    master.vm.network "private_network", ip: IP_MASTER
    master.vm.hostname = "master"
    # One-way rsync of this directory into the guest for convenience.
    master.vm.synced_folder ".", "/home/vagrant/sync", type: "rsync"
    # NOTE(review): this provisions with "k3s_master.sh", but the VirtualBox
    # setup in this commit ships "k3s_control.sh" — confirm the intended file.
    master.vm.provision :shell, path: "k3s_master.sh"
    master.vm.network :forwarded_port, guest: 6443, host: 6443 # Kubernetes API
    master.vm.network :forwarded_port, guest: 80, host: 8080

    master.vm.provider :libvirt do |v|
      v.memory = 2048
      v.cpus = 2
      v.graphics_type = "spice"
    end
  end

  # --- Worker node configuration ---
  (1..NUM_WORKER_NODES).each do |i|
    config.vm.define "node0#{i}" do |node|
      node.vm.box = "bento/ubuntu-24.04"
      node.vm.hostname = "worker-node0#{i}"
      # Worker IPs: 192.168.3.11, 192.168.3.12, ... (master owns .10)
      node.vm.network "private_network", ip: IP_NW + (10 + i).to_s

      node.vm.provider :libvirt do |v|
        v.memory = 1024
        v.cpus = 1
        v.graphics_type = "spice"
      end
    end
  end
end
|
||||
Reference in New Issue
Block a user