Added Vagrantfile without pods in the server
This commit is contained in:
79
ubuntu/Vagrantfile
vendored
@@ -1,46 +1,53 @@
|
||||
# -*- mode: ruby -*-
# vi: set ft=ruby :
#
# Multi-machine k3s cluster: one control-plane VM ("server") plus three
# worker VMs ("agent1".."agent3") on a VirtualBox host-only network.
# The server is tainted NoSchedule so no workload pods run on it.

# Static IP of the k3s server on the private (host-only) network.
server_ip = "192.168.33.10"

# Agent hostname => private-network IP.
agents = { "agent1" => "192.168.33.11",
           "agent2" => "192.168.33.12",
           "agent3" => "192.168.33.13" }

# Provisioning for the server node:
#  - installs k3s bound to the private interface (eth1, flannel VXLAN),
#  - shares the join token and kubeconfig through /vagrant_shared,
#  - taints the server so the scheduler places no pods on it.
server_script = <<-SHELL
  export INSTALL_K3S_EXEC="--bind-address=#{server_ip} --node-external-ip=#{server_ip} --flannel-iface=eth1"
  curl -sfL https://get.k3s.io | sh -
  echo "Sleeping for 10 seconds to wait for k3s to start"
  sleep 10
  sudo chown vagrant:vagrant /etc/rancher/k3s/k3s.yaml
  export KUBECONFIG=/etc/rancher/k3s/k3s.yaml
  cp /var/lib/rancher/k3s/server/token /vagrant_shared
  cp /etc/rancher/k3s/k3s.yaml /vagrant_shared
  sleep 10
  kubectl taint nodes server node-role.kubernetes.io/master=true:NoSchedule
SHELL

# Provisioning for each agent node: joins the cluster using the token
# the server copied into the shared folder.
agent_script = <<-SHELL
  export K3S_TOKEN_FILE=/vagrant_shared/token
  export K3S_URL=https://#{server_ip}:6443
  export INSTALL_K3S_EXEC="--flannel-iface=eth1"
  curl -sfL https://get.k3s.io | sh -
SHELL

Vagrant.configure("2") do |config|
  # Same base box for every machine in the cluster.
  config.vm.box = "bento/ubuntu-24.04"

  # --- Server (control plane) ---
  config.vm.define "server", primary: true do |server|
    server.vm.network "private_network", ip: server_ip
    # Shared folder used to hand the join token/kubeconfig to the agents.
    server.vm.synced_folder "./shared", "/vagrant_shared"
    server.vm.hostname = "server"
    server.vm.provider "virtualbox" do |vb|
      vb.memory = "2048"
      vb.cpus = "2"
    end
    server.vm.provision "shell", inline: server_script
  end

  # --- Agents (workers) ---
  agents.each do |agent_name, agent_ip|
    config.vm.define agent_name do |agent|
      agent.vm.network "private_network", ip: agent_ip
      agent.vm.synced_folder "./shared", "/vagrant_shared"
      agent.vm.hostname = agent_name
      agent.vm.provider "virtualbox" do |vb|
        vb.memory = "1024"
        vb.cpus = "1"
      end
      agent.vm.provision "shell", inline: agent_script
    end
  end
end
|
||||
Reference in New Issue
Block a user