commit 44bf0fd4a7 (parent 10cc64ab5c)

salt/states/packages/init.sls (new file, 5 lines)
@@ -0,0 +1,5 @@
Install common packages:
  pkg.installed:
    - pkgs:
      - vim
      - jq

salt/states/tlu-harvester/files/shutdown_harvester (new executable file, 212 lines)
@@ -0,0 +1,212 @@
#!/usr/bin/env python3
import curses
import json
import subprocess
import sys
import time


# curses color pairs used for the status display.
COLOR_STANDARD = 1
COLOR_VM_OFF = 2
COLOR_VM_PAUSE = 3
COLOR_VM_RUNNING = 4
COLOR_CORDONED = 5
COLOR_ACTIVE = 6
COLOR_ALERT = 7

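# Run a command; on success return its stdout parsed as JSON, otherwise exit with stderr.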
def _exec(cmd):
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = process.communicate()
    if process.returncode == 0:
        return json.loads(stdout)
    else:
        sys.exit(stderr)

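# List all VMs in every namespace.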
def get_vms():
    return _exec("kubectl get vms -A -o json".split())

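# Ask Kubernetes to stop a VM by patching spec.running to false.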
def stop_vm(name, ns):
    patch = json.dumps({"spec": {"running": False}})
    cmd = f"kubectl -n {ns} -o json patch vm {name} --type merge -p".split()
    cmd.append(patch)
    _exec(cmd)

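# Send one ICMP echo request; True means the host answered.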
def ping(host):
    cmd = f"ping -c 1 {host}".split()
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    process.communicate()
    return process.returncode == 0


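# Redraw the VM table: one color-coded row per VM.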
def update_vm_status(stdscr, vms):
    y_pos = 3
    for item in vms['items']:
        printable = item['status']['printableStatus']
        condition = item['status']['conditions'][0]
        terminating = "reason" in condition and condition['reason'] == "PodTerminating"
        if printable in ("Running", "Starting"):
            stdscr.attron(curses.color_pair(COLOR_VM_RUNNING))
        elif printable == "Paused":
            stdscr.attron(curses.color_pair(COLOR_VM_PAUSE))
        else:
            stdscr.attron(curses.color_pair(COLOR_VM_OFF))
        # Blink while a VM is still starting or its pod is terminating.
        if terminating or printable == "Starting":
            stdscr.attron(curses.A_BLINK)
        else:
            stdscr.attroff(curses.A_BLINK)
        status = "Stopping" if terminating else printable
        stdscr.addstr(y_pos, 3, status.ljust(12))
        stdscr.attroff(curses.A_BLINK)
        stdscr.attron(curses.color_pair(COLOR_STANDARD))
        stdscr.addstr(y_pos, 15, item['metadata']['namespace'])
        stdscr.addstr(y_pos, 40, item['metadata']['name'])
        y_pos = y_pos + 1

    stdscr.refresh()

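# Redraw the node table: ping reachability, kubelet health, and cordon state.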
def update_node_status(stdscr, nodes):
    y_pos = 3
    for item in nodes['items']:
        hostname = ""
        ip = ""
        for address in item['status']['addresses']:
            if address['type'] == 'InternalIP':
                ip = address['address']
            elif address['type'] == 'Hostname':
                hostname = address['address']

        if ping(hostname):
            pingresult = "Ok"
            stdscr.attron(curses.color_pair(COLOR_ACTIVE))
        else:
            pingresult = "no"
            stdscr.attron(curses.color_pair(COLOR_ALERT))
        stdscr.addstr(y_pos, 2, pingresult.ljust(8))

        # The last condition in a node's status is normally Ready;
        # "Unknown" means the kubelet has stopped reporting.
        if item['status']['conditions'][-1]['status'] == "Unknown":
            kubelet = "Unknown"
            stdscr.attron(curses.color_pair(COLOR_ALERT))
        else:
            kubelet = "Ok"
            stdscr.attron(curses.color_pair(COLOR_ACTIVE))
        stdscr.addstr(y_pos, 10, kubelet.ljust(8))

        if "unschedulable" in item['spec'] and item['spec']['unschedulable']:
            state = "Cordoned"
            stdscr.attron(curses.color_pair(COLOR_CORDONED))
        else:
            state = "Active"
            stdscr.attron(curses.color_pair(COLOR_ACTIVE))
        stdscr.addstr(y_pos, 20, state.ljust(8))

        stdscr.attron(curses.color_pair(COLOR_STANDARD))
        stdscr.addstr(y_pos, 30, hostname)
        stdscr.addstr(y_pos, 45, ip)
        y_pos = y_pos + 1

    stdscr.refresh()


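# Mark every node unschedulable with kubectl cordon.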
def cordon_nodes(nodes):
    for item in nodes['items']:
        cmd = f"kubectl cordon {item['metadata']['name']}".split()
        process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = process.communicate()
        if process.returncode != 0:
            sys.exit(stderr)

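# Power off, via ssh, every node that still answers ping.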
def shutdown_nodes(nodes):
    for item in nodes['items']:
        if ping(item['metadata']['name']):
            cmd = f"ssh {item['metadata']['name']} sudo shutdown -h now".split()
            process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            process.communicate()

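# Orchestrates the three phases: stop VMs, cordon nodes, shut down hosts.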
def main(stdscr):
    stdscr.clear()
    stdscr.refresh()

    curses.curs_set(0)
    curses.start_color()
    curses.init_pair(COLOR_STANDARD, curses.COLOR_WHITE, curses.COLOR_BLACK)
    curses.init_pair(COLOR_VM_OFF, curses.COLOR_RED, curses.COLOR_BLACK)
    curses.init_pair(COLOR_VM_PAUSE, curses.COLOR_YELLOW, curses.COLOR_BLACK)
    curses.init_pair(COLOR_VM_RUNNING, curses.COLOR_GREEN, curses.COLOR_BLACK)
    curses.init_pair(COLOR_CORDONED, curses.COLOR_WHITE, curses.COLOR_CYAN)
    curses.init_pair(COLOR_ACTIVE, curses.COLOR_WHITE, curses.COLOR_GREEN)
    curses.init_pair(COLOR_ALERT, curses.COLOR_WHITE, curses.COLOR_RED)

    # Phase 1: stop every VM, then poll until none is running or terminating.
    stdscr.addstr(0, 0, "Stopping all VMs", curses.A_BOLD)
    running_vms = True
    vms = get_vms()
    stdscr.addstr(2, 3, "State", curses.A_UNDERLINE)
    stdscr.addstr(2, 15, "Namespace", curses.A_UNDERLINE)
    stdscr.addstr(2, 40, "Name", curses.A_UNDERLINE)
    update_vm_status(stdscr, vms)
    for item in vms['items']:
        stop_vm(item['metadata']['name'], item['metadata']['namespace'])

    while running_vms:
        vms = get_vms()
        update_vm_status(stdscr, vms)
        running_vms = False
        for item in vms['items']:
            if item['spec']['running']:
                running_vms = True
                break
            condition = item['status']['conditions'][0]
            if "reason" in condition and condition['reason'] == "PodTerminating":
                running_vms = True
                break
        time.sleep(0.5)
    time.sleep(0.5)

    # Phase 2: cordon every node and poll until all are unschedulable.
    stdscr.clear()
    stdscr.refresh()
    stdscr.addstr(0, 0, "Cordon all nodes", curses.A_BOLD)
    stdscr.addstr(2, 2, "Ping", curses.A_UNDERLINE)
    stdscr.addstr(2, 10, "kubelet", curses.A_UNDERLINE)
    stdscr.addstr(2, 20, "State", curses.A_UNDERLINE)
    stdscr.addstr(2, 30, "Name", curses.A_UNDERLINE)
    stdscr.addstr(2, 45, "Host IP", curses.A_UNDERLINE)
    nodes = _exec("kubectl get nodes -o json".split())
    update_node_status(stdscr, nodes)
    uncordoned_nodes = True
    cordon_nodes(nodes)
    while uncordoned_nodes:
        nodes = _exec("kubectl get nodes -o json".split())
        update_node_status(stdscr, nodes)
        uncordoned_nodes = False
        for item in nodes['items']:
            if "unschedulable" not in item['spec'] or not item['spec']['unschedulable']:
                uncordoned_nodes = True

    # Phase 3: shut the hosts down and poll until they stop answering pings.
    stdscr.addstr(0, 0, "Shutting down all hosts", curses.A_BOLD)
    shutdown_nodes(nodes)
    nodes_up = True
    while nodes_up:
        update_node_status(stdscr, nodes)
        nodes_up = False
        for item in nodes['items']:
            if ping(item['metadata']['name']):
                nodes_up = True
                break
        time.sleep(2)


if __name__ == "__main__":
    curses.wrapper(main)

salt/states/tlu-harvester/init.sls
@@ -2,3 +2,11 @@ include:
  - tlu-harvester.pxe
  - tlu-harvester.manifests
  - tlu-harvester.images

Copy shutdown_harvester script:
  file.managed:
    - name: /home/{{ pillar['username'] }}/bin/shutdown_harvester
    - source: salt://tlu-harvester/files/shutdown_harvester
    - user: {{ pillar['username'] }}
    - group: users
    - mode: "0755"

salt/states/top.sls
@@ -1,5 +1,6 @@
base:
  '*':
    - packages
    - hosts
    - vlan
    - hostapd