Merge pull request 'Secret mgmt problem' (#767) from Qubasa-main into main
All checks were successful
checks-impure / test (push) Successful in 1m40s
checks / test (push) Successful in 2m25s

clan-bot 2024-01-29 08:15:44 +00:00
commit 549a5800a5
5 changed files with 92 additions and 76 deletions

View File

@@ -35,6 +35,7 @@ class Machine:
         self.deployment_info = json.loads(
             self.build_nix("config.system.clan.deployment.file").read_text()
         )
+        print(f"self_deployment_info: {self.deployment_info}")

     @property
     def deployment_address(self) -> str:
@@ -46,6 +47,7 @@ class Machine:
     def secrets_module(self) -> str:
         if not hasattr(self, "deployment_info"):
             self.get_deployment_info()
+        print(f"self_deployment_info2: {self.deployment_info}")
         return self.deployment_info["secretsModule"]

     @property
@@ -77,7 +79,8 @@ class Machine:
             return Path(self.flake_path)

-        print(nix_eval([f"{self.flake}"]))
-        self.flake_path = run(nix_eval([f"{self.flake}"])).stdout.strip()
+        print(f"self.flake:{self.flake}. Type: {type(self.flake)}")
+        self.flake_path = run(nix_eval([f"{self.flake} "])).stdout.strip()
+
         return Path(self.flake_path)

     @property
@@ -95,6 +98,7 @@ class Machine:
         system = config["system"]

         attr = f'clanInternals.machines."{system}".{self.name}.{attr}'
+        print(f"attr: {attr}")

         if attr in self.eval_cache and not refresh:
             return self.eval_cache[attr]
@@ -107,8 +111,10 @@ class Machine:
         else:
             flake = self.flake

-        log.info(f"evaluating {flake}#{attr}")
-        output = run(nix_eval([f"{flake}#{attr}"])).stdout.strip()
+        print(f"evaluating {flake}#{attr}")
+        cmd = nix_eval([f"{flake}#{attr}"])
+        print(f"cmd: {cmd}")
+        output = run(cmd).stdout.strip()
         self.eval_cache[attr] = output
         return output
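The hunk above splits the single `nix eval` call apart so the exact command line can be printed before it runs. A minimal sketch of the same evaluate-and-memoize pattern outside clan_cli — `eval_flake_attr` and `_EVAL_CACHE` are this sketch's own names, and it assumes a `nix` binary with flakes enabled on PATH:

```python
import subprocess

# Illustrative names, not clan_cli's API: a flake attribute is evaluated
# once via `nix eval` and the raw stdout is memoized per "flake#attr" key.
_EVAL_CACHE: dict[str, str] = {}


def eval_flake_attr(flake: str, attr: str, refresh: bool = False) -> str:
    key = f"{flake}#{attr}"
    if key in _EVAL_CACHE and not refresh:
        return _EVAL_CACHE[key]
    cmd = ["nix", "eval", "--json", key]
    # check=True surfaces a failed evaluation instead of caching garbage.
    result = subprocess.run(cmd, capture_output=True, text=True, check=True)
    _EVAL_CACHE[key] = result.stdout.strip()
    return _EVAL_CACHE[key]
```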

View File

@@ -19,7 +19,7 @@
         "**/.mypy_cache": true,
         "**/.reports": true,
         "**/.ruff_cache": true,
-        "**/result": true,
+        "**/result/**": true,
         "/nix/store/**": true
     },
     "search.exclude": {
@@ -29,6 +29,7 @@
         "**/.mypy_cache": true,
         "**/.reports": true,
         "**/.ruff_cache": true,
+        "**/result/": true,
         "/nix/store/**": true
     },
     "files.autoSave": "off"

View File

@@ -18,8 +18,7 @@ from collections.abc import Callable

 # Kill the new process and all its children by sending a SIGTERM signal to the process group
 def _kill_group(proc: mp.Process) -> None:
     pid = proc.pid
-    assert pid is not None
-    if proc.is_alive():
+    if proc.is_alive() and pid:
         os.killpg(pid, signal.SIGTERM)
     else:
         print(f"Process {proc.name} with pid {pid} is already dead", file=sys.stderr)

View File

@@ -1,7 +1,5 @@
-import sys
 import tempfile
-import weakref
 from collections.abc import Callable
 from pathlib import Path
 from typing import Any, ClassVar
@@ -18,16 +16,20 @@ from clan_vm_manager.models.interfaces import VMStatus
 from .executor import MPProcess, spawn

 gi.require_version("Gtk", "4.0")
+import logging
+import multiprocessing as mp
+import threading
+
+from clan_cli.machines.machines import Machine
 from gi.repository import Gio, GLib, GObject

+log = logging.getLogger(__name__)
+

 class VM(GObject.Object):
-    # Define a custom signal with the name "vm_stopped" and a string argument for the message
     __gsignals__: ClassVar = {
-        "vm_started": (GObject.SignalFlags.RUN_FIRST, None, [GObject.Object]),
-        "vm_stopped": (GObject.SignalFlags.RUN_FIRST, None, [GObject.Object]),
+        "vm_status_changed": (GObject.SignalFlags.RUN_FIRST, None, [GObject.Object]),
    }

    def __init__(
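The signal table collapses to a single `vm_status_changed` signal that fires on every liveness transition. For readers unfamiliar with PyGObject, a standalone sketch of declaring and consuming such a `__gsignals__` entry (the `Demo`/`status_changed` names are illustrative):

```python
from gi.repository import GObject


class Demo(GObject.Object):
    # RUN_FIRST signal carrying one GObject argument, mirroring the
    # vm_status_changed declaration above.
    __gsignals__ = {
        "status_changed": (GObject.SignalFlags.RUN_FIRST, None, [GObject.Object]),
    }


demo = Demo()
# Handlers receive (emitter, *signal_args).
demo.connect("status_changed", lambda emitter, payload: print("changed:", payload))
demo.emit("status_changed", demo)
```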
@@ -35,63 +37,78 @@ class VM(GObject.Object):
         icon: Path,
         status: VMStatus,
         data: HistoryEntry,
-        process: MPProcess | None = None,
     ) -> None:
         super().__init__()
         self.data = data
-        self.process = process
+        self.process = MPProcess("dummy", mp.Process(), Path("./dummy"))
+        self._watcher_id: int = 0
         self.status = status
+        self._last_liveness: bool = False
         self.log_dir = tempfile.TemporaryDirectory(
             prefix="clan_vm-", suffix=f"-{self.data.flake.flake_attr}"
         )
-        self._finalizer = weakref.finalize(self, self.stop)

-    def start(self) -> None:
-        if self.process is not None:
-            show_error_dialog(ClanError("VM is already running"))
+    def __start(self) -> None:
+        if self.is_running():
+            log.warn("VM is already running")
             return
-        vm = vms.run.inspect_vm(
-            flake_url=self.data.flake.flake_url, flake_attr=self.data.flake.flake_attr
+        machine = Machine(
+            name=self.data.flake.flake_attr,
+            flake=Path(self.data.flake.flake_url),
         )
+        vm = vms.run.inspect_vm(machine)
         self.process = spawn(
             on_except=None,
             log_dir=Path(str(self.log_dir.name)),
             func=vms.run.run_vm,
             vm=vm,
         )
-        self.emit("vm_started", self)
-        GLib.timeout_add(50, self.vm_stopped_task)

-    def start_async(self) -> None:
-        threading.Thread(target=self.start).start()
+    def start(self) -> None:
+        if self.is_running():
+            log.warn("VM is already running")
+            return
+        threading.Thread(target=self.__start).start()
+        if self._watcher_id == 0:
+            # Every 50ms check if the VM is still running
+            self._watcher_id = GLib.timeout_add(50, self._vm_watcher_task)
+
+        if self._watcher_id == 0:
+            log.error("Failed to add watcher")
+            raise ClanError("Failed to add watcher")
+
+    def _vm_watcher_task(self) -> bool:
+        if self.is_running() != self._last_liveness:
+            self.emit("vm_status_changed", self)
+        prev_liveness = self._last_liveness
+        self._last_liveness = self.is_running()
+
+        # If the VM was running and now it is not, remove the watcher
+        if prev_liveness and not self.is_running():
+            return GLib.SOURCE_REMOVE

-    def vm_stopped_task(self) -> bool:
-        if not self.is_running():
-            self.emit("vm_stopped", self)
-            return GLib.SOURCE_REMOVE
         return GLib.SOURCE_CONTINUE

     def is_running(self) -> bool:
-        if self.process is not None:
-            return self.process.proc.is_alive()
-        return False
+        return self.process.proc.is_alive()

     def get_id(self) -> str:
         return f"{self.data.flake.flake_url}#{self.data.flake.flake_attr}"

-    def stop_async(self) -> None:
-        threading.Thread(target=self.stop).start()
-
     def stop(self) -> None:
-        if self.process is None:
-            print("VM is already stopped", file=sys.stderr)
+        log.info("Stopping VM")
+        if not self.is_running():
+            log.error("VM already stopped")
             return
         self.process.kill_group()
-        self.process = None

     def read_log(self) -> str:
-        if self.process is None:
+        if not self.process.out_file.exists():
+            log.error(f"Log file {self.process.out_file} does not exist")
             return ""
         return self.process.out_file.read_text()
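Taken together, `start` now registers a 50 ms `GLib.timeout_add` poller and `_vm_watcher_task` does edge detection: emit on any liveness change, and return `GLib.SOURCE_REMOVE` after a running-to-stopped transition so the source unregisters itself. A stripped-down sketch of that idiom with an injected `is_running` callable (the class and names are this sketch's own):

```python
from collections.abc import Callable

from gi.repository import GLib


class LivenessWatcher:
    def __init__(self, is_running: Callable[[], bool]) -> None:
        self.is_running = is_running
        self._last_liveness = False

    def start(self) -> None:
        GLib.timeout_add(50, self._task)  # poll on the GLib main loop

    def _task(self) -> bool:
        alive = self.is_running()
        if alive != self._last_liveness:
            print("liveness changed ->", alive)  # stands in for emit("vm_status_changed")
        prev, self._last_liveness = self._last_liveness, alive
        if prev and not alive:
            return GLib.SOURCE_REMOVE  # running -> stopped: stop polling
        return GLib.SOURCE_CONTINUE
```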
@@ -137,14 +154,6 @@ class VMS:
         else:
             self.refresh()

-    def handle_vm_stopped(self, func: Callable[[VM, VM], None]) -> None:
-        for vm in self.list_store:
-            vm.connect("vm_stopped", func)
-
-    def handle_vm_started(self, func: Callable[[VM, VM], None]) -> None:
-        for vm in self.list_store:
-            vm.connect("vm_started", func)
-
     def get_running_vms(self) -> list[VM]:
         return list(filter(lambda vm: vm.is_running(), self.list_store))

View File

@@ -8,7 +8,7 @@ from clan_vm_manager.models.use_join import Join, JoinValue
 from clan_vm_manager.models.use_views import Views

 gi.require_version("Adw", "1")
-from gi.repository import Adw, Gdk, Gio, GObject, Gtk
+from gi.repository import Adw, Gio, GObject, Gtk

 from clan_vm_manager.models.use_vms import VM, VMS
@@ -45,10 +45,6 @@ class ClanList(Gtk.Box):
         vms = VMS.use()
         join = Join.use()

-        # TODO: Move this up to create_widget and connect every VM signal to its corresponding switch
-        vms.handle_vm_stopped(self.stopped_vm)
-        vms.handle_vm_started(self.started_vm)
-
         self.join_boxed_list = create_boxed_list(
             model=join.list_store, render_row=self.render_join_row
         )
@@ -77,10 +73,10 @@
         if not VMS.use().list_store.get_n_items():
             self.vm_boxed_list.add_css_class("no-shadow")

-    def render_vm_row(self, boxed_list: Gtk.ListBox, item: VM) -> Gtk.Widget:
+    def render_vm_row(self, boxed_list: Gtk.ListBox, vm: VM) -> Gtk.Widget:
         if boxed_list.has_css_class("no-shadow"):
             boxed_list.remove_css_class("no-shadow")
-        flake = item.data.flake
+        flake = vm.data.flake
         row = Adw.ActionRow()

         # Title
@@ -90,16 +86,16 @@
         row.set_title_selectable(True)

         # Subtitle
-        row.set_subtitle(item.get_id())
+        row.set_subtitle(vm.get_id())
         row.set_subtitle_lines(1)

-        # Avatar
-        avatar = Adw.Avatar()
-        avatar.set_custom_image(Gdk.Texture.new_from_filename(flake.icon))
-        avatar.set_text(flake.clan_name + " " + flake.flake_attr)
-        avatar.set_show_initials(True)
-        avatar.set_size(50)
-        row.add_prefix(avatar)
+        # # Avatar
+        # avatar = Adw.Avatar()
+        # avatar.set_custom_image(Gdk.Texture.new_from_filename(flake.icon))
+        # avatar.set_text(flake.clan_name + " " + flake.flake_attr)
+        # avatar.set_show_initials(True)
+        # avatar.set_size(50)
+        # row.add_prefix(avatar)

         # Switch
         switch = Gtk.Switch()
@@ -107,7 +103,8 @@
         box.set_valign(Gtk.Align.CENTER)
         box.append(switch)

-        switch.connect("notify::active", partial(self.on_row_toggle, item))
+        switch.connect("notify::active", partial(self.on_row_toggle, vm))
+        vm.connect("vm_status_changed", partial(self.vm_status_changed, switch))
         row.add_suffix(box)

         return row
@@ -148,22 +145,17 @@
         return row

-    def started_vm(self, vm: VM, _vm: VM) -> None:
-        print("VM started", vm.data.flake.flake_attr)
-
-    def stopped_vm(self, vm: VM, _vm: VM) -> None:
-        print("VM stopped", vm.data.flake.flake_attr)
-
     def show_error_dialog(self, error: str) -> None:
-        dialog = Gtk.MessageDialog(
-            parent=self.get_toplevel(),
-            modal=True,
-            message_type=Gtk.MessageType.ERROR,
-            buttons=Gtk.ButtonsType.OK,
-            text=error,
-        )
-        dialog.run()
-        dialog.destroy()
+        p = Views.use().main_window
+        # app = Gio.Application.get_default()
+        # p = Gtk.Application.get_active_window(app)
+        dialog = Adw.MessageDialog(heading="Error")
+        dialog.add_response("ok", "ok")
+        dialog.set_body(error)
+        dialog.set_transient_for(p)  # set the parent window of the dialog
+        dialog.choose()

     def on_trust_clicked(self, item: JoinValue, widget: Gtk.Widget) -> None:
         def on_join(_history: list[HistoryEntry]) -> None:
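GTK4 removed the blocking `Gtk.Dialog.run()`, so the old `Gtk.MessageDialog` + `run()`/`destroy()` pair had to go; the commit switches to `Adw.MessageDialog`, parented to the main window pulled from `Views`. A minimal sketch of the libadwaita pattern (here using `present()` for a fire-and-forget dialog; the commit's `choose()` variant can additionally deliver the picked response to an async callback):

```python
import gi

gi.require_version("Adw", "1")
gi.require_version("Gtk", "4.0")
from gi.repository import Adw, Gtk


def show_error_dialog(parent: Gtk.Window, error: str) -> None:
    dialog = Adw.MessageDialog(heading="Error")
    dialog.set_body(error)
    dialog.add_response("ok", "Ok")  # response id, button label
    dialog.set_transient_for(parent)  # keep the dialog on top of its parent window
    dialog.present()  # non-blocking; control returns to the main loop
```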
@@ -187,7 +179,16 @@
         print("Toggled", vm.data.flake.flake_attr, "active:", row.get_active())

         if row.get_active():
-            vm.start_async()
+            row.set_state(False)
+            vm.start()

         if not row.get_active():
-            vm.stop_async()
+            row.set_state(True)
+            vm.stop()
+
+    def vm_status_changed(self, switch: Gtk.Switch, vm: VM, _vm: VM) -> None:
+        switch.set_active(vm.is_running())
+        switch.set_state(vm.is_running())
+
+        if not vm.is_running() and vm.process.proc.exitcode != 0:
+            self.show_error_dialog(vm.read_log())
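The toggle handler now parks the switch in a pending position with `set_state(...)` and lets `vm_status_changed` settle `active`/`state` once the real liveness is known, surfacing the VM log when the process exited non-zero. A sketch of the underlying delayed-state idiom for `Gtk.Switch`, where `active` tracks the user's request and `state` is flipped only on backend confirmation (`request_backend_change` is a hypothetical stand-in for `vm.start()`/`vm.stop()`):

```python
import gi

gi.require_version("Gtk", "4.0")
from gi.repository import Gtk

Gtk.init()  # widgets need an initialized GTK; normally the Application does this


def request_backend_change(requested: bool) -> None:
    """Hypothetical stand-in for vm.start()/vm.stop(); confirmation arrives later."""


def on_state_set(switch: Gtk.Switch, requested: bool) -> bool:
    request_backend_change(requested)
    return True  # True = keep "state" unchanged until the backend confirms


def on_backend_confirmed(switch: Gtk.Switch, running: bool) -> None:
    switch.set_state(running)  # now let the switch settle into the real state


switch = Gtk.Switch()
switch.connect("state-set", on_state_set)
```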