diff --git a/agent/cli.py b/agent/cli.py
index 97ba13c..16b9211 100644
--- a/agent/cli.py
+++ b/agent/cli.py
@@ -1,410 +1,410 @@
-from __future__ import annotations
-
-import json
-import os
-import shutil
-import sys
-from pathlib import Path
-from typing import TYPE_CHECKING
-
-import click
-import requests
-
-from agent.proxy import Proxy
-from agent.server import Server
-from agent.utils import get_timestamp
-
-if TYPE_CHECKING:
- from IPython.terminal.embed import InteractiveShellEmbed
-
-
-@click.group()
-def cli():
- pass
-
-
-@cli.group()
-def setup():
- pass
-
-
-@cli.command()
-@click.option("--restart-web-workers", default=True)
-@click.option("--restart-rq-workers", default=True)
-@click.option("--restart-redis", default=True)
-@click.option("--skip-repo-setup", default=False)
-@click.option("--skip-patches", default=False)
-def update(restart_web_workers, restart_rq_workers, restart_redis, skip_repo_setup, skip_patches):
- Server().update_agent_cli(
- restart_redis=restart_redis,
- restart_rq_workers=restart_rq_workers,
- restart_web_workers=restart_web_workers,
- skip_repo_setup=skip_repo_setup,
- skip_patches=skip_patches,
- )
-
-
-@cli.command()
-def run_patches():
- from agent.patch_handler import run_patches
-
- run_patches()
-
-
-@cli.command()
-@click.option("--password", required=True)
-def ping_server(password: str):
- """Ping web api on localhost and check for pong."""
- res = requests.get(
- "http://localhost:25052/ping",
- headers={"Authorization": f"bearer {password}"},
- )
- res = res.json()
- if res["message"] != "pong":
- raise Exception("pong not in response")
- print(res)
-
-
-@setup.command()
-@click.option("--name", required=True)
-@click.option("--user", default="jingrow")
-@click.option("--workers", required=True, type=int)
-@click.option("--proxy-ip", required=False, type=str, default=None)
-@click.option("--sentry-dsn", required=False, type=str)
-@click.option("--jcloud-url", required=False, type=str)
-def config(name, user, workers, proxy_ip=None, sentry_dsn=None, jcloud_url=None):
- config = {
- "benches_directory": f"/home/{user}/benches",
- "name": name,
- "tls_directory": f"/home/{user}/agent/tls",
- "nginx_directory": f"/home/{user}/agent/nginx",
- "redis_port": 25025,
- "user": user,
- "workers": workers,
- "gunicorn_workers": 2,
- "web_port": 25052,
- "jcloud_url": "https://jcloud.jingrow.com",
- }
- if jcloud_url:
- config["jcloud_url"] = jcloud_url
- if proxy_ip:
- config["proxy_ip"] = proxy_ip
- if sentry_dsn:
- config["sentry_dsn"] = sentry_dsn
-
- with open("config.json", "w") as f:
- json.dump(config, f, sort_keys=True, indent=4)
-
-
-@setup.command()
-def pyspy():
- privileges_line = "jingrow ALL = (root) NOPASSWD: /home/jingrow/agent/env/bin/py-spy"
- with open("/etc/sudoers.d/jingrow", "a+") as sudoers:
- sudoers.seek(0)
- lines = sudoers.read().splitlines()
-
- if privileges_line not in lines:
- sudoers.write(privileges_line + "\n")
-
-
-@setup.command()
-@click.option("--password", prompt=True, hide_input=True)
-def authentication(password):
- Server().setup_authentication(password)
-
-
-@setup.command()
-@click.option("--sentry-dsn", required=True)
-def sentry(sentry_dsn):
- Server().setup_sentry(sentry_dsn)
-
-
-@setup.command()
-def supervisor():
- Server().setup_supervisor()
-
-
-@setup.command()
-def nginx():
- Server().setup_nginx()
-
-
-@setup.command()
-@click.option("--domain")
-@click.option("--jcloud-url")
-def proxy(domain=None, jcloud_url=None):
- proxy = Proxy()
- if domain:
- config = proxy.get_config(for_update=True)
- config["domain"] = domain
- config["jcloud_url"] = jcloud_url
- proxy.set_config(config, indent=4)
- proxy.setup_proxy()
-
-
-@setup.command()
-@click.option("--domain")
-def standalone(domain=None):
- server = Server()
- if domain:
- config = server.get_config(for_update=True)
- config["domain"] = domain
- config["standalone"] = True
- server.set_config(config, indent=4)
-
-
-@setup.command()
-def database():
- from agent.job import JobModel, PatchLogModel, StepModel
- from agent.job import agent_database as database
-
- database.create_tables([JobModel, StepModel, PatchLogModel])
-
-
-@setup.command()
-def site_analytics():
- from crontab import CronTab
-
- script_directory = os.path.dirname(__file__)
- agent_directory = os.path.dirname(os.path.dirname(script_directory))
- logs_directory = os.path.join(agent_directory, "logs")
- script = os.path.join(script_directory, "analytics.py")
- stdout = os.path.join(logs_directory, "analytics.log")
- stderr = os.path.join(logs_directory, "analytics.error.log")
-
- cron = CronTab(user=True)
- command = f"cd {agent_directory} && {sys.executable} {script} 1>> {stdout} 2>> {stderr}"
-
- if command in str(cron):
- cron.remove_all(command=command)
-
- job = cron.new(command=command)
- job.hour.on(23)
- job.minute.on(0)
- cron.write()
-
-
-@setup.command()
-def usage():
- from crontab import CronTab
-
- script_directory = os.path.dirname(__file__)
- agent_directory = os.path.dirname(os.path.dirname(script_directory))
- logs_directory = os.path.join(agent_directory, "logs")
- script = os.path.join(script_directory, "usage.py")
- stdout = os.path.join(logs_directory, "usage.log")
- stderr = os.path.join(logs_directory, "usage.error.log")
-
- cron = CronTab(user=True)
- command = f"cd {agent_directory} && {sys.executable} {script} 1>> {stdout} 2>> {stderr}"
-
- if command not in str(cron):
- job = cron.new(command=command)
- job.every(6).hours()
- job.minute.on(30)
- cron.write()
-
-
-@setup.command()
-def nginx_defer_reload():
- from crontab import CronTab
-
- script_directory = os.path.dirname(__file__)
- agent_directory = os.path.dirname(os.path.dirname(script_directory))
- logs_directory = os.path.join(agent_directory, "logs")
- script = os.path.join(script_directory, "nginx_defer_reload.py")
- stdout = os.path.join(logs_directory, "nginx_defer_reload.log")
- stderr = os.path.join(logs_directory, "nginx_defer_reload.error.log")
-
- cron = CronTab(user=True)
- command = f"cd {agent_directory} && {sys.executable} {script} 1>> {stdout} 2>> {stderr}"
-
- if command not in str(cron):
- job = cron.new(command=command)
- job.minute.every(2)
- cron.write()
-
-
-@setup.command()
-def registry():
- Server().setup_registry()
-
-
-@setup.command()
-@click.option("--url", required=True)
-@click.option("--token", required=True)
-def monitor(url, token):
- from agent.monitor import Monitor
-
- server = Monitor()
- server.update_config({"monitor": True, "jcloud_url": url, "jcloud_token": token})
- server.discover_targets()
-
-
-@setup.command()
-def log():
- Server().setup_log()
-
-
-@setup.command()
-def analytics():
- Server().setup_analytics()
-
-
-@setup.command()
-def trace():
- Server().setup_trace()
-
-
-@setup.command()
-@click.option("--password", prompt=True, hide_input=True)
-def proxysql(password):
- Server().setup_proxysql(password)
-
-
-@cli.group()
-def run():
- pass
-
-
-@run.command()
-def web():
- executable = shutil.which("gunicorn")
- port = Server().config["web_port"]
- arguments = [
- executable,
- "--bind",
- f"127.0.0.1:{port}",
- "--reload",
- "--preload",
- "agent.web:application",
- ]
- os.execv(executable, arguments)
-
-
-@run.command()
-def worker():
- executable = shutil.which("rq")
- port = Server().config["redis_port"]
- arguments = [
- executable,
- "worker",
- "--url",
- f"redis://127.0.0.1:{port}",
- ]
- os.execv(executable, arguments)
-
-
-@cli.command()
-def discover():
- from agent.monitor import Monitor
-
- Monitor().discover_targets()
-
-
-@cli.group()
-def bench():
- pass
-
-
-@bench.command()
-@click.argument("bench", required=False)
-def start(bench):
- if bench:
- return Server().benches[bench].start()
- return Server().start_all_benches()
-
-
-@bench.command()
-@click.argument("bench", required=False)
-def stop(bench):
- if bench:
- return Server().benches[bench].stop()
- return Server().stop_all_benches()
-
-
-@cli.command(help="Run iPython console.")
-@click.option(
- "--config-path",
- required=False,
- type=str,
- help="Path to agent config.json.",
-)
-def console(config_path):
- from atexit import register
-
- from IPython.terminal.embed import InteractiveShellEmbed
-
- terminal = InteractiveShellEmbed.instance()
-
- config_dir = get_config_dir(config_path)
- if config_dir:
- try:
- locals()["server"] = Server(config_dir)
- print(f"In namespace:\nserver = agent.server.Server('{config_dir}')")
- except Exception:
- print(f"Could not initialize agent.server.Server('{config_dir}')")
-
- elif config_path:
- print(f"Could not find config.json at '{config_path}'")
- else:
- print("Could not find config.json use --config-path to specify")
-
- register(store_ipython_logs, terminal, config_dir)
-
- # ref: https://stackoverflow.com/a/74681224
- try:
- from IPython.core import ultratb
-
- ultratb.VerboseTB._tb_highlight = "bg:ansibrightblack"
- except Exception:
- pass
-
- terminal.colors = "neutral"
- terminal.display_banner = False
- terminal()
-
-
-def get_config_dir(config_path: str | None = None) -> str | None:
- cwd = os.getcwd()
- if config_path is None:
- config_path = cwd
-
- config_dir = Path(config_path)
-
- if config_dir.suffix == "json" and config_dir.exists():
- return config_dir.parent.as_posix()
-
- if config_dir.suffix != "":
- config_dir = config_dir.parent
-
- potential = [
- Path("/home/jingrow/agent/config.json"),
- config_dir / "config.json",
- config_dir / ".." / "config.json",
- ]
-
- for p in potential:
- if not p.exists():
- continue
- try:
- return p.parent.relative_to(cwd).as_posix()
- except Exception:
- return p.parent.as_posix()
- return None
-
-
-def store_ipython_logs(terminal: InteractiveShellEmbed, config_dir: str | None):
- if not config_dir:
- config_dir = os.getcwd()
-
- log_path = Path(config_dir) / "logs" / "agent_console.log"
- log_path.parent.mkdir(exist_ok=True)
-
- with log_path.open("a") as file:
- timestamp = get_timestamp()
-
- file.write(f"# SESSION BEGIN {timestamp}\n")
- for line in terminal.history_manager.get_range():
- file.write(f"{line[2]}\n")
- file.write(f"# SESSION END {timestamp}\n\n")
+from __future__ import annotations
+
+import json
+import os
+import shutil
+import sys
+from pathlib import Path
+from typing import TYPE_CHECKING
+
+import click
+import requests
+
+from agent.proxy import Proxy
+from agent.server import Server
+from agent.utils import get_timestamp
+
+if TYPE_CHECKING:
+ from IPython.terminal.embed import InteractiveShellEmbed
+
+
+@click.group()
+def cli():
+ pass
+
+
+@cli.group()
+def setup():
+ pass
+
+
+@cli.command()
+@click.option("--restart-web-workers", default=True)
+@click.option("--restart-rq-workers", default=True)
+@click.option("--restart-redis", default=True)
+@click.option("--skip-repo-setup", default=False)
+@click.option("--skip-patches", default=False)
+def update(restart_web_workers, restart_rq_workers, restart_redis, skip_repo_setup, skip_patches):
+ Server().update_agent_cli(
+ restart_redis=restart_redis,
+ restart_rq_workers=restart_rq_workers,
+ restart_web_workers=restart_web_workers,
+ skip_repo_setup=skip_repo_setup,
+ skip_patches=skip_patches,
+ )
+
+
+@cli.command()
+def run_patches():
+ from agent.patch_handler import run_patches
+
+ run_patches()
+
+
+@cli.command()
+@click.option("--password", required=True)
+def ping_server(password: str):
+ """Ping web api on localhost and check for pong."""
+ res = requests.get(
+ "http://localhost:25052/ping",
+ headers={"Authorization": f"bearer {password}"},
+ )
+ res = res.json()
+ if res["message"] != "pong":
+ raise Exception("pong not in response")
+ print(res)
+
+
+@setup.command()
+@click.option("--name", required=True)
+@click.option("--user", default="jingrow")
+@click.option("--workers", required=True, type=int)
+@click.option("--proxy-ip", required=False, type=str, default=None)
+@click.option("--sentry-dsn", required=False, type=str)
+@click.option("--jcloud-url", required=False, type=str)
+def config(name, user, workers, proxy_ip=None, sentry_dsn=None, jcloud_url=None):
+ config = {
+ "benches_directory": f"/home/{user}/benches",
+ "name": name,
+ "tls_directory": f"/home/{user}/agent/tls",
+ "nginx_directory": f"/home/{user}/agent/nginx",
+ "redis_port": 25025,
+ "user": user,
+ "workers": workers,
+ "gunicorn_workers": 2,
+ "web_port": 25052,
+ "jcloud_url": "https://cloud.jingrow.com",
+ }
+ if jcloud_url:
+ config["jcloud_url"] = jcloud_url
+ if proxy_ip:
+ config["proxy_ip"] = proxy_ip
+ if sentry_dsn:
+ config["sentry_dsn"] = sentry_dsn
+
+ with open("config.json", "w") as f:
+ json.dump(config, f, sort_keys=True, indent=4)
+
+
+@setup.command()
+def pyspy():
+ privileges_line = "jingrow ALL = (root) NOPASSWD: /home/jingrow/agent/env/bin/py-spy"
+ with open("/etc/sudoers.d/jingrow", "a+") as sudoers:
+ sudoers.seek(0)
+ lines = sudoers.read().splitlines()
+
+ if privileges_line not in lines:
+ sudoers.write(privileges_line + "\n")
+
+
+@setup.command()
+@click.option("--password", prompt=True, hide_input=True)
+def authentication(password):
+ Server().setup_authentication(password)
+
+
+@setup.command()
+@click.option("--sentry-dsn", required=True)
+def sentry(sentry_dsn):
+ Server().setup_sentry(sentry_dsn)
+
+
+@setup.command()
+def supervisor():
+ Server().setup_supervisor()
+
+
+@setup.command()
+def nginx():
+ Server().setup_nginx()
+
+
+@setup.command()
+@click.option("--domain")
+@click.option("--jcloud-url")
+def proxy(domain=None, jcloud_url=None):
+ proxy = Proxy()
+ if domain:
+ config = proxy.get_config(for_update=True)
+ config["domain"] = domain
+ config["jcloud_url"] = jcloud_url
+ proxy.set_config(config, indent=4)
+ proxy.setup_proxy()
+
+
+@setup.command()
+@click.option("--domain")
+def standalone(domain=None):
+ server = Server()
+ if domain:
+ config = server.get_config(for_update=True)
+ config["domain"] = domain
+ config["standalone"] = True
+ server.set_config(config, indent=4)
+
+
+@setup.command()
+def database():
+ from agent.job import JobModel, PatchLogModel, StepModel
+ from agent.job import agent_database as database
+
+ database.create_tables([JobModel, StepModel, PatchLogModel])
+
+
+@setup.command()
+def site_analytics():
+ from crontab import CronTab
+
+ script_directory = os.path.dirname(__file__)
+ agent_directory = os.path.dirname(os.path.dirname(script_directory))
+ logs_directory = os.path.join(agent_directory, "logs")
+ script = os.path.join(script_directory, "analytics.py")
+ stdout = os.path.join(logs_directory, "analytics.log")
+ stderr = os.path.join(logs_directory, "analytics.error.log")
+
+ cron = CronTab(user=True)
+ command = f"cd {agent_directory} && {sys.executable} {script} 1>> {stdout} 2>> {stderr}"
+
+ if command in str(cron):
+ cron.remove_all(command=command)
+
+ job = cron.new(command=command)
+ job.hour.on(23)
+ job.minute.on(0)
+ cron.write()
+
+
+@setup.command()
+def usage():
+ from crontab import CronTab
+
+ script_directory = os.path.dirname(__file__)
+ agent_directory = os.path.dirname(os.path.dirname(script_directory))
+ logs_directory = os.path.join(agent_directory, "logs")
+ script = os.path.join(script_directory, "usage.py")
+ stdout = os.path.join(logs_directory, "usage.log")
+ stderr = os.path.join(logs_directory, "usage.error.log")
+
+ cron = CronTab(user=True)
+ command = f"cd {agent_directory} && {sys.executable} {script} 1>> {stdout} 2>> {stderr}"
+
+ if command not in str(cron):
+ job = cron.new(command=command)
+ job.every(6).hours()
+ job.minute.on(30)
+ cron.write()
+
+
+@setup.command()
+def nginx_defer_reload():
+ from crontab import CronTab
+
+ script_directory = os.path.dirname(__file__)
+ agent_directory = os.path.dirname(os.path.dirname(script_directory))
+ logs_directory = os.path.join(agent_directory, "logs")
+ script = os.path.join(script_directory, "nginx_defer_reload.py")
+ stdout = os.path.join(logs_directory, "nginx_defer_reload.log")
+ stderr = os.path.join(logs_directory, "nginx_defer_reload.error.log")
+
+ cron = CronTab(user=True)
+ command = f"cd {agent_directory} && {sys.executable} {script} 1>> {stdout} 2>> {stderr}"
+
+ if command not in str(cron):
+ job = cron.new(command=command)
+ job.minute.every(2)
+ cron.write()
+
+
+@setup.command()
+def registry():
+ Server().setup_registry()
+
+
+@setup.command()
+@click.option("--url", required=True)
+@click.option("--token", required=True)
+def monitor(url, token):
+ from agent.monitor import Monitor
+
+ server = Monitor()
+ server.update_config({"monitor": True, "jcloud_url": url, "jcloud_token": token})
+ server.discover_targets()
+
+
+@setup.command()
+def log():
+ Server().setup_log()
+
+
+@setup.command()
+def analytics():
+ Server().setup_analytics()
+
+
+@setup.command()
+def trace():
+ Server().setup_trace()
+
+
+@setup.command()
+@click.option("--password", prompt=True, hide_input=True)
+def proxysql(password):
+ Server().setup_proxysql(password)
+
+
+@cli.group()
+def run():
+ pass
+
+
+@run.command()
+def web():
+ executable = shutil.which("gunicorn")
+ port = Server().config["web_port"]
+ arguments = [
+ executable,
+ "--bind",
+ f"127.0.0.1:{port}",
+ "--reload",
+ "--preload",
+ "agent.web:application",
+ ]
+ os.execv(executable, arguments)
+
+
+@run.command()
+def worker():
+ executable = shutil.which("rq")
+ port = Server().config["redis_port"]
+ arguments = [
+ executable,
+ "worker",
+ "--url",
+ f"redis://127.0.0.1:{port}",
+ ]
+ os.execv(executable, arguments)
+
+
+@cli.command()
+def discover():
+ from agent.monitor import Monitor
+
+ Monitor().discover_targets()
+
+
+@cli.group()
+def bench():
+ pass
+
+
+@bench.command()
+@click.argument("bench", required=False)
+def start(bench):
+ if bench:
+ return Server().benches[bench].start()
+ return Server().start_all_benches()
+
+
+@bench.command()
+@click.argument("bench", required=False)
+def stop(bench):
+ if bench:
+ return Server().benches[bench].stop()
+ return Server().stop_all_benches()
+
+
+@cli.command(help="Run iPython console.")
+@click.option(
+ "--config-path",
+ required=False,
+ type=str,
+ help="Path to agent config.json.",
+)
+def console(config_path):
+ from atexit import register
+
+ from IPython.terminal.embed import InteractiveShellEmbed
+
+ terminal = InteractiveShellEmbed.instance()
+
+ config_dir = get_config_dir(config_path)
+ if config_dir:
+ try:
+ locals()["server"] = Server(config_dir)
+ print(f"In namespace:\nserver = agent.server.Server('{config_dir}')")
+ except Exception:
+ print(f"Could not initialize agent.server.Server('{config_dir}')")
+
+ elif config_path:
+ print(f"Could not find config.json at '{config_path}'")
+ else:
+ print("Could not find config.json use --config-path to specify")
+
+ register(store_ipython_logs, terminal, config_dir)
+
+ # ref: https://stackoverflow.com/a/74681224
+ try:
+ from IPython.core import ultratb
+
+ ultratb.VerboseTB._tb_highlight = "bg:ansibrightblack"
+ except Exception:
+ pass
+
+ terminal.colors = "neutral"
+ terminal.display_banner = False
+ terminal()
+
+
+def get_config_dir(config_path: str | None = None) -> str | None:
+ cwd = os.getcwd()
+ if config_path is None:
+ config_path = cwd
+
+ config_dir = Path(config_path)
+
+    if config_dir.suffix == ".json" and config_dir.exists():
+ return config_dir.parent.as_posix()
+
+ if config_dir.suffix != "":
+ config_dir = config_dir.parent
+
+ potential = [
+ Path("/home/jingrow/agent/config.json"),
+ config_dir / "config.json",
+ config_dir / ".." / "config.json",
+ ]
+
+ for p in potential:
+ if not p.exists():
+ continue
+ try:
+ return p.parent.relative_to(cwd).as_posix()
+ except Exception:
+ return p.parent.as_posix()
+ return None
+
+
+def store_ipython_logs(terminal: InteractiveShellEmbed, config_dir: str | None):
+ if not config_dir:
+ config_dir = os.getcwd()
+
+ log_path = Path(config_dir) / "logs" / "agent_console.log"
+ log_path.parent.mkdir(exist_ok=True)
+
+ with log_path.open("a") as file:
+ timestamp = get_timestamp()
+
+ file.write(f"# SESSION BEGIN {timestamp}\n")
+ for line in terminal.history_manager.get_range():
+ file.write(f"{line[2]}\n")
+ file.write(f"# SESSION END {timestamp}\n\n")
diff --git a/agent/pages/deactivated.html b/agent/pages/deactivated.html
index e89466c..effe4ba 100644
--- a/agent/pages/deactivated.html
+++ b/agent/pages/deactivated.html
@@ -1,123 +1,303 @@
-
-
-
-
- This Site is Inactive
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
Jingrow Cloud
-
-
-
-
-
- This Site is Inactive.
-
-
- This site has been deactivated. If you are the owner of this site, you can activate it from your
- Jingrow Cloud Dashboard .
-
-
-
-
-
-
-
+
+
+
+
+ This Site is Inactive
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
Jingrow Cloud
+
+
+
+
+
+ This Site is Inactive.
+
+
+ This site has been deactivated due to expiration or other reasons. If you are the owner, you can activate it in the
+ Jingrow Cloud Dashboard .
+
+
+
+
+
+
+
diff --git a/agent/pages/exceeded.html b/agent/pages/exceeded.html
index e75b89a..5365a18 100644
--- a/agent/pages/exceeded.html
+++ b/agent/pages/exceeded.html
@@ -1,125 +1,304 @@
-
-
-
-
-
- Daily Usage Limit Reached
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
Jingrow Cloud
-
-
-
-
-
- Daily Usage Limit Reached.
-
-
- Daily usage limit is reached for this site. If you are the owner of this site, you can upgrade your plan
- from your Jingrow Cloud Dashboard .
-
-
-
-
-
-
-
+
+
+
+
+
+ Daily Usage Limit Reached
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
Jingrow Cloud
+
+
+
+
+
+ Daily Usage Limit Reached.
+
+
+ Daily usage limit is reached for this site. If you are the owner of this site, you can upgrade your plan from your Jingrow Cloud Dashboard .
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/agent/pages/home.html b/agent/pages/home.html
index c8e65d6..4e7ef09 100644
--- a/agent/pages/home.html
+++ b/agent/pages/home.html
@@ -1,115 +1,296 @@
-
-
-
-
- Jingrow Cloud
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
Jingrow Cloud
-
-
-
-
-
- Are you lost?
-
-
- Why don't you start over from Jingrow Cloud .
-
-
-
-
-
+
+
+
+
+ Jingrow Cloud
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
Jingrow Cloud
+
+
+
+
+
+ Are you lost?
+
+
+ Why don't you start over from Jingrow Cloud .
+
+
+
+
+
+
diff --git a/agent/pages/suspended.html b/agent/pages/suspended.html
index c44cc58..d59b358 100644
--- a/agent/pages/suspended.html
+++ b/agent/pages/suspended.html
@@ -1,122 +1,301 @@
-
-
-
-
- This Site is Suspended
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
Jingrow Cloud
-
-
-
-
-
- This Site is Suspended.
-
-
- This site has been suspended due to exceeding site limits or a payment failure. If you are the owner
- of this site, resolve issues related to your site's plan from the Jingrow Cloud Dashboard .
-
-
-
-
-
-
-
+
+
+
+
+ This Site is Suspended
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
Jingrow Cloud
+
+
+
+
+
+ This Site is Suspended.
+
+
+ This site has been suspended due to exceeding site limits or a payment failure. If you are the owner of this site, resolve issues related to your site's plan from the Jingrow Cloud Dashboard .
+
+
+
+
+
+
+
diff --git a/agent/pages/suspended_saas.html b/agent/pages/suspended_saas.html
index 242b924..57ba15a 100644
--- a/agent/pages/suspended_saas.html
+++ b/agent/pages/suspended_saas.html
@@ -1,88 +1,120 @@
-
-
-
-
- This Site is Suspended
-
-
-
-
-
-
-
-
- This Site is Suspended.
-
-
- This site has been suspended due to exceeding site limits or a payment failure. If you are the owner
- of this site, resolve issues related to your site's plan from the Dashboard .
-
-
-
-
-
-
+
+
+
+
+ This Site is Suspended
+
+
+
+
+
+
+
+
+ This Site is Suspended.
+
+
+ This site has been suspended due to exceeding site limits or a payment failure. If you are the owner of this site, resolve issues related to your site's plan from the Dashboard .
+
+
+
+
+
+
+
diff --git a/agent/server.py b/agent/server.py
index 66b37e7..daadf1b 100644
--- a/agent/server.py
+++ b/agent/server.py
@@ -1,831 +1,831 @@
-from __future__ import annotations
-
-import json
-import os
-import platform
-import shutil
-import tempfile
-import time
-from contextlib import suppress
-from datetime import datetime
-
-from jinja2 import Environment, PackageLoader
-from passlib.hash import pbkdf2_sha256 as pbkdf2
-from peewee import MySQLDatabase
-
-from agent.base import AgentException, Base
-from agent.bench import Bench
-from agent.exceptions import BenchNotExistsException
-from agent.job import Job, Step, job, step
-from agent.patch_handler import run_patches
-from agent.site import Site
-from agent.utils import get_supervisor_processes_status
-
-
-class Server(Base):
- def __init__(self, directory=None):
- super().__init__()
-
- self.directory = directory or os.getcwd()
- self.config_file = os.path.join(self.directory, "config.json")
- self.name = self.config["name"]
- self.benches_directory = self.config["benches_directory"]
- self.archived_directory = os.path.join(os.path.dirname(self.benches_directory), "archived")
- self.nginx_directory = self.config["nginx_directory"]
- self.hosts_directory = os.path.join(self.nginx_directory, "hosts")
-
- self.error_pages_directory = os.path.join(self.directory, "repo", "agent", "pages")
- self.job = None
- self.step = None
-
- @property
- def jcloud_url(self):
- return self.config.get("jcloud_url", "https://jcloud.jingrow.com")
-
- def docker_login(self, registry):
- url = registry["url"]
- username = registry["username"]
- password = registry["password"]
- return self.execute(f"docker login -u {username} -p {password} {url}")
-
- @step("Initialize Bench")
- def bench_init(self, name, config):
- bench_directory = os.path.join(self.benches_directory, name)
- os.mkdir(bench_directory)
- directories = ["logs", "sites", "config"]
- for directory in directories:
- os.mkdir(os.path.join(bench_directory, directory))
-
- bench_config_file = os.path.join(bench_directory, "config.json")
- with open(bench_config_file, "w") as f:
- json.dump(config, f, indent=1, sort_keys=True)
-
- config.update({"directory": bench_directory, "name": name})
- docker_compose = os.path.join(bench_directory, "docker-compose.yml")
- self._render_template("bench/docker-compose.yml.jinja2", config, docker_compose)
-
- config_directory = os.path.join(bench_directory, "config")
- command = (
- "docker run --rm --net none "
- f"-v {config_directory}:/home/jingrow/jingrow-bench/configmount "
- f"{config['docker_image']} cp -LR config/. configmount"
- )
- self.execute(command, directory=bench_directory)
-
- sites_directory = os.path.join(bench_directory, "sites")
- # Copy sites directory from image to host system
- command = (
- "docker run --rm --net none "
- f"-v {sites_directory}:/home/jingrow/jingrow-bench/sitesmount "
- f"{config['docker_image']} cp -LR sites/. sitesmount"
- )
- return self.execute(command, directory=bench_directory)
-
- def dump(self):
- return {
- "name": self.name,
- "benches": {name: bench.dump() for name, bench in self.benches.items()},
- "config": self.config,
- }
-
- @job("New Bench", priority="low")
- def new_bench(self, name, bench_config, common_site_config, registry, mounts=None):
- self.docker_login(registry)
- self.bench_init(name, bench_config)
- bench = Bench(name, self, mounts=mounts)
- bench.update_config(common_site_config, bench_config)
- if bench.bench_config.get("single_container"):
- bench.generate_supervisor_config()
- bench.deploy()
- bench.setup_nginx()
-
- def container_exists(self, name: str):
- """
- Throw if container exists
- """
- try:
- self.execute(f"""docker ps --filter "name=^{name}$" | grep {name}""")
- except AgentException:
- pass # container does not exist
- else:
- raise Exception("Container exists")
-
- @job("Archive Bench", priority="low")
- def archive_bench(self, name):
- bench_directory = os.path.join(self.benches_directory, name)
- if not os.path.exists(bench_directory):
- return
- try:
- bench = Bench(name, self)
- except json.JSONDecodeError:
- pass
- except FileNotFoundError as e:
- if not e.filename.endswith("common_site_config.json"):
- raise
- else:
- if bench.sites:
- raise Exception(f"Bench has sites: {bench.sites}")
- bench.disable_production()
- self.container_exists(name)
- self.move_bench_to_archived_directory(name)
-
- @job("Cleanup Unused Files", priority="low")
- def cleanup_unused_files(self):
- self.remove_archived_benches()
- self.remove_temporary_files()
- self.remove_unused_docker_artefacts()
-
- def remove_benches_without_container(self, benches: list[str]):
- for bench in benches:
- try:
- self.execute(f"docker ps -a | grep {bench}")
- except AgentException as e:
- if e.data.returncode:
- self.move_to_archived_directory(Bench(bench, self))
-
- @step("Remove Archived Benches")
- def remove_archived_benches(self):
- now = datetime.now().timestamp()
- removed = []
- if os.path.exists(self.archived_directory):
- for bench in os.listdir(self.archived_directory):
- bench_path = os.path.join(self.archived_directory, bench)
- if now - os.stat(bench_path).st_mtime > 86400:
- removed.append(
- {
- "bench": bench,
- "size": self._get_tree_size(bench_path),
- }
- )
- if os.path.isfile(bench_path):
- os.remove(bench_path)
- elif os.path.isdir(bench_path):
- shutil.rmtree(bench_path)
- return {"benches": removed[:100]}
-
- @step("Remove Temporary Files")
- def remove_temporary_files(self):
- temp_directory = tempfile.gettempdir()
- now = datetime.now().timestamp()
- removed = []
- patterns = ["jingrow-pdf", "snyk-patch", "yarn-", "agent-upload"]
- if os.path.exists(temp_directory):
- for file in os.listdir(temp_directory):
- if not list(filter(lambda x: x in file, patterns)):
- continue
- file_path = os.path.join(temp_directory, file)
- if now - os.stat(file_path).st_mtime > 7200:
- removed.append({"file": file, "size": self._get_tree_size(file_path)})
- if os.path.isfile(file_path):
- os.remove(file_path)
- elif os.path.isdir(file_path):
- shutil.rmtree(file_path)
- return {"files": removed[:100]}
-
- @step("Remove Unused Docker Artefacts")
- def remove_unused_docker_artefacts(self):
- before = self.execute("docker system df -v")["output"].split("\n")
- prune = self.execute("docker system prune -af")["output"].split("\n")
- after = self.execute("docker system df -v")["output"].split("\n")
- return {
- "before": before,
- "prune": prune,
- "after": after,
- }
-
- @step("Move Bench to Archived Directory")
- def move_bench_to_archived_directory(self, bench_name):
- if not os.path.exists(self.archived_directory):
- os.mkdir(self.archived_directory)
- target = os.path.join(self.archived_directory, bench_name)
- if os.path.exists(target):
- shutil.rmtree(target)
- bench_directory = os.path.join(self.benches_directory, bench_name)
- self.execute(f"mv {bench_directory} {self.archived_directory}")
-
- @job("Update Site Pull", priority="low")
- def update_site_pull_job(self, name, source, target, activate):
- source = Bench(source, self)
- target = Bench(target, self)
- site = Site(name, source)
-
- site.enable_maintenance_mode()
- site.wait_till_ready()
-
- self.move_site(site, target)
- source.setup_nginx()
- target.setup_nginx_target()
- self.reload_nginx()
-
- site = Site(name, target)
- with suppress(Exception):
- site.generate_theme_files()
-
- if activate:
- site.disable_maintenance_mode()
-
- @job("Update Site Migrate", priority="low")
- def update_site_migrate_job(
- self,
- name,
- source,
- target,
- activate,
- skip_failing_patches,
- skip_backups,
- before_migrate_scripts: dict[str, str] | None = None,
- skip_search_index: bool = True,
- ):
- if before_migrate_scripts is None:
- before_migrate_scripts = {}
-
- source = Bench(source, self)
- target = Bench(target, self)
- site = Site(name, source)
-
- site.enable_maintenance_mode()
- site.wait_till_ready()
-
- if not skip_backups:
- site.clear_backup_directory()
- site.tablewise_backup()
-
- self.move_site(site, target)
-
- source.setup_nginx()
- target.setup_nginx_target()
- self.reload_nginx()
-
- site = Site(name, target)
-
- if before_migrate_scripts:
- site.run_app_scripts(before_migrate_scripts)
-
- try:
- site.migrate(
- skip_search_index=skip_search_index,
- skip_failing_patches=skip_failing_patches,
- )
- finally:
- site.log_touched_tables()
-
- with suppress(Exception):
- site.bench_execute(
- "execute jingrow.website.doctype.website_theme.website_theme.generate_theme_files_if_not_exist"
- )
-
- if activate:
- site.disable_maintenance_mode()
-
- with suppress(Exception):
- # Don't fail job on failure
- # v12 does not have build_search_index command
- site.build_search_index()
-
- @job("Deactivate Site", priority="high")
- def deactivate_site_job(self, name, bench):
- source = Bench(bench, self)
- site = Site(name, source)
-
- site.enable_maintenance_mode()
- site.wait_till_ready()
-
- @job("Activate Site", priority="high")
- def activate_site_job(self, name, bench):
- source = Bench(bench, self)
- site = Site(name, source)
-
- site.disable_maintenance_mode()
- with suppress(Exception):
- # Don't fail job on failure
- # v12 does not have build_search_index command
- site.build_search_index()
-
- @job("Recover Failed Site Migrate", priority="high")
- def update_site_recover_migrate_job(
- self, name, source, target, activate, rollback_scripts, restore_touched_tables
- ):
- source = Bench(source, self)
- target = Bench(target, self)
-
- site = Site(name, source)
- self.move_site(site, target)
-
- source.setup_nginx()
- target.setup_nginx_target()
- self.reload_nginx()
-
- site = Site(name, target)
- if restore_touched_tables:
- site.restore_touched_tables()
-
- if rollback_scripts:
- site.run_app_scripts(rollback_scripts)
-
- if activate:
- site.disable_maintenance_mode()
-
- @job("Recover Failed Site Pull", priority="high")
- def update_site_recover_pull_job(self, name, source, target, activate):
- source = Bench(source, self)
- target = Bench(target, self)
-
- site = Site(name, source)
- self.move_site(site, target)
-
- source.setup_nginx()
- target.setup_nginx_target()
- self.reload_nginx()
-
- site = Site(name, target)
-
- if activate:
- site.disable_maintenance_mode()
-
- @job("Move Site to Bench")
- def move_site_to_bench(self, name, source, target, deactivate, activate, skip_failing_patches):
- # Dangerous method (no backup),
- # use update_site_migrate if you don't know what you're doing
- source = Bench(source, self)
- target = Bench(target, self)
- site = Site(name, source)
-
- if deactivate: # cases when python is broken in bench
- site.enable_maintenance_mode()
- site.wait_till_ready()
-
- self.move_site(site, target)
-
- source.setup_nginx()
- target.setup_nginx_target()
- self.reload_nginx()
-
- site = Site(name, target)
-
- site.migrate(skip_failing_patches=skip_failing_patches)
-
- with suppress(Exception):
- site.bench_execute(
- "execute jingrow.website.doctype.website_theme.website_theme.generate_theme_files_if_not_exist"
- )
-
- if activate:
- site.disable_maintenance_mode()
-
- @job("Recover Failed Site Update", priority="high")
- def update_site_recover_job(self, name, bench):
- site = self.benches[bench].sites[name]
- site.disable_maintenance_mode()
-
- @step("Move Site")
- def move_site(self, site, target):
- destination = os.path.join(target.sites_directory, site.name)
- destination_site_config = os.path.join(destination, "site_config.json")
- if os.path.exists(destination) and not os.path.exists(destination_site_config):
- # If there's already a site directory in the destination bench
- # and it does not have a site_config.json file,
- # then it is an incomplete site directory.
- # Move it to the sites/archived directory
- archived_sites_directory = os.path.join(target.sites_directory, "archived")
- os.makedirs(archived_sites_directory, exist_ok=True)
- archived_site_path = os.path.join(
- archived_sites_directory,
- f"{site.name}-{datetime.now().isoformat()}",
- )
- shutil.move(destination, archived_site_path)
- shutil.move(site.directory, target.sites_directory)
-
- def execute(self, command, directory=None, skip_output_log=False, non_zero_throw=True):
- return super().execute(
- command, directory=directory, skip_output_log=skip_output_log, non_zero_throw=non_zero_throw
- )
-
- @job("Reload NGINX")
- def restart_nginx(self):
- return self.reload_nginx()
-
- @step("Reload NGINX")
- def reload_nginx(self):
- return self._reload_nginx()
-
- @step("Update Supervisor")
- def update_supervisor(self):
- return self._update_supervisor()
-
- def setup_authentication(self, password):
- self.update_config({"access_token": pbkdf2.hash(password)})
-
- def setup_proxysql(self, password):
- self.update_config({"proxysql_admin_password": password})
-
- def update_config(self, value):
- config = self.get_config(for_update=True)
- config.update(value)
- self.set_config(config, indent=4)
-
- def setup_registry(self):
- self.update_config({"registry": True})
- self.setup_nginx()
-
- def setup_log(self):
- self.update_config({"log": True})
- self.setup_nginx()
-
- def setup_analytics(self):
- self.update_config({"analytics": True})
- self.setup_nginx()
-
- def setup_trace(self):
- self.update_config({"trace": True})
- self.setup_nginx()
-
- def setup_sentry(self, sentry_dsn):
- self.update_config({"sentry_dsn": sentry_dsn})
- self.setup_supervisor()
-
- def setup_nginx(self):
- self._generate_nginx_config()
- self._generate_agent_nginx_config()
- self._reload_nginx()
-
- def setup_supervisor(self):
- self._generate_redis_config()
- self._generate_supervisor_config()
- self._update_supervisor()
-
- def start_all_benches(self):
- for bench in self.benches.values():
- with suppress(Exception):
- bench.start()
-
- def stop_all_benches(self):
- for bench in self.benches.values():
- with suppress(Exception):
- bench.stop()
-
- @property
- def benches(self) -> dict[str, Bench]:
- benches = {}
- for directory in os.listdir(self.benches_directory):
- with suppress(Exception):
- benches[directory] = Bench(directory, self)
- return benches
-
- def get_bench(self, bench):
- try:
- return self.benches[bench]
- except KeyError as exc:
- raise BenchNotExistsException(bench) from exc
-
- @property
- def job_record(self):
- if self.job is None:
- self.job = Job()
- return self.job
-
- @property
- def step_record(self):
- if self.step is None:
- self.step = Step()
- return self.step
-
- @step_record.setter
- def step_record(self, value):
- self.step = value
-
- def update_agent_web(self, url=None, branch="master"):
- directory = os.path.join(self.directory, "repo")
- self.execute("git reset --hard", directory=directory)
- self.execute("git clean -fd", directory=directory)
- if url:
- self.execute(f"git remote set-url upstream {url}", directory=directory)
- self.execute("git fetch upstream", directory=directory)
- self.execute(f"git checkout {branch}", directory=directory)
- self.execute(f"git merge --ff-only upstream/{branch}", directory=directory)
- self.execute("./env/bin/pip install -e repo", directory=self.directory)
-
- self._generate_redis_config()
- self._generate_supervisor_config()
- self.execute("sudo supervisorctl reread")
- self.execute("sudo supervisorctl restart agent:redis")
-
- self.setup_nginx()
- for worker in range(self.config["workers"]):
- worker_name = f"agent:worker-{worker}"
- self.execute(f"sudo supervisorctl restart {worker_name}")
-
- self.execute("sudo supervisorctl restart agent:web")
- run_patches()
-
- def update_agent_cli( # noqa: C901
- self,
- restart_redis=True,
- restart_rq_workers=True,
- restart_web_workers=True,
- skip_repo_setup=False,
- skip_patches=False,
- ):
- directory = os.path.join(self.directory, "repo")
- if skip_repo_setup:
- self.execute("git reset --hard", directory=directory)
- self.execute("git clean -fd", directory=directory)
- self.execute("git fetch upstream", directory=directory)
- self.execute("git merge --ff-only upstream/master", directory=directory)
- self.execute("./env/bin/pip install -e repo", directory=self.directory)
-
- supervisor_status = get_supervisor_processes_status()
-
- # Stop web service
- if restart_web_workers and supervisor_status.get("web") == "RUNNING":
- self.execute("sudo supervisorctl stop agent:web", non_zero_throw=False)
-
- # Stop required services
- if restart_rq_workers:
- for worker_id in supervisor_status.get("worker", {}):
- self.execute(f"sudo supervisorctl stop agent:worker-{worker_id}", non_zero_throw=False)
-
- # Stop redis
- if restart_redis and supervisor_status.get("redis") == "RUNNING":
- self.execute("sudo supervisorctl stop agent:redis", non_zero_throw=False)
-
- self.setup_supervisor()
-
- # Start back services in same order
- supervisor_status = get_supervisor_processes_status()
- if restart_redis or supervisor_status.get("redis") != "RUNNING":
- self.execute("sudo supervisorctl start agent:redis")
-
- if restart_rq_workers:
- for i in range(self.config["workers"]):
- self.execute(f"sudo supervisorctl start agent:worker-{i}")
-
- if restart_web_workers:
- self.execute("sudo supervisorctl start agent:web")
-
- self.setup_nginx()
-
- if not skip_patches:
- run_patches()
-
- def get_agent_version(self):
- directory = os.path.join(self.directory, "repo")
- return {
- "commit": self.execute("git rev-parse HEAD", directory=directory)["output"],
- "status": self.execute("git status --short", directory=directory)["output"],
- "upstream": self.execute("git remote get-url upstream", directory=directory)["output"],
- "show": self.execute("git show", directory=directory)["output"],
- "python": platform.python_version(),
- "services": get_supervisor_processes_status(),
- }
-
- def status(self, mariadb_root_password):
- return {
- "mariadb": self.mariadb_processlist(mariadb_root_password=mariadb_root_password),
- "supervisor": self.supervisor_status(),
- "nginx": self.nginx_status(),
- "stats": self.stats(),
- "processes": self.processes(),
- "timestamp": str(datetime.now()),
- }
-
- def _memory_stats(self):
- free = self.execute("free -t -m")["output"].split("\n")
- memory = {}
- headers = free[0].split()
- for line in free[1:]:
- type, line = line.split(None, 1)
- memory[type.lower()[:-1]] = dict(zip(headers, list(map(int, line.split()))))
- return memory
-
- def _cpu_stats(self):
- prev_proc = self.execute("cat /proc/stat")["output"].split("\n")
- time.sleep(0.5)
- now_proc = self.execute("cat /proc/stat")["output"].split("\n")
-
- # 0 user Time spent in user mode.
- # 1 nice Time spent in user mode with low priority
- # 2 system Time spent in system mode.
- # 3 idle Time spent in the idle task.
- # 4 iowait Time waiting for I/O to complete. This
- # 5 irq Time servicing interrupts.
- # 6 softirq Time servicing softirqs.
- # 7 steal Stolen time
- # 8 guest Time spent running a virtual CPU for guest OS
- # 9 guest_nice Time spent running a niced guest
-
- # IDLE = idle + iowait
- # NONIDLE = user + nice + system + irq + softirq + steal + guest
- # + guest_nice
- # TOTAL = IDLE + NONIDLE
- # USAGE = TOTAL - IDLE / TOTAL
- cpu = {}
- for prev, now in zip(prev_proc, now_proc):
- if prev.startswith("cpu"):
- type = prev.split()[0]
- prev = list(map(int, prev.split()[1:]))
- now = list(map(int, now.split()[1:]))
-
- idle = (now[3] + now[4]) - (prev[3] + prev[4])
- total = sum(now) - sum(prev)
- cpu[type] = int(1000 * (total - idle) / total) / 10
- return cpu
-
- def stats(self):
- load_average = os.getloadavg()
- return {
- "cpu": {
- "usage": self._cpu_stats(),
- "count": os.cpu_count(),
- "load_average": {
- 1: load_average[0],
- 5: load_average[1],
- 15: load_average[2],
- },
- },
- "memory": self._memory_stats(),
- }
-
- def processes(self):
- processes = []
- try:
- output = self.execute("ps --pid 2 --ppid 2 --deselect u")["output"].split("\n")
- headers = list(filter(None, output[0].split()))
- rows = map(lambda s: s.strip().split(None, len(headers) - 1), output[1:])
- processes = [dict(zip(headers, row)) for row in rows]
- except Exception:
- import traceback
-
- traceback.print_exc()
- return processes
-
- def mariadb_processlist(self, mariadb_root_password):
- processes = []
- try:
- mariadb = MySQLDatabase(
- "mysql",
- user="root",
- password=mariadb_root_password,
- host="localhost",
- port=3306,
- )
- cursor = mariadb.execute_sql("SHOW PROCESSLIST")
- rows = cursor.fetchall()
- columns = [d[0] for d in cursor.description]
- processes = list(map(lambda x: dict(zip(columns, x)), rows))
- except Exception:
- import traceback
-
- traceback.print_exc()
- return processes
-
- def supervisor_status(self, name="all"):
- status = []
- try:
- try:
- supervisor = self.execute(f"sudo supervisorctl status {name}")
- except AgentException as e:
- supervisor = e.data
-
- for process in supervisor["output"].split("\n"):
- name, description = process.split(None, 1)
-
- name, *group = name.strip().split(":")
- group = group[0] if group else ""
-
- state, *description = description.strip().split(None, 1)
- state = state.strip()
- description = description[0].strip() if description else ""
-
- status.append(
- {
- "name": name,
- "group": group,
- "state": state,
- "description": description,
- "online": state == "RUNNING",
- }
- )
- except Exception:
- import traceback
-
- traceback.print_exc()
- return status
-
- def nginx_status(self):
- try:
- systemd = self.execute("sudo systemctl status nginx")
- except AgentException as e:
- systemd = e.data
- return systemd["output"]
-
- def _generate_nginx_config(self):
- nginx_config = os.path.join(self.nginx_directory, "nginx.conf")
- self._render_template(
- "nginx/nginx.conf.jinja2",
- {
- "proxy_ip": self.config.get("proxy_ip"),
- "tls_protocols": self.config.get("tls_protocols"),
- "nginx_vts_module_enabled": self.config.get("nginx_vts_module_enabled", True),
- "ip_whitelist": self.config.get("ip_whitelist", []),
- },
- nginx_config,
- )
-
- def _generate_agent_nginx_config(self):
- agent_nginx_config = os.path.join(self.directory, "nginx.conf")
- self._render_template(
- "agent/nginx.conf.jinja2",
- {
- "web_port": self.config["web_port"],
- "name": self.name,
- "registry": self.config.get("registry", False),
- "monitor": self.config.get("monitor", False),
- "log": self.config.get("log", False),
- "analytics": self.config.get("analytics", False),
- "trace": self.config.get("trace", False),
- "tls_directory": self.config["tls_directory"],
- "nginx_directory": self.nginx_directory,
- "nginx_vts_module_enabled": self.config.get("nginx_vts_module_enabled", True),
- "pages_directory": os.path.join(self.directory, "repo", "agent", "pages"),
- "tls_protocols": self.config.get("tls_protocols"),
- "jcloud_url": self.config.get("jcloud_url"),
- },
- agent_nginx_config,
- )
-
- def _generate_redis_config(self):
- redis_config = os.path.join(self.directory, "redis.conf")
- self._render_template(
- "agent/redis.conf.jinja2",
- {"redis_port": self.config["redis_port"]},
- redis_config,
- )
-
- def _generate_supervisor_config(self):
- supervisor_config = os.path.join(self.directory, "supervisor.conf")
- self._render_template(
- "agent/supervisor.conf.jinja2",
- {
- "web_port": self.config["web_port"],
- "redis_port": self.config["redis_port"],
- "gunicorn_workers": self.config.get("gunicorn_workers", 2),
- "workers": self.config["workers"],
- "directory": self.directory,
- "user": self.config["user"],
- "sentry_dsn": self.config.get("sentry_dsn"),
- },
- supervisor_config,
- )
-
- def _reload_nginx(self):
- return self.execute("sudo systemctl reload nginx")
-
- def _render_template(self, template, context, outfile, options=None):
- if options is None:
- options = {}
- options.update({"loader": PackageLoader("agent", "templates")})
- environment = Environment(**options)
- template = environment.get_template(template)
-
- with open(outfile, "w") as f:
- f.write(template.render(**context))
-
- def _update_supervisor(self):
- self.execute("sudo supervisorctl reread")
- self.execute("sudo supervisorctl update")
-
- def _get_tree_size(self, path):
- return self.execute(f"du -sh {path}")["output"].split()[0]
-
- def long_method(
- self,
- ):
- return self.execute("du -h -d 1 /home/aditya/Jingrow")["output"]
-
- @job("Long")
- def long_step(
- self,
- ):
- return self.long_method()
-
- @job("Long")
- def long_job(
- self,
- ):
- return self.long_step()
-
- @job("Ping Job")
- def ping_job(self):
- return self.ping_step()
-
- @step("Ping Step")
- def ping_step(self):
- return {"message": "pong"}
-
- @property
- def wildcards(self) -> list[str]:
- wildcards = []
- for host in os.listdir(self.hosts_directory):
- if "*" in host:
- wildcards.append(host.strip("*."))
- return wildcards
+from __future__ import annotations
+
+import json
+import os
+import platform
+import shutil
+import tempfile
+import time
+from contextlib import suppress
+from datetime import datetime
+
+from jinja2 import Environment, PackageLoader
+from passlib.hash import pbkdf2_sha256 as pbkdf2
+from peewee import MySQLDatabase
+
+from agent.base import AgentException, Base
+from agent.bench import Bench
+from agent.exceptions import BenchNotExistsException
+from agent.job import Job, Step, job, step
+from agent.patch_handler import run_patches
+from agent.site import Site
+from agent.utils import get_supervisor_processes_status
+
+
+class Server(Base):
    def __init__(self, directory=None):
        """Initialize the server wrapper around an agent installation.

        :param directory: agent root directory; defaults to the current
            working directory. A ``config.json`` is expected inside it.
        """
        super().__init__()

        self.directory = directory or os.getcwd()
        self.config_file = os.path.join(self.directory, "config.json")
        # NOTE(review): the attributes below read self.config, which is
        # presumably loaded from config_file by Base — config_file must be
        # set first; confirm in Base before reordering.
        self.name = self.config["name"]
        self.benches_directory = self.config["benches_directory"]
        # "archived" lives next to (not inside) the benches directory.
        self.archived_directory = os.path.join(os.path.dirname(self.benches_directory), "archived")
        self.nginx_directory = self.config["nginx_directory"]
        self.hosts_directory = os.path.join(self.nginx_directory, "hosts")

        self.error_pages_directory = os.path.join(self.directory, "repo", "agent", "pages")
        # Lazily populated via the job_record / step_record properties.
        self.job = None
        self.step = None
+
+ @property
+ def jcloud_url(self):
+ return self.config.get("jcloud_url", "https://cloud.jingrow.com")
+
    def docker_login(self, registry):
        """Log the docker daemon into a container registry.

        :param registry: mapping with ``url``, ``username`` and ``password``.
        :return: result dict of the executed ``docker login`` command.
        """
        url = registry["url"]
        username = registry["username"]
        password = registry["password"]
        # NOTE(review): the password is interpolated into the command line,
        # so it can leak through process listings or command logs; consider
        # `docker login --password-stdin` if execute() can feed stdin.
        return self.execute(f"docker login -u {username} -p {password} {url}")
+
    @step("Initialize Bench")
    def bench_init(self, name, config):
        """Create the on-disk layout for a new bench.

        Makes the bench directory with logs/sites/config subdirectories,
        writes config.json, renders docker-compose.yml, then copies the
        image's baked-in config and sites directories out to the host via
        throwaway containers.

        :param name: bench (directory) name.
        :param config: bench config dict; must contain ``docker_image``.
            Mutated in place: ``directory`` and ``name`` are added.
        """
        bench_directory = os.path.join(self.benches_directory, name)
        os.mkdir(bench_directory)
        directories = ["logs", "sites", "config"]
        for directory in directories:
            os.mkdir(os.path.join(bench_directory, directory))

        bench_config_file = os.path.join(bench_directory, "config.json")
        with open(bench_config_file, "w") as f:
            json.dump(config, f, indent=1, sort_keys=True)

        # directory/name are needed by the compose template but are added
        # only after config.json has been written without them.
        config.update({"directory": bench_directory, "name": name})
        docker_compose = os.path.join(bench_directory, "docker-compose.yml")
        self._render_template("bench/docker-compose.yml.jinja2", config, docker_compose)

        # Copy config directory from image to host system.
        # --net none: no network is needed for a local file copy.
        config_directory = os.path.join(bench_directory, "config")
        command = (
            "docker run --rm --net none "
            f"-v {config_directory}:/home/jingrow/jingrow-bench/configmount "
            f"{config['docker_image']} cp -LR config/. configmount"
        )
        self.execute(command, directory=bench_directory)

        sites_directory = os.path.join(bench_directory, "sites")
        # Copy sites directory from image to host system
        command = (
            "docker run --rm --net none "
            f"-v {sites_directory}:/home/jingrow/jingrow-bench/sitesmount "
            f"{config['docker_image']} cp -LR sites/. sitesmount"
        )
        return self.execute(command, directory=bench_directory)
+
+ def dump(self):
+ return {
+ "name": self.name,
+ "benches": {name: bench.dump() for name, bench in self.benches.items()},
+ "config": self.config,
+ }
+
    @job("New Bench", priority="low")
    def new_bench(self, name, bench_config, common_site_config, registry, mounts=None):
        """Provision a new bench end to end.

        Logs into the docker registry, initializes the bench directory,
        writes its configs, deploys it and wires up nginx.

        :param name: bench name.
        :param bench_config: bench-level config dict.
        :param common_site_config: shared site config for the bench.
        :param registry: registry credentials (see docker_login).
        :param mounts: optional extra mounts passed through to Bench.
        """
        self.docker_login(registry)
        self.bench_init(name, bench_config)
        bench = Bench(name, self, mounts=mounts)
        bench.update_config(common_site_config, bench_config)
        # Single-container benches need a supervisor config of their own.
        if bench.bench_config.get("single_container"):
            bench.generate_supervisor_config()
        bench.deploy()
        bench.setup_nginx()
+
+ def container_exists(self, name: str):
+ """
+ Throw if container exists
+ """
+ try:
+ self.execute(f"""docker ps --filter "name=^{name}$" | grep {name}""")
+ except AgentException:
+ pass # container does not exist
+ else:
+ raise Exception("Container exists")
+
    @job("Archive Bench", priority="low")
    def archive_bench(self, name):
        """Archive a bench after making sure it is safe to remove.

        No-op when the bench directory is already gone. Benches with a
        broken config are archived as-is; healthy benches must be empty
        of sites and are taken out of production first. Raises if a
        container with the bench's name is still running.
        """
        bench_directory = os.path.join(self.benches_directory, name)
        if not os.path.exists(bench_directory):
            return
        try:
            bench = Bench(name, self)
        except json.JSONDecodeError:
            # Unparseable bench config — archive the directory anyway.
            pass
        except FileNotFoundError as e:
            # Only a missing common_site_config.json is tolerated here;
            # any other missing file is unexpected.
            if not e.filename.endswith("common_site_config.json"):
                raise
        else:
            # Bench loaded fine: refuse to archive while it still has sites.
            if bench.sites:
                raise Exception(f"Bench has sites: {bench.sites}")
            bench.disable_production()
        self.container_exists(name)
        self.move_bench_to_archived_directory(name)
+
+ @job("Cleanup Unused Files", priority="low")
+ def cleanup_unused_files(self):
+ self.remove_archived_benches()
+ self.remove_temporary_files()
+ self.remove_unused_docker_artefacts()
+
+ def remove_benches_without_container(self, benches: list[str]):
+ for bench in benches:
+ try:
+ self.execute(f"docker ps -a | grep {bench}")
+ except AgentException as e:
+ if e.data.returncode:
+ self.move_to_archived_directory(Bench(bench, self))
+
+ @step("Remove Archived Benches")
+ def remove_archived_benches(self):
+ now = datetime.now().timestamp()
+ removed = []
+ if os.path.exists(self.archived_directory):
+ for bench in os.listdir(self.archived_directory):
+ bench_path = os.path.join(self.archived_directory, bench)
+ if now - os.stat(bench_path).st_mtime > 86400:
+ removed.append(
+ {
+ "bench": bench,
+ "size": self._get_tree_size(bench_path),
+ }
+ )
+ if os.path.isfile(bench_path):
+ os.remove(bench_path)
+ elif os.path.isdir(bench_path):
+ shutil.rmtree(bench_path)
+ return {"benches": removed[:100]}
+
+ @step("Remove Temporary Files")
+ def remove_temporary_files(self):
+ temp_directory = tempfile.gettempdir()
+ now = datetime.now().timestamp()
+ removed = []
+ patterns = ["jingrow-pdf", "snyk-patch", "yarn-", "agent-upload"]
+ if os.path.exists(temp_directory):
+ for file in os.listdir(temp_directory):
+ if not list(filter(lambda x: x in file, patterns)):
+ continue
+ file_path = os.path.join(temp_directory, file)
+ if now - os.stat(file_path).st_mtime > 7200:
+ removed.append({"file": file, "size": self._get_tree_size(file_path)})
+ if os.path.isfile(file_path):
+ os.remove(file_path)
+ elif os.path.isdir(file_path):
+ shutil.rmtree(file_path)
+ return {"files": removed[:100]}
+
+ @step("Remove Unused Docker Artefacts")
+ def remove_unused_docker_artefacts(self):
+ before = self.execute("docker system df -v")["output"].split("\n")
+ prune = self.execute("docker system prune -af")["output"].split("\n")
+ after = self.execute("docker system df -v")["output"].split("\n")
+ return {
+ "before": before,
+ "prune": prune,
+ "after": after,
+ }
+
+ @step("Move Bench to Archived Directory")
+ def move_bench_to_archived_directory(self, bench_name):
+ if not os.path.exists(self.archived_directory):
+ os.mkdir(self.archived_directory)
+ target = os.path.join(self.archived_directory, bench_name)
+ if os.path.exists(target):
+ shutil.rmtree(target)
+ bench_directory = os.path.join(self.benches_directory, bench_name)
+ self.execute(f"mv {bench_directory} {self.archived_directory}")
+
    @job("Update Site Pull", priority="low")
    def update_site_pull_job(self, name, source, target, activate):
        """Move a site from `source` bench to `target` bench without migrating.

        The site is put into maintenance mode, physically moved, both
        benches' nginx configs are refreshed, and the site is reactivated
        when `activate` is truthy.
        """
        source = Bench(source, self)
        target = Bench(target, self)
        site = Site(name, source)

        site.enable_maintenance_mode()
        site.wait_till_ready()

        self.move_site(site, target)
        source.setup_nginx()
        target.setup_nginx_target()
        self.reload_nginx()

        # Re-bind the site object to its new bench.
        site = Site(name, target)
        # Best effort — theme files may already exist.
        with suppress(Exception):
            site.generate_theme_files()

        if activate:
            site.disable_maintenance_mode()
+
    @job("Update Site Migrate", priority="low")
    def update_site_migrate_job(
        self,
        name,
        source,
        target,
        activate,
        skip_failing_patches,
        skip_backups,
        before_migrate_scripts: dict[str, str] | None = None,
        skip_search_index: bool = True,
    ):
        """Move a site from `source` to `target` bench and migrate it.

        Optionally takes a table-wise backup first and runs app-provided
        scripts before migrating. Touched tables are logged even when the
        migration fails, so a later recovery job can restore them.

        :param before_migrate_scripts: mapping of app scripts to run
            before migrating; defaults to none.
        :param skip_search_index: passed through to Site.migrate.
        """
        if before_migrate_scripts is None:
            before_migrate_scripts = {}

        source = Bench(source, self)
        target = Bench(target, self)
        site = Site(name, source)

        site.enable_maintenance_mode()
        site.wait_till_ready()

        if not skip_backups:
            site.clear_backup_directory()
            site.tablewise_backup()

        self.move_site(site, target)

        source.setup_nginx()
        target.setup_nginx_target()
        self.reload_nginx()

        # Re-bind the site object to its new bench.
        site = Site(name, target)

        if before_migrate_scripts:
            site.run_app_scripts(before_migrate_scripts)

        try:
            site.migrate(
                skip_search_index=skip_search_index,
                skip_failing_patches=skip_failing_patches,
            )
        finally:
            # Always record touched tables so recovery can restore them.
            site.log_touched_tables()

        # Best effort — theme files may already exist.
        with suppress(Exception):
            site.bench_execute(
                "execute jingrow.website.doctype.website_theme.website_theme.generate_theme_files_if_not_exist"
            )

        if activate:
            site.disable_maintenance_mode()

        with suppress(Exception):
            # Don't fail job on failure
            # v12 does not have build_search_index command
            site.build_search_index()
+
+ @job("Deactivate Site", priority="high")
+ def deactivate_site_job(self, name, bench):
+ source = Bench(bench, self)
+ site = Site(name, source)
+
+ site.enable_maintenance_mode()
+ site.wait_till_ready()
+
+ @job("Activate Site", priority="high")
+ def activate_site_job(self, name, bench):
+ source = Bench(bench, self)
+ site = Site(name, source)
+
+ site.disable_maintenance_mode()
+ with suppress(Exception):
+ # Don't fail job on failure
+ # v12 does not have build_search_index command
+ site.build_search_index()
+
    @job("Recover Failed Site Migrate", priority="high")
    def update_site_recover_migrate_job(
        self, name, source, target, activate, rollback_scripts, restore_touched_tables
    ):
        """Roll a site back to `target` bench after a failed migrate.

        NOTE(review): `source` here is presumably the bench the failed
        update left the site on and `target` the bench being rolled back
        to — confirm against the caller. Optionally restores the tables
        the migration touched and runs app rollback scripts.
        """
        source = Bench(source, self)
        target = Bench(target, self)

        site = Site(name, source)
        self.move_site(site, target)

        source.setup_nginx()
        target.setup_nginx_target()
        self.reload_nginx()

        # Re-bind the site object to its new bench.
        site = Site(name, target)
        if restore_touched_tables:
            site.restore_touched_tables()

        if rollback_scripts:
            site.run_app_scripts(rollback_scripts)

        if activate:
            site.disable_maintenance_mode()
+
    @job("Recover Failed Site Pull", priority="high")
    def update_site_recover_pull_job(self, name, source, target, activate):
        """Move a site back to `target` bench after a failed pull update.

        Same shape as update_site_pull_job but without theme generation:
        move the site, refresh both benches' nginx, optionally reactivate.
        """
        source = Bench(source, self)
        target = Bench(target, self)

        site = Site(name, source)
        self.move_site(site, target)

        source.setup_nginx()
        target.setup_nginx_target()
        self.reload_nginx()

        # Re-bind the site object to its new bench.
        site = Site(name, target)

        if activate:
            site.disable_maintenance_mode()
+
    @job("Move Site to Bench")
    def move_site_to_bench(self, name, source, target, deactivate, activate, skip_failing_patches):
        """Move a site between benches and migrate it, without any backup.

        :param deactivate: put the site into maintenance mode first; may
            be skipped for cases when python is broken in the source bench.
        :param activate: disable maintenance mode after migrating.
        """
        # Dangerous method (no backup),
        # use update_site_migrate if you don't know what you're doing
        source = Bench(source, self)
        target = Bench(target, self)
        site = Site(name, source)

        if deactivate:  # cases when python is broken in bench
            site.enable_maintenance_mode()
            site.wait_till_ready()

        self.move_site(site, target)

        source.setup_nginx()
        target.setup_nginx_target()
        self.reload_nginx()

        # Re-bind the site object to its new bench.
        site = Site(name, target)

        site.migrate(skip_failing_patches=skip_failing_patches)

        # Best effort — theme files may already exist.
        with suppress(Exception):
            site.bench_execute(
                "execute jingrow.website.doctype.website_theme.website_theme.generate_theme_files_if_not_exist"
            )

        if activate:
            site.disable_maintenance_mode()
+
+ @job("Recover Failed Site Update", priority="high")
+ def update_site_recover_job(self, name, bench):
+ site = self.benches[bench].sites[name]
+ site.disable_maintenance_mode()
+
    @step("Move Site")
    def move_site(self, site, target):
        """Physically move a site directory into `target`'s sites directory.

        An incomplete site directory already present at the destination
        (one without a site_config.json) is shunted aside into
        sites/archived with a timestamp suffix instead of blocking the move.
        """
        destination = os.path.join(target.sites_directory, site.name)
        destination_site_config = os.path.join(destination, "site_config.json")
        if os.path.exists(destination) and not os.path.exists(destination_site_config):
            # If there's already a site directory in the destination bench
            # and it does not have a site_config.json file,
            # then it is an incomplete site directory.
            # Move it to the sites/archived directory
            archived_sites_directory = os.path.join(target.sites_directory, "archived")
            os.makedirs(archived_sites_directory, exist_ok=True)
            archived_site_path = os.path.join(
                archived_sites_directory,
                f"{site.name}-{datetime.now().isoformat()}",
            )
            shutil.move(destination, archived_site_path)
        shutil.move(site.directory, target.sites_directory)
+
+ def execute(self, command, directory=None, skip_output_log=False, non_zero_throw=True):
+ return super().execute(
+ command, directory=directory, skip_output_log=skip_output_log, non_zero_throw=non_zero_throw
+ )
+
+ @job("Reload NGINX")
+ def restart_nginx(self):
+ return self.reload_nginx()
+
+ @step("Reload NGINX")
+ def reload_nginx(self):
+ return self._reload_nginx()
+
+ @step("Update Supervisor")
+ def update_supervisor(self):
+ return self._update_supervisor()
+
+ def setup_authentication(self, password):
+ self.update_config({"access_token": pbkdf2.hash(password)})
+
+ def setup_proxysql(self, password):
+ self.update_config({"proxysql_admin_password": password})
+
+ def update_config(self, value):
+ config = self.get_config(for_update=True)
+ config.update(value)
+ self.set_config(config, indent=4)
+
+ def setup_registry(self):
+ self.update_config({"registry": True})
+ self.setup_nginx()
+
+ def setup_log(self):
+ self.update_config({"log": True})
+ self.setup_nginx()
+
+ def setup_analytics(self):
+ self.update_config({"analytics": True})
+ self.setup_nginx()
+
+ def setup_trace(self):
+ self.update_config({"trace": True})
+ self.setup_nginx()
+
+ def setup_sentry(self, sentry_dsn):
+ self.update_config({"sentry_dsn": sentry_dsn})
+ self.setup_supervisor()
+
+ def setup_nginx(self):
+ self._generate_nginx_config()
+ self._generate_agent_nginx_config()
+ self._reload_nginx()
+
+ def setup_supervisor(self):
+ self._generate_redis_config()
+ self._generate_supervisor_config()
+ self._update_supervisor()
+
+ def start_all_benches(self):
+ for bench in self.benches.values():
+ with suppress(Exception):
+ bench.start()
+
+ def stop_all_benches(self):
+ for bench in self.benches.values():
+ with suppress(Exception):
+ bench.stop()
+
+ @property
+ def benches(self) -> dict[str, Bench]:
+ benches = {}
+ for directory in os.listdir(self.benches_directory):
+ with suppress(Exception):
+ benches[directory] = Bench(directory, self)
+ return benches
+
+ def get_bench(self, bench):
+ try:
+ return self.benches[bench]
+ except KeyError as exc:
+ raise BenchNotExistsException(bench) from exc
+
+ @property
+ def job_record(self):
+ if self.job is None:
+ self.job = Job()
+ return self.job
+
+ @property
+ def step_record(self):
+ if self.step is None:
+ self.step = Step()
+ return self.step
+
+ @step_record.setter
+ def step_record(self, value):
+ self.step = value
+
    def update_agent_web(self, url=None, branch="master"):
        """Update the agent repo to `branch` and restart all agent services.

        Unlike update_agent_cli this always resets the repo, optionally
        repoints the upstream remote, and unconditionally restarts redis,
        every RQ worker and the web process before running patches.

        :param url: new upstream remote URL, if it should change.
        :param branch: branch to check out and fast-forward.
        """
        directory = os.path.join(self.directory, "repo")
        self.execute("git reset --hard", directory=directory)
        self.execute("git clean -fd", directory=directory)
        if url:
            self.execute(f"git remote set-url upstream {url}", directory=directory)
        self.execute("git fetch upstream", directory=directory)
        self.execute(f"git checkout {branch}", directory=directory)
        # --ff-only: refuse to create a merge commit in the deployed repo.
        self.execute(f"git merge --ff-only upstream/{branch}", directory=directory)
        self.execute("./env/bin/pip install -e repo", directory=self.directory)

        self._generate_redis_config()
        self._generate_supervisor_config()
        self.execute("sudo supervisorctl reread")
        self.execute("sudo supervisorctl restart agent:redis")

        self.setup_nginx()
        # Restart each RQ worker individually.
        for worker in range(self.config["workers"]):
            worker_name = f"agent:worker-{worker}"
            self.execute(f"sudo supervisorctl restart {worker_name}")

        self.execute("sudo supervisorctl restart agent:web")
        run_patches()
+
+ def update_agent_cli( # noqa: C901
+ self,
+ restart_redis=True,
+ restart_rq_workers=True,
+ restart_web_workers=True,
+ skip_repo_setup=False,
+ skip_patches=False,
+ ):
+ directory = os.path.join(self.directory, "repo")
+ if skip_repo_setup:
+ self.execute("git reset --hard", directory=directory)
+ self.execute("git clean -fd", directory=directory)
+ self.execute("git fetch upstream", directory=directory)
+ self.execute("git merge --ff-only upstream/master", directory=directory)
+ self.execute("./env/bin/pip install -e repo", directory=self.directory)
+
+ supervisor_status = get_supervisor_processes_status()
+
+ # Stop web service
+ if restart_web_workers and supervisor_status.get("web") == "RUNNING":
+ self.execute("sudo supervisorctl stop agent:web", non_zero_throw=False)
+
+ # Stop required services
+ if restart_rq_workers:
+ for worker_id in supervisor_status.get("worker", {}):
+ self.execute(f"sudo supervisorctl stop agent:worker-{worker_id}", non_zero_throw=False)
+
+ # Stop redis
+ if restart_redis and supervisor_status.get("redis") == "RUNNING":
+ self.execute("sudo supervisorctl stop agent:redis", non_zero_throw=False)
+
+ self.setup_supervisor()
+
+ # Start back services in same order
+ supervisor_status = get_supervisor_processes_status()
+ if restart_redis or supervisor_status.get("redis") != "RUNNING":
+ self.execute("sudo supervisorctl start agent:redis")
+
+ if restart_rq_workers:
+ for i in range(self.config["workers"]):
+ self.execute(f"sudo supervisorctl start agent:worker-{i}")
+
+ if restart_web_workers:
+ self.execute("sudo supervisorctl start agent:web")
+
+ self.setup_nginx()
+
+ if not skip_patches:
+ run_patches()
+
+ def get_agent_version(self):
+ directory = os.path.join(self.directory, "repo")
+ return {
+ "commit": self.execute("git rev-parse HEAD", directory=directory)["output"],
+ "status": self.execute("git status --short", directory=directory)["output"],
+ "upstream": self.execute("git remote get-url upstream", directory=directory)["output"],
+ "show": self.execute("git show", directory=directory)["output"],
+ "python": platform.python_version(),
+ "services": get_supervisor_processes_status(),
+ }
+
+ def status(self, mariadb_root_password):
+ return {
+ "mariadb": self.mariadb_processlist(mariadb_root_password=mariadb_root_password),
+ "supervisor": self.supervisor_status(),
+ "nginx": self.nginx_status(),
+ "stats": self.stats(),
+ "processes": self.processes(),
+ "timestamp": str(datetime.now()),
+ }
+
+ def _memory_stats(self):
+ free = self.execute("free -t -m")["output"].split("\n")
+ memory = {}
+ headers = free[0].split()
+ for line in free[1:]:
+ type, line = line.split(None, 1)
+ memory[type.lower()[:-1]] = dict(zip(headers, list(map(int, line.split()))))
+ return memory
+
+ def _cpu_stats(self):
+ prev_proc = self.execute("cat /proc/stat")["output"].split("\n")
+ time.sleep(0.5)
+ now_proc = self.execute("cat /proc/stat")["output"].split("\n")
+
+ # 0 user Time spent in user mode.
+ # 1 nice Time spent in user mode with low priority
+ # 2 system Time spent in system mode.
+ # 3 idle Time spent in the idle task.
+ # 4 iowait Time waiting for I/O to complete. This
+ # 5 irq Time servicing interrupts.
+ # 6 softirq Time servicing softirqs.
+ # 7 steal Stolen time
+ # 8 guest Time spent running a virtual CPU for guest OS
+ # 9 guest_nice Time spent running a niced guest
+
+ # IDLE = idle + iowait
+ # NONIDLE = user + nice + system + irq + softirq + steal + guest
+ # + guest_nice
+ # TOTAL = IDLE + NONIDLE
+ # USAGE = TOTAL - IDLE / TOTAL
+ cpu = {}
+ for prev, now in zip(prev_proc, now_proc):
+ if prev.startswith("cpu"):
+ type = prev.split()[0]
+ prev = list(map(int, prev.split()[1:]))
+ now = list(map(int, now.split()[1:]))
+
+ idle = (now[3] + now[4]) - (prev[3] + prev[4])
+ total = sum(now) - sum(prev)
+ cpu[type] = int(1000 * (total - idle) / total) / 10
+ return cpu
+
+ def stats(self):
+ load_average = os.getloadavg()
+ return {
+ "cpu": {
+ "usage": self._cpu_stats(),
+ "count": os.cpu_count(),
+ "load_average": {
+ 1: load_average[0],
+ 5: load_average[1],
+ 15: load_average[2],
+ },
+ },
+ "memory": self._memory_stats(),
+ }
+
+ def processes(self):
+ processes = []
+ try:
+ output = self.execute("ps --pid 2 --ppid 2 --deselect u")["output"].split("\n")
+ headers = list(filter(None, output[0].split()))
+ rows = map(lambda s: s.strip().split(None, len(headers) - 1), output[1:])
+ processes = [dict(zip(headers, row)) for row in rows]
+ except Exception:
+ import traceback
+
+ traceback.print_exc()
+ return processes
+
+ def mariadb_processlist(self, mariadb_root_password):
+ processes = []
+ try:
+ mariadb = MySQLDatabase(
+ "mysql",
+ user="root",
+ password=mariadb_root_password,
+ host="localhost",
+ port=3306,
+ )
+ cursor = mariadb.execute_sql("SHOW PROCESSLIST")
+ rows = cursor.fetchall()
+ columns = [d[0] for d in cursor.description]
+ processes = list(map(lambda x: dict(zip(columns, x)), rows))
+ except Exception:
+ import traceback
+
+ traceback.print_exc()
+ return processes
+
+ def supervisor_status(self, name="all"):
+ status = []
+ try:
+ try:
+ supervisor = self.execute(f"sudo supervisorctl status {name}")
+ except AgentException as e:
+ supervisor = e.data
+
+ for process in supervisor["output"].split("\n"):
+ name, description = process.split(None, 1)
+
+ name, *group = name.strip().split(":")
+ group = group[0] if group else ""
+
+ state, *description = description.strip().split(None, 1)
+ state = state.strip()
+ description = description[0].strip() if description else ""
+
+ status.append(
+ {
+ "name": name,
+ "group": group,
+ "state": state,
+ "description": description,
+ "online": state == "RUNNING",
+ }
+ )
+ except Exception:
+ import traceback
+
+ traceback.print_exc()
+ return status
+
+ def nginx_status(self):
+ try:
+ systemd = self.execute("sudo systemctl status nginx")
+ except AgentException as e:
+ systemd = e.data
+ return systemd["output"]
+
+ def _generate_nginx_config(self):
+ nginx_config = os.path.join(self.nginx_directory, "nginx.conf")
+ self._render_template(
+ "nginx/nginx.conf.jinja2",
+ {
+ "proxy_ip": self.config.get("proxy_ip"),
+ "tls_protocols": self.config.get("tls_protocols"),
+ "nginx_vts_module_enabled": self.config.get("nginx_vts_module_enabled", True),
+ "ip_whitelist": self.config.get("ip_whitelist", []),
+ },
+ nginx_config,
+ )
+
+ def _generate_agent_nginx_config(self):
+ agent_nginx_config = os.path.join(self.directory, "nginx.conf")
+ self._render_template(
+ "agent/nginx.conf.jinja2",
+ {
+ "web_port": self.config["web_port"],
+ "name": self.name,
+ "registry": self.config.get("registry", False),
+ "monitor": self.config.get("monitor", False),
+ "log": self.config.get("log", False),
+ "analytics": self.config.get("analytics", False),
+ "trace": self.config.get("trace", False),
+ "tls_directory": self.config["tls_directory"],
+ "nginx_directory": self.nginx_directory,
+ "nginx_vts_module_enabled": self.config.get("nginx_vts_module_enabled", True),
+ "pages_directory": os.path.join(self.directory, "repo", "agent", "pages"),
+ "tls_protocols": self.config.get("tls_protocols"),
+ "jcloud_url": self.config.get("jcloud_url"),
+ },
+ agent_nginx_config,
+ )
+
+ def _generate_redis_config(self):
+ redis_config = os.path.join(self.directory, "redis.conf")
+ self._render_template(
+ "agent/redis.conf.jinja2",
+ {"redis_port": self.config["redis_port"]},
+ redis_config,
+ )
+
+ def _generate_supervisor_config(self):
+ supervisor_config = os.path.join(self.directory, "supervisor.conf")
+ self._render_template(
+ "agent/supervisor.conf.jinja2",
+ {
+ "web_port": self.config["web_port"],
+ "redis_port": self.config["redis_port"],
+ "gunicorn_workers": self.config.get("gunicorn_workers", 2),
+ "workers": self.config["workers"],
+ "directory": self.directory,
+ "user": self.config["user"],
+ "sentry_dsn": self.config.get("sentry_dsn"),
+ },
+ supervisor_config,
+ )
+
    def _reload_nginx(self):
        # Graceful reload (no dropped connections); returns the execute() result dict.
        return self.execute("sudo systemctl reload nginx")
+
+ def _render_template(self, template, context, outfile, options=None):
+ if options is None:
+ options = {}
+ options.update({"loader": PackageLoader("agent", "templates")})
+ environment = Environment(**options)
+ template = environment.get_template(template)
+
+ with open(outfile, "w") as f:
+ f.write(template.render(**context))
+
    def _update_supervisor(self):
        # Re-read config files, then apply any added/removed/changed program
        # definitions (reread alone only parses, it does not apply).
        self.execute("sudo supervisorctl reread")
        self.execute("sudo supervisorctl update")
+
    def _get_tree_size(self, path):
        """Return the human-readable disk usage of *path* (e.g. "1.2G").

        NOTE(review): *path* is interpolated into a shell command unquoted —
        confirm callers never pass untrusted or space-containing paths.
        """
        return self.execute(f"du -sh {path}")["output"].split()[0]
+
    def long_method(
        self,
    ):
        """Return per-directory disk usage for a hard-coded path.

        NOTE(review): the path looks like developer-specific debug
        scaffolding ("/home/aditya/Jingrow") — confirm this method is not
        reachable in production before relying on it.
        """
        return self.execute("du -h -d 1 /home/aditya/Jingrow")["output"]
+
+ @job("Long")
+ def long_step(
+ self,
+ ):
+ return self.long_method()
+
+ @job("Long")
+ def long_job(
+ self,
+ ):
+ return self.long_step()
+
    @job("Ping Job")
    def ping_job(self):
        """Health-check job: runs the single ping step."""
        return self.ping_step()
+
    @step("Ping Step")
    def ping_step(self):
        """Trivial step that returns a pong payload."""
        return {"message": "pong"}
+
+ @property
+ def wildcards(self) -> list[str]:
+ wildcards = []
+ for host in os.listdir(self.hosts_directory):
+ if "*" in host:
+ wildcards.append(host.strip("*."))
+ return wildcards