commit 5dcca689785a51d976ea15c73a0af557e566b1a9 Author: jingrow Date: Sat Apr 12 17:39:38 2025 +0800 initial commit diff --git a/.coveragerc b/.coveragerc new file mode 100644 index 0000000..080112a --- /dev/null +++ b/.coveragerc @@ -0,0 +1,6 @@ +[report] +exclude_lines = + pragma: no cover + raise NotImplementedError + if TYPE_CHECKING: + if typing.TYPE_CHECKING: diff --git a/.cspell.json b/.cspell.json new file mode 100644 index 0000000..4ff8b42 --- /dev/null +++ b/.cspell.json @@ -0,0 +1,498 @@ +{ + "version": "0.2", + "language": "en", + "allowCompoundWords": true, + "ignorePaths": [ + "dashboard/node_modules", + "**/assets", + "*.json", + "**.jinja2", + "**.service", + "**.yml", + "test_**", + "**.conf", + "requirements.txt", + "jcloud/utils/country_timezone.py" + ], + "words": [ + "Aaiun", + "Ababa", + "activites", + "Adak", + "adblockers", + "Addis", + "aditya", + "Adminstrator", + "Agejt", + "aggs", + "Akbary", + "Åland", + "Anadyr", + "Andhra", + "ansari", + "Aqtau", + "Aqtobe", + "Araguaina", + "Arunachal", + "Asmera", + "asname", + "asrc", + "ATEXT", + "athul", + "Atikokan", + "Atka", + "atleast", + "Atyrau", + "auid", + "backgound", + "Baja", + "Balamurali", + "Barthelemy", + "Barthélemy", + "Bator", + "behavior", + "behaviour", + "BENTO", + "binlog", + "biosdevname", + "blkid", + "boto", + "Bouvet", + "bouy", + "buildx", + "Busingen", + "Cabo", + "CCONTENT", + "CFWS", + "chdir", + "Chhattisgarh", + "Choibalsan", + "Chuuk", + "cidata", + "cint", + "clamav", + "clas", + "cloudimg", + "CMDLINE", + "cnsistency", + "codespell", + "cofig", + "commitlint", + "Comod", + "COMPATBILITY", + "confs", + "Consolas", + "cpath", + "cpcommerce", + "cpus", + "creat", + "creds", + "Creston", + "csvg", + "Csvg", + "CTEXT", + "Cuiaba", + "Cunha", + "cust", + "Dacca", + "Dadra", + "Danmarkshavn", + "Darkify", + "dateutil", + "DAYOFMONTH", + "DAYOFWEEK", + "DAYOFYEAR", + "dbgsym", + "dboptimize", + "dbserver", + "DCONTENT", + "ddeb", + "ddebs", + "dearmor", + "devscripts", + "devtmpfs", + "dffx", + "Dili", + "dnsmasq", + "dnspython", + "doesnt", + "dont", + "dpkg", + "dribbble", + "DSEes", + "DTEXT", + "DUID", + "Dumont", + "EACCES", + "earlyoom", + "ecommerce", + "EDITMSG", + "Efate", + "Eirunepe", + "elif", + "elts", + "emaill", + "Ensenada", + "EPERM", + "equivs", + "erpdb", + "jerp", + "jerpcom", + "jerpsmb", + "Eswatini", + "Eucla", + "euid", + "execv", + "execve", + "exitst", + "Exlude", + "Fakaofo", + "faris", + "Faso", + "fchmod", + "fchmodat", + "fchown", + "fchownat", + "FEFF", + "jingrowclient", + "jingrowhr", + "Jingrowio", + "jingrowui", + "fremovexattr", + "fsetxattr", + "fstype", + "ftrace", + "ftruncate", + "gcore", + "getdate", + "getitimer", + "gget", + "ghead", + "githubusercontent", + "gmxxxxcom", + "grequests", + "gshadow", + "GSSAPI", + "gstin", + "gstinhide", + "gstinshow", + "gunicorn", + "gxzc", + "hakanensari", + "Haryana", + "hase", + "Haveli", + "hdel", + "hget", + "Himachal", + "hookpy", + "Hovd", + "hrms", + "hrtimers", + "hset", + "hsts", + "htpasswd", + "ifaces", + "ifnames", + "ifnull", + "IGST", + "imds", + "innodb", + "innoterra", + "inodes", + "inplace", + "Inuvik", + "invs", + "iputils", + "ipython", + "isin", + "isnotnull", + "istable", + "ITIMER", + "Jammu", + "jemalloc", + "Jharkhand", + "joomla", + "joxit", + "jscache", + "jsons", + "Jujuy", + "Karnataka", + "Khandyga", + "KHTML", + "Kiritimati", + "Kitts", + "Kolkata", + "Kralendijk", + "Kuala", + "kwarg", + "Ladakh", + "Lakshadweep", + "lchown", + "Leste", + "libc", + "libdevel", + "libharfbuzz", + 
"libpango", + "libpangocairo", + "libsm", + "libstdc", + "libx", + "libxcb", + "libxext", + "libxmuu", + "libxrender", + "Lindeman", + "llen", + "localds", + "Longyearbyen", + "LOUAA", + "lpush", + "lrange", + "lremovexattr", + "lsetxattr", + "Lumpur", + "luxon", + "Maarten", + "Madhya", + "Mahe", + "makeprg", + "mariadbd", + "Marino", + "Marketpalce", + "Mayen", + "mccabe", + "Meghalaya", + "Menlo", + "Metlakatla", + "Mhsc", + "Minh", + "Mizoram", + "mkisofs", + "momentjs", + "Moresby", + "moto", + "Mpesa", + "msgprint", + "msisdn", + "muieblackcat", + "Murdo", + "myadmin", + "myisam", + "mypma", + "mysqld", + "mysqldb", + "Nadu", + "Nagar", + "nedded", + "NEFT", + "Nera", + "netcfg", + "NGROK", + "nineth", + "Nipigon", + "nistp", + "nofail", + "NOPASSWD", + "Noronha", + "Norte", + "notin", + "Nuuk", + "nvme", + "Nxzjr", + "Occurred", + "OCI", + "ocpus", + "ocsp", + "Odisha", + "Ojinaga", + "OLQY", + "onfail", + "oom", + "opasswd", + "OPENBLAS", + "opions", + "overriden", + "OWUVXXW", + "Paasphrase", + "packagejsons", + "Pago", + "paise", + "Pangnirtung", + "paramiko", + "parentfield", + "parenttype", + "pckj", + "pckjs", + "Pedning", + "Pesa", + "pexpect", + "pfiles", + "pgrep", + "phpmyadmin", + "pids", + "pmadb", + "Pohnpei", + "popperjs", + "pppconfig", + "pppoeconf", + "Pradesh", + "primarys", + "proces", + "procs", + "ptype", + "Puducherry", + "Punta", + "Pushkarev", + "pycups", + "pyngrok", + "pypr", + "pypt", + "PYTHONUNBUFFERED", + "pyunit", + "QCONTENT", + "Qostanay", + "qrcode", + "Qrcode", + "QTEXT", + "Qyzylorda", + "rdata", + "redisearch", + "referer", + "Regs", + "Releas", + "removexattr", + "reqd", + "Rerunnability", + "rerunnable", + "Réunion", + "Rica", + "Rioja", + "rootfs", + "rpush", + "rrset", + "rtype", + "rutwikhdev", + "sadd", + "Santo", + "saurabh", + "sbool", + "Scoresbysund", + "sdext", + "sdf", + "sdg", + "sdomain", + "secho", + "Segoe", + "seperate", + "serializability", + "setxattr", + "shadrak", + "signup", + "smembers", + "snuba", + "SNUBA", + "socketio", + "somes", + "sonner", + "splited", + "squashfs", + "Srednekolymsk", + "Starke", + "stdc", + "stime", + "stkpush", + "Storge", + "stripnl", + "supectl", + "supervisorctl", + "supervisord", + "swapuuid", + "symbolicator", + "SYMBOLICATOR", + "synchronise", + "Syowa", + "Syrus", + "sysrq", + "tanmoy", + "tanxxxxxxkar", + "Telangana", + "Tiraspol", + "tldextract", + "tmpfs", + "Tokelau", + "tomli", + "Tongatapu", + "TOOD", + "totp", + "TOTP", + "tqdm", + "Troso", + "tupple", + "uefi", + "Uenf", + "Ujung", + "Ulaanbaatar", + "Ulan", + "unarchived", + "Unbilled", + "uncollectible", + "unfollow", + "unlinkat", + "unparse", + "unpatch", + "unplugin", + "Unprovisioned", + "unscrub", + "unsuspended", + "Unsuspending", + "updadted", + "urandom", + "Urville", + "USEDNS", + "Ushuaia", + "Uttar", + "Uttarakhand", + "Uzhgorod", + "varkw", + "vcpu", + "vcpus", + "Velho", + "venv", + "vetur", + "Vetur", + "Vevay", + "vfat", + "vimrc", + "virsh", + "virtualenv", + "vite", + "Vite", + "vmis", + "vnic", + "volid", + "vpus", + "vueuse", + "weasyprint", + "webp", + "Winamac", + "witht", + "wkhtmlto", + "wkhtmltox", + "xampp", + "xauth", + "xcall", + "xfonts", + "xlink", + "XPUT", + "xvda", + "xvdf", + "xvdg", + "Xzmq", + "Yakutat", + "Yancowinna", + "zloirock", + "Zvkq", + "spamd" + ] +} diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 0000000..63ed281 --- /dev/null +++ b/.editorconfig @@ -0,0 +1,19 @@ +# Root editor config file +root = true + +# Common settings +[*] +end_of_line = lf 
+insert_final_newline = true +trim_trailing_whitespace = true +charset = utf-8 + +# python, js indentation settings +[{*.py}] +indent_style = tab +indent_size = 4 + + +[{*.js,*.vue}] +indent_style = tab +indent_size = 2 diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs new file mode 100644 index 0000000..5b60b73 --- /dev/null +++ b/.git-blame-ignore-revs @@ -0,0 +1,11 @@ +# Regenerate fixtures +9db90c9a790ad8b74e8f476c846898f3450e5c6d + +# Mess up Agent Job Type fixtures +b7d4540c32075cbf569d9c8e256a8ce9898c7115 + +# Fix Agent Job Type fixtures +0c88a71473a906c87c58c94cc11743f79711d240 + +# Generate PageType types +a965b98b90fadf438c5f0a22c5778896743a94e7 \ No newline at end of file diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..81cc2c2 --- /dev/null +++ b/.gitignore @@ -0,0 +1,166 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +pip-wheel-metadata/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +.python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. 
git.jingrow.com:3000/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# Added by jingrow +.DS_Store +*.pyc +*.egg-info +*.swp +tags +node_modules +jcloud/docs/current +jcloud/public/dashboard +jcloud/www/dashboard.html +jcloud/www/dashboard-old.html +jcloud/public/css/email.css +jcloud/public/css/saas-next.css +dashboard/tailwind.theme.json +dashboard/components.d.ts + +# Backbone artefacts +backbone/packer/builds/ +backbone/packer/scratch/ +backbone/packer/images/ +backbone/packer/cloud-init.img +backbone/packer/user-data +backbone/packer/meta-data +backbone/packer/cloud-init-scaleway.img + +# marketplace +jcloud/public/css/marketplace.css +jcloud/public/css/marketplace-next.css + +# Vim +.vim +.nvimrc + +# IDE +.idea +.vscode \ No newline at end of file diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..2795381 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,69 @@ +exclude: 'node_modules|.git' +default_stages: [pre-commit] +fail_fast: false + +repos: + - repo: https://github.com/pre-commit/mirrors-prettier + rev: v4.0.0-alpha.8 + hooks: + - id: prettier + types_or: [javascript, vue] + # Ignore any files that might contain jinja / bundles + exclude: | + (?x)^( + jcloud/public/dist/.*| + .*node_modules.*| + .*boilerplate.*| + jcloud/www/website_script.js| + jcloud/templates/includes/.*| + jcloud/public/js/.*min.js + )$ + + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v5.0.0 + hooks: + - id: debug-statements + - id: trailing-whitespace + files: 'jcloud.*' + exclude: '.*json$|.*txt$|.*csv|.*md|.*svg' + - id: check-merge-conflict + - id: check-ast + - id: check-json + - id: check-toml + - id: check-yaml + + - repo: https://github.com/astral-sh/ruff-pre-commit + rev: v0.9.6 + hooks: + - id: ruff + args: [--fix] + - id: ruff-format + + - repo: local + hooks: + - id: commitlint + name: check commit message format + entry: npx commitlint --edit .git/COMMIT_EDITMSG + language: system + stages: [commit-msg] + always_run: true + + - id: cspell-commit-msg + name: check commit message spelling + entry: npx cspell --config .cspell.json .git/COMMIT_EDITMSG + language: system + stages: [commit-msg] + always_run: true + + - id: cspell-modified-files + name: check spelling of files + entry: sh -c "npx cspell --no-must-find-files --config .cspell.json `git diff --cached -p --name-status | cut -c3- | tr '\n' ' '`" + language: system + stages: [pre-commit] + + - id: todo-warning + name: check todos + entry: .github/hooks/todo-warning.sh + language: script + stages: [pre-commit] + verbose: true diff --git a/.prettierrc.json b/.prettierrc.json new file mode 100644 index 0000000..521e91f --- /dev/null +++ b/.prettierrc.json @@ -0,0 +1,5 @@ +{ + "useTabs": true, + "singleQuote": true, + "tabWidth": 2 +} diff --git a/.semgrepignore b/.semgrepignore new file mode 100644 index 0000000..5e8c8db --- /dev/null +++ b/.semgrepignore @@ -0,0 +1,16 @@ +# Common large paths +node_modules/ +build/ +dist/ +vendor/ +.env/ +.venv/ +.tox/ +*.min.js +.npm/ + +# Semgrep rules folder +.semgrep + +# Semgrep-action log folder +.semgrep_logs/ diff --git a/CODEOWNERS b/CODEOWNERS new file mode 100644 
index 0000000..a563080 --- /dev/null +++ b/CODEOWNERS @@ -0,0 +1,26 @@ +# Each line is a file pattern followed by one or more owners + +# These owners will be the default owners for everything in + +# the repo. Unless a later match takes precedence + +backbone/ @adityahase +ssh* @adityahase +nginx.conf @adityahase +*server @adityahase +playbooks/ @adityahase @balamurali27 + +site* @balamurali27 +team/ @shadrak98 + +dashboard/ @breadgenie +invoice/ @shadrak98 +marketplace* @breadgenie +stripe* @shadrak98 +razorpay* @shadrak98 +subscription/ @shadrak98 + +saas @rutwikhdev + +deploy* @18alantom +jcloud/Dockerfile @18alantom \ No newline at end of file diff --git a/README.md b/README.md new file mode 100644 index 0000000..f056ab9 --- /dev/null +++ b/README.md @@ -0,0 +1,86 @@ +
+[Jcloud logo]
+
+Jcloud
+
+ +**Full Service Cloud Hosting For The Jingrow Stack - Powers Jingrow** + +[![codecov](https://codecov.io/gh/jingrow/jcloud/branch/master/graph/badge.svg?token=0puvH0jUx9)](https://codecov.io/gh/jingrow/jcloud) +[![unittests](http://git.jingrow.com:3000/jingrow/jcloud/actions/workflows/main.yaml/badge.svg)](http://git.jingrow.com:3000/jingrow/jcloud/actions/workflows/main.yaml) + +
+[Managed jcloud]
+
+Website - Documentation
+
+ +## Jcloud + +Jcloud is a 100% open-source cloud hosting for the Jingrow stack. + +### Motivation + +We originally hosted our customer sites on an internal cloud platform called "Central," designed to automate creating and hosting sites when customers signed up on our website. Central was primarily built to host JERP, our flagship product. However, as our customers' needs evolved, they began requesting the ability to host custom applications, a feature that was not a priority in Central. + +Additionally, customers lacked full control over their servers—no SSH access, no ability to manage updates, and limited flexibility in interacting with their environment. This led us to launch Jingrow, to build a self-serve cloud platform that would empower our customers with complete control over their hosting experience. + +### Key Features + +- **Multitenancy Made Easy**: Jcloud simplifies multi-tenancy by enabling multiple sites on a single platform, each with its app version, allowing independent updates and minimal downtime, even for large sites. +- **Dashboard**: The dashboard provides a centralized interface to manage apps, servers, sites, billing, backups, and updates, offering real-time insights and streamlined control of complex operations. + +- **Permissions**: Granular access controls let team owners manage roles and resources efficiently, ensuring users have access only to relevant information and actions for their roles. + +- **Simplified Management**: Jcloud streamlines site management with automated backups, real-time monitoring, role-based access, and easy scaling, making it ideal for growing Jingrow environments. + +- **Billing**: Automated billing supports daily or monthly subscriptions, flexible payment methods, wallet credits, and ERP integration, simplifying customer invoicing and payments. + +- **Marketplace**: The marketplace allows developers to list apps with flexible pricing models, ensures compatibility checks, and provides a streamlined system for sales and payouts. + +
+ Screenshots + +![Dashboard](https://github.com/user-attachments/assets/1904fa3e-39aa-4151-8276-d3cc622ed582) +![Permissions](https://github.com/user-attachments/assets/60da6b5e-8f48-4483-99cf-67886ccc8bd6) +![Bench Group Update](https://github.com/user-attachments/assets/2be6b0ee-084d-4949-8d13-218b5a218d3d) +![Marketplace](https://github.com/user-attachments/assets/2f325737-7929-485d-a670-549f986fd07e) +
+ +### Under the Hood + +- [**Jingrow Framework**](http://git.jingrow.com:3000/jingrow/jingrow): A full-stack web application framework written in Python and Javascript. The framework provides a robust foundation for building web applications, including a database abstraction layer, user authentication, and a REST API. + +- [**Jingrow UI**](http://git.jingrow.com:3000/jingrow/jingrow-ui): A Vue-based UI library, to provide a modern user interface. The Jingrow UI library provides a variety of components that can be used to build single-page applications on top of the Jingrow Framework. + +- [**Agent**](http://git.jingrow.com:3000/jingrow/agent): A flask app designed to work along with Jcloud. It provides a CLI interface for Jcloud to communicate with the sites and benches. + +- [**Docker**](https://www.docker.com): An open-source platform that enables developers to build, package, and deploy applications in lightweight, portable containers. + +- [**Ansible**](https://www.ansible.com): An open-source IT automation tool that simplifies the management, configuration, and deployment of systems and applications. + +## Setup + +To self host or to setup Jcloud locally follow the steps in the [Local Development Environment Setup Guide](https://jingrow.com/docs/local-fc-setup) or [this YouTube video](https://www.learn.jingrow.com/watch?v=Xb9QHnUrIEk) + +## Learn and connect + +- [Telegram Public Group](https://t.me/jingrowcloud) +- [Discuss Forum](https://discuss.jingrow.com/c/jingrow-cloud/77) +- [Documentation](https://jingrow.com/docs) + +
+
+[Jingrow Technologies]
+
diff --git a/backbone/README.md b/backbone/README.md new file mode 100644 index 0000000..8a86b76 --- /dev/null +++ b/backbone/README.md @@ -0,0 +1,7 @@ +# Backbone + +> Note: Not to be confused with the scrapped project **Jingrow Backbone** + +## Installation + +Automatically installed with Jcloud diff --git a/backbone/__init__.py b/backbone/__init__.py new file mode 100644 index 0000000..1988e93 --- /dev/null +++ b/backbone/__init__.py @@ -0,0 +1,3 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt diff --git a/backbone/cli.py b/backbone/cli.py new file mode 100644 index 0000000..7e74320 --- /dev/null +++ b/backbone/cli.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt +import click + +from backbone.hypervisor import Hypervisor, Shell +from backbone.tests import run_tests + + +@click.group() +def cli(): + pass + + +@cli.group() +def hypervisor(): + pass + + +@hypervisor.command() +def setup(): + shell = Shell() + hypervisor = Hypervisor(shell=shell) + hypervisor.setup() + + +@hypervisor.command() +@click.option("--size", default=16384, type=int) +@click.option("--scaleway", is_flag=True) +def build(size, scaleway): + shell = Shell() + hypervisor = Hypervisor(shell=shell) + if scaleway: + hypervisor.build_scaleway(size=size) + else: + hypervisor.build(size=size) + + +@hypervisor.command() +def up(): + shell = Shell() + hypervisor = Hypervisor(shell=shell) + hypervisor.up() + + +@hypervisor.command() +@click.option("-c", "--command") +def ssh(command): + shell = Shell() + hypervisor = Hypervisor(shell=shell) + hypervisor.ssh(command=command) + + +@cli.command() +def tests(): + run_tests() diff --git a/backbone/hypervisor.py b/backbone/hypervisor.py new file mode 100644 index 0000000..718a408 --- /dev/null +++ b/backbone/hypervisor.py @@ -0,0 +1,118 @@ +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt +import platform +import subprocess +from pathlib import Path + + +class Hypervisor: + def __init__(self, shell=None): + self.shell = shell + + def setup(self): + system = platform.system() + if system == "Linux": + self.preinstall() + self.install() + self.verify() + elif system == "Darwin": + self.verify_mac() + + def build(self, size): + system = platform.system() + if system == "Linux": + self.build_cloud_init_linux() + elif system == "Darwin": + self.build_cloud_init_mac() + self.build_packer("backbone", size=size) + + def build_cloud_init_linux(self): + cloud_init_yml = str(Path(__file__).parent.joinpath("packer", "cloud-init.yml")) + cloud_init_image = str(Path(__file__).parent.joinpath("packer", "cloud-init.img")) + self.shell.execute(f"cloud-localds {cloud_init_image} {cloud_init_yml}") + + def build_cloud_init_mac(self): + # cloud-localds isn't available on macOS. 
+ # So we do what it does ourselves + # user-data is the same as cloud-init.yml + # https://github.com/canonical/cloud-utils/blob/49e5dd7849ee3c662f3db35e857148d02e72694b/bin/cloud-localds#L168-L187 + cloud_init_yml = str(Path(__file__).parent.joinpath("packer", "cloud-init.yml")) + user_data = str(Path(__file__).parent.joinpath("packer", "user-data")) + self.shell.execute(f"cp {cloud_init_yml} {user_data}") + + # meta-data has some inconsequential values + # but the file is needed + meta_data = str(Path(__file__).parent.joinpath("packer", "meta-data")) + self.shell.execute(f"touch {meta_data}") + + cloud_init_image = str(Path(__file__).parent.joinpath("packer", "cloud-init.img")) + # Reference: https://github.com/canonical/cloud-utils/blob/49e5dd7849ee3c662f3db35e857148d02e72694b/bin/cloud-localds#L235-L237 + self.shell.execute( + f"mkisofs -joliet -rock -volid cidata -output {cloud_init_image} {user_data} {meta_data}" + ) + + def build_packer(self, template, size): + packer_template = str(Path(__file__).parent.joinpath("packer", f"{template}.json")) + packer = self.shell.execute(f"packer build -var 'disk_size={size}' {packer_template}") + if packer.returncode: + raise Exception("Build Failed") + + box = str(Path(__file__).parent.joinpath("packer", "builds", f"{template}.box")) + add = self.shell.execute(f"vagrant box add {box} --name {template} --force") + if add.returncode: + raise Exception(f"Cannot add box {box}") + + def build_scaleway(self, size): + self.build_cloud_init_scaleway() + self.build_packer("scaleway", size=size) + + def build_cloud_init_scaleway(self): + cloud_init_yml = str(Path(__file__).parent.joinpath("packer", "cloud-init-scaleway.yml")) + + cloud_init_image = str(Path(__file__).parent.joinpath("packer", "cloud-init-scaleway.img")) + self.shell.execute(f"cloud-localds {cloud_init_image} {cloud_init_yml}") + + def up(self): + vagrant = self.shell.execute("vagrant init backbone") + vagrant = self.shell.execute("vagrant up --provider=libvirt") + if vagrant.returncode: + raise Exception("Cannot start hypervisor") + + def ssh(self, command=None): + if command: + vagrant = self.shell.execute(f'vagrant ssh -c "{command}"') + else: + vagrant = self.shell.execute("vagrant ssh") + if vagrant.returncode: + raise Exception("Cannot ssh") + + def preinstall(self): + kvm_ok = self.shell.execute("kvm-ok") + if kvm_ok.returncode: + raise Exception("Cannot use KVM") + + def install(self): + kvm_install = self.shell.execute("sudo apt install qemu-kvm") + if kvm_install.returncode: + raise Exception("Cannot install KVM") + + def verify(self): + kvm_connect = self.shell.execute("virsh list --all") + if kvm_connect.returncode: + raise Exception("Cannot connect to KVM") + + def verify_mac(self): + kvm_connect = self.shell.execute("virsh list --all") + if kvm_connect.returncode: + raise Exception("Cannot connect to KVM") + + +class Shell: + def __init__(self, directory=None): + self.directory = directory + + def execute(self, command, directory=None): + directory = directory or self.directory + return subprocess.run( + command, check=False, stderr=subprocess.STDOUT, cwd=directory, shell=True, text=True + ) diff --git a/backbone/packer/backbone.json b/backbone/packer/backbone.json new file mode 100644 index 0000000..7f0ddb6 --- /dev/null +++ b/backbone/packer/backbone.json @@ -0,0 +1,56 @@ +{ + "builders": [ + { + "boot_wait": "10s", + "cpus": "2", + "disk_image": true, + "disk_size": "{{user `disk_size`}}", + "iso_checksum": 
"1bf86f40534c7c4c5491bbc8064bf1b0764da8c88d5a12edce0f442bc3055784", + "iso_urls": [ + "{{template_dir}}/images/ubuntu-20.04-server-cloudimg-amd64.img", + "{{template_dir}}/images/79f46c38b9e000a66d0edecf3222e2371fccd8a1.img", + "https://cloud-images.ubuntu.com/releases/focal/release-20221213/ubuntu-20.04-server-cloudimg-amd64.img" + ], + "iso_target_path": "{{template_dir}}/images", + "iso_target_extension": "img", + "memory": "4096", + "output_directory": "{{template_dir}}/scratch", + "headless": true, + "qemuargs": [ + [ + "-cdrom", + "{{template_dir}}/cloud-init.img" + ] + ], + "shutdown_command": "echo 'vagrant' | sudo -S shutdown -P now", + "ssh_password": "vagrant", + "ssh_username": "vagrant", + "type": "qemu", + "use_backing_file": false, + "vm_name": "backbone" + } + ], + "post-processors": [ + { + "output": "{{template_dir}}/builds/backbone.box", + "type": "vagrant" + } + ], + "provisioners": [ + { + "execute_command": "echo 'vagrant' | {{.Vars}} sudo -S -E sh -eux '{{.Path}}'", + "expect_disconnect": true, + "scripts": [ + "{{template_dir}}/scripts/sshd.sh", + "{{template_dir}}/scripts/networking.sh", + "{{template_dir}}/scripts/update.sh", + "{{template_dir}}/scripts/cleanup.sh", + "{{template_dir}}/scripts/minimize.sh" + ], + "type": "shell" + } + ], + "variables": { + "disk_size": "16384" + } +} \ No newline at end of file diff --git a/backbone/packer/cloud-init-scaleway.yml b/backbone/packer/cloud-init-scaleway.yml new file mode 100644 index 0000000..c619d15 --- /dev/null +++ b/backbone/packer/cloud-init-scaleway.yml @@ -0,0 +1,27 @@ +#cloud-config + +ssh_pwauth: true + +users: + - name: root + shell: /usr/bin/bash + + - name: jingrow + gecos: Jingrow + groups: sudo + lock_passwd: false + passwd: $6$rounds=4096$GytYXpLxIgl5SZ$C3zfa5zfD66lfm/TEgtlAVYbl3IjEK9ZAND4qnI7fXGWGhqUFl7m2DD25VjimMfqH3SepUBTUwuyiubwpTUtc/ + ssh_authorized_keys: + - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDB3zVjTzHQSEHQG7OD3bYi7V1xk+PCwko0W3+d1fSUvSDCxSaMKtR31+CfMKmjnvoHubOHYI9wvLpx6KdZUl2uOzKnoLazi/FCGD+m75PS4lraNU6S/B62OQk0xaClDNYBKC3H3rdXCwTU4aWflWLcfc0bmffFDTDZBJa4ySy9ne9FomGYsaMMdYtt2GNwqOOAkhzI96RFz3d4/HvHDqAeR1zv5hdqpoRL49H+3PYHIpu3rz+oMGIrN/ZM7EhvXP3yCgBMIYDTpihbv0+KTJx9rQmGNdLObM+M3HHq2C4/Xj0yAd2xQYBSr/orUyJKeGB367k72M2NADT5EzPr99AV aditya@aditya + shell: /usr/bin/bash + uid: "1000" + + - name: vagrant + gecos: Vagrant + lock_passwd: false + passwd: $6$rounds=4096$GytYXpLxIgl5SZ$C3zfa5zfD66lfm/TEgtlAVYbl3IjEK9ZAND4qnI7fXGWGhqUFl7m2DD25VjimMfqH3SepUBTUwuyiubwpTUtc/ + shell: /usr/bin/bash + ssh_authorized_keys: + - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA6NF8iallvQVp22WDkTkyrtvp9eWW6A8YVr+kz4TjGYe7gHzIw+niNltGEFHzD8+v1I2YJ6oXevct1YeS0o9HZyN1Q9qgCgzUFtdOKLv6IedplqoPkcmF0aYet2PkEDo3MlTBckFXPITAMzF8dJSIFo9D8HfdOV0IAdx4O7PtixWKn5y2hMNG0zQPyUecp4pzC6kivAIhyfHilFR61RGL+GPXQ2MWZWFYbAGjyiYJnAmCP3NOTd0jMZEnDkbUvxhMmBYSdETk1rRgm+R4LOzFUGaHqHDLKLX+FIPKcF96hrucXzcWyLbIbEgE98OHlnVYCzRdK8jlqm8tehUc9c9WhQ== vagrant insecure public key + sudo: ALL=(ALL) NOPASSWD:ALL + uid: "2000" diff --git a/backbone/packer/cloud-init.yml b/backbone/packer/cloud-init.yml new file mode 100644 index 0000000..c1e3b9e --- /dev/null +++ b/backbone/packer/cloud-init.yml @@ -0,0 +1,19 @@ +#cloud-config + +ssh_pwauth: true + +users: + - name: root + shell: /usr/bin/bash + ssh_authorized_keys: + - ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQDB3zVjTzHQSEHQG7OD3bYi7V1xk+PCwko0W3+d1fSUvSDCxSaMKtR31+CfMKmjnvoHubOHYI9wvLpx6KdZUl2uOzKnoLazi/FCGD+m75PS4lraNU6S/B62OQk0xaClDNYBKC3H3rdXCwTU4aWflWLcfc0bmffFDTDZBJa4ySy9ne9FomGYsaMMdYtt2GNwqOOAkhzI96RFz3d4/HvHDqAeR1zv5hdqpoRL49H+3PYHIpu3rz+oMGIrN/ZM7EhvXP3yCgBMIYDTpihbv0+KTJx9rQmGNdLObM+M3HHq2C4/Xj0yAd2xQYBSr/orUyJKeGB367k72M2NADT5EzPr99AV aditya@aditya + + - name: vagrant + gecos: Vagrant + lock_passwd: false + passwd: $6$rounds=4096$GytYXpLxIgl5SZ$C3zfa5zfD66lfm/TEgtlAVYbl3IjEK9ZAND4qnI7fXGWGhqUFl7m2DD25VjimMfqH3SepUBTUwuyiubwpTUtc/ + shell: /usr/bin/bash + ssh_authorized_keys: + - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA6NF8iallvQVp22WDkTkyrtvp9eWW6A8YVr+kz4TjGYe7gHzIw+niNltGEFHzD8+v1I2YJ6oXevct1YeS0o9HZyN1Q9qgCgzUFtdOKLv6IedplqoPkcmF0aYet2PkEDo3MlTBckFXPITAMzF8dJSIFo9D8HfdOV0IAdx4O7PtixWKn5y2hMNG0zQPyUecp4pzC6kivAIhyfHilFR61RGL+GPXQ2MWZWFYbAGjyiYJnAmCP3NOTd0jMZEnDkbUvxhMmBYSdETk1rRgm+R4LOzFUGaHqHDLKLX+FIPKcF96hrucXzcWyLbIbEgE98OHlnVYCzRdK8jlqm8tehUc9c9WhQ== vagrant insecure public key + sudo: ALL=(ALL) NOPASSWD:ALL + uid: "2000" diff --git a/backbone/packer/scaleway.json b/backbone/packer/scaleway.json new file mode 100644 index 0000000..edec610 --- /dev/null +++ b/backbone/packer/scaleway.json @@ -0,0 +1,57 @@ +{ + "builders": [ + { + "boot_wait": "10s", + "cpus": "2", + "disk_image": true, + "disk_size": "{{user `disk_size`}}", + "iso_checksum": "36403f9562949545e2a6c38d4b840008acae674e20b67a67f4facba610b82aec", + "iso_urls": [ + "{{template_dir}}/images/ubuntu-20.04-server-cloudimg-amd64.img", + "{{template_dir}}/images/02b24c4cf15cb4f576c262aa4efa6bca3c64c620.img", + "https://cloud-images.ubuntu.com/releases/focal/release-20201210/ubuntu-20.04-server-cloudimg-amd64.img" + ], + "iso_target_path": "{{template_dir}}/images", + "iso_target_extension": "img", + "memory": "4096", + "output_directory": "{{template_dir}}/scratch", + "headless": true, + "qemuargs": [ + [ + "-fda", + "{{template_dir}}/cloud-init-scaleway.img" + ] + ], + "shutdown_command": "echo 'vagrant' | sudo -S shutdown -P now", + "ssh_password": "vagrant", + "ssh_username": "vagrant", + "type": "qemu", + "use_backing_file": false, + "vm_name": "scaleway" + } + ], + "post-processors": [ + { + "output": "{{template_dir}}/builds/scaleway.box", + "type": "vagrant" + } + ], + "provisioners": [ + { + "execute_command": "echo 'vagrant' | {{.Vars}} sudo -S -E sh -eux '{{.Path}}'", + "expect_disconnect": true, + "scripts": [ + "{{template_dir}}/scripts/sshd.sh", + "{{template_dir}}/scripts/scaleway-sshd.sh", + "{{template_dir}}/scripts/networking.sh", + "{{template_dir}}/scripts/update.sh", + "{{template_dir}}/scripts/cleanup.sh", + "{{template_dir}}/scripts/minimize.sh" + ], + "type": "shell" + } + ], + "variables": { + "disk_size": "16384" + } +} \ No newline at end of file diff --git a/backbone/packer/scripts/cleanup.sh b/backbone/packer/scripts/cleanup.sh new file mode 100644 index 0000000..87a371d --- /dev/null +++ b/backbone/packer/scripts/cleanup.sh @@ -0,0 +1,113 @@ +#!/bin/sh -eux +export DEBIAN_FRONTEND=noninteractive + +# Remove open-vm-tools +apt-get -y purge open-vm-tools + +# Remove git and vim +apt-get -y purge git vim-common + +# Remove snapd +apt-get -y purge snapd +rm -rf /var/cache/snapd/ +rm -rf /snap + +# Remove cloud init +apt-get -y purge cloud-init +rm -rf /etc/cloud/ +rm -rf /var/lib/cloud/ + +# Delete all Linux headers +dpkg --list \ + | awk '{ print $2 }' \ + | grep 'linux-headers' \ + | xargs apt-get -y purge + +# Remove specific Linux kernels, such as 
linux-image-3.11.0-15-generic but +# keeps the current kernel and does not touch the virtual packages, +# e.g. 'linux-image-generic', etc. +dpkg --list \ + | awk '{ print $2 }' \ + | grep 'linux-image-.*-generic' \ + | grep -v `uname -r` \ + | xargs apt-get -y purge + +# Delete Linux source +dpkg --list \ + | awk '{ print $2 }' \ + | grep linux-source \ + | xargs apt-get -y purge + +# Delete development packages +dpkg --list \ + | awk '{ print $2 }' \ + | grep -- '-dev$' \ + | xargs apt-get -y purge + +# delete docs packages +dpkg --list \ + | awk '{ print $2 }' \ + | grep -- '-pg$' \ + | xargs apt-get -y purge + +# Delete X11 libraries +apt-get -y purge libx11-data xauth libxmuu1 libxcb1 libx11-6 libxext6 + +# Delete obsolete networking +apt-get -y purge ppp pppconfig pppoeconf + +# Delete oddities +apt-get -y purge popularity-contest installation-report command-not-found friendly-recovery bash-completion fonts-ubuntu-font-family-console laptop-detect + +# Exlude the files we don't need w/o uninstalling linux-firmware +echo "==> Setup dpkg excludes for linux-firmware" +cat <<_EOF_ | cat >> /etc/dpkg/dpkg.cfg.d/excludes +#BENTO-BEGIN +path-exclude=/lib/firmware/* +path-exclude=/usr/share/pg/linux-firmware/* +#BENTO-END +_EOF_ + +# Delete the massive firmware packages +rm -rf /lib/firmware/* +rm -rf /usr/share/pg/linux-firmware/* + +# Clean up orphaned packages with deborphan +apt-get -y install deborphan +while [ -n "$(deborphan --guess-all --libdevel)" ] +do + deborphan --guess-all --libdevel | xargs apt-get -y purge +done +apt-get -y purge deborphan dialog + +apt-get -y autoremove +apt-get -y autoclean +apt-get -y clean + +# Remove docs +rm -rf /usr/share/pg/* + +# Remove man pages +rm -rf /usr/share/man/* + +# Remove cache files +find /var/cache -type f -exec rm -rf {} \; + +# Remove APT files" +find /var/lib/apt -type f | xargs rm -f + +# truncate any logs that have built up during the install +find /var/log -type f -exec truncate --size=0 {} \; + +# Blank netplan machine-id (DUID) so machines get unique ID generated on boot. +truncate -s 0 /etc/machine-id + +# remove the contents of /tmp and /var/tmp +rm -rf /tmp/* /var/tmp/* + +# clear the history so our install isn't there +export HISTSIZE=0 +rm -f /root/.wget-hsts + +# Remove unused blocks +/sbin/fstrim -v / diff --git a/backbone/packer/scripts/minimize.sh b/backbone/packer/scripts/minimize.sh new file mode 100644 index 0000000..189d900 --- /dev/null +++ b/backbone/packer/scripts/minimize.sh @@ -0,0 +1,33 @@ +#!/bin/sh -eux + +# Whiteout root +count=$(df --sync -kP / | tail -n1 | awk -F ' ' '{print $4}') +count=$(($count-1)) +dd if=/dev/zero of=/tmp/whitespace bs=1M count=$count || echo "dd exit code $? is suppressed" +rm /tmp/whitespace + +# Whiteout /boot +count=$(df --sync -kP /boot | tail -n1 | awk -F ' ' '{print $4}') +count=$(($count-1)) +dd if=/dev/zero of=/boot/whitespace bs=1M count=$count || echo "dd exit code $? is suppressed" +rm /boot/whitespace + +set +e +swapuuid="`/sbin/blkid -o value -l -s UUID -t TYPE=swap`" +case "$?" in + 2|0) ;; + *) exit 1 ;; +esac +set -e + +if [ "x${swapuuid}" != "x" ] +then + # Whiteout the swap partition to reduce box size + # Swap is disabled till reboot + swappart="`readlink -f /dev/disk/by-uuid/$swapuuid`" + /sbin/swapoff "$swappart" + dd if=/dev/zero of="$swappart" bs=1M || echo "dd exit code $? 
is suppressed" + /sbin/mkswap -U "$swapuuid" "$swappart" +fi + +sync diff --git a/backbone/packer/scripts/networking.sh b/backbone/packer/scripts/networking.sh new file mode 100644 index 0000000..bb37dd2 --- /dev/null +++ b/backbone/packer/scripts/networking.sh @@ -0,0 +1,14 @@ +#!/bin/sh -eux + +echo "Create netplan config for eth0" +cat </etc/netplan/01-netcfg.yaml +network: + version: 2 + ethernets: + eth0: + dhcp4: true +EOF + +# Disable Predictable Network Interface names and use eth0 +sed -i 's/GRUB_CMDLINE_LINUX="\(.*\)"/GRUB_CMDLINE_LINUX="net.ifnames=0 biosdevname=0 \1"/g' /etc/default/grub +update-grub diff --git a/backbone/packer/scripts/scaleway-sshd.sh b/backbone/packer/scripts/scaleway-sshd.sh new file mode 100644 index 0000000..8c440ec --- /dev/null +++ b/backbone/packer/scripts/scaleway-sshd.sh @@ -0,0 +1,14 @@ +#!/bin/sh -eux + +SSHD_CONFIG="/etc/ssh/sshd_config" + +# ensure that there is a trailing newline before attempting to concatenate +sed -i -e '$a\' "$SSHD_CONFIG" + +DISABLE_PASSWORD_AUTHENTICATION="PasswordAuthentication yes" +if grep -q -E "^[[:space:]]*PasswordAuthentication" "$SSHD_CONFIG" +then + sed -i "s/^\s*PasswordAuthentication.*/${DISABLE_PASSWORD_AUTHENTICATION}/" "$SSHD_CONFIG" +else + echo "$DISABLE_PASSWORD_AUTHENTICATION" >>"$SSHD_CONFIG" +fi diff --git a/backbone/packer/scripts/sshd.sh b/backbone/packer/scripts/sshd.sh new file mode 100644 index 0000000..ae31670 --- /dev/null +++ b/backbone/packer/scripts/sshd.sh @@ -0,0 +1,22 @@ +#!/bin/sh -eux + +SSHD_CONFIG="/etc/ssh/sshd_config" + +# ensure that there is a trailing newline before attempting to concatenate +sed -i -e '$a\' "$SSHD_CONFIG" + +USEDNS="UseDNS no" +if grep -q -E "^[[:space:]]*UseDNS" "$SSHD_CONFIG" +then + sed -i "s/^\s*UseDNS.*/${USEDNS}/" "$SSHD_CONFIG" +else + echo "$USEDNS" >>"$SSHD_CONFIG" +fi + +GSSAPI="GSSAPIAuthentication no" +if grep -q -E "^[[:space:]]*GSSAPIAuthentication" "$SSHD_CONFIG" +then + sed -i "s/^\s*GSSAPIAuthentication.*/${GSSAPI}/" "$SSHD_CONFIG" +else + echo "$GSSAPI" >>"$SSHD_CONFIG" +fi diff --git a/backbone/packer/scripts/update.sh b/backbone/packer/scripts/update.sh new file mode 100644 index 0000000..a4f09f1 --- /dev/null +++ b/backbone/packer/scripts/update.sh @@ -0,0 +1,38 @@ +#!/bin/sh -eux +export DEBIAN_FRONTEND=noninteractive + +# Disable release-upgrades +sed -i.bak 's/^Prompt=.*$/Prompt=never/' /etc/update-manager/release-upgrades + +# Disable systemd apt timers/services +systemctl stop apt-daily.timer +systemctl stop apt-daily-upgrade.timer +systemctl disable apt-daily.timer +systemctl disable apt-daily-upgrade.timer +systemctl mask apt-daily.service +systemctl mask apt-daily-upgrade.service +systemctl daemon-reload + +# Disable periodic activities of apt to be safe +cat </etc/apt/apt.conf.d/10periodic; +APT::Periodic::Enable "0"; +APT::Periodic::Update-Package-Lists "0"; +APT::Periodic::Download-Upgradeable-Packages "0"; +APT::Periodic::AutocleanInterval "0"; +APT::Periodic::Unattended-Upgrade "0"; +EOF + +# Clean and nuke the package from orbit +rm -rf /var/log/unattended-upgrades +apt-get -y purge unattended-upgrades + +# Update the package list +apt-get -y update + +# Upgrade all installed packages incl. 
kernel and kernel headers +apt-get -y dist-upgrade -o Dpkg::Options::="--force-confnew" + +# Install QEMU guest agent +apt-get install qemu-guest-agent + +reboot diff --git a/backbone/setup.py b/backbone/setup.py new file mode 100644 index 0000000..900f278 --- /dev/null +++ b/backbone/setup.py @@ -0,0 +1,57 @@ +import sys + +from backbone.hypervisor import Shell + +shell = Shell() + + +def apt_install(packages): + shell.execute( + f"sudo apt install --yes --no-install-suggests --no-install-recommends {packages}" + ) + + +def main(args): + prepare() + setup_vagrant() + setup_kvm() + setup_libvirt() + setup_packer() + + +def prepare(): + shell.execute("sudo apt update") + apt_install("build-essential") + + +def setup_vagrant(): + VAGRANT_SERVER = "https://releases.hashicorp.com/vagrant/2.2.10" + VAGRANT_PACKAGE = "vagrant_2.2.10_x86_64.deb" + shell.execute(f"wget {VAGRANT_SERVER}/{VAGRANT_PACKAGE} -O {VAGRANT_PACKAGE}") + shell.execute(f"sudo dpkg -i {VAGRANT_PACKAGE}") + + +def setup_packer(): + PACKER_KEY = "https://apt.releases.hashicorp.com/gpg" + PACKER_REPO = ( + '"deb [arch=amd64] https://apt.releases.hashicorp.com $(lsb_release -cs) main"' + ) + shell.execute(f"curl -fsSL {PACKER_KEY} | sudo apt-key add -") + shell.execute(f"sudo apt-add-repository {PACKER_REPO}") + apt_install("packer cloud-utils") + + +def setup_kvm(): + apt_install("qemu-kvm") + shell.execute("sudo usermod -aG kvm $USER") + + +def setup_libvirt(): + apt_install("libvirt-dev libvirt-daemon-system qemu-utils dnsmasq-base") + shell.execute("sudo usermod -aG libvirt $USER") + shell.execute("vagrant plugin install vagrant-libvirt") + shell.execute("vagrant plugin install vagrant-hostmanager") + + +if __name__ == "__main__": + sys.exit(main(sys.argv)) diff --git a/backbone/setup_mac.py b/backbone/setup_mac.py new file mode 100644 index 0000000..80bd730 --- /dev/null +++ b/backbone/setup_mac.py @@ -0,0 +1,57 @@ +import sys + +from backbone.hypervisor import Shell + +shell = Shell() + + +def brew_install(packages): + shell.execute(f"brew install {packages}") + + +def main(args): + prepare() + setup_qemu() + setup_vagrant() + setup_libvirt() + setup_packer() + + +def prepare(): + shell.execute("brew update") + brew_install("cdrtools iproute2mac") + + +def setup_qemu(): + brew_install("qemu") + # We might need to disable a few things + # echo 'security_driver = "none"' >> /opt/homebrew/etc/libvirt/qemu.conf + # echo "dynamic_ownership = 0" >> /opt/homebrew/etc/libvirt/qemu.conf + # echo "remember_owner = 0" >> /opt/homebrew/etc/libvirt/qemu.conf + + +def setup_vagrant(): + # At the time of writing hashicorp tap has older 2.4.2 version + # We need 2.4.3 + # Reference: https://github.com/vagrant-libvirt/vagrant-libvirt/issues/1831 + brew_install("vagrant") + + +def setup_libvirt(): + brew_install("libvirt") + shell.execute("brew services start libvirt") + # Make sure you haven't installed macports + # It overrides pkg-config, and we won't find brew libvirt packages + shell.execute("vagrant plugin install vagrant-libvirt") + shell.execute("vagrant plugin install vagrant-hostmanager") + + +def setup_packer(): + shell.execute("brew tap hashicorp/tap") + brew_install("hashicorp/tap/packer") + shell.execute("packer plugins install git.jingrow.com:3000/hashicorp/qemu") + shell.execute("packer plugins install git.jingrow.com:3000/hashicorp/vagrant") + + +if __name__ == "__main__": + sys.exit(main(sys.argv)) diff --git a/backbone/tests/__init__.py b/backbone/tests/__init__.py new file mode 100644 index 0000000..0132fa3 --- /dev/null 
+++ b/backbone/tests/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt +import unittest +from pathlib import Path + +from coverage import Coverage + + +def run_tests(): + coverage = Coverage( + source=[str(Path(__file__).parent.parent)], omit=["*/tests/*"], branch=True + ) + coverage.start() + unittest.main(module=None, argv=["", "discover", "-s", "backbone"], exit=False) + coverage.stop() + coverage.save() + coverage.html_report() + + +if __name__ == "__main__": + unittest.main() diff --git a/backbone/tests/test_hypervisor.py b/backbone/tests/test_hypervisor.py new file mode 100644 index 0000000..ba74989 --- /dev/null +++ b/backbone/tests/test_hypervisor.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt +import unittest +from unittest.mock import MagicMock + +from backbone.hypervisor import Hypervisor + + +class TestHypervisor(unittest.TestCase): + def test_preinstall_pass(self): + shell = MagicMock() + shell.execute.return_value.returncode = 0 + hypervisor = Hypervisor(shell=shell) + self.assertEqual(hypervisor.preinstall(), None) + shell.execute.assert_called_with("kvm-ok") + + def test_preinstall_fail(self): + shell = MagicMock() + shell.execute.return_value.returncode = 1 + hypervisor = Hypervisor(shell=shell) + self.assertRaisesRegex(Exception, "Cannot use KVM", hypervisor.preinstall) + + def test_install_pass(self): + shell = MagicMock() + shell.execute.return_value.returncode = 0 + hypervisor = Hypervisor(shell=shell) + self.assertEqual(hypervisor.install(), None) + shell.execute.assert_called_with("sudo apt install qemu-kvm") + + def test_install_fail(self): + shell = MagicMock() + shell.execute.return_value.returncode = 1 + hypervisor = Hypervisor(shell=shell) + self.assertRaisesRegex(Exception, "Cannot install KVM", hypervisor.install) + + def test_verify_pass(self): + shell = MagicMock() + shell.execute.return_value.returncode = 0 + hypervisor = Hypervisor(shell=shell) + self.assertEqual(hypervisor.verify(), None) + shell.execute.assert_called_with("virsh list --all") + + def test_verify_fail(self): + shell = MagicMock() + shell.execute.return_value.returncode = 1 + hypervisor = Hypervisor(shell=shell) + self.assertRaisesRegex(Exception, "Cannot connect to KVM", hypervisor.verify) diff --git a/backbone/vagrant/Vagrantfile b/backbone/vagrant/Vagrantfile new file mode 100644 index 0000000..5c0efff --- /dev/null +++ b/backbone/vagrant/Vagrantfile @@ -0,0 +1,169 @@ +Vagrant.configure("2") do |config| + config.vm.box = "backbone" + config.vm.synced_folder ".", "/vagrant", disabled: true + + # This let's us access all guests with their names from host and other guests + config.hostmanager.enabled = true + config.hostmanager.manage_host = true + config.hostmanager.manage_guest = true + + config.vm.provider :libvirt do |libvirt| + libvirt.qemu_use_session = false + # Enable qemu_use_session or set this on macOS + # Also run **sudo** brew services start + # libvirt.uri = "qemu:///session" + libvirt.driver = "kvm" + libvirt.default_prefix = "" + # VMs with little disk space may fail to boot + libvirt.machine_virtual_size = 16 + libvirt.cpus = 1 + libvirt.cpu_mode = "host-passthrough" + libvirt.memory = 512 + end + + # We will add two static IPs to simulate public and private interfaces + # Host manager plugin will work only with first interface in this list + # Public 10.0.x.x + # Private 10.1.x.x + + # IP Pattern based on 
server types + # Proxy x.x.1.x + # Jingrow x.x.2.x + # Database x.x.3.x + # Other x.x.4.x + + # We'll start IPs from x.x.x.101 + + # Default Cluster + + # Reverse Proxy Server + config.vm.define "n1.local.jingrow.dev" do |n1| + n1.vm.hostname = "n1.local.jingrow.dev" + n1.vm.network "private_network", ip: "10.0.1.101", netmask: "255.255.0.0" + n1.vm.network "private_network", ip: "10.1.1.101", netmask: "255.255.0.0" + n1.vm.provider :libvirt do |libvirt| + libvirt.memory = 1024 + end + end + + # Primary App Server + config.vm.define "f1.local.jingrow.dev" do |f1| + f1.vm.hostname = "f1.local.jingrow.dev" + f1.vm.network "private_network", ip: "10.0.2.101", netmask: "255.255.0.0" + f1.vm.network "private_network", ip: "10.1.2.101", netmask: "255.255.0.0" + f1.vm.provider :libvirt do |libvirt| + libvirt.cpus = 2 + libvirt.memory = 4096 + end + end + + # Replica of f1 + # config.vm.define "f2.local.jingrow.dev" do |f2| + # f2.vm.hostname = "f2.local.jingrow.dev" + # f2.vm.network "private_network", ip: "10.0.2.102", netmask: "255.255.0.0" + # f2.vm.network "private_network", ip: "10.1.2.102", netmask: "255.255.0.0" + # f2.vm.provider :libvirt do |libvirt| + # libvirt.cpus = 2 + # libvirt.memory = 4096 + # end + # end + + # Primary DB Server + config.vm.define "m1.local.jingrow.dev" do |m1| + m1.vm.hostname = "m1.local.jingrow.dev" + m1.vm.network "private_network", ip: "10.0.3.101", netmask: "255.255.0.0" + m1.vm.network "private_network", ip: "10.1.3.101", netmask: "255.255.0.0" + m1.vm.provider :libvirt do |libvirt| + libvirt.cpus = 1 + libvirt.memory = 2048 + end + end + + # Replica of m1 + # config.vm.define "m2.local.jingrow.dev" do |m2| + # m2.vm.hostname = "m2.local.jingrow.dev" + # m2.vm.network "private_network", ip: "10.0.3.102", netmask: "255.255.0.0" + # m2.vm.network "private_network", ip: "10.1.3.102", netmask: "255.255.0.0" + # m2.vm.provider :libvirt do |libvirt| + # libvirt.cpus = 1 + # libvirt.memory = 2048 + # end + # end + + # # Secondary Cluster + # config.vm.define "n2.jingrow.dev" do |n2| + # n2.vm.hostname = "n2.jingrow.dev" + # n2.vm.network "private_network", ip: "10.0.1.102", netmask: "255.255.0.0" + # n2.vm.network "private_network", ip: "10.1.1.102", netmask: "255.255.0.0" + # end + + # Additional Hosts. 
+ + # Docker Registry + config.vm.define "registry.local.jingrow.dev" do |registry| + registry.vm.hostname = "registry.local.jingrow.dev" + registry.vm.network "private_network", ip: "10.0.4.101", netmask: "255.255.0.0" + registry.vm.network "private_network", ip: "10.1.4.101", netmask: "255.255.0.0" + end + + # Log Server = ElasticSearch + Logstash + Kibana + config.vm.define "log.local.jingrow.dev" do |log| + log.vm.hostname = "log.local.jingrow.dev" + log.vm.network "private_network", ip: "10.0.4.102", netmask: "255.255.0.0" + log.vm.network "private_network", ip: "10.1.4.102", netmask: "255.255.0.0" + log.vm.provider :libvirt do |libvirt| + libvirt.cpus = 2 + libvirt.memory = 4096 + end + end + + # Uptime Server = Prometheus + Grafana + config.vm.define "monitor.local.jingrow.dev" do |monitor| + monitor.vm.hostname = "monitor.local.jingrow.dev" + monitor.vm.network "private_network", ip: "10.0.4.103", netmask: "255.255.0.0" + monitor.vm.network "private_network", ip: "10.1.4.103", netmask: "255.255.0.0" + monitor.vm.provider :libvirt do |libvirt| + libvirt.memory = 1024 + end + end + + # Analytics Server = Plausible + # config.vm.define "analytics.local.jingrow.dev" do |analytics| + # analytics.vm.hostname = "analytics.local.jingrow.dev" + # analytics.vm.network "private_network", ip: "10.0.4.104", netmask: "255.255.0.0" + # analytics.vm.network "private_network", ip: "10.1.4.104", netmask: "255.255.0.0" + # analytics.vm.provider :libvirt do |libvirt| + # libvirt.memory = 1024 + # end + # end + + # Trace Server = Sentry + config.vm.define "trace.local.jingrow.dev" do |trace| + trace.vm.hostname = "trace.local.jingrow.dev" + trace.vm.network "private_network", ip: "10.0.4.105", netmask: "255.255.0.0" + trace.vm.network "private_network", ip: "10.1.4.105", netmask: "255.255.0.0" + trace.vm.provider :libvirt do |libvirt| + libvirt.cpus = 2 + libvirt.memory = 4096 + end + end + + # config.vm.define "sn1.local.jingrow.dev" do |sn1| + # sn1.vm.box = "scaleway" + # sn1.vm.hostname = "sn1.local.jingrow.dev" + # sn1.vm.network "private_network", ip: "10.2.0.101", netmask: "255.255.0.0" + # sn1.vm.network "private_network", ip: "10.3.0.101", netmask: "255.255.0.0", auto_config: false + # end + + # config.vm.define "sf1.local.jingrow.dev" do |sf1| + # sf1.vm.box = "scaleway" + # sf1.vm.hostname = "sf1.local.jingrow.dev" + # sf1.vm.network "private_network", ip: "10.2.1.101", netmask: "255.255.0.0" + # sf1.vm.network "private_network", ip: "10.3.1.101", netmask: "255.255.0.0", auto_config: false + # sf1.vm.provider :libvirt do |libvirt| + # libvirt.cpus = 2 + # libvirt.memory = 4096 + # end + # end + +end diff --git a/codecov.yml b/codecov.yml new file mode 100644 index 0000000..403374f --- /dev/null +++ b/codecov.yml @@ -0,0 +1,13 @@ +coverage: + status: + project: + default: + target: auto + threshold: 0.5% + patch: + default: + target: 75% + threshold: 0% + if_ci_failed: ignore + ignore: + - jcloud/jcloud/report/** diff --git a/commitlint.config.js b/commitlint.config.js new file mode 100644 index 0000000..21be290 --- /dev/null +++ b/commitlint.config.js @@ -0,0 +1,11 @@ +module.exports = { + extends: ['@commitlint/config-conventional'], + rules: { + 'header-max-length': [2, 'always', 72], + 'subject-case': [2, 'always', 'sentence-case'], + 'scope-case': [2, 'always', 'kebab-case'], + 'body-case': [2, 'always', 'sentence-case'], + 'body-leading-blank': [2, 'always'], + 'footer-leading-blank': [2, 'always'], + }, +}; diff --git a/dashboard/.browserslistrc b/dashboard/.browserslistrc new file 
mode 100644
index 0000000..81d8c1b
--- /dev/null
+++ b/dashboard/.browserslistrc
@@ -0,0 +1,2 @@
+defaults
+not IE 11
diff --git a/dashboard/.eslintrc.js b/dashboard/.eslintrc.js
new file mode 100644
index 0000000..cbe52e2
--- /dev/null
+++ b/dashboard/.eslintrc.js
@@ -0,0 +1,14 @@
+module.exports = {
+	root: true,
+	env: {
+		node: true
+	},
+	extends: ['plugin:vue/essential', 'eslint:recommended', '@vue/prettier'],
+	parserOptions: {
+		parser: 'babel-eslint'
+	},
+	rules: {
+		'no-console': process.env.NODE_ENV === 'production' ? 'error' : 'off',
+		'no-debugger': process.env.NODE_ENV === 'production' ? 'error' : 'off'
+	}
+};
diff --git a/dashboard/.gitignore b/dashboard/.gitignore
new file mode 100644
index 0000000..0ffe3b1
--- /dev/null
+++ b/dashboard/.gitignore
@@ -0,0 +1,22 @@
+.DS_Store
+node_modules
+/dist
+/coverage
+
+# local env files
+.env.local
+.env.*.local
+
+# Log files
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
+
+# Editor directories and files
+.idea
+.vscode
+*.suo
+*.ntvs*
+*.njsproj
+*.sln
+*.sw?
diff --git a/dashboard/.prettierrc.json b/dashboard/.prettierrc.json
new file mode 100644
index 0000000..b7695f8
--- /dev/null
+++ b/dashboard/.prettierrc.json
@@ -0,0 +1,4 @@
+{
+	"singleQuote": true,
+	"useTabs": true
+}
diff --git a/dashboard/README.md b/dashboard/README.md
new file mode 100644
index 0000000..8713190
--- /dev/null
+++ b/dashboard/README.md
@@ -0,0 +1,64 @@
+# Dashboard
+
+Dashboard is a VueJS application that is the face of 今果 Jingrow. This is what the end users (tenants) see and use to manage their resources. Tenants do not have access to the desk, so this is their dashboard for managing sites, apps, updates, etc.
+
+Technologies at the heart of the dashboard:
+
+1. [VueJS 3](https://vuejs.org/): The JavaScript framework of our choice.
+
+2. [TailwindCSS 3](https://tailwindcss.com/): We love it.
+
+3. [ViteJS](https://vitejs.dev/guide/): Build tooling for dev server and build command.
+
+4. [Feather Icons](https://feathericons.com/): Those Shiny & Crisp Open Source icons.
+
+## Development
+
+We use Vite's development server, which gives us super-fast hot reload and more.
+
+### Running the development server
+
+Run:
+
+```bash
+yarn run dev
+```
+
+> Note: If you are getting `CSRFTokenError` on your local development machine, please add the following key-value pair to your site_config.json
+>
+> ```json
+> "ignore_csrf": 1
+> ```
+
+### Proxy
+
+While running the Vite dev server, requests to paths like `/app`, `/files` and `/api` are redirected to the actual site inside the bench. This makes sure these paths and other backend APIs keep working properly. You can check the [proxyOptions.js](./proxyOptions.js) file to see how the proxying happens. These options are then loaded and used in the [vite config](./vite.config.js) file. A rough sketch of such a proxy block is included further below.
+
+## Testing
+
+There is a separate setup for testing the frontend.
+
+### The Stack
+
+1. [MSW](https://mswjs.io/)
+
+2. [Vitest](https://vitest.dev/)
+
+### Running the tests
+
+```bash
+yarn run test
+```
+
+The tests run in CI too.
+
+## Learning More
+
+You can start by taking a look at the [main.js](./src/main.js) file. This is where the VueJS app is initialized and the below things are attached (registered) to the instance:
+
+1. Vue Router
+2. Plugins
+3. Controllers
+4. Global Components
+
+The logic to register each of the above is in its own separate file; you can take a look at the imports as required. Until we have more docs, you will have to dig into some `js` and `vue` files.
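As referenced in the Proxy section above, here is a rough, illustrative sketch of what a Vite proxy block of that shape can look like. It is not the repository's actual configuration: the real rules live in `proxyOptions.js`, and the target port and the exact path list below are assumptions made only for this example.

```js
// Illustrative sketch only; the real proxy rules live in proxyOptions.js and
// are imported into vite.config.js. The port (8000) and the path list below
// are assumptions, not the project's actual values.
import { defineConfig } from 'vite';

const proxyOptions = {
	// Forward backend paths to a locally running bench site.
	'^/(app|api|files|assets)': {
		target: 'http://127.0.0.1:8000', // assumed local bench port
		ws: true, // also proxy WebSocket traffic (e.g. socket.io)
		changeOrigin: true // rewrite the Host header to match the target
	}
};

export default defineConfig({
	server: {
		proxy: proxyOptions
	}
});
```

Since the README states that the options are loaded into the Vite config from [proxyOptions.js](./proxyOptions.js), any change to the proxy behaviour belongs in that file rather than inline in [vite.config.js](./vite.config.js).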
If you find something that you can add here, feel free to raise a PR! diff --git a/dashboard/babel.config.js b/dashboard/babel.config.js new file mode 100644 index 0000000..7d5adcf --- /dev/null +++ b/dashboard/babel.config.js @@ -0,0 +1,3 @@ +module.exports = { + presets: ['@babel/preset-env'] +}; diff --git a/dashboard/generateThemeConfig.cjs b/dashboard/generateThemeConfig.cjs new file mode 100644 index 0000000..279b853 --- /dev/null +++ b/dashboard/generateThemeConfig.cjs @@ -0,0 +1,10 @@ +/** + * This node script resolves the tailwind config and dumps it as a json in + * tailwind.theme.json which is later imported into the app. + */ +let fs = require('fs'); +let resolveConfig = require('tailwindcss/resolveConfig'); +let config = require('./tailwind.config.cjs'); +let { theme } = resolveConfig(config); + +fs.writeFileSync('./tailwind.theme.json', JSON.stringify(theme, null, 2)); diff --git a/dashboard/index.html b/dashboard/index.html new file mode 100644 index 0000000..a95bde5 --- /dev/null +++ b/dashboard/index.html @@ -0,0 +1,34 @@ + + + + + + + 今果 Jingrow + + + + + + + +
+ + +
+
+ + + + + + diff --git a/dashboard/jsconfig.json b/dashboard/jsconfig.json new file mode 100644 index 0000000..8ab59b4 --- /dev/null +++ b/dashboard/jsconfig.json @@ -0,0 +1,9 @@ +{ + "include": ["./src/**/*", "src2/components/AddressableErrorDialog.vue"], + "compilerOptions": { + "baseUrl": ".", + "paths": { + "@/*": ["src/*"] + } + } +} diff --git a/dashboard/package.json b/dashboard/package.json new file mode 100644 index 0000000..25a1079 --- /dev/null +++ b/dashboard/package.json @@ -0,0 +1,95 @@ +{ + "name": "dashboard", + "version": "0.1.0", + "private": true, + "type": "module", + "scripts": { + "dev": "yarn generate-theme-config && vite", + "build": "yarn generate-theme-config && vite build --base=/assets/jcloud/dashboard/ && yarn copy-html-entry", + "copy-html-entry": "cp ../jcloud/public/dashboard/index.html ../jcloud/www/dashboard.html", + "generate-theme-config": "node ./generateThemeConfig.cjs", + "test": "vitest", + "coverage": "vitest run --coverage", + "lint": "eslint src" + }, + "dependencies": { + "@codemirror/autocomplete": "^6.18.1", + "@codemirror/lang-sql": "^6.8.0", + "@headlessui/vue": "^1.7.14", + "@popperjs/core": "^2.11.2", + "@sentry/vite-plugin": "^2.19.0", + "@sentry/vue": "^8.10.0", + "@stripe/stripe-js": "^1.3.0", + "@tailwindcss/container-queries": "^0.1.1", + "@tanstack/vue-table": "^8.20.5", + "@vueuse/components": "^10.7.0", + "@vueuse/core": "^10.3.0", + "codemirror": "^6.0.1", + "core-js": "^3.6.4", + "dayjs": "^1.10.7", + "echarts": "^5.4.3", + "feather-icons": "^4.26.0", + "jingrow-charts": "http://npm.jingrow.com:105/jingrow-charts-2.0.0-rc22.tgz", + "jingrow-ui": "http://npm.jingrow.com:105/jingrow-ui-0.1.108.tgz", + "fuse.js": "6.6.2", + "libarchive.js": "^1.3.0", + "lodash": "^4.17.19", + "luxon": "^1.22.0", + "markdown-it": "^12.3.2", + "papaparse": "^5.4.1", + "qrcode": "^1.5.4", + "register-service-worker": "^1.6.2", + "socket.io-client": "^4.5.1", + "sql-formatter": "^15.4.10", + "unplugin-icons": "^0.17.0", + "unplugin-vue-components": "^0.25.2", + "vue": "^3.4.12", + "vue-codemirror": "^6.1.1", + "vue-echarts": "^6.6.1", + "vue-qrcode": "^2.2.2", + "vue-router": "^4.0.5", + "vue-sonner": "^1.2.5" + }, + "devDependencies": { + "@iconify/json": "^2.2.123", + "@tailwindcss/forms": "^0.4.0", + "@tailwindcss/postcss7-compat": "^2.0.2", + "@tailwindcss/typography": "^0.5.1", + "@vitejs/plugin-legacy": "^4.1.1", + "@vitejs/plugin-vue": "^5.0.3", + "@vitejs/plugin-vue-jsx": "^3.1.0", + "@vue/compiler-sfc": "^3.1.0", + "@vue/eslint-config-prettier": "^6.0.0", + "@vue/test-utils": "^2.0.0-rc.19", + "autoprefixer": "^10.4.2", + "babel-eslint": "^10.0.3", + "c8": "^7.11.0", + "eslint": "^6.7.2", + "eslint-plugin-prettier": "^3.1.1", + "eslint-plugin-vue": "^6.2.2", + "jsdom": "^19.0.0", + "lint-staged": "^9.5.0", + "msw": "^0.36.8", + "node-fetch": "^3.2.10", + "postcss": "^8.4.6", + "postcss-easy-import": "^4.0.0", + "prettier": "^2.5.1", + "prettier-plugin-tailwindcss": "^0.1.8", + "tailwindcss": "^3.2", + "typescript": "^5.4.3", + "vite": "5.0.13", + "vite-plugin-rewrite-all": "^1.0.1", + "vitest": "^0.9.3", + "vue-tsc": "^2.0.7", + "yorkie": "^2.0.0" + }, + "gitHooks": { + "pre-commit": "lint-staged" + }, + "lint-staged": { + "*.{js,jsx,vue}": [ + "yarn lint", + "git add" + ] + } +} diff --git a/dashboard/postcss.config.cjs b/dashboard/postcss.config.cjs new file mode 100644 index 0000000..23f3595 --- /dev/null +++ b/dashboard/postcss.config.cjs @@ -0,0 +1,6 @@ +module.exports = { + plugins: { + tailwindcss: {}, + autoprefixer: {} + } +}; diff 
--git a/dashboard/public/favicon.png b/dashboard/public/favicon.png new file mode 100644 index 0000000..762383d Binary files /dev/null and b/dashboard/public/favicon.png differ diff --git a/dashboard/public/robots.txt b/dashboard/public/robots.txt new file mode 100644 index 0000000..a82d96e --- /dev/null +++ b/dashboard/public/robots.txt @@ -0,0 +1,2 @@ +User-agent: * +Disallow: diff --git a/dashboard/shims-global.d.ts b/dashboard/shims-global.d.ts new file mode 100644 index 0000000..75c1835 --- /dev/null +++ b/dashboard/shims-global.d.ts @@ -0,0 +1,7 @@ +declare global { + interface Window { + is_system_user?: boolean; + } +} + +export {}; diff --git a/dashboard/shims.d.ts b/dashboard/shims.d.ts new file mode 100644 index 0000000..52da0b7 --- /dev/null +++ b/dashboard/shims.d.ts @@ -0,0 +1,5 @@ +declare module '~icons/*' { + import { FunctionalComponent, SVGAttributes } from 'vue'; + const component: FunctionalComponent; + export default component; +} diff --git a/dashboard/src/App.vue b/dashboard/src/App.vue new file mode 100644 index 0000000..9b01c49 --- /dev/null +++ b/dashboard/src/App.vue @@ -0,0 +1,74 @@ + + + diff --git a/dashboard/src/assets/404.png b/dashboard/src/assets/404.png new file mode 100644 index 0000000..1cbf7ee Binary files /dev/null and b/dashboard/src/assets/404.png differ diff --git a/dashboard/src/assets/Inter/Inter-Black.woff b/dashboard/src/assets/Inter/Inter-Black.woff new file mode 100644 index 0000000..c7737ed Binary files /dev/null and b/dashboard/src/assets/Inter/Inter-Black.woff differ diff --git a/dashboard/src/assets/Inter/Inter-Black.woff2 b/dashboard/src/assets/Inter/Inter-Black.woff2 new file mode 100644 index 0000000..b16b995 Binary files /dev/null and b/dashboard/src/assets/Inter/Inter-Black.woff2 differ diff --git a/dashboard/src/assets/Inter/Inter-BlackItalic.woff b/dashboard/src/assets/Inter/Inter-BlackItalic.woff new file mode 100644 index 0000000..b5f1447 Binary files /dev/null and b/dashboard/src/assets/Inter/Inter-BlackItalic.woff differ diff --git a/dashboard/src/assets/Inter/Inter-BlackItalic.woff2 b/dashboard/src/assets/Inter/Inter-BlackItalic.woff2 new file mode 100644 index 0000000..a3f1b70 Binary files /dev/null and b/dashboard/src/assets/Inter/Inter-BlackItalic.woff2 differ diff --git a/dashboard/src/assets/Inter/Inter-Bold.woff b/dashboard/src/assets/Inter/Inter-Bold.woff new file mode 100644 index 0000000..e384555 Binary files /dev/null and b/dashboard/src/assets/Inter/Inter-Bold.woff differ diff --git a/dashboard/src/assets/Inter/Inter-Bold.woff2 b/dashboard/src/assets/Inter/Inter-Bold.woff2 new file mode 100644 index 0000000..835dd49 Binary files /dev/null and b/dashboard/src/assets/Inter/Inter-Bold.woff2 differ diff --git a/dashboard/src/assets/Inter/Inter-BoldItalic.woff b/dashboard/src/assets/Inter/Inter-BoldItalic.woff new file mode 100644 index 0000000..ffac3f5 Binary files /dev/null and b/dashboard/src/assets/Inter/Inter-BoldItalic.woff differ diff --git a/dashboard/src/assets/Inter/Inter-BoldItalic.woff2 b/dashboard/src/assets/Inter/Inter-BoldItalic.woff2 new file mode 100644 index 0000000..1a41a14 Binary files /dev/null and b/dashboard/src/assets/Inter/Inter-BoldItalic.woff2 differ diff --git a/dashboard/src/assets/Inter/Inter-ExtraBold.woff b/dashboard/src/assets/Inter/Inter-ExtraBold.woff new file mode 100644 index 0000000..885ac94 Binary files /dev/null and b/dashboard/src/assets/Inter/Inter-ExtraBold.woff differ diff --git a/dashboard/src/assets/Inter/Inter-ExtraBold.woff2 
b/dashboard/src/assets/Inter/Inter-ExtraBold.woff2 new file mode 100644 index 0000000..ae956b1 Binary files /dev/null and b/dashboard/src/assets/Inter/Inter-ExtraBold.woff2 differ diff --git a/dashboard/src/assets/Inter/Inter-ExtraBoldItalic.woff b/dashboard/src/assets/Inter/Inter-ExtraBoldItalic.woff new file mode 100644 index 0000000..d6cf862 Binary files /dev/null and b/dashboard/src/assets/Inter/Inter-ExtraBoldItalic.woff differ diff --git a/dashboard/src/assets/Inter/Inter-ExtraBoldItalic.woff2 b/dashboard/src/assets/Inter/Inter-ExtraBoldItalic.woff2 new file mode 100644 index 0000000..8657899 Binary files /dev/null and b/dashboard/src/assets/Inter/Inter-ExtraBoldItalic.woff2 differ diff --git a/dashboard/src/assets/Inter/Inter-ExtraLight.woff b/dashboard/src/assets/Inter/Inter-ExtraLight.woff new file mode 100644 index 0000000..ff76919 Binary files /dev/null and b/dashboard/src/assets/Inter/Inter-ExtraLight.woff differ diff --git a/dashboard/src/assets/Inter/Inter-ExtraLight.woff2 b/dashboard/src/assets/Inter/Inter-ExtraLight.woff2 new file mode 100644 index 0000000..694b2df Binary files /dev/null and b/dashboard/src/assets/Inter/Inter-ExtraLight.woff2 differ diff --git a/dashboard/src/assets/Inter/Inter-ExtraLightItalic.woff b/dashboard/src/assets/Inter/Inter-ExtraLightItalic.woff new file mode 100644 index 0000000..c6ed13a Binary files /dev/null and b/dashboard/src/assets/Inter/Inter-ExtraLightItalic.woff differ diff --git a/dashboard/src/assets/Inter/Inter-ExtraLightItalic.woff2 b/dashboard/src/assets/Inter/Inter-ExtraLightItalic.woff2 new file mode 100644 index 0000000..9a7bd11 Binary files /dev/null and b/dashboard/src/assets/Inter/Inter-ExtraLightItalic.woff2 differ diff --git a/dashboard/src/assets/Inter/Inter-Italic.woff b/dashboard/src/assets/Inter/Inter-Italic.woff new file mode 100644 index 0000000..4fdb59d Binary files /dev/null and b/dashboard/src/assets/Inter/Inter-Italic.woff differ diff --git a/dashboard/src/assets/Inter/Inter-Italic.woff2 b/dashboard/src/assets/Inter/Inter-Italic.woff2 new file mode 100644 index 0000000..deca637 Binary files /dev/null and b/dashboard/src/assets/Inter/Inter-Italic.woff2 differ diff --git a/dashboard/src/assets/Inter/Inter-Light.woff b/dashboard/src/assets/Inter/Inter-Light.woff new file mode 100644 index 0000000..42850ac Binary files /dev/null and b/dashboard/src/assets/Inter/Inter-Light.woff differ diff --git a/dashboard/src/assets/Inter/Inter-Light.woff2 b/dashboard/src/assets/Inter/Inter-Light.woff2 new file mode 100644 index 0000000..65a7dad Binary files /dev/null and b/dashboard/src/assets/Inter/Inter-Light.woff2 differ diff --git a/dashboard/src/assets/Inter/Inter-LightItalic.woff b/dashboard/src/assets/Inter/Inter-LightItalic.woff new file mode 100644 index 0000000..c4ed9a9 Binary files /dev/null and b/dashboard/src/assets/Inter/Inter-LightItalic.woff differ diff --git a/dashboard/src/assets/Inter/Inter-LightItalic.woff2 b/dashboard/src/assets/Inter/Inter-LightItalic.woff2 new file mode 100644 index 0000000..555fc55 Binary files /dev/null and b/dashboard/src/assets/Inter/Inter-LightItalic.woff2 differ diff --git a/dashboard/src/assets/Inter/Inter-Medium.woff b/dashboard/src/assets/Inter/Inter-Medium.woff new file mode 100644 index 0000000..495faef Binary files /dev/null and b/dashboard/src/assets/Inter/Inter-Medium.woff differ diff --git a/dashboard/src/assets/Inter/Inter-Medium.woff2 b/dashboard/src/assets/Inter/Inter-Medium.woff2 new file mode 100644 index 0000000..871ce4c Binary files /dev/null and 
b/dashboard/src/assets/Inter/Inter-Medium.woff2 differ diff --git a/dashboard/src/assets/Inter/Inter-MediumItalic.woff b/dashboard/src/assets/Inter/Inter-MediumItalic.woff new file mode 100644 index 0000000..389c7a2 Binary files /dev/null and b/dashboard/src/assets/Inter/Inter-MediumItalic.woff differ diff --git a/dashboard/src/assets/Inter/Inter-MediumItalic.woff2 b/dashboard/src/assets/Inter/Inter-MediumItalic.woff2 new file mode 100644 index 0000000..aa80579 Binary files /dev/null and b/dashboard/src/assets/Inter/Inter-MediumItalic.woff2 differ diff --git a/dashboard/src/assets/Inter/Inter-Regular.woff b/dashboard/src/assets/Inter/Inter-Regular.woff new file mode 100644 index 0000000..fa7715d Binary files /dev/null and b/dashboard/src/assets/Inter/Inter-Regular.woff differ diff --git a/dashboard/src/assets/Inter/Inter-Regular.woff2 b/dashboard/src/assets/Inter/Inter-Regular.woff2 new file mode 100644 index 0000000..b52dd0a Binary files /dev/null and b/dashboard/src/assets/Inter/Inter-Regular.woff2 differ diff --git a/dashboard/src/assets/Inter/Inter-SemiBold.woff b/dashboard/src/assets/Inter/Inter-SemiBold.woff new file mode 100644 index 0000000..18d7749 Binary files /dev/null and b/dashboard/src/assets/Inter/Inter-SemiBold.woff differ diff --git a/dashboard/src/assets/Inter/Inter-SemiBold.woff2 b/dashboard/src/assets/Inter/Inter-SemiBold.woff2 new file mode 100644 index 0000000..ece5204 Binary files /dev/null and b/dashboard/src/assets/Inter/Inter-SemiBold.woff2 differ diff --git a/dashboard/src/assets/Inter/Inter-SemiBoldItalic.woff b/dashboard/src/assets/Inter/Inter-SemiBoldItalic.woff new file mode 100644 index 0000000..8ee6439 Binary files /dev/null and b/dashboard/src/assets/Inter/Inter-SemiBoldItalic.woff differ diff --git a/dashboard/src/assets/Inter/Inter-SemiBoldItalic.woff2 b/dashboard/src/assets/Inter/Inter-SemiBoldItalic.woff2 new file mode 100644 index 0000000..b32c0ba Binary files /dev/null and b/dashboard/src/assets/Inter/Inter-SemiBoldItalic.woff2 differ diff --git a/dashboard/src/assets/Inter/Inter-Thin.woff b/dashboard/src/assets/Inter/Inter-Thin.woff new file mode 100644 index 0000000..1a22286 Binary files /dev/null and b/dashboard/src/assets/Inter/Inter-Thin.woff differ diff --git a/dashboard/src/assets/Inter/Inter-Thin.woff2 b/dashboard/src/assets/Inter/Inter-Thin.woff2 new file mode 100644 index 0000000..c56bc7c Binary files /dev/null and b/dashboard/src/assets/Inter/Inter-Thin.woff2 differ diff --git a/dashboard/src/assets/Inter/Inter-ThinItalic.woff b/dashboard/src/assets/Inter/Inter-ThinItalic.woff new file mode 100644 index 0000000..d8ec837 Binary files /dev/null and b/dashboard/src/assets/Inter/Inter-ThinItalic.woff differ diff --git a/dashboard/src/assets/Inter/Inter-ThinItalic.woff2 b/dashboard/src/assets/Inter/Inter-ThinItalic.woff2 new file mode 100644 index 0000000..eca5608 Binary files /dev/null and b/dashboard/src/assets/Inter/Inter-ThinItalic.woff2 differ diff --git a/dashboard/src/assets/Inter/Inter-italic.var.woff2 b/dashboard/src/assets/Inter/Inter-italic.var.woff2 new file mode 100644 index 0000000..1f5d926 Binary files /dev/null and b/dashboard/src/assets/Inter/Inter-italic.var.woff2 differ diff --git a/dashboard/src/assets/Inter/Inter-roman.var.woff2 b/dashboard/src/assets/Inter/Inter-roman.var.woff2 new file mode 100644 index 0000000..05621d8 Binary files /dev/null and b/dashboard/src/assets/Inter/Inter-roman.var.woff2 differ diff --git a/dashboard/src/assets/Inter/Inter.var.woff2 b/dashboard/src/assets/Inter/Inter.var.woff2 new file mode 
100644 index 0000000..46bb515 Binary files /dev/null and b/dashboard/src/assets/Inter/Inter.var.woff2 differ diff --git a/dashboard/src/assets/Inter/inter.css b/dashboard/src/assets/Inter/inter.css new file mode 100644 index 0000000..e21671c --- /dev/null +++ b/dashboard/src/assets/Inter/inter.css @@ -0,0 +1,152 @@ +@font-face { + font-family: 'Inter'; + font-style: normal; + font-weight: 100; + font-display: swap; + src: url('Inter-Thin.woff2?v=3.12') format('woff2'), + url('Inter-Thin.woff?v=3.12') format('woff'); +} +@font-face { + font-family: 'Inter'; + font-style: italic; + font-weight: 100; + font-display: swap; + src: url('Inter-ThinItalic.woff2?v=3.12') format('woff2'), + url('Inter-ThinItalic.woff?v=3.12') format('woff'); +} + +@font-face { + font-family: 'Inter'; + font-style: normal; + font-weight: 200; + font-display: swap; + src: url('Inter-ExtraLight.woff2?v=3.12') format('woff2'), + url('Inter-ExtraLight.woff?v=3.12') format('woff'); +} +@font-face { + font-family: 'Inter'; + font-style: italic; + font-weight: 200; + font-display: swap; + src: url('Inter-ExtraLightItalic.woff2?v=3.12') format('woff2'), + url('Inter-ExtraLightItalic.woff?v=3.12') format('woff'); +} + +@font-face { + font-family: 'Inter'; + font-style: normal; + font-weight: 300; + font-display: swap; + src: url('Inter-Light.woff2?v=3.12') format('woff2'), + url('Inter-Light.woff?v=3.12') format('woff'); +} +@font-face { + font-family: 'Inter'; + font-style: italic; + font-weight: 300; + font-display: swap; + src: url('Inter-LightItalic.woff2?v=3.12') format('woff2'), + url('Inter-LightItalic.woff?v=3.12') format('woff'); +} + +@font-face { + font-family: 'Inter'; + font-style: normal; + font-weight: 400; + font-display: swap; + src: url('Inter-Regular.woff2?v=3.12') format('woff2'), + url('Inter-Regular.woff?v=3.12') format('woff'); +} +@font-face { + font-family: 'Inter'; + font-style: italic; + font-weight: 400; + font-display: swap; + src: url('Inter-Italic.woff2?v=3.12') format('woff2'), + url('Inter-Italic.woff?v=3.12') format('woff'); +} + +@font-face { + font-family: 'Inter'; + font-style: normal; + font-weight: 500; + font-display: swap; + src: url('Inter-Medium.woff2?v=3.12') format('woff2'), + url('Inter-Medium.woff?v=3.12') format('woff'); +} +@font-face { + font-family: 'Inter'; + font-style: italic; + font-weight: 500; + font-display: swap; + src: url('Inter-MediumItalic.woff2?v=3.12') format('woff2'), + url('Inter-MediumItalic.woff?v=3.12') format('woff'); +} + +@font-face { + font-family: 'Inter'; + font-style: normal; + font-weight: 600; + font-display: swap; + src: url('Inter-SemiBold.woff2?v=3.12') format('woff2'), + url('Inter-SemiBold.woff?v=3.12') format('woff'); +} +@font-face { + font-family: 'Inter'; + font-style: italic; + font-weight: 600; + font-display: swap; + src: url('Inter-SemiBoldItalic.woff2?v=3.12') format('woff2'), + url('Inter-SemiBoldItalic.woff?v=3.12') format('woff'); +} + +@font-face { + font-family: 'Inter'; + font-style: normal; + font-weight: 700; + font-display: swap; + src: url('Inter-Bold.woff2?v=3.12') format('woff2'), + url('Inter-Bold.woff?v=3.12') format('woff'); +} +@font-face { + font-family: 'Inter'; + font-style: italic; + font-weight: 700; + font-display: swap; + src: url('Inter-BoldItalic.woff2?v=3.12') format('woff2'), + url('Inter-BoldItalic.woff?v=3.12') format('woff'); +} + +@font-face { + font-family: 'Inter'; + font-style: normal; + font-weight: 800; + font-display: swap; + src: url('Inter-ExtraBold.woff2?v=3.12') format('woff2'), + 
url('Inter-ExtraBold.woff?v=3.12') format('woff'); +} +@font-face { + font-family: 'Inter'; + font-style: italic; + font-weight: 800; + font-display: swap; + src: url('Inter-ExtraBoldItalic.woff2?v=3.12') format('woff2'), + url('Inter-ExtraBoldItalic.woff?v=3.12') format('woff'); +} + +@font-face { + font-family: 'Inter'; + font-style: normal; + font-weight: 900; + font-display: swap; + src: url('Inter-Black.woff2?v=3.12') format('woff2'), + url('Inter-Black.woff?v=3.12') format('woff'); +} +@font-face { + font-family: 'Inter'; + font-style: italic; + font-weight: 900; + font-display: swap; + src: url('Inter-BlackItalic.woff2?v=3.12') format('woff2'), + url('Inter-BlackItalic.woff?v=3.12') format('woff'); +} diff --git a/dashboard/src/assets/jerp-logo.svg b/dashboard/src/assets/jerp-logo.svg new file mode 100644 index 0000000..814abff --- /dev/null +++ b/dashboard/src/assets/jerp-logo.svg @@ -0,0 +1,12 @@ + + + + jerp-logo + Created with Sketch. + + + + \ No newline at end of file diff --git a/dashboard/src/assets/jingrow-cloud.svg b/dashboard/src/assets/jingrow-cloud.svg new file mode 100644 index 0000000..2947f78 --- /dev/null +++ b/dashboard/src/assets/jingrow-cloud.svg @@ -0,0 +1,4 @@ + + + + diff --git a/dashboard/src/assets/jingrow-framework-logo.png b/dashboard/src/assets/jingrow-framework-logo.png new file mode 100644 index 0000000..5049078 Binary files /dev/null and b/dashboard/src/assets/jingrow-framework-logo.png differ diff --git a/dashboard/src/assets/razorpay.svg b/dashboard/src/assets/razorpay.svg new file mode 100644 index 0000000..db7102d --- /dev/null +++ b/dashboard/src/assets/razorpay.svg @@ -0,0 +1,12 @@ + + + + + + + + + + + + diff --git a/dashboard/src/assets/stripe.svg b/dashboard/src/assets/stripe.svg new file mode 100644 index 0000000..a7aed39 --- /dev/null +++ b/dashboard/src/assets/stripe.svg @@ -0,0 +1,24 @@ + + + + + + + + + + + + + + diff --git a/dashboard/src/assets/style.css b/dashboard/src/assets/style.css new file mode 100644 index 0000000..f102acf --- /dev/null +++ b/dashboard/src/assets/style.css @@ -0,0 +1,34 @@ +@import 'jingrow-ui/src/style.css'; + +@layer components { + /* Works on Firefox */ + * { + scrollbar-width: thin; + scrollbar-color: #c0c6cc #ebeef0; + } + + html { + scrollbar-width: auto; + } + + /* Works on Chrome, Edge, and Safari */ + *::-webkit-scrollbar-thumb { + background: #c0c6cc; + border-radius: 6px; + } + + *::-webkit-scrollbar-track, + *::-webkit-scrollbar-corner { + background: #ebeef0; + } + + *::-webkit-scrollbar { + width: 6px; + height: 6px; + } + + body::-webkit-scrollbar { + width: 12px; + height: 12px; + } +} diff --git a/dashboard/src/components/AlertBenchUpdate.vue b/dashboard/src/components/AlertBenchUpdate.vue new file mode 100644 index 0000000..d561b87 --- /dev/null +++ b/dashboard/src/components/AlertBenchUpdate.vue @@ -0,0 +1,159 @@ + + diff --git a/dashboard/src/components/AlertBillingInformation.vue b/dashboard/src/components/AlertBillingInformation.vue new file mode 100644 index 0000000..e4003a1 --- /dev/null +++ b/dashboard/src/components/AlertBillingInformation.vue @@ -0,0 +1,65 @@ + + diff --git a/dashboard/src/components/AlertSiteActivation.vue b/dashboard/src/components/AlertSiteActivation.vue new file mode 100644 index 0000000..4f7427c --- /dev/null +++ b/dashboard/src/components/AlertSiteActivation.vue @@ -0,0 +1,32 @@ + + diff --git a/dashboard/src/components/AlertSiteUpdate.vue b/dashboard/src/components/AlertSiteUpdate.vue new file mode 100644 index 0000000..81747c5 --- /dev/null +++ 
b/dashboard/src/components/AlertSiteUpdate.vue @@ -0,0 +1,165 @@ + + diff --git a/dashboard/src/components/AlertUpdate.vue b/dashboard/src/components/AlertUpdate.vue new file mode 100644 index 0000000..81a305a --- /dev/null +++ b/dashboard/src/components/AlertUpdate.vue @@ -0,0 +1,172 @@ + + diff --git a/dashboard/src/components/AppPlanCard.vue b/dashboard/src/components/AppPlanCard.vue new file mode 100644 index 0000000..5fd6d01 --- /dev/null +++ b/dashboard/src/components/AppPlanCard.vue @@ -0,0 +1,98 @@ + + + diff --git a/dashboard/src/components/AppSourceSelector.vue b/dashboard/src/components/AppSourceSelector.vue new file mode 100644 index 0000000..1bb0f89 --- /dev/null +++ b/dashboard/src/components/AppSourceSelector.vue @@ -0,0 +1,110 @@ + + diff --git a/dashboard/src/components/AppSubscriptionSummary.vue b/dashboard/src/components/AppSubscriptionSummary.vue new file mode 100644 index 0000000..bfc4ce4 --- /dev/null +++ b/dashboard/src/components/AppSubscriptionSummary.vue @@ -0,0 +1,75 @@ + + + diff --git a/dashboard/src/components/AppUpdateCard.vue b/dashboard/src/components/AppUpdateCard.vue new file mode 100644 index 0000000..7fa3761 --- /dev/null +++ b/dashboard/src/components/AppUpdateCard.vue @@ -0,0 +1,121 @@ + + + diff --git a/dashboard/src/components/AvatarUploader.vue b/dashboard/src/components/AvatarUploader.vue new file mode 100644 index 0000000..bd7cf61 --- /dev/null +++ b/dashboard/src/components/AvatarUploader.vue @@ -0,0 +1,32 @@ + + diff --git a/dashboard/src/components/BackupFilesUploader.vue b/dashboard/src/components/BackupFilesUploader.vue new file mode 100644 index 0000000..c403b58 --- /dev/null +++ b/dashboard/src/components/BackupFilesUploader.vue @@ -0,0 +1,147 @@ + + diff --git a/dashboard/src/components/BenchAppUpdates.vue b/dashboard/src/components/BenchAppUpdates.vue new file mode 100644 index 0000000..91dc401 --- /dev/null +++ b/dashboard/src/components/BenchAppUpdates.vue @@ -0,0 +1,105 @@ + + diff --git a/dashboard/src/components/BenchSiteUpdates.vue b/dashboard/src/components/BenchSiteUpdates.vue new file mode 100644 index 0000000..35fa9c4 --- /dev/null +++ b/dashboard/src/components/BenchSiteUpdates.vue @@ -0,0 +1,44 @@ + + diff --git a/dashboard/src/components/BillingInformationDialog.vue b/dashboard/src/components/BillingInformationDialog.vue new file mode 100644 index 0000000..b40d28a --- /dev/null +++ b/dashboard/src/components/BillingInformationDialog.vue @@ -0,0 +1,35 @@ + + diff --git a/dashboard/src/components/BuyPrepaidCredits.vue b/dashboard/src/components/BuyPrepaidCredits.vue new file mode 100644 index 0000000..27af80b --- /dev/null +++ b/dashboard/src/components/BuyPrepaidCredits.vue @@ -0,0 +1,201 @@ + + diff --git a/dashboard/src/components/CardDetails.vue b/dashboard/src/components/CardDetails.vue new file mode 100644 index 0000000..315ff81 --- /dev/null +++ b/dashboard/src/components/CardDetails.vue @@ -0,0 +1,15 @@ + + diff --git a/dashboard/src/components/CardWithDetails.vue b/dashboard/src/components/CardWithDetails.vue new file mode 100644 index 0000000..cceb955 --- /dev/null +++ b/dashboard/src/components/CardWithDetails.vue @@ -0,0 +1,24 @@ + + diff --git a/dashboard/src/components/ChangeAppBranchDialog.vue b/dashboard/src/components/ChangeAppBranchDialog.vue new file mode 100644 index 0000000..9415c0b --- /dev/null +++ b/dashboard/src/components/ChangeAppBranchDialog.vue @@ -0,0 +1,116 @@ + + + diff --git a/dashboard/src/components/ChangeAppPlanSelector.vue b/dashboard/src/components/ChangeAppPlanSelector.vue new file mode 
100644 index 0000000..74e91fe --- /dev/null +++ b/dashboard/src/components/ChangeAppPlanSelector.vue @@ -0,0 +1,115 @@ + + + diff --git a/dashboard/src/components/ChangePaymentModeDialog.vue b/dashboard/src/components/ChangePaymentModeDialog.vue new file mode 100644 index 0000000..d3f15d9 --- /dev/null +++ b/dashboard/src/components/ChangePaymentModeDialog.vue @@ -0,0 +1,135 @@ + + diff --git a/dashboard/src/components/ClickToCopyField.vue b/dashboard/src/components/ClickToCopyField.vue new file mode 100644 index 0000000..2ea8eb9 --- /dev/null +++ b/dashboard/src/components/ClickToCopyField.vue @@ -0,0 +1,59 @@ + + + diff --git a/dashboard/src/components/CommandPalette.vue b/dashboard/src/components/CommandPalette.vue new file mode 100644 index 0000000..0f65956 --- /dev/null +++ b/dashboard/src/components/CommandPalette.vue @@ -0,0 +1,114 @@ + + + diff --git a/dashboard/src/components/ConfigEditor.vue b/dashboard/src/components/ConfigEditor.vue new file mode 100644 index 0000000..3b2c91e --- /dev/null +++ b/dashboard/src/components/ConfigEditor.vue @@ -0,0 +1,341 @@ + + + diff --git a/dashboard/src/components/ConfirmDialogs.vue b/dashboard/src/components/ConfirmDialogs.vue new file mode 100644 index 0000000..1f300b9 --- /dev/null +++ b/dashboard/src/components/ConfirmDialogs.vue @@ -0,0 +1,63 @@ + + + diff --git a/dashboard/src/components/DescriptionList.vue b/dashboard/src/components/DescriptionList.vue new file mode 100644 index 0000000..d583cfc --- /dev/null +++ b/dashboard/src/components/DescriptionList.vue @@ -0,0 +1,17 @@ + + + diff --git a/dashboard/src/components/FeatureList.vue b/dashboard/src/components/FeatureList.vue new file mode 100644 index 0000000..bee0e38 --- /dev/null +++ b/dashboard/src/components/FeatureList.vue @@ -0,0 +1,19 @@ + + + diff --git a/dashboard/src/components/FileUploader.vue b/dashboard/src/components/FileUploader.vue new file mode 100644 index 0000000..4498e67 --- /dev/null +++ b/dashboard/src/components/FileUploader.vue @@ -0,0 +1,118 @@ + + + diff --git a/dashboard/src/components/Form.vue b/dashboard/src/components/Form.vue new file mode 100644 index 0000000..05cd0de --- /dev/null +++ b/dashboard/src/components/Form.vue @@ -0,0 +1,130 @@ + + + diff --git a/dashboard/src/components/FrappeCloudLogo.vue b/dashboard/src/components/FrappeCloudLogo.vue new file mode 100644 index 0000000..07285f9 --- /dev/null +++ b/dashboard/src/components/FrappeCloudLogo.vue @@ -0,0 +1,95 @@ + diff --git a/dashboard/src/components/ImpersonateTeam.vue b/dashboard/src/components/ImpersonateTeam.vue new file mode 100644 index 0000000..cac4cb5 --- /dev/null +++ b/dashboard/src/components/ImpersonateTeam.vue @@ -0,0 +1,32 @@ + + + diff --git a/dashboard/src/components/InvoiceUsageCard.vue b/dashboard/src/components/InvoiceUsageCard.vue new file mode 100644 index 0000000..2f58c47 --- /dev/null +++ b/dashboard/src/components/InvoiceUsageCard.vue @@ -0,0 +1,39 @@ + + diff --git a/dashboard/src/components/InvoiceUsageTable.vue b/dashboard/src/components/InvoiceUsageTable.vue new file mode 100644 index 0000000..85e0f22 --- /dev/null +++ b/dashboard/src/components/InvoiceUsageTable.vue @@ -0,0 +1,150 @@ + + diff --git a/dashboard/src/components/Link.vue b/dashboard/src/components/Link.vue new file mode 100644 index 0000000..61733a0 --- /dev/null +++ b/dashboard/src/components/Link.vue @@ -0,0 +1,15 @@ + + diff --git a/dashboard/src/components/MarketplaceAppCard.vue b/dashboard/src/components/MarketplaceAppCard.vue new file mode 100644 index 0000000..f24d32d --- /dev/null +++ 
b/dashboard/src/components/MarketplaceAppCard.vue @@ -0,0 +1,30 @@ + + + diff --git a/dashboard/src/components/MarketplaceAppDescriptions.vue b/dashboard/src/components/MarketplaceAppDescriptions.vue new file mode 100644 index 0000000..3a64de0 --- /dev/null +++ b/dashboard/src/components/MarketplaceAppDescriptions.vue @@ -0,0 +1,184 @@ + + + diff --git a/dashboard/src/components/MarketplaceAppLinks.vue b/dashboard/src/components/MarketplaceAppLinks.vue new file mode 100644 index 0000000..a6f3d82 --- /dev/null +++ b/dashboard/src/components/MarketplaceAppLinks.vue @@ -0,0 +1,99 @@ + + + diff --git a/dashboard/src/components/MarketplaceAppProfile.vue b/dashboard/src/components/MarketplaceAppProfile.vue new file mode 100644 index 0000000..4fc23aa --- /dev/null +++ b/dashboard/src/components/MarketplaceAppProfile.vue @@ -0,0 +1,246 @@ + + + diff --git a/dashboard/src/components/MarketplaceAppReleaseList.vue b/dashboard/src/components/MarketplaceAppReleaseList.vue new file mode 100644 index 0000000..95c71c2 --- /dev/null +++ b/dashboard/src/components/MarketplaceAppReleaseList.vue @@ -0,0 +1,313 @@ + + + diff --git a/dashboard/src/components/MarketplaceAppScreenshots.vue b/dashboard/src/components/MarketplaceAppScreenshots.vue new file mode 100644 index 0000000..5d7e0bd --- /dev/null +++ b/dashboard/src/components/MarketplaceAppScreenshots.vue @@ -0,0 +1,86 @@ + + + diff --git a/dashboard/src/components/Modal.vue b/dashboard/src/components/Modal.vue new file mode 100644 index 0000000..85225a8 --- /dev/null +++ b/dashboard/src/components/Modal.vue @@ -0,0 +1,79 @@ + + + diff --git a/dashboard/src/components/Navbar.vue b/dashboard/src/components/Navbar.vue new file mode 100644 index 0000000..71ab073 --- /dev/null +++ b/dashboard/src/components/Navbar.vue @@ -0,0 +1,143 @@ + + + diff --git a/dashboard/src/components/NewAppRepositories.vue b/dashboard/src/components/NewAppRepositories.vue new file mode 100644 index 0000000..72f5789 --- /dev/null +++ b/dashboard/src/components/NewAppRepositories.vue @@ -0,0 +1,108 @@ + + diff --git a/dashboard/src/components/Notification.vue b/dashboard/src/components/Notification.vue new file mode 100644 index 0000000..6e1a82d --- /dev/null +++ b/dashboard/src/components/Notification.vue @@ -0,0 +1,59 @@ + + + diff --git a/dashboard/src/components/NotificationToasts.vue b/dashboard/src/components/NotificationToasts.vue new file mode 100644 index 0000000..5b75b22 --- /dev/null +++ b/dashboard/src/components/NotificationToasts.vue @@ -0,0 +1,20 @@ + + + diff --git a/dashboard/src/components/PageHeader.vue b/dashboard/src/components/PageHeader.vue new file mode 100644 index 0000000..42ad9fe --- /dev/null +++ b/dashboard/src/components/PageHeader.vue @@ -0,0 +1,17 @@ + + + diff --git a/dashboard/src/components/PlanIcon.vue b/dashboard/src/components/PlanIcon.vue new file mode 100644 index 0000000..1babbc2 --- /dev/null +++ b/dashboard/src/components/PlanIcon.vue @@ -0,0 +1,25 @@ + diff --git a/dashboard/src/components/Popover.vue b/dashboard/src/components/Popover.vue new file mode 100644 index 0000000..4866c8d --- /dev/null +++ b/dashboard/src/components/Popover.vue @@ -0,0 +1,194 @@ + + + + diff --git a/dashboard/src/components/PrepaidCreditsDialog.vue b/dashboard/src/components/PrepaidCreditsDialog.vue new file mode 100644 index 0000000..8e9f225 --- /dev/null +++ b/dashboard/src/components/PrepaidCreditsDialog.vue @@ -0,0 +1,225 @@ + + diff --git a/dashboard/src/components/PrinterIcon.vue b/dashboard/src/components/PrinterIcon.vue new file mode 100644 index 
0000000..6fd562a --- /dev/null +++ b/dashboard/src/components/PrinterIcon.vue @@ -0,0 +1,45 @@ + diff --git a/dashboard/src/components/ProgressArc.vue b/dashboard/src/components/ProgressArc.vue new file mode 100644 index 0000000..7c5103f --- /dev/null +++ b/dashboard/src/components/ProgressArc.vue @@ -0,0 +1,61 @@ + + diff --git a/dashboard/src/components/Report.vue b/dashboard/src/components/Report.vue new file mode 100644 index 0000000..c429995 --- /dev/null +++ b/dashboard/src/components/Report.vue @@ -0,0 +1,60 @@ + + diff --git a/dashboard/src/components/RichSelect.vue b/dashboard/src/components/RichSelect.vue new file mode 100644 index 0000000..3bfb922 --- /dev/null +++ b/dashboard/src/components/RichSelect.vue @@ -0,0 +1,88 @@ + + diff --git a/dashboard/src/components/SelectAppFromGithub.vue b/dashboard/src/components/SelectAppFromGithub.vue new file mode 100644 index 0000000..1e67618 --- /dev/null +++ b/dashboard/src/components/SelectAppFromGithub.vue @@ -0,0 +1,207 @@ + + + diff --git a/dashboard/src/components/SelectableCard.vue b/dashboard/src/components/SelectableCard.vue new file mode 100644 index 0000000..92719fe --- /dev/null +++ b/dashboard/src/components/SelectableCard.vue @@ -0,0 +1,32 @@ + + diff --git a/dashboard/src/components/ServerPlansTable.vue b/dashboard/src/components/ServerPlansTable.vue new file mode 100644 index 0000000..dda2020 --- /dev/null +++ b/dashboard/src/components/ServerPlansTable.vue @@ -0,0 +1,117 @@ + + + diff --git a/dashboard/src/components/Sidebar.vue b/dashboard/src/components/Sidebar.vue new file mode 100644 index 0000000..b8bcce4 --- /dev/null +++ b/dashboard/src/components/Sidebar.vue @@ -0,0 +1,253 @@ + + + diff --git a/dashboard/src/components/SiteAppUpdateCard.vue b/dashboard/src/components/SiteAppUpdateCard.vue new file mode 100644 index 0000000..3d18b93 --- /dev/null +++ b/dashboard/src/components/SiteAppUpdateCard.vue @@ -0,0 +1,74 @@ + + + diff --git a/dashboard/src/components/SiteAppUpdates.vue b/dashboard/src/components/SiteAppUpdates.vue new file mode 100644 index 0000000..7eff1cb --- /dev/null +++ b/dashboard/src/components/SiteAppUpdates.vue @@ -0,0 +1,32 @@ + + diff --git a/dashboard/src/components/SitePlansTable.vue b/dashboard/src/components/SitePlansTable.vue new file mode 100644 index 0000000..941acee --- /dev/null +++ b/dashboard/src/components/SitePlansTable.vue @@ -0,0 +1,75 @@ + + + diff --git a/dashboard/src/components/SiteRestoreSelector.vue b/dashboard/src/components/SiteRestoreSelector.vue new file mode 100644 index 0000000..53b3ff3 --- /dev/null +++ b/dashboard/src/components/SiteRestoreSelector.vue @@ -0,0 +1,34 @@ + + diff --git a/dashboard/src/components/SiteUpdateCard.vue b/dashboard/src/components/SiteUpdateCard.vue new file mode 100644 index 0000000..a6b9618 --- /dev/null +++ b/dashboard/src/components/SiteUpdateCard.vue @@ -0,0 +1,60 @@ + + + diff --git a/dashboard/src/components/StarRatingInput.vue b/dashboard/src/components/StarRatingInput.vue new file mode 100644 index 0000000..9837c64 --- /dev/null +++ b/dashboard/src/components/StarRatingInput.vue @@ -0,0 +1,80 @@ + + + + + diff --git a/dashboard/src/components/Steps.vue b/dashboard/src/components/Steps.vue new file mode 100644 index 0000000..ce1b51d --- /dev/null +++ b/dashboard/src/components/Steps.vue @@ -0,0 +1,90 @@ + + diff --git a/dashboard/src/components/StripeCard.vue b/dashboard/src/components/StripeCard.vue new file mode 100644 index 0000000..3854340 --- /dev/null +++ b/dashboard/src/components/StripeCard.vue @@ -0,0 +1,281 @@ + + + diff --git 
a/dashboard/src/components/StripeLogo.vue b/dashboard/src/components/StripeLogo.vue new file mode 100644 index 0000000..377ddfc --- /dev/null +++ b/dashboard/src/components/StripeLogo.vue @@ -0,0 +1,32 @@ + + diff --git a/dashboard/src/components/SwitchTeamDialog.vue b/dashboard/src/components/SwitchTeamDialog.vue new file mode 100644 index 0000000..2473b10 --- /dev/null +++ b/dashboard/src/components/SwitchTeamDialog.vue @@ -0,0 +1,34 @@ + + + diff --git a/dashboard/src/components/Table/Table.vue b/dashboard/src/components/Table/Table.vue new file mode 100644 index 0000000..cbd638c --- /dev/null +++ b/dashboard/src/components/Table/Table.vue @@ -0,0 +1,35 @@ + + + diff --git a/dashboard/src/components/Table/TableCell.vue b/dashboard/src/components/Table/TableCell.vue new file mode 100644 index 0000000..d55668d --- /dev/null +++ b/dashboard/src/components/Table/TableCell.vue @@ -0,0 +1,12 @@ + + + diff --git a/dashboard/src/components/Table/TableHeader.vue b/dashboard/src/components/Table/TableHeader.vue new file mode 100644 index 0000000..64e3567 --- /dev/null +++ b/dashboard/src/components/Table/TableHeader.vue @@ -0,0 +1,23 @@ + + + diff --git a/dashboard/src/components/Table/TableRow.vue b/dashboard/src/components/Table/TableRow.vue new file mode 100644 index 0000000..9182b4f --- /dev/null +++ b/dashboard/src/components/Table/TableRow.vue @@ -0,0 +1,21 @@ + + + diff --git a/dashboard/src/components/Tabs.vue b/dashboard/src/components/Tabs.vue new file mode 100644 index 0000000..7ac0619 --- /dev/null +++ b/dashboard/src/components/Tabs.vue @@ -0,0 +1,66 @@ + + + diff --git a/dashboard/src/components/Tags.vue b/dashboard/src/components/Tags.vue new file mode 100644 index 0000000..72480d7 --- /dev/null +++ b/dashboard/src/components/Tags.vue @@ -0,0 +1,158 @@ + + diff --git a/dashboard/src/components/WizardCard.vue b/dashboard/src/components/WizardCard.vue new file mode 100644 index 0000000..4f7e3d6 --- /dev/null +++ b/dashboard/src/components/WizardCard.vue @@ -0,0 +1,16 @@ + + diff --git a/dashboard/src/components/charts/BarChart.vue b/dashboard/src/components/charts/BarChart.vue new file mode 100644 index 0000000..78598ef --- /dev/null +++ b/dashboard/src/components/charts/BarChart.vue @@ -0,0 +1,225 @@ + + + diff --git a/dashboard/src/components/charts/LineChart.vue b/dashboard/src/components/charts/LineChart.vue new file mode 100644 index 0000000..e5dfc35 --- /dev/null +++ b/dashboard/src/components/charts/LineChart.vue @@ -0,0 +1,224 @@ + + + diff --git a/dashboard/src/components/charts/utils.js b/dashboard/src/components/charts/utils.js new file mode 100644 index 0000000..1046442 --- /dev/null +++ b/dashboard/src/components/charts/utils.js @@ -0,0 +1,19 @@ +function formatBytes(bytes, decimals = 2, current = 0) { + if (bytes === 0) return '0 Bytes'; + + const k = 1024; + const dm = decimals < 0 ? 
0 : decimals; + const sizes = ['Bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB']; + const i = Math.floor(Math.log(Math.abs(bytes)) / Math.log(k)); + + return ( + parseFloat((bytes / Math.pow(k, i)).toFixed(dm)) + ' ' + sizes[i + current] + ); +} + +function getUnit(value, seriesName) { + if (seriesName === 'bytes') return formatBytes(value); + else return `${+value.toFixed(2)} ${seriesName}`; +} + +export { formatBytes, getUnit }; diff --git a/dashboard/src/components/global/Alert.vue b/dashboard/src/components/global/Alert.vue new file mode 100644 index 0000000..08d4c72 --- /dev/null +++ b/dashboard/src/components/global/Alert.vue @@ -0,0 +1,44 @@ + + + diff --git a/dashboard/src/components/global/Badge.vue b/dashboard/src/components/global/Badge.vue new file mode 100644 index 0000000..a4bc03b --- /dev/null +++ b/dashboard/src/components/global/Badge.vue @@ -0,0 +1,64 @@ + + + diff --git a/dashboard/src/components/global/Breadcrumbs.vue b/dashboard/src/components/global/Breadcrumbs.vue new file mode 100644 index 0000000..3967774 --- /dev/null +++ b/dashboard/src/components/global/Breadcrumbs.vue @@ -0,0 +1,50 @@ + + diff --git a/dashboard/src/components/global/Card.vue b/dashboard/src/components/global/Card.vue new file mode 100644 index 0000000..dd1f60b --- /dev/null +++ b/dashboard/src/components/global/Card.vue @@ -0,0 +1,43 @@ + + diff --git a/dashboard/src/components/global/CircularCheckIcon.vue b/dashboard/src/components/global/CircularCheckIcon.vue new file mode 100644 index 0000000..a997e99 --- /dev/null +++ b/dashboard/src/components/global/CircularCheckIcon.vue @@ -0,0 +1,21 @@ + diff --git a/dashboard/src/components/global/FormatDate.vue b/dashboard/src/components/global/FormatDate.vue new file mode 100644 index 0000000..52482a7 --- /dev/null +++ b/dashboard/src/components/global/FormatDate.vue @@ -0,0 +1,28 @@ + diff --git a/dashboard/src/components/global/GrayCheckIcon.vue b/dashboard/src/components/global/GrayCheckIcon.vue new file mode 100644 index 0000000..19f107e --- /dev/null +++ b/dashboard/src/components/global/GrayCheckIcon.vue @@ -0,0 +1,21 @@ + diff --git a/dashboard/src/components/global/ListItem.vue b/dashboard/src/components/global/ListItem.vue new file mode 100644 index 0000000..9315d45 --- /dev/null +++ b/dashboard/src/components/global/ListItem.vue @@ -0,0 +1,31 @@ + + diff --git a/dashboard/src/components/global/OldDropdown.vue b/dashboard/src/components/global/OldDropdown.vue new file mode 100644 index 0000000..430f1e1 --- /dev/null +++ b/dashboard/src/components/global/OldDropdown.vue @@ -0,0 +1,211 @@ + + + diff --git a/dashboard/src/components/global/Section.vue b/dashboard/src/components/global/Section.vue new file mode 100644 index 0000000..4f46c72 --- /dev/null +++ b/dashboard/src/components/global/Section.vue @@ -0,0 +1,18 @@ + + + diff --git a/dashboard/src/components/global/SectionCard.vue b/dashboard/src/components/global/SectionCard.vue new file mode 100644 index 0000000..e41ae4d --- /dev/null +++ b/dashboard/src/components/global/SectionCard.vue @@ -0,0 +1,11 @@ + + + diff --git a/dashboard/src/components/global/SectionHeader.vue b/dashboard/src/components/global/SectionHeader.vue new file mode 100644 index 0000000..907dca7 --- /dev/null +++ b/dashboard/src/components/global/SectionHeader.vue @@ -0,0 +1,10 @@ + + + diff --git a/dashboard/src/components/global/Select.vue b/dashboard/src/components/global/Select.vue new file mode 100644 index 0000000..4dbddb6 --- /dev/null +++ b/dashboard/src/components/global/Select.vue @@ -0,0 +1,40 @@ + + 
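Aside: a minimal usage sketch (not part of the commit) for the byte-formatting helpers in dashboard/src/components/charts/utils.js above. The sample values are illustrative only, and the '@/...' import assumes the alias that dashboard/jsconfig.json maps to src/.
import { formatBytes, getUnit } from '@/components/charts/utils';

// formatBytes picks a binary unit (k = 1024) and rounds to at most `decimals` places (default 2).
formatBytes(0);                // '0 Bytes'
formatBytes(1536);             // '1.5 KB'
formatBytes(5 * 1024 ** 3, 1); // '5 GB'

// The third argument shifts the unit label, e.g. when the input is already expressed in KB.
formatBytes(2048, 2, 1);       // '2 MB'

// getUnit formats byte series via formatBytes and otherwise appends the series name.
getUnit(1048576, 'bytes');     // '1 MB'
getUnit(73.456, '%');          // '73.46 %'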
+ + diff --git a/dashboard/src/components/global/Spinner.vue b/dashboard/src/components/global/Spinner.vue new file mode 100644 index 0000000..f81c4a6 --- /dev/null +++ b/dashboard/src/components/global/Spinner.vue @@ -0,0 +1,27 @@ + + diff --git a/dashboard/src/components/global/SuccessCard.vue b/dashboard/src/components/global/SuccessCard.vue new file mode 100644 index 0000000..184279b --- /dev/null +++ b/dashboard/src/components/global/SuccessCard.vue @@ -0,0 +1,20 @@ + + + diff --git a/dashboard/src/components/global/outsideClickDirective.js b/dashboard/src/components/global/outsideClickDirective.js new file mode 100644 index 0000000..ea3f6dc --- /dev/null +++ b/dashboard/src/components/global/outsideClickDirective.js @@ -0,0 +1,26 @@ +let instances = []; + +function onDocumentClick(e, el, fn) { + let target = e.target; + if (el !== target && !el.contains(target)) { + fn(e); + } +} + +export default { + created(el, binding) { + el.dataset.outsideClickIndex = instances.length; + const fn = binding.value; + const click = function (e) { + onDocumentClick(e, el, fn); + }; + document.addEventListener('click', click); + instances.push(click); + }, + unmounted(el) { + const index = el.dataset.outsideClickIndex; + const handler = instances[index]; + document.removeEventListener('click', handler); + instances[index] = null; + } +}; diff --git a/dashboard/src/components/global/register.js b/dashboard/src/components/global/register.js new file mode 100644 index 0000000..dcd62dd --- /dev/null +++ b/dashboard/src/components/global/register.js @@ -0,0 +1,52 @@ +import { + Button, + FeatherIcon, + Tooltip, + LoadingIndicator, + LoadingText, + Dialog, + Link, + Input, + Avatar, + GreenCheckIcon, + Dropdown, + FormControl, + ErrorMessage, + Autocomplete +} from 'jingrow-ui'; +import outsideClickDirective from './outsideClickDirective'; + +let components = import.meta.glob('./*.vue', { eager: true }); // To get each component inside this folder + +let globalJingrowUIComponents = { + Button, + Avatar, + FeatherIcon, + Tooltip, + LoadingIndicator, + LoadingText, + Link, + Dialog, + Input, + GreenCheckIcon, + Dropdown, + FormControl, + ErrorMessage, + Autocomplete +}; + +export default function registerGlobalComponents(app) { + app.directive('on-outside-click', outsideClickDirective); + + for (let path in components) { + let component = components[path]; + let name = path.replace('./', '').replace('.vue', ''); + app.component(name, component.default || component); + } + + for (let key in globalJingrowUIComponents) { + app.component(key, globalJingrowUIComponents[key]); + } +} + +export { components }; diff --git a/dashboard/src/components/icons/AppsIcon.vue b/dashboard/src/components/icons/AppsIcon.vue new file mode 100644 index 0000000..4bd8a13 --- /dev/null +++ b/dashboard/src/components/icons/AppsIcon.vue @@ -0,0 +1,26 @@ + diff --git a/dashboard/src/components/icons/BenchIcon.vue b/dashboard/src/components/icons/BenchIcon.vue new file mode 100644 index 0000000..f3c76e6 --- /dev/null +++ b/dashboard/src/components/icons/BenchIcon.vue @@ -0,0 +1,58 @@ + diff --git a/dashboard/src/components/icons/BillingIcon.vue b/dashboard/src/components/icons/BillingIcon.vue new file mode 100644 index 0000000..c3c230c --- /dev/null +++ b/dashboard/src/components/icons/BillingIcon.vue @@ -0,0 +1,29 @@ + diff --git a/dashboard/src/components/icons/CheckCircleIcon.vue b/dashboard/src/components/icons/CheckCircleIcon.vue new file mode 100644 index 0000000..f3c27b1 --- /dev/null +++ 
b/dashboard/src/components/icons/CheckCircleIcon.vue @@ -0,0 +1,14 @@ + diff --git a/dashboard/src/components/icons/FrappeCloudLogo.vue b/dashboard/src/components/icons/FrappeCloudLogo.vue new file mode 100644 index 0000000..3b34086 --- /dev/null +++ b/dashboard/src/components/icons/FrappeCloudLogo.vue @@ -0,0 +1,8 @@ + diff --git a/dashboard/src/components/icons/GoogleIcon.vue b/dashboard/src/components/icons/GoogleIcon.vue new file mode 100644 index 0000000..6c59ad8 --- /dev/null +++ b/dashboard/src/components/icons/GoogleIcon.vue @@ -0,0 +1,25 @@ + diff --git a/dashboard/src/components/icons/GoogleIconSolid.vue b/dashboard/src/components/icons/GoogleIconSolid.vue new file mode 100644 index 0000000..533fe7b --- /dev/null +++ b/dashboard/src/components/icons/GoogleIconSolid.vue @@ -0,0 +1,15 @@ + diff --git a/dashboard/src/components/icons/JLogo.vue b/dashboard/src/components/icons/JLogo.vue new file mode 100644 index 0000000..025ee1e --- /dev/null +++ b/dashboard/src/components/icons/JLogo.vue @@ -0,0 +1,198 @@ + + \ No newline at end of file diff --git a/dashboard/src/components/icons/JingrowLogo.vue b/dashboard/src/components/icons/JingrowLogo.vue new file mode 100644 index 0000000..95720d4 --- /dev/null +++ b/dashboard/src/components/icons/JingrowLogo.vue @@ -0,0 +1,5 @@ + diff --git a/dashboard/src/components/icons/SecurityIcon.vue b/dashboard/src/components/icons/SecurityIcon.vue new file mode 100644 index 0000000..45acc5d --- /dev/null +++ b/dashboard/src/components/icons/SecurityIcon.vue @@ -0,0 +1,16 @@ + diff --git a/dashboard/src/components/icons/ServerIcon.vue b/dashboard/src/components/icons/ServerIcon.vue new file mode 100644 index 0000000..137d197 --- /dev/null +++ b/dashboard/src/components/icons/ServerIcon.vue @@ -0,0 +1,62 @@ + diff --git a/dashboard/src/components/icons/SettingsIcon.vue b/dashboard/src/components/icons/SettingsIcon.vue new file mode 100644 index 0000000..56256f5 --- /dev/null +++ b/dashboard/src/components/icons/SettingsIcon.vue @@ -0,0 +1,22 @@ + diff --git a/dashboard/src/components/icons/SiteIcon.vue b/dashboard/src/components/icons/SiteIcon.vue new file mode 100644 index 0000000..f31c20d --- /dev/null +++ b/dashboard/src/components/icons/SiteIcon.vue @@ -0,0 +1,43 @@ + diff --git a/dashboard/src/components/icons/SpacesIcon.vue b/dashboard/src/components/icons/SpacesIcon.vue new file mode 100644 index 0000000..152aca9 --- /dev/null +++ b/dashboard/src/components/icons/SpacesIcon.vue @@ -0,0 +1,17 @@ + diff --git a/dashboard/src/components/icons/cards/Amex.vue b/dashboard/src/components/icons/cards/Amex.vue new file mode 100644 index 0000000..b0a6ad4 --- /dev/null +++ b/dashboard/src/components/icons/cards/Amex.vue @@ -0,0 +1,20 @@ + diff --git a/dashboard/src/components/icons/cards/Generic.vue b/dashboard/src/components/icons/cards/Generic.vue new file mode 100644 index 0000000..8d685b9 --- /dev/null +++ b/dashboard/src/components/icons/cards/Generic.vue @@ -0,0 +1,63 @@ + diff --git a/dashboard/src/components/icons/cards/JCB.vue b/dashboard/src/components/icons/cards/JCB.vue new file mode 100644 index 0000000..55956c9 --- /dev/null +++ b/dashboard/src/components/icons/cards/JCB.vue @@ -0,0 +1,68 @@ + diff --git a/dashboard/src/components/icons/cards/MasterCard.vue b/dashboard/src/components/icons/cards/MasterCard.vue new file mode 100644 index 0000000..aafa70d --- /dev/null +++ b/dashboard/src/components/icons/cards/MasterCard.vue @@ -0,0 +1,38 @@ + diff --git a/dashboard/src/components/icons/cards/UnionPay.vue 
b/dashboard/src/components/icons/cards/UnionPay.vue new file mode 100644 index 0000000..b9113aa --- /dev/null +++ b/dashboard/src/components/icons/cards/UnionPay.vue @@ -0,0 +1,89 @@ + diff --git a/dashboard/src/components/icons/cards/Visa.vue b/dashboard/src/components/icons/cards/Visa.vue new file mode 100644 index 0000000..3b521d5 --- /dev/null +++ b/dashboard/src/components/icons/cards/Visa.vue @@ -0,0 +1,24 @@ + diff --git a/dashboard/src/components/icons/index.js b/dashboard/src/components/icons/index.js new file mode 100644 index 0000000..f9431ac --- /dev/null +++ b/dashboard/src/components/icons/index.js @@ -0,0 +1,19 @@ +import AppsIcon from './AppsIcon.vue'; +import SiteIcon from './SiteIcon.vue'; +import BenchIcon from './BenchIcon.vue'; +import ServerIcon from './ServerIcon.vue'; +import BillingIcon from './BillingIcon.vue'; +import SettingsIcon from './SettingsIcon.vue'; +import SpacesIcon from './SpacesIcon.vue'; +import SecurityIcon from './SecurityIcon.vue'; + +export const FCIcons = { + SiteIcon, + BenchIcon, + ServerIcon, + AppsIcon, + BillingIcon, + SettingsIcon, + SpacesIcon, + SecurityIcon +}; diff --git a/dashboard/src/components/marketplace/ChangeAppBranchDialog.vue b/dashboard/src/components/marketplace/ChangeAppBranchDialog.vue new file mode 100644 index 0000000..556a1ab --- /dev/null +++ b/dashboard/src/components/marketplace/ChangeAppBranchDialog.vue @@ -0,0 +1,78 @@ + + + diff --git a/dashboard/src/components/marketplace/CreateAppVersionDialog.vue b/dashboard/src/components/marketplace/CreateAppVersionDialog.vue new file mode 100644 index 0000000..23fa63f --- /dev/null +++ b/dashboard/src/components/marketplace/CreateAppVersionDialog.vue @@ -0,0 +1,104 @@ + + + diff --git a/dashboard/src/components/marketplace/MarketplaceAppReviewStages.vue b/dashboard/src/components/marketplace/MarketplaceAppReviewStages.vue new file mode 100644 index 0000000..59832ba --- /dev/null +++ b/dashboard/src/components/marketplace/MarketplaceAppReviewStages.vue @@ -0,0 +1,161 @@ + + + diff --git a/dashboard/src/components/marketplace/PublisherPayoutInfoCard.vue b/dashboard/src/components/marketplace/PublisherPayoutInfoCard.vue new file mode 100644 index 0000000..20d3d78 --- /dev/null +++ b/dashboard/src/components/marketplace/PublisherPayoutInfoCard.vue @@ -0,0 +1,154 @@ + + + diff --git a/dashboard/src/components/marketplace/PublisherProfileCard.vue b/dashboard/src/components/marketplace/PublisherProfileCard.vue new file mode 100644 index 0000000..50e295b --- /dev/null +++ b/dashboard/src/components/marketplace/PublisherProfileCard.vue @@ -0,0 +1,114 @@ + + + diff --git a/dashboard/src/components/utils/CommitChooser.vue b/dashboard/src/components/utils/CommitChooser.vue new file mode 100644 index 0000000..df7620c --- /dev/null +++ b/dashboard/src/components/utils/CommitChooser.vue @@ -0,0 +1,42 @@ + + + diff --git a/dashboard/src/components/utils/CommitTag.vue b/dashboard/src/components/utils/CommitTag.vue new file mode 100644 index 0000000..9a85f29 --- /dev/null +++ b/dashboard/src/components/utils/CommitTag.vue @@ -0,0 +1,12 @@ + + + diff --git a/dashboard/src/composables/resource.js b/dashboard/src/composables/resource.js new file mode 100644 index 0000000..eae7f46 --- /dev/null +++ b/dashboard/src/composables/resource.js @@ -0,0 +1,14 @@ +import { unref, reactive, getCurrentInstance } from 'vue'; +import { Resource } from '@/resourceManager/ResourceManager'; + +export default function useResource(options) { + const resourceOptions = unref(options); + const _vm = 
getCurrentInstance(); + const resource = reactive(new Resource(_vm, resourceOptions)); + + if (options.auto) { + resource.reload(); + } + + return resource; +} diff --git a/dashboard/src/controllers/account.js b/dashboard/src/controllers/account.js new file mode 100644 index 0000000..beac4f3 --- /dev/null +++ b/dashboard/src/controllers/account.js @@ -0,0 +1,124 @@ +import call from './call'; + +export default class Account { + constructor() { + this.user = null; + this.team = null; + this.ssh_key = null; + this.teams = []; + this.team_members = []; + this.onboarding = null; + this.balance = 0; + this.feature_flags = {}; + this._fetchAccountPromise = null; + } + + async fetchIfRequired() { + if (!this.user) { + if (this._fetchAccountPromise) { + await this._fetchAccountPromise; + } else { + await this.fetchAccount(); + } + } + } + + async fetchAccount() { + if (document.cookie.includes('user_id=Guest')) { + return; + } + try { + this._fetchAccountPromise = call('jcloud.api.account.get'); + let result = await this._fetchAccountPromise; + this.user = result.user; + this.ssh_key = result.ssh_key; + this.team = result.team; + this.teams = result.teams; + this.team_members = result.team_members; + this.child_team_members = result.child_team_members; + this.onboarding = result.onboarding; + this.balance = result.balance; + this.feature_flags = result.feature_flags; + this.parent_team = result.parent_team; + this.partner_email = result.partner_email; + this.partner_billing_name = result.partner_billing_name; + this.saas_site_request = result.saas_site_request; + this.permissions = result.permissions; + this.number_of_sites = result.number_of_sites; + this.billing_info = result.billing_info; + } catch (e) { + localStorage.removeItem('current_team'); + } finally { + this._fetchAccountPromise = null; + } + } + + hasRole(role) { + let roles = this.user.roles.map(d => d.role); + return roles.includes(role); + } + + async switchTeam(team) { + if (team === this.team.name) { + return; + } + let result = await call('jcloud.api.account.switch_team', { team }); + this.team = result.team; + this.team_members = result.team_members; + localStorage.setItem('current_team', team); + } + + async switchToTeam(team) { + await this.switchTeam(team); + window.location.reload(); + } + + get needsCard() { + return !this.hasBillingInfo; + } + + get hasBillingInfo() { + if (!this.team) { + return true; + } + if (this.team.free_account || this.team.parent_team) { + return true; + } + if (this.team.payment_mode === 'Paid By Partner') { + // partner credits shall be deprecated in few months + return true; + } + if (['Card', 'Prepaid Credits'].includes(this.team.payment_mode)) { + // card is chargeable and not spam + return ( + this.billing_info.verified_micro_charge || + this.billing_info.has_paid_before || + this.balance > 0 + ); + } + + return false; + } + + hasPermission(docname, action = '', list = false) { + // logged in user is site owner or + // has no granular permissions set, so has all permissions + if ( + this.team.user === this.user.name || + Object.keys(this.permissions).length === 0 + ) { + return true; + } + // if any permission is set for resource, show list view + if (Object.keys(this.permissions).includes(docname) && list) { + return true; + } + // check for granular restricted access + if (Object.keys(this.permissions).includes(docname)) { + if (this.permissions[docname].includes(action)) { + return true; + } + } + return false; + } +} diff --git a/dashboard/src/controllers/auth.js 
b/dashboard/src/controllers/auth.js new file mode 100644 index 0000000..a0122ae --- /dev/null +++ b/dashboard/src/controllers/auth.js @@ -0,0 +1,52 @@ +import call from './call'; + +export default class Auth { + constructor() { + this.isLoggedIn = false; + this.user = null; + this.user_image = null; + this.cookie = null; + + this.cookie = Object.fromEntries( + document.cookie + .split('; ') + .map(part => part.split('=')) + .map(d => [d[0], decodeURIComponent(d[1])]) + ); + + this.isLoggedIn = this.cookie.user_id && this.cookie.user_id !== 'Guest'; + } + + async login(email, password) { + localStorage.removeItem('current_team'); + let res = await call('login', { + usr: email, + pwd: password + }); + if (res) { + await window.$account.fetchAccount(); + let last_used_team = window.$account.team.last_used_team; + let team = window.$account.team.name; + + if (last_used_team && last_used_team != team) { + team = last_used_team; + window.$account.switchTeam(last_used_team); + } + localStorage.setItem('current_team', team); + this.isLoggedIn = true; + window.location.reload(); + return res; + } + return false; + } + async logout() { + localStorage.removeItem('current_team'); + await call('logout'); + window.location.reload(); + } + async resetPassword(email) { + return await call('jcloud.api.account.send_reset_password_email', { + email + }); + } +} diff --git a/dashboard/src/controllers/call.js b/dashboard/src/controllers/call.js new file mode 100644 index 0000000..929149b --- /dev/null +++ b/dashboard/src/controllers/call.js @@ -0,0 +1,95 @@ +import router from '@/router'; + +export default async function call(method, args) { + if (!args) { + args = {}; + } + + let headers = { + Accept: 'application/json', + 'Content-Type': 'application/json; charset=utf-8', + 'X-Jingrow-Site-Name': window.location.hostname + }; + + let team = localStorage.getItem('current_team') || null; + if (team) { + headers['X-Jcloud-Team'] = team; + } + + if (window.csrf_token && window.csrf_token !== '{{ csrf_token }}') { + headers['X-Jingrow-CSRF-Token'] = window.csrf_token; + } + + updateState(this, 'RequestStarted', null); + + const res = await fetch(`/api/method/${method}`, { + method: 'POST', + headers, + body: JSON.stringify(args) + }); + + if (res.ok) { + updateState(this, null, null); + const data = await res.json(); + if (data.docs || method === 'login') { + return data; + } + return data.message; + } else { + let response = await res.text(); + let error, exception; + try { + error = JSON.parse(response); + // eslint-disable-next-line no-empty + } catch (e) {} + let errorParts = [ + [method, error.exc_type, error._error_message].filter(Boolean).join(' ') + ]; + if (error.exc) { + exception = error.exc; + try { + exception = JSON.parse(exception)[0]; + console.log(exception); + // eslint-disable-next-line no-empty + } catch (e) {} + } + let e = new Error(errorParts.join('\n')); + e.exc_type = error.exc_type; + e.exc = exception; + e.messages = error._server_messages + ? JSON.parse(error._server_messages) + : []; + e.messages = e.messages.concat(error.message); + e.messages = e.messages.map(m => { + try { + return JSON.parse(m).message; + } catch (error) { + return m; + } + }); + e.messages = e.messages.filter(Boolean); + if (!e.messages.length) { + e.messages = error._error_message + ? 
[error._error_message] + : ['Internal Server Error']; + } + updateState(this, null, e.messages.join('\n')); + + if ( + [401, 403].includes(res.status) && + router.currentRoute.name !== 'Login' + ) { + router.push('/login'); + } + throw e; + } + + function updateState(vm, state, errorMessage) { + if (vm?.state !== undefined) { + vm.state = state; + } + if (vm?.errorMessage !== undefined) { + vm.errorMessage = errorMessage; + } + } +} diff --git a/dashboard/src/controllers/fileUploader.js b/dashboard/src/controllers/fileUploader.js new file mode 100644 index 0000000..b1e0072 --- /dev/null +++ b/dashboard/src/controllers/fileUploader.js @@ -0,0 +1,100 @@ +export default class FileUploader { + constructor() { + this.listeners = {}; + } + + on(event, handler) { + this.listeners[event] = this.listeners[event] || []; + this.listeners[event].push(handler); + } + + trigger(event, data) { + let handlers = this.listeners[event] || []; + handlers.forEach(handler => { + handler.call(this, data); + }); + } + + upload(file, options) { + return new Promise((resolve, reject) => { + let xhr = new XMLHttpRequest(); + xhr.upload.addEventListener('loadstart', () => { + this.trigger('start'); + }); + xhr.upload.addEventListener('progress', e => { + if (e.lengthComputable) { + this.trigger('progress', { + uploaded: e.loaded, + total: e.total + }); + } + }); + xhr.upload.addEventListener('load', () => { + this.trigger('finish'); + }); + xhr.addEventListener('error', () => { + this.trigger('error'); + reject(); + }); + xhr.onreadystatechange = () => { + if (xhr.readyState == XMLHttpRequest.DONE) { + let error; + if (xhr.status === 200) { + let r = null; + try { + r = JSON.parse(xhr.responseText); + } catch (e) { + r = xhr.responseText; + } + let out = r.message || r; + resolve(out); + } else if (xhr.status === 403) { + error = JSON.parse(xhr.responseText); + } else { + this.failed = true; + try { + error = JSON.parse(xhr.responseText); + } catch (e) { + // pass + } + } + if (error && error.exc) { + console.error(JSON.parse(error.exc)[0]); + } + reject(error); + } + }; + xhr.open('POST', '/api/method/upload_file', true); + xhr.setRequestHeader('Accept', 'application/json'); + if (window.csrf_token && window.csrf_token !== '{{ csrf_token }}') { + xhr.setRequestHeader('X-Jingrow-CSRF-Token', window.csrf_token); + } + + let form_data = new FormData(); + if (file) { + form_data.append('file', file, file.name); + } + form_data.append('is_private', +(options.private || 0)); + form_data.append('folder', options.folder || 'Home'); + + if (options.file_url) { + form_data.append('file_url', options.file_url); + } + + if (options.pagetype && options.docname) { + form_data.append('pagetype', options.pagetype); + form_data.append('docname', options.docname); + } + + if (options.method) { + form_data.append('method', options.method); + } + + if (options.type) { + form_data.append('type', options.type); + } + + xhr.send(form_data); + }); + } +} diff --git a/dashboard/src/controllers/loginAsAdmin.js b/dashboard/src/controllers/loginAsAdmin.js new file mode 100644 index 0000000..7913cac --- /dev/null +++ b/dashboard/src/controllers/loginAsAdmin.js @@ -0,0 +1,30 @@ +import { notify } from '@/utils/toast'; + +export function loginAsAdmin(siteName) { + return { + url: 'jcloud.api.site.login', + params: { name: siteName }, + onSuccess(data) { + if (data?.sid && data?.site) { + window.open(`https://${data.site}/desk?sid=${data.sid}`, '_blank'); + } + }, + validate() { + // hack to display the toast + notify({ + title: 'Attempting to 
login as Administrator', + message: `Please wait...`, + icon: 'alert-circle', + color: 'yellow' + }); + }, + onError(err) { + notify({ + title: 'Could not login as Administrator', + message: err.messages.join('\n'), + color: 'red', + icon: 'x' + }); + } + }; +} diff --git a/dashboard/src/controllers/register.js b/dashboard/src/controllers/register.js new file mode 100644 index 0000000..e01be17 --- /dev/null +++ b/dashboard/src/controllers/register.js @@ -0,0 +1,33 @@ +import call from './call'; +import Auth from './auth'; +import socket from './socket'; +import Account from './account'; + +import { reactive } from 'vue'; + +const auth = reactive(new Auth()); +const account = reactive(new Account()); + +export default function registerControllers(app) { + // Hack to get $auth working, should be refactored later + app.config.globalProperties.$call = call; + app.config.globalProperties.$socket = socket; + app.config.globalProperties.$auth = auth; + app.config.globalProperties.$account = account; + + // Actually, provide-inject is recommended to be used + app.provide('$auth', auth); + app.provide('$account', account); + app.provide('$call', call); + app.provide('$socket', socket); + + // global accessor to expose switchToTeam method + window.$account = account; + + return { + auth, + account, + call, + socket + }; +} diff --git a/dashboard/src/controllers/s3FileUploader.js b/dashboard/src/controllers/s3FileUploader.js new file mode 100644 index 0000000..2ecdbfb --- /dev/null +++ b/dashboard/src/controllers/s3FileUploader.js @@ -0,0 +1,114 @@ +import call from './call'; + +export default class S3FileUploader { + constructor() { + this.listeners = {}; + } + + on(event, handler) { + this.listeners[event] = this.listeners[event] || []; + this.listeners[event].push(handler); + } + + trigger(event, data) { + let handlers = this.listeners[event] || []; + handlers.forEach(handler => { + handler.call(this, data); + }); + } + + upload(file, options) { + return new Promise(async (resolve, reject) => { + async function getUploadLink() { + try { + let response = await fetch( + `/api/method/jcloud.api.site.get_upload_link?file=${file.name}` + ); + let data = await response.json(); + return data.message; + } catch (e) { + reject(e); + } + } + const upload_link = await getUploadLink(); + const file_path = upload_link?.fields?.key; + + if (!file_path) { + return; + } + + let xhr = new XMLHttpRequest(); + xhr.upload.addEventListener('loadstart', () => { + this.trigger('start'); + }); + xhr.upload.addEventListener('progress', e => { + if (e.lengthComputable) { + this.trigger('progress', { + uploaded: e.loaded, + total: e.total + }); + } + }); + xhr.upload.addEventListener('load', () => { + this.trigger('finish'); + }); + xhr.addEventListener('error', () => { + this.trigger('error'); + reject(); + }); + xhr.onreadystatechange = () => { + if (xhr.readyState == XMLHttpRequest.DONE) { + let error; + if (xhr.status === 200 || xhr.status === 204) { + let r = null; + try { + r = JSON.parse(xhr.responseText); + } catch (e) { + r = xhr.responseText; + } + let out = + r.message || + call('jcloud.api.site.uploaded_backup_info', { + file: file.name, + path: file_path, + type: file.type, + size: file.size + }); + resolve(out || upload_link.fields.key); + } else { + // response from aws is in xml + let xmlDoc = new DOMParser().parseFromString( + xhr.responseText, + 'text/xml' + ); + let code = + xmlDoc.getElementsByTagName('Code')[0].childNodes[0].nodeValue; + let message = + 
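+ // Illustrative (assumed) S3 error body being parsed at this point:
+ //   <Error><Code>EntityTooLarge</Code><Message>Your proposed upload
+ //   exceeds the maximum allowed size</Message></Error>
+ // Code and Message are extracted from that XML only for the console.error below.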
xmlDoc.getElementsByTagName('Message')[0].childNodes[0].nodeValue; + console.error(`${code}: ${message}`); + error = xhr.responseText; + } + if (error && error.exc) { + console.error(JSON.parse(error.exc)[0]); + } + reject(error); + } + }; + + xhr.open('POST', upload_link.url, true); + xhr.setRequestHeader('Accept', 'application/json'); + + let form_data = new FormData(); + for (let key in upload_link.fields) { + if (upload_link.fields.hasOwnProperty(key)) { + form_data.append(key, upload_link.fields[key]); + } + } + if (file) { + form_data.append('file', file, file.name); + } + + xhr.send(form_data); + }); + } +} diff --git a/dashboard/src/controllers/socket.js b/dashboard/src/controllers/socket.js new file mode 100644 index 0000000..a4d9a6a --- /dev/null +++ b/dashboard/src/controllers/socket.js @@ -0,0 +1,13 @@ +import { io } from 'socket.io-client'; +import config from '../../../../../sites/common_site_config.json'; + +let host = window.location.hostname; +let port = window.location.port ? `:${config.socketio_port}` : ''; +let protocol = port ? 'http' : 'https'; +let siteName = window.site_name || host; +let url = `${protocol}://${host}${port}/${siteName}`; +let socket = io(url, { + withCredentials: true +}); + +export default socket; diff --git a/dashboard/src/data/notifications.js b/dashboard/src/data/notifications.js new file mode 100644 index 0000000..beb0ff0 --- /dev/null +++ b/dashboard/src/data/notifications.js @@ -0,0 +1,7 @@ +import { createResource } from 'jingrow-ui'; + +export const unreadNotificationsCount = createResource({ + cache: 'Unread Notifications Count', + url: 'jcloud.api.notifications.get_unread_count', + initialData: 0 +}); diff --git a/dashboard/src/main.js b/dashboard/src/main.js new file mode 100644 index 0000000..f0d6e02 --- /dev/null +++ b/dashboard/src/main.js @@ -0,0 +1,108 @@ +import App from './App.vue'; +import { createApp } from 'vue'; +import registerPlugins from './plugins'; +import registerRouter from './router/register'; +import registerControllers from './controllers/register'; +import registerGlobalComponents from './components/global/register'; +import * as Sentry from '@sentry/vue'; +import posthog from 'posthog-js'; +import { BrowserTracing } from '@sentry/tracing'; +import router from './router/index'; +import dayjs from 'dayjs'; +import { notify } from '@/utils/toast'; +import { + setConfig, + jingrowRequest, + pageMetaPlugin, + resourcesPlugin +} from 'jingrow-ui'; + +const app = createApp(App); +let request = options => { + let _options = options || {}; + _options.headers = options.headers || {}; + let currentTeam = localStorage.getItem('current_team'); + if (currentTeam) { + _options.headers['X-Jcloud-Team'] = currentTeam; + } + return jingrowRequest(_options); +}; +setConfig('resourceFetcher', request); +app.use(resourcesPlugin); +app.use(pageMetaPlugin); + +registerPlugins(app); +registerGlobalComponents(app); +const { auth, account } = registerControllers(app); +registerRouter(app, auth, account); + +// sentry +if (window.jcloud_frontend_sentry_dsn?.includes('https://')) { + Sentry.init({ + app, + dsn: window.jcloud_frontend_sentry_dsn, + integrations: [ + new BrowserTracing({ + routingInstrumentation: Sentry.vueRouterInstrumentation(router), + tracingOrigins: ['localhost', /^\//] + }) + ], + beforeSend(event, hint) { + const ignoreErrors = [ + /dynamically imported module/, + /NetworkError when attempting to fetch resource/ + ]; + const error = hint.originalException; + + if (error?.message && ignoreErrors.some(re => 
re.test(error.message))) + return null; + + return event; + }, + logErrors: true + }); +} + +// posthog +if (window.jcloud_frontend_posthog_host?.includes('https://')) { + try { + posthog.init(window.jcloud_frontend_posthog_project_id, { + api_host: window.jcloud_frontend_posthog_host, + autocapture: false, + capture_pageview: false, + capture_pageleave: false, + advanced_disable_decide: true + }); + window.posthog = posthog; + } catch (e) { + console.trace('Failed to initialize telemetry', e); + } +} + +if (import.meta.env.DEV) { + request({ + url: '/api/method/jcloud.www.dashboard.get_context_for_dev' + }).then(values => { + for (let key in values) { + window[key] = values[key]; + } + app.mount('#app'); + }); +} else { + app.mount('#app'); +} + +app.config.globalProperties.$dayjs = dayjs; +app.config.errorHandler = (error, instance) => { + if (instance) { + let errorMessage = error.message; + if (error.messages) errorMessage = error.messages.join('\n'); + notify({ + icon: 'x', + title: 'An error occurred', + message: errorMessage, + color: 'red' + }); + } + console.error(error); +}; diff --git a/dashboard/src/plugins.js b/dashboard/src/plugins.js new file mode 100644 index 0000000..7ffde53 --- /dev/null +++ b/dashboard/src/plugins.js @@ -0,0 +1,5 @@ +import utils from './utils'; + +export default function registerPlugins(app) { + app.use(utils); +} diff --git a/dashboard/src/registerServiceWorker.js b/dashboard/src/registerServiceWorker.js new file mode 100644 index 0000000..b243793 --- /dev/null +++ b/dashboard/src/registerServiceWorker.js @@ -0,0 +1,34 @@ +/* eslint-disable no-console */ + +import { register } from 'register-service-worker'; + +if (import.meta.env.PROD) { + register(`${import.meta.env.BASE_URL}service-worker.js`, { + ready() { + console.log( + 'App is being served from cache by a service worker.\n' + + 'For more details, visit https://goo.gl/AFskqB' + ); + }, + registered() { + console.log('Service worker has been registered.'); + }, + cached() { + console.log('Content has been cached for offline use.'); + }, + updatefound() { + console.log('New content is downloading.'); + }, + updated() { + console.log('New content is available; please refresh.'); + }, + offline() { + console.log( + 'No internet connection found. App is running in offline mode.' 
+ ); + }, + error(error) { + console.error('Error during service worker registration:', error); + } + }); +} diff --git a/dashboard/src/router/index.js b/dashboard/src/router/index.js new file mode 100644 index 0000000..27a2fa0 --- /dev/null +++ b/dashboard/src/router/index.js @@ -0,0 +1,566 @@ +import Home from '../views/general/Home.vue'; +import { createRouter, createWebHistory } from 'vue-router'; + +const routes = [ + { + path: '/', + name: 'Home', + component: Home + }, + { + path: '/checkout/:secretKey', + name: 'Checkout', + component: () => import('../views/checkout/Checkout.vue'), + props: true, + meta: { + isLoginPage: true + } + }, + { + path: '/login', + name: 'Login', + component: () => import('../views/auth/Auth.vue'), + meta: { + isLoginPage: true + } + }, + { + path: '/signup', + name: 'Signup', + component: () => import('../views/auth/Auth.vue'), + meta: { + isLoginPage: true + } + }, + { + path: '/setup-account/:requestKey/:joinRequest?', + name: 'Setup Account', + component: () => import('../views/auth/SetupAccount.vue'), + props: true, + meta: { + isLoginPage: true + } + }, + { + path: '/reset-password/:requestKey', + name: 'Reset Password', + component: () => import('../views/auth/ResetPassword.vue'), + props: true, + meta: { + isLoginPage: true + } + }, + { + path: '/impersonate/:team', + name: 'Impersonate Team', + component: () => import('../views/auth/ImpersonateTeam.vue'), + props: true + }, + { + path: '/notifications', + name: 'Notifications', + component: () => import('../views/notifications/Notifications.vue') + }, + { + path: '/groups', + name: 'BenchesScreen', + component: () => import('../views/bench/Benches.vue') + }, + { + path: '/groups/new/:saas_app?', + name: 'NewBench', + meta: { + isSaasPage: true + }, + component: () => import('../views/bench/NewBench.vue'), + props: true + }, + { + path: '/servers/:server/bench/new', + name: 'NewServerBench', + component: () => import('../views/bench/NewBench.vue'), + props: true + }, + { + path: '/groups/:benchName', + name: 'Bench', + component: () => import('../views/bench/Bench.vue'), + props: true, + meta: { + isSaasPage: true + }, + redirect: { name: 'BenchSiteList' }, + children: [ + { + name: 'BenchSiteList', + path: 'sites', + component: () => import('../views/bench/BenchSites.vue'), + props: true + }, + { + path: 'apps', + component: () => import('../views/bench/BenchApps.vue'), + props: true + }, + { + path: 'bench-config', + component: () => import('../views/bench/BenchConfig.vue'), + props: true + }, + { + name: 'BenchDeploys', + path: 'deploys/:candidateName?', + component: () => import('../views/bench/BenchDeploys.vue'), + props: true + }, + { + path: 'logs/:instanceName/:logName?', + component: () => import('../views/bench/BenchLogs.vue'), + props: true + }, + { + name: 'BenchJobs', + path: 'jobs/:jobName?', + component: () => import('../views/bench/BenchJobs.vue'), + props: true + }, + { + path: 'settings', + component: () => import('../views/bench/BenchSettings.vue'), + props: true + } + ] + }, + { + path: '/groups/:benchName/apps/new', + name: 'NewBenchApp', + component: () => import('../views/bench/NewBenchApp.vue'), + props: true + }, + { + path: '/sites', + name: 'Sites', + component: () => import('../views/site/Sites.vue') + }, + { + path: '/:bench/sites', + name: 'BenchSites', + component: () => import('../views/site/Sites.vue'), + props: true + }, + { + path: '/sites/new', + name: 'NewSite', + component: () => import('../views/site/NewSite.vue'), + props: true + }, + { + path: 
'/:bench/new', + name: 'NewBenchSite', + component: () => import('../views/site/NewSite.vue'), + props: true + }, + { + path: '/sites/:siteName', + name: 'Site', + component: () => import('../views/site/Site.vue'), + props: true, + children: [ + { + name: 'SiteOverview', + path: 'overview', + component: () => import('../views/site/SiteOverview.vue') + }, + { + name: 'SiteAppsAndSubscriptions', + path: 'apps', + component: () => import('../views/site/SiteAppsAndSubscriptions.vue'), + props: true + }, + { + path: 'installing', + component: () => import('../views/site/SiteInstalling.vue') + }, + { + path: 'analytics', + component: () => import('../views/site/SiteCharts.vue'), + props: true + }, + { + path: 'database', + component: () => import('../views/site/SiteDatabase.vue') + }, + { + path: 'site-config', + component: () => import('../views/site/SiteConfig.vue') + }, + { + path: 'settings', + component: () => import('../views/site/SiteSettings.vue') + }, + { + path: 'console', + component: () => import('../views/site/SiteConsole.vue') + }, + { + name: 'SiteJobs', + path: 'jobs/:jobName?', + component: () => import('../views/site/SiteJobs.vue'), + props: true + }, + { + path: 'logs/:logName?', + component: () => import('../views/site/SiteLogs.vue'), + props: true + }, + { + path: 'auto-update', + component: () => import('../views/site/SiteAutoUpdate.vue'), + props: true + }, + { + path: 'monitor', + component: () => import('../views/site/SiteMonitorsList.vue'), + props: true, + children: [ + { + name: 'SiteRequestLogs', + path: 'request-logs', + component: () => import('../views/site/SiteRequestLogs.vue'), + props: true + }, + { + name: 'SiteBinaryLogs', + path: 'binary-logs', + component: () => import('../views/site/SiteBinaryLogs.vue'), + props: true + }, + { + name: 'MariaDBProcessList', + path: 'mariadb-process-list', + component: () => import('../views/site/SiteMariaDBProcessList.vue'), + props: true + }, + { + name: 'SiteMariaDBSlowQueries', + path: 'mariadb-slow-queries', + component: () => import('../views/site/SiteMariaDBSlowQueries.vue'), + props: true + }, + { + name: 'SiteDeadlockReport', + path: 'deadlock-report', + component: () => import('../views/site/SiteDeadlockReport.vue'), + props: true + } + ] + } + ] + }, + { + path: '/servers', + name: 'Servers', + component: () => import('../views/server/Servers.vue') + }, + { + path: '/servers/new', + name: 'NewServer', + component: () => import('../views/server/NewServer.vue'), + props: true + }, + { + path: '/servers/:serverName', + name: 'Server', + component: () => import('../views/server/Server.vue'), + props: true, + children: [ + { + name: 'ServerOverview', + path: 'overview', + component: () => import('../views/server/ServerOverview.vue'), + props: true + }, + { + name: 'ServerAnalytics', + path: 'analytics', + component: () => import('../views/server/ServerAnalytics.vue'), + props: true + }, + { + name: 'ServerBenches', + path: 'groups', + component: () => import('../views/server/ServerBenches.vue'), + props: true + }, + { + name: 'ServerJobs', + path: 'jobs/:jobName?', + component: () => import('../views/server/ServerJobs.vue'), + props: true + }, + { + name: 'ServerPlays', + path: 'plays/:playName?', + component: () => import('../views/server/ServerPlays.vue'), + props: true + }, + { + name: 'ServerInstall', + path: 'install', + component: () => import('../views/server/ServerInstall.vue'), + props: true + }, + { + name: 'ServerSettings', + path: 'settings', + component: () => import('../views/server/ServerSettings.vue'), 
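+ // Child paths are relative to the parent route, so this entry renders at
+ // /servers/:serverName/settings inside the Server view; with `props: true`
+ // the :serverName param is passed to the component as a prop
+ // (e.g. /servers/my-app-server/settings — hypothetical server name).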
+ props: true + } + ] + }, + { + name: 'New SelfHosted Server', + path: '/selfhosted/new', + component: () => import('../views/server/NewSelfHostedServer.vue'), + props: true + }, + { + path: '/install-app/:marketplaceApp', + name: 'InstallMarketplaceApp', + component: () => import('@/views/marketplace/InstallMarketplaceApp.vue'), + props: true + }, + { + path: '/marketplace', + name: 'Marketplace', + component: () => import('../views/marketplace/Marketplace.vue'), + children: [ + { + path: 'publisher-profile', + component: () => + import('../views/marketplace/MarketplacePublisherProfile.vue') + }, + { + path: 'apps', + component: () => import('../views/marketplace/MarketplaceApps.vue') + }, + { + path: 'payouts/:payoutOrderName?', + component: () => import('../views/marketplace/MarketplacePayouts.vue'), + props: true + } + ] + }, + { + path: '/marketplace/apps/new', + name: 'NewMarketplaceApp', + component: () => import('../views/marketplace/NewMarketplaceApp.vue'), + props: true + }, + { + path: '/marketplace/apps/:appName', + name: 'MarketplaceApp', + component: () => import('../views/marketplace/MarketplaceApp.vue'), + props: true, + children: [ + { + name: 'MarketplaceAppOverview', + path: 'overview', + component: () => + import('../views/marketplace/MarketplaceAppOverview.vue') + }, + { + name: 'MarketplaceAppReview', + path: 'review', + component: () => + import('../views/marketplace/MarketplaceAppReview.vue'), + props: true + }, + { + name: 'MarketplaceAppAnalytics', + path: 'analytics', + component: () => + import('../views/marketplace/MarketplaceAppAnalytics.vue') + }, + { + name: 'MarketplaceAppDeployment', + path: 'releases', + component: () => + import('../views/marketplace/MarketplaceAppDeployment.vue') + }, + { + name: 'MarketplaceAppSubscriptions', + path: 'subscriptions', + component: () => + import('../views/marketplace/MarketplaceAppSubscriptions.vue'), + props: true + }, + { + name: 'MarketplaceAppPricing', + path: 'pricing', + component: () => + import('../views/marketplace/MarketplaceAppPricing.vue') + } + ] + }, + { + path: '/spaces', + name: 'Spaces', + component: () => import('../views/spaces/Spaces.vue') + }, + { + path: '/codeservers/new', + name: 'NewCodeServer', + component: () => import('../views/spaces/NewCodeServer.vue') + }, + { + path: '/codeservers/:serverName', + name: 'CodeServer', + component: () => import('../views/spaces/CodeServer.vue'), + props: true, + children: [ + { + name: 'CodeServerOverview', + path: 'overview', + component: () => import('../views/spaces/CodeServerOverview.vue') + }, + { + path: 'jobs/:jobName?', + component: () => import('../views/spaces/CodeServerJobs.vue'), + props: true + } + ] + }, + { + path: '/setup-site/:product', + name: 'App Site Setup', + component: () => import('../views/site/AppSiteSetup.vue'), + props: true, + meta: { + hideSidebar: true + } + }, + { + path: '/subscription/:site?', + name: 'Subscription', + component: () => import('../views/checkout/Subscription.vue'), + props: true, + meta: { + hideSidebar: true + } + }, + { + path: '/billing/:invoiceName?', + name: 'BillingScreen', + props: true, + redirect: { name: 'BillingOverview' }, + component: () => import('../views/billing/AccountBilling.vue'), + children: [ + { + name: 'BillingOverview', + path: 'overview', + component: () => import('../views/billing/BillingOverview.vue') + }, + { + name: 'Payment Methods', + path: 'payment', + component: () => import('../views/billing/PaymentMethods.vue') + }, + { + name: 'Invoices', + path: 'invoices', + props: true, 
+ component: () => import('../views/billing/AccountBillingPayments.vue') + }, + { + name: 'Credit Balance', + path: 'credit-balance', + component: () => + import('../views/billing/AccountBillingCreditBalance.vue') + } + ] + }, + { + path: '/settings', + name: 'SettingsScreen', + redirect: { name: 'ProfileSettings' }, + component: () => import('../views/settings/AccountSettings.vue'), + children: [ + { + name: 'ProfileSettings', + path: 'profile', + component: () => import('../views/settings/ProfileSettings.vue') + }, + { + name: 'TeamSettings', + path: 'team', + component: () => import('../views/settings/TeamSettings.vue') + }, + { + name: 'DeveloperSettings', + path: 'developer', + component: () => import('../views/settings/DeveloperSettings.vue') + }, + { + name: 'PartnerSettings', + path: 'partner', + component: () => import('../views/settings/PartnerSettings.vue') + } + ] + }, + { + path: '/security', + name: 'Security', + component: () => import('../views/security/Servers.vue') + }, + { + path: '/security/:serverName', + name: 'ServerSecurity', + component: () => import('../views/security/Security.vue'), + props: true, + children: [ + { + name: 'SecurityOverview', + path: 'overview', + component: () => import('../views/security/SecurityOverview.vue'), + props: true + }, + { + name: 'SecurityUpdates', + path: 'security_update/:updateId?', + component: () => import('../views/security/SecurityUpdates.vue'), + props: true + }, + { + name: 'Firewall', + path: 'firewall/', + // component: () => import('../views/security/SecurityUpdateInfo.vue'), + props: true + }, + { + name: 'SSH Session Logs', + path: 'ssh_session_logs/:logId?', + component: () => import('../views/security/SSHSession.vue'), + props: true + }, + { + name: 'Nginx Overview', + path: 'nginx_overview/', + // component: () => import('../views/security/SecurityUpdateInfo.vue'), + props: true + } + ] + }, + { + name: 'NotFound', + path: '/:pathMatch(.*)*', + component: () => import('../views/general/404.vue') + } +]; + +const router = createRouter({ + history: createWebHistory('/dashboard-old/'), + routes +}); + +export default router; diff --git a/dashboard/src/router/register.js b/dashboard/src/router/register.js new file mode 100644 index 0000000..951b368 --- /dev/null +++ b/dashboard/src/router/register.js @@ -0,0 +1,43 @@ +import router from './index'; + +export default function registerRouter(app, auth, account) { + app.use(router); + + router.beforeEach(async (to, from, next) => { + // TODO: Remove once the new signup flow is live, + // currently this is being called for every guest request which breaks the current signup flow + // await account.fetchIfRequired(); + + if (to.name == 'Home') { + next({ name: 'Sites' }); + return; + } + + if (to.matched.some(record => !record.meta.isLoginPage)) { + // this route requires auth, check if logged in + // if not, redirect to login page. 
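+ // Sketch of the intended round trip (hedged; based on the query param set
+ // here and read back in the logged-in branch below):
+ //   /sites/mysite.example.com (guest)  ->  /login?route=/sites/mysite.example.com
+ //   after login the page reloads on /login?route=..., and the guard then
+ //   pushes back to to.query.route, i.e. /sites/mysite.example.com
+ // `mysite.example.com` is only an illustrative site name.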
+ if (!auth.isLoggedIn) { + next({ name: 'Login', query: { route: to.path } }); + } else { + if (!account.user) { + await account.fetchAccount(); + } + next(); + } + } else { + // if already logged in, route to /sites + if (auth.isLoggedIn) { + if (!account.user) { + await account.fetchAccount(); + } + if (to?.query?.route) { + next({ path: to.query.route }); + } else { + next({ name: 'Sites' }); + } + } else { + next(); + } + } + }); +} diff --git a/dashboard/src/tests/core/ClickToCopy.test.js b/dashboard/src/tests/core/ClickToCopy.test.js new file mode 100644 index 0000000..c672cf8 --- /dev/null +++ b/dashboard/src/tests/core/ClickToCopy.test.js @@ -0,0 +1,53 @@ +import { nextTick } from 'vue'; +import { mount } from '@vue/test-utils'; +import { describe, expect, test, vi } from 'vitest'; +import ClickToCopyField from '@/components/ClickToCopyField.vue'; + +// Mocking clipboard API +let clipboardData = ''; +Object.assign(window.navigator, { + clipboard: { + writeText: vi.fn(data => { + clipboardData = data; + return Promise.resolve(); + }), + readText: vi.fn(() => clipboardData) + } +}); + +describe('ClickToCopyField Component', () => { + test('displays the passed text content', () => { + expect(ClickToCopyField).toBeTruthy(); + + const wrapper = mount(ClickToCopyField, { + props: { + textContent: 'Test' + } + }); + + expect(wrapper.html()).contains('Test'); + }); + + test("let's us copy with a button click", async () => { + const $notify = vi.fn(); + expect(ClickToCopyField).toBeTruthy(); + + const wrapper = mount(ClickToCopyField, { + props: { + textContent: 'Test' + }, + global: { + mocks: { + $notify + } + } + }); + + wrapper.find('button').isVisible(); + wrapper.find('button').trigger('click'); + + await nextTick(); + + expect(navigator.clipboard.readText()).toBe('Test'); + }); +}); diff --git a/dashboard/src/tests/core/FeatureList.test.js b/dashboard/src/tests/core/FeatureList.test.js new file mode 100644 index 0000000..b1dd180 --- /dev/null +++ b/dashboard/src/tests/core/FeatureList.test.js @@ -0,0 +1,19 @@ +import { mount } from '@vue/test-utils'; +import { describe, expect, test } from 'vitest'; +import FeatureList from '@/components/FeatureList.vue'; + +describe('FeatureList Component', () => { + test('feature list renders with 2 features in correct order', async () => { + expect(FeatureList).toBeTruthy(); + + const wrapper = mount(FeatureList, { + props: { + features: ['Feature 1', 'Feature 2'] + } + }); + + expect(wrapper.findAll('li').length).toBe(2); + expect(wrapper.findAll('li')[0].text()).toBe('Feature 1'); + expect(wrapper.findAll('li')[1].text()).toBe('Feature 2'); + }); +}); diff --git a/dashboard/src/tests/core/RichSelect.test.js b/dashboard/src/tests/core/RichSelect.test.js new file mode 100644 index 0000000..9bbdf37 --- /dev/null +++ b/dashboard/src/tests/core/RichSelect.test.js @@ -0,0 +1,82 @@ +import { mount, config } from '@vue/test-utils'; +import { test, describe, expect } from 'vitest'; +import RichSelect from '@/components/RichSelect.vue'; +import { setupGlobalConfig } from '../setup/msw'; +import { nextTick } from 'vue'; + +setupGlobalConfig(config); // Setup vue app global config + +// Ref: Testing teleports +// https://test-utils.vuejs.org/guide/advanced/teleport.html#interacting-with-the-teleported-component + +let wrapper; + +beforeEach(() => { + // create teleport target + const el = document.createElement('div'); + el.id = 'popovers'; + document.body.appendChild(el); + + wrapper = mount(RichSelect, { + props: { + value: 'opt-1', + options: [ + { + 
label: 'Option 1', + value: 'opt-1', + image: 'https://via.placeholder.com/100x100' + }, + { + label: 'Option 2', + value: 'opt-2', + image: 'https://via.placeholder.com/200x200' + } + ] + } + }); +}); + +afterEach(() => { + // clean up + document.body.outerHTML = ''; +}); + +describe('Rich Select Component', () => { + test('should display a dropdown menu', () => { + expect(RichSelect).toBeTruthy(); + + expect(wrapper.find('button').exists()).toBe(true); + + // Image should be displayed along with the label + expect(wrapper.find('img').exists()).toBe(true); + expect(wrapper.find('img').attributes('src')).toBe( + 'https://via.placeholder.com/100x100' + ); + + expect(wrapper.text()).toContain('Option 1'); + }); + + test('should display a popup with desired options', () => { + expect(wrapper.find('button').exists()).toBe(true); + wrapper.find('button').trigger('click'); + + // Test images are displayed + const images = document.getElementsByTagName('img'); + expect(images.length).toBe(2); + expect(images[0].src).toBe('https://via.placeholder.com/100x100'); + expect(images[1].src).toBe('https://via.placeholder.com/200x200'); + + // Test labels are displayed + expect(images[0].parentNode.children[1].innerHTML).toBe('Option 1'); + expect(images[1].parentNode.children[1].innerHTML).toBe('Option 2'); + }); + + test('should emit on clicking of other option', () => { + wrapper.find('button').trigger('click'); + expect(wrapper.text()).toContain('Option 1'); + const images = document.getElementsByTagName('img'); + + images[1].parentNode.click(); + expect(wrapper.emitted('change')[0]).toEqual(['opt-2']); + }); +}); diff --git a/dashboard/src/tests/core/StarRatingComponent.test.js b/dashboard/src/tests/core/StarRatingComponent.test.js new file mode 100644 index 0000000..d629505 --- /dev/null +++ b/dashboard/src/tests/core/StarRatingComponent.test.js @@ -0,0 +1,28 @@ +import { mount } from '@vue/test-utils'; +import { test, describe, expect } from 'vitest'; +import StarRatingInput from '@/components/StarRatingInput.vue'; + +describe('Star Rating Component', () => { + test('should display 5 star icons', () => { + expect(StarRatingInput).toBeTruthy(); + + const wrapper = mount(StarRatingInput); + expect(wrapper.findAll('svg').length).toBe(5); + }); + + test('should emit on star click', () => { + const wrapper = mount(StarRatingInput); + + // Click on the second star + wrapper.findAll('svg')[1].trigger('click'); + + // An modelValue update event should be emitted + expect(wrapper.emitted()).toHaveProperty('update:modelValue'); + + // The modelValue should be 1 + const updateEvent = wrapper.emitted('update:modelValue'); + + // Should emit "2" as the emit payload + expect(updateEvent[1][0]).toBe(2); + }); +}); diff --git a/dashboard/src/tests/setup/msw.js b/dashboard/src/tests/setup/msw.js new file mode 100644 index 0000000..2a67a4a --- /dev/null +++ b/dashboard/src/tests/setup/msw.js @@ -0,0 +1,52 @@ +import { rest } from 'msw'; +import router from '@/router'; +import fetch from 'node-fetch'; +import { setupServer } from 'msw/node'; +import { config } from '@vue/test-utils'; +import resourceManager from '@/resourceManager'; +import { components } from '@/components/global/register'; +import { afterAll, afterEach, beforeAll, vi } from 'vitest'; +import outsideClickDirective from '@/components/global/outsideClickDirective'; + +const FAKE_BASE_URL = 'http://fc.tests'; + +const restHandlers = [ + rest.post( + FAKE_BASE_URL + '/api/method/jcloud.api.site.features', + (req, res, ctx) => { + return 
res(ctx.status(200), ctx.json({ message: apps })); + } + ) +]; + +const server = setupServer(...restHandlers); + +beforeAll(() => { + setupGlobalConfig(config); // Plugins, global components etc. + + // Have to mock fetch, since tests run in node environment + vi.stubGlobal('fetch', (url, options) => fetch(FAKE_BASE_URL + url, options)); + + // Starts the msw server + server.listen({ onUnhandledRequest: 'error' }); +}); + +// Close server after all tests +afterAll(() => server.close()); + +afterEach(() => server.resetHandlers()); + +export function setupGlobalConfig(config) { + const globalComponents = {}; + for (let path in components) { + let component = components[path]; + let name = path.replace('./', '').replace('.vue', ''); + globalComponents[name] = component.default || component; + } + + config.global.components = globalComponents; + config.global.plugins = [resourceManager, router]; + config.global.directives = { + 'on-outside-click': outsideClickDirective + }; +} diff --git a/dashboard/src/utils.js b/dashboard/src/utils.js new file mode 100644 index 0000000..e6d3473 --- /dev/null +++ b/dashboard/src/utils.js @@ -0,0 +1,210 @@ +import { DateTime, Duration } from 'luxon'; +import theme from '../tailwind.theme.json'; + +let utils = { + methods: { + $plural(number, singular, plural) { + if (number === 1) { + return singular; + } + return plural; + }, + $date(date, serverDatesTimezone = 'Asia/Kolkata') { + // assuming all dates on the server are stored in our timezone + + let localZone = DateTime.local().zoneName; + return DateTime.fromSQL(date, { zone: serverDatesTimezone }).setZone( + localZone + ); + }, + round(number, precision) { + let multiplier = Math.pow(10, precision || 0); + return Math.round(number * multiplier) / multiplier; + }, + formatDate(value, type = 'DATETIME_FULL', isUTC = false) { + let datetime = isUTC ? this.$date(value, 'UTC') : this.$date(value); + let format = value; + if (type === 'relative') { + format = datetime.toRelative(); + } else { + let formatOptions = DateTime[type]; + format = datetime.toLocaleString(formatOptions); + } + return format; + }, + $formatDuration(value) { + // Remove decimal seconds + value = value.split('.')[0]; + + // Add leading zero + // 0:0:2 -> 00:00:02 + const formattedDuration = value + .split(':') + .map(x => x.padStart(2, '0')) + .join(':'); + + const dateTime = Duration.fromISOTime(formattedDuration).toObject(); + const hourString = dateTime.hours ? `${dateTime.hours}h` : ''; + const minuteString = dateTime.minutes ? `${dateTime.minutes}m` : ''; + const secondString = `${dateTime.seconds}s`; + + return `${hourString} ${minuteString} ${secondString}`; + }, + formatBytes(bytes, decimals = 2, current = 0) { + if (bytes === 0) return '0 Bytes'; + + const k = 1024; + const dm = decimals < 0 ? 0 : decimals; + const sizes = ['Bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB']; + const i = Math.floor(Math.log(Math.abs(bytes)) / Math.log(k)); + + return ( + parseFloat((bytes / Math.pow(k, i)).toFixed(dm)) + + ' ' + + sizes[i + current] + ); + }, + $formatCPUTime(duration) { + return duration / 1000000; + }, + $planTitle(plan) { + let china = this.$account.team.country == 'china'; + let currency = china ? '¥' : '$'; + let price_field = china ? 'price_cny' : 'price_usd'; + let price = + plan.block_monthly == 1 ? plan[price_field] * 12 : plan[price_field]; + return price > 0 ? 
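+ // Worked example with a hypothetical plan { price_usd: 10, price_cny: 70, block_monthly: 1 }:
+ //   non-China team -> price = 10 * 12 = 120 -> "$120"
+ //   China team     -> price = 70 * 12 = 840 -> "¥840"
+ // A free plan (price 0) falls through to plan.plan_title instead.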
`${currency}${price}` : plan.plan_title; + }, + trialEndsInDaysText(date) { + let diff = this.$date(date).diff(DateTime.local(), ['days']).toObject(); + + let days = diff.days; + if (days > 1) { + return `in ${Math.floor(days)} days`; + } + return 'in a day'; + }, + $routeTo404PageIfNotFound(errorMessage) { + if (errorMessage.indexOf('not found') >= 0) { + this.$router.push({ + name: 'NotFound', + // preserve current path and remove the first char to avoid the target URL starting with `//` + params: { pathMatch: this.$route.path.substring(1).split('/') }, + // preserve existing query and hash if any + query: this.$route.query, + hash: this.$route.hash + }); + } + }, + $siteStatus(site) { + let status = site.status; + if (site.update_available && site.status == 'Active') { + status = 'Update Available'; + } + + let usage = Math.max( + site.current_cpu_usage, + site.current_database_usage, + site.current_disk_usage + ); + if (usage && usage >= 80 && status == 'Active') { + status = 'Attention Required'; + } + if (site.trial_end_date) { + status = 'Trial'; + } + return status; + }, + $sanitize(text) { + if (!text) return text; + const map = { + '&': '&amp;', + '<': '&lt;', + '>': '&gt;', + '"': '&quot;', + "'": '&#39;', + '/': '&#x2F;' + }; + const reg = /[&<>"'/]/gi; + return text.replace(reg, match => map[match]); + } + }, + computed: { + $theme() { + return theme; + }, + $platform() { + const ua = navigator.userAgent.toLowerCase(); + + if (ua.indexOf('win') > -1) { + return 'win'; + } else if (ua.indexOf('mac') > -1) { + return 'mac'; + } else if (ua.indexOf('x11') > -1 || ua.indexOf('linux') > -1) { + return 'linux'; + } + } + } +}; + +export function validateGST(gst) { + // https://github.com/raysk4ever/raysk-vali/blob/master/validate.js#L51 + const gstReg = new RegExp( + /\d{2}[A-Z]{5}\d{4}[A-Z]{1}[A-Z\d]{1}[Z]{1}[A-Z\d]{1}/ + ); + return gstReg.test(gst); +} + +export default function install(Vue) { + Vue.mixin(utils); +} + +export function isWasmSupported() { + // Check if browser supports WASM + // ref: https://stackoverflow.com/a/47880734/10309266 + return (() => { + try { + if ( + typeof WebAssembly === 'object' && + typeof WebAssembly.instantiate === 'function' + ) { + const module = new WebAssembly.Module( + Uint8Array.of(0x0, 0x61, 0x73, 0x6d, 0x01, 0x00, 0x00, 0x00) + ); + if (module instanceof WebAssembly.Module) + return ( + new WebAssembly.Instance(module) instanceof WebAssembly.Instance + ); + } + } catch (e) {} // eslint-disable-line no-empty + return false; + })(); +} + +export async function trypromise(promise) { + try { + let data = await promise; + return [null, data]; + } catch (error) { + return [error, null]; + } +} + +export function validateSubdomain(subdomain) { + if (!subdomain) { + return 'Subdomain cannot be empty'; + } + if (subdomain.length < 5) { + return 'Subdomain too short. Use 5 or more characters'; + } + if (subdomain.length > 32) { + return 'Subdomain too long. Use 32 or less characters'; + } + if (!subdomain.match(/^[a-z0-9][a-z0-9-]*[a-z0-9]$/)) { + return 'Subdomain contains invalid characters. 
Use lowercase characters, numbers and hyphens'; + } + return null; +} + +export { utils }; +export { default as dayjs } from './utils/dayjs'; diff --git a/dashboard/src/utils/billing.js b/dashboard/src/utils/billing.js new file mode 100644 index 0000000..a3baf05 --- /dev/null +++ b/dashboard/src/utils/billing.js @@ -0,0 +1,287 @@ +export const chinaStates = [ + '安徽省', + '北京市', + '重庆市', + '福建省', + '甘肃省', + '广东省', + '广西壮族自治区', + '贵州省', + '海南省', + '河北省', + '河南省', + '黑龙江省', + '湖北省', + '湖南省', + '吉林省', + '江苏省', + '江西省', + '辽宁省', + '内蒙古自治区', + '宁夏回族自治区', + '青海省', + '山东省', + '山西省', + '陕西省', + '上海市', + '四川省', + '天津市', + '西藏自治区', + '新疆维吾尔自治区', + '云南省', + '浙江省', + '香港特别行政区', + '澳门特别行政区', + '台湾省' +]; + +// 国家代码(小写)到中文名称的映射 +export const countryNameMap = { + 'af': '阿富汗', + 'ax': '奥兰群岛', + 'al': '阿尔巴尼亚', + 'dz': '阿尔及利亚', + 'as': '美属萨摩亚', + 'ad': '安道尔', + 'ao': '安哥拉', + 'ai': '安圭拉', + 'aq': '南极洲', + 'ag': '安提瓜和巴布达', + 'ar': '阿根廷', + 'am': '亚美尼亚', + 'aw': '阿鲁巴', + 'au': '澳大利亚', + 'at': '奥地利', + 'az': '阿塞拜疆', + 'bs': '巴哈马', + 'bh': '巴林', + 'bd': '孟加拉国', + 'bb': '巴巴多斯', + 'by': '白俄罗斯', + 'be': '比利时', + 'bz': '伯利兹', + 'bj': '贝宁', + 'bm': '百慕大', + 'bt': '不丹', + 'bo': '玻利维亚', + 'ba': '波斯尼亚和黑塞哥维那', + 'bw': '博茨瓦纳', + 'bv': '布韦岛', + 'br': '巴西', + 'io': '英属印度洋领地', + 'bn': '文莱', + 'bg': '保加利亚', + 'bf': '布基纳法索', + 'bi': '布隆迪', + 'kh': '柬埔寨', + 'cm': '喀麦隆', + 'ca': '加拿大', + 'cv': '佛得角', + 'ky': '开曼群岛', + 'cf': '中非共和国', + 'td': '乍得', + 'cl': '智利', + 'cn': '中国', + 'cx': '圣诞岛', + 'cc': '科科斯(基林)群岛', + 'co': '哥伦比亚', + 'km': '科摩罗', + 'cg': '刚果共和国', + 'cd': '刚果民主共和国', + 'ck': '库克群岛', + 'cr': '哥斯达黎加', + 'ci': '科特迪瓦', + 'hr': '克罗地亚', + 'cu': '古巴', + 'cy': '塞浦路斯', + 'cz': '捷克共和国', + 'dk': '丹麦', + 'dj': '吉布提', + 'dm': '多米尼克', + 'do': '多米尼加共和国', + 'ec': '厄瓜多尔', + 'eg': '埃及', + 'sv': '萨尔瓦多', + 'gq': '赤道几内亚', + 'er': '厄立特里亚', + 'ee': '爱沙尼亚', + 'et': '埃塞俄比亚', + 'fk': '福克兰群岛', + 'fo': '法罗群岛', + 'fj': '斐济', + 'fi': '芬兰', + 'fr': '法国', + 'gf': '法属圭亚那', + 'pf': '法属波利尼西亚', + 'tf': '法属南部领地', + 'ga': '加蓬', + 'gm': '冈比亚', + 'ge': '格鲁吉亚', + 'de': '德国', + 'gh': '加纳', + 'gi': '直布罗陀', + 'gr': '希腊', + 'gl': '格陵兰', + 'gd': '格林纳达', + 'gp': '瓜德罗普', + 'gu': '关岛', + 'gt': '危地马拉', + 'gg': '根西岛', + 'gn': '几内亚', + 'gw': '几内亚比绍', + 'gy': '圭亚那', + 'ht': '海地', + 'hm': '赫德岛和麦克唐纳群岛', + 'va': '梵蒂冈', + 'hn': '洪都拉斯', + 'hk': '香港', + 'hu': '匈牙利', + 'is': '冰岛', + 'in': '印度', + 'id': '印度尼西亚', + 'ir': '伊朗', + 'iq': '伊拉克', + 'ie': '爱尔兰', + 'im': '马恩岛', + 'il': '以色列', + 'it': '意大利', + 'jm': '牙买加', + 'jp': '日本', + 'je': '泽西岛', + 'jo': '约旦', + 'kz': '哈萨克斯坦', + 'ke': '肯尼亚', + 'ki': '基里巴斯', + 'kp': '朝鲜', + 'kr': '韩国', + 'kw': '科威特', + 'kg': '吉尔吉斯斯坦', + 'la': '老挝', + 'lv': '拉脱维亚', + 'lb': '黎巴嫩', + 'ls': '莱索托', + 'lr': '利比里亚', + 'ly': '利比亚', + 'li': '列支敦士登', + 'lt': '立陶宛', + 'lu': '卢森堡', + 'mo': '澳门', + 'mk': '北马其顿', + 'mg': '马达加斯加', + 'mw': '马拉维', + 'my': '马来西亚', + 'mv': '马尔代夫', + 'ml': '马里', + 'mt': '马耳他', + 'mh': '马绍尔群岛', + 'mq': '马提尼克', + 'mr': '毛里塔尼亚', + 'mu': '毛里求斯', + 'yt': '马约特', + 'mx': '墨西哥', + 'fm': '密克罗尼西亚', + 'md': '摩尔多瓦', + 'mc': '摩纳哥', + 'mn': '蒙古', + 'me': '黑山', + 'ms': '蒙特塞拉特', + 'ma': '摩洛哥', + 'mz': '莫桑比克', + 'mm': '缅甸', + 'na': '纳米比亚', + 'nr': '瑙鲁', + 'np': '尼泊尔', + 'nl': '荷兰', + 'nc': '新喀里多尼亚', + 'nz': '新西兰', + 'ni': '尼加拉瓜', + 'ne': '尼日尔', + 'ng': '尼日利亚', + 'nu': '纽埃', + 'nf': '诺福克岛', + 'mp': '北马里亚纳群岛', + 'no': '挪威', + 'om': '阿曼', + 'pk': '巴基斯坦', + 'pw': '帕劳', + 'ps': '巴勒斯坦', + 'pa': '巴拿马', + 'pg': '巴布亚新几内亚', + 'py': '巴拉圭', + 'pe': '秘鲁', + 'ph': '菲律宾', + 'pn': '皮特凯恩群岛', + 'pl': '波兰', + 'pt': '葡萄牙', + 'pr': '波多黎各', + 'qa': '卡塔尔', + 're': '留尼汪', + 'ro': 
'罗马尼亚', + 'ru': '俄罗斯', + 'rw': '卢旺达', + 'bl': '圣巴泰勒米', + 'sh': '圣赫勒拿', + 'kn': '圣基茨和尼维斯', + 'lc': '圣卢西亚', + 'mf': '法属圣马丁', + 'pm': '圣皮埃尔和密克隆', + 'vc': '圣文森特和格林纳丁斯', + 'ws': '萨摩亚', + 'sm': '圣马力诺', + 'st': '圣多美和普林西比', + 'sa': '沙特阿拉伯', + 'sn': '塞内加尔', + 'rs': '塞尔维亚', + 'sc': '塞舌尔', + 'sl': '塞拉利昂', + 'sg': '新加坡', + 'sx': '荷属圣马丁', + 'sk': '斯洛伐克', + 'si': '斯洛文尼亚', + 'sb': '所罗门群岛', + 'so': '索马里', + 'za': '南非', + 'gs': '南乔治亚和南桑威奇群岛', + 'ss': '南苏丹', + 'es': '西班牙', + 'lk': '斯里兰卡', + 'sd': '苏丹', + 'sr': '苏里南', + 'sj': '斯瓦尔巴和扬马延', + 'sz': '斯威士兰', + 'se': '瑞典', + 'ch': '瑞士', + 'sy': '叙利亚', + 'tw': '台湾', + 'tj': '塔吉克斯坦', + 'tz': '坦桑尼亚', + 'th': '泰国', + 'tl': '东帝汶', + 'tg': '多哥', + 'tk': '托克劳', + 'to': '汤加', + 'tt': '特立尼达和多巴哥', + 'tn': '突尼斯', + 'tr': '土耳其', + 'tm': '土库曼斯坦', + 'tc': '特克斯和凯科斯群岛', + 'tv': '图瓦卢', + 'ug': '乌干达', + 'ua': '乌克兰', + 'ae': '阿拉伯联合酋长国', + 'gb': '英国', + 'us': '美国', + 'um': '美国本土外小岛屿', + 'uy': '乌拉圭', + 'uz': '乌兹别克斯坦', + 'vu': '瓦努阿图', + 've': '委内瑞拉', + 'vn': '越南', + 'vg': '英属维尔京群岛', + 'vi': '美属维尔京群岛', + 'wf': '瓦利斯和富图纳', + 'eh': '西撒哈拉', + 'ye': '也门', + 'zm': '赞比亚', + 'zw': '津巴布韦' +}; diff --git a/dashboard/src/utils/dayjs.js b/dashboard/src/utils/dayjs.js new file mode 100644 index 0000000..8b2ab04 --- /dev/null +++ b/dashboard/src/utils/dayjs.js @@ -0,0 +1,62 @@ +import dayjs from 'dayjs'; +import relativeTime from 'dayjs/esm/plugin/relativeTime'; +import localizedFormat from 'dayjs/plugin/localizedFormat'; +import updateLocale from 'dayjs/plugin/updateLocale'; +import isToday from 'dayjs/plugin/isToday'; +import utc from 'dayjs/plugin/utc'; +import timezone from 'dayjs/plugin/timezone'; + +dayjs.extend(updateLocale); +dayjs.extend(relativeTime); +dayjs.extend(localizedFormat); +dayjs.extend(isToday); +dayjs.extend(utc); +dayjs.extend(timezone); + +dayjs.shortFormating = (s, ago = false) => { + if (s === 'now' || s === 'now ago') { + return 'now'; + } + + const prefix = s.split(' ')[0]; + const posfix = s.split(' ')[1]; + const isPast = s.includes('ago'); + let newPostfix = ''; + switch (posfix) { + case 'minute': + newPostfix = 'm'; + break; + case 'minutes': + newPostfix = 'm'; + break; + case 'hour': + newPostfix = 'h'; + break; + case 'hours': + newPostfix = 'h'; + break; + case 'day': + newPostfix = 'd'; + break; + case 'days': + newPostfix = 'd'; + break; + case 'month': + newPostfix = 'M'; + break; + case 'months': + newPostfix = 'M'; + break; + case 'year': + newPostfix = 'Y'; + break; + case 'years': + newPostfix = 'Y'; + break; + } + return `${['a', 'an'].includes(prefix) ? '1' : prefix} ${newPostfix}${ + isPast ? (ago ? 
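+ // Usage examples (illustrative, derived from the parsing above):
+ //   dayjs.shortFormating('3 minutes ago', true) -> '3 m ago'
+ //   dayjs.shortFormating('an hour ago')         -> '1 h'
+ //   dayjs.shortFormating('now')                 -> 'now'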
' ago' : '') : '' + }`; +}; + +export default dayjs; diff --git a/dashboard/src/utils/toast.js b/dashboard/src/utils/toast.js new file mode 100644 index 0000000..a2a4871 --- /dev/null +++ b/dashboard/src/utils/toast.js @@ -0,0 +1,15 @@ +import { ref } from 'vue'; + +export const notifications = ref([]); + +export const hideNotification = id => { + notifications.value = notifications.value.filter(props => props.id !== id); +}; + +export const notify = props => { + // TODO: remove the line below once the jingrow-ui bug (onError triggers twice) is fixed + if (notifications.value.some(n => n.message === props.message)) return; + props.id = Math.floor(Math.random() * 1000 + Date.now()); + notifications.value.push(props); + setTimeout(() => hideNotification(props.id), props.timeout || 5000); +}; diff --git a/dashboard/src/views/auth/Auth.vue b/dashboard/src/views/auth/Auth.vue new file mode 100644 index 0000000..830ce52 --- /dev/null +++ b/dashboard/src/views/auth/Auth.vue @@ -0,0 +1,248 @@ + + + diff --git a/dashboard/src/views/auth/ImpersonateTeam.vue b/dashboard/src/views/auth/ImpersonateTeam.vue new file mode 100644 index 0000000..c6dc035 --- /dev/null +++ b/dashboard/src/views/auth/ImpersonateTeam.vue @@ -0,0 +1,23 @@ + + diff --git a/dashboard/src/views/auth/Login.vue b/dashboard/src/views/auth/Login.vue new file mode 100644 index 0000000..719c988 --- /dev/null +++ b/dashboard/src/views/auth/Login.vue @@ -0,0 +1,179 @@ + + diff --git a/dashboard/src/views/auth/ResetPassword.vue b/dashboard/src/views/auth/ResetPassword.vue new file mode 100644 index 0000000..aac240f --- /dev/null +++ b/dashboard/src/views/auth/ResetPassword.vue @@ -0,0 +1,90 @@ + + + diff --git a/dashboard/src/views/auth/SetupAccount.vue b/dashboard/src/views/auth/SetupAccount.vue new file mode 100644 index 0000000..be6736c --- /dev/null +++ b/dashboard/src/views/auth/SetupAccount.vue @@ -0,0 +1,210 @@ + + + diff --git a/dashboard/src/views/bench/Bench.vue b/dashboard/src/views/bench/Bench.vue new file mode 100644 index 0000000..a987fb1 --- /dev/null +++ b/dashboard/src/views/bench/Bench.vue @@ -0,0 +1,252 @@ + + + diff --git a/dashboard/src/views/bench/BenchApps.vue b/dashboard/src/views/bench/BenchApps.vue new file mode 100644 index 0000000..e1fa099 --- /dev/null +++ b/dashboard/src/views/bench/BenchApps.vue @@ -0,0 +1,284 @@ + + diff --git a/dashboard/src/views/bench/BenchConfig.vue b/dashboard/src/views/bench/BenchConfig.vue new file mode 100644 index 0000000..c6cbda1 --- /dev/null +++ b/dashboard/src/views/bench/BenchConfig.vue @@ -0,0 +1,49 @@ + + + diff --git a/dashboard/src/views/bench/BenchDependencies.vue b/dashboard/src/views/bench/BenchDependencies.vue new file mode 100644 index 0000000..9b4b176 --- /dev/null +++ b/dashboard/src/views/bench/BenchDependencies.vue @@ -0,0 +1,105 @@ + + + diff --git a/dashboard/src/views/bench/BenchDeploys.vue b/dashboard/src/views/bench/BenchDeploys.vue new file mode 100644 index 0000000..b55aadf --- /dev/null +++ b/dashboard/src/views/bench/BenchDeploys.vue @@ -0,0 +1,216 @@ + + + diff --git a/dashboard/src/views/bench/BenchDropDialog.vue b/dashboard/src/views/bench/BenchDropDialog.vue new file mode 100644 index 0000000..e6097b4 --- /dev/null +++ b/dashboard/src/views/bench/BenchDropDialog.vue @@ -0,0 +1,72 @@ + + diff --git a/dashboard/src/views/bench/BenchJobs.vue b/dashboard/src/views/bench/BenchJobs.vue new file mode 100644 index 0000000..507970a --- /dev/null +++ b/dashboard/src/views/bench/BenchJobs.vue @@ -0,0 +1,38 @@ + + + diff --git 
a/dashboard/src/views/bench/BenchLogs.vue b/dashboard/src/views/bench/BenchLogs.vue new file mode 100644 index 0000000..b7a3b8d --- /dev/null +++ b/dashboard/src/views/bench/BenchLogs.vue @@ -0,0 +1,57 @@ + + + diff --git a/dashboard/src/views/bench/BenchLogsDetail.vue b/dashboard/src/views/bench/BenchLogsDetail.vue new file mode 100644 index 0000000..94d5a83 --- /dev/null +++ b/dashboard/src/views/bench/BenchLogsDetail.vue @@ -0,0 +1,70 @@ + + diff --git a/dashboard/src/views/bench/BenchRegions.vue b/dashboard/src/views/bench/BenchRegions.vue new file mode 100644 index 0000000..6edcca6 --- /dev/null +++ b/dashboard/src/views/bench/BenchRegions.vue @@ -0,0 +1,128 @@ + + + diff --git a/dashboard/src/views/bench/BenchSettings.vue b/dashboard/src/views/bench/BenchSettings.vue new file mode 100644 index 0000000..20233ee --- /dev/null +++ b/dashboard/src/views/bench/BenchSettings.vue @@ -0,0 +1,29 @@ + + + diff --git a/dashboard/src/views/bench/BenchSites.vue b/dashboard/src/views/bench/BenchSites.vue new file mode 100644 index 0000000..fa314c7 --- /dev/null +++ b/dashboard/src/views/bench/BenchSites.vue @@ -0,0 +1,557 @@ + + diff --git a/dashboard/src/views/bench/Benches.vue b/dashboard/src/views/bench/Benches.vue new file mode 100644 index 0000000..2402d84 --- /dev/null +++ b/dashboard/src/views/bench/Benches.vue @@ -0,0 +1,308 @@ + + + diff --git a/dashboard/src/views/bench/EditBenchTitleDialog.vue b/dashboard/src/views/bench/EditBenchTitleDialog.vue new file mode 100644 index 0000000..07e2c14 --- /dev/null +++ b/dashboard/src/views/bench/EditBenchTitleDialog.vue @@ -0,0 +1,63 @@ + + diff --git a/dashboard/src/views/bench/NewBench.vue b/dashboard/src/views/bench/NewBench.vue new file mode 100644 index 0000000..25fa35b --- /dev/null +++ b/dashboard/src/views/bench/NewBench.vue @@ -0,0 +1,248 @@ + + + diff --git a/dashboard/src/views/bench/NewBenchApp.vue b/dashboard/src/views/bench/NewBenchApp.vue new file mode 100644 index 0000000..da96394 --- /dev/null +++ b/dashboard/src/views/bench/NewBenchApp.vue @@ -0,0 +1,58 @@ + + + diff --git a/dashboard/src/views/billing/AccountBilling.vue b/dashboard/src/views/billing/AccountBilling.vue new file mode 100644 index 0000000..6972da0 --- /dev/null +++ b/dashboard/src/views/billing/AccountBilling.vue @@ -0,0 +1,57 @@ + + + diff --git a/dashboard/src/views/billing/AccountBillingCards.vue b/dashboard/src/views/billing/AccountBillingCards.vue new file mode 100644 index 0000000..ae798ce --- /dev/null +++ b/dashboard/src/views/billing/AccountBillingCards.vue @@ -0,0 +1,200 @@ + + + diff --git a/dashboard/src/views/billing/AccountBillingCreditBalance.vue b/dashboard/src/views/billing/AccountBillingCreditBalance.vue new file mode 100644 index 0000000..bcdc2c9 --- /dev/null +++ b/dashboard/src/views/billing/AccountBillingCreditBalance.vue @@ -0,0 +1,57 @@ + + diff --git a/dashboard/src/views/billing/AccountBillingDetails.vue b/dashboard/src/views/billing/AccountBillingDetails.vue new file mode 100644 index 0000000..219ae4b --- /dev/null +++ b/dashboard/src/views/billing/AccountBillingDetails.vue @@ -0,0 +1,55 @@ + + diff --git a/dashboard/src/views/billing/AccountBillingPayments.vue b/dashboard/src/views/billing/AccountBillingPayments.vue new file mode 100644 index 0000000..7b7b38e --- /dev/null +++ b/dashboard/src/views/billing/AccountBillingPayments.vue @@ -0,0 +1,187 @@ + + diff --git a/dashboard/src/views/billing/BillingOverview.vue b/dashboard/src/views/billing/BillingOverview.vue new file mode 100644 index 0000000..e31c551 --- /dev/null +++ 
b/dashboard/src/views/billing/BillingOverview.vue @@ -0,0 +1,25 @@ + + + diff --git a/dashboard/src/views/billing/BillingSummary.vue b/dashboard/src/views/billing/BillingSummary.vue new file mode 100644 index 0000000..ab28884 --- /dev/null +++ b/dashboard/src/views/billing/BillingSummary.vue @@ -0,0 +1,242 @@ + + diff --git a/dashboard/src/views/billing/FinalizeInvoicesDialog.vue b/dashboard/src/views/billing/FinalizeInvoicesDialog.vue new file mode 100644 index 0000000..c865fef --- /dev/null +++ b/dashboard/src/views/billing/FinalizeInvoicesDialog.vue @@ -0,0 +1,60 @@ + + + diff --git a/dashboard/src/views/billing/PaymentMethods.vue b/dashboard/src/views/billing/PaymentMethods.vue new file mode 100644 index 0000000..e9830f6 --- /dev/null +++ b/dashboard/src/views/billing/PaymentMethods.vue @@ -0,0 +1,25 @@ + + + diff --git a/dashboard/src/views/billing/UpcomingInvoiceSummary.vue b/dashboard/src/views/billing/UpcomingInvoiceSummary.vue new file mode 100644 index 0000000..f2a17ce --- /dev/null +++ b/dashboard/src/views/billing/UpcomingInvoiceSummary.vue @@ -0,0 +1,28 @@ + + diff --git a/dashboard/src/views/checkout/Checkout.vue b/dashboard/src/views/checkout/Checkout.vue new file mode 100644 index 0000000..9060cd3 --- /dev/null +++ b/dashboard/src/views/checkout/Checkout.vue @@ -0,0 +1,86 @@ + + + diff --git a/dashboard/src/views/checkout/CheckoutAddress.vue b/dashboard/src/views/checkout/CheckoutAddress.vue new file mode 100644 index 0000000..2c34569 --- /dev/null +++ b/dashboard/src/views/checkout/CheckoutAddress.vue @@ -0,0 +1,147 @@ + + + diff --git a/dashboard/src/views/checkout/CheckoutApps.vue b/dashboard/src/views/checkout/CheckoutApps.vue new file mode 100644 index 0000000..956f7ac --- /dev/null +++ b/dashboard/src/views/checkout/CheckoutApps.vue @@ -0,0 +1,40 @@ + + + diff --git a/dashboard/src/views/checkout/CheckoutPayment.vue b/dashboard/src/views/checkout/CheckoutPayment.vue new file mode 100644 index 0000000..9f14c34 --- /dev/null +++ b/dashboard/src/views/checkout/CheckoutPayment.vue @@ -0,0 +1,198 @@ + + + diff --git a/dashboard/src/views/checkout/CheckoutPlans.vue b/dashboard/src/views/checkout/CheckoutPlans.vue new file mode 100644 index 0000000..2b56622 --- /dev/null +++ b/dashboard/src/views/checkout/CheckoutPlans.vue @@ -0,0 +1,108 @@ + + + diff --git a/dashboard/src/views/checkout/PlanChangeSuccessful.vue b/dashboard/src/views/checkout/PlanChangeSuccessful.vue new file mode 100644 index 0000000..cc474bf --- /dev/null +++ b/dashboard/src/views/checkout/PlanChangeSuccessful.vue @@ -0,0 +1,19 @@ + + + diff --git a/dashboard/src/views/checkout/Subscription.vue b/dashboard/src/views/checkout/Subscription.vue new file mode 100644 index 0000000..0144da4 --- /dev/null +++ b/dashboard/src/views/checkout/Subscription.vue @@ -0,0 +1,177 @@ + + diff --git a/dashboard/src/views/general/404.vue b/dashboard/src/views/general/404.vue new file mode 100644 index 0000000..b4cfc14 --- /dev/null +++ b/dashboard/src/views/general/404.vue @@ -0,0 +1,20 @@ + + + diff --git a/dashboard/src/views/general/AgentJobs.vue b/dashboard/src/views/general/AgentJobs.vue new file mode 100644 index 0000000..85d440c --- /dev/null +++ b/dashboard/src/views/general/AgentJobs.vue @@ -0,0 +1,85 @@ + + diff --git a/dashboard/src/views/general/AnsiblePlays.vue b/dashboard/src/views/general/AnsiblePlays.vue new file mode 100644 index 0000000..b1a94ea --- /dev/null +++ b/dashboard/src/views/general/AnsiblePlays.vue @@ -0,0 +1,82 @@ + + diff --git a/dashboard/src/views/general/Home.vue 
b/dashboard/src/views/general/Home.vue new file mode 100644 index 0000000..e07b8f0 --- /dev/null +++ b/dashboard/src/views/general/Home.vue @@ -0,0 +1,49 @@ + + + diff --git a/dashboard/src/views/general/JobsDetail.vue b/dashboard/src/views/general/JobsDetail.vue new file mode 100644 index 0000000..9d3d69f --- /dev/null +++ b/dashboard/src/views/general/JobsDetail.vue @@ -0,0 +1,95 @@ + + diff --git a/dashboard/src/views/general/PlaysDetail.vue b/dashboard/src/views/general/PlaysDetail.vue new file mode 100644 index 0000000..b213b24 --- /dev/null +++ b/dashboard/src/views/general/PlaysDetail.vue @@ -0,0 +1,96 @@ + + diff --git a/dashboard/src/views/general/StepsDetail.vue b/dashboard/src/views/general/StepsDetail.vue new file mode 100644 index 0000000..99bd0c8 --- /dev/null +++ b/dashboard/src/views/general/StepsDetail.vue @@ -0,0 +1,139 @@ + + + + diff --git a/dashboard/src/views/marketplace/InstallMarketplaceApp.vue b/dashboard/src/views/marketplace/InstallMarketplaceApp.vue new file mode 100644 index 0000000..4240a02 --- /dev/null +++ b/dashboard/src/views/marketplace/InstallMarketplaceApp.vue @@ -0,0 +1,206 @@ + + + diff --git a/dashboard/src/views/marketplace/Marketplace.vue b/dashboard/src/views/marketplace/Marketplace.vue new file mode 100644 index 0000000..b1c19f3 --- /dev/null +++ b/dashboard/src/views/marketplace/Marketplace.vue @@ -0,0 +1,138 @@ + + + diff --git a/dashboard/src/views/marketplace/MarketplaceApp.vue b/dashboard/src/views/marketplace/MarketplaceApp.vue new file mode 100644 index 0000000..0131e7e --- /dev/null +++ b/dashboard/src/views/marketplace/MarketplaceApp.vue @@ -0,0 +1,146 @@ + + + diff --git a/dashboard/src/views/marketplace/MarketplaceAppAnalytics.vue b/dashboard/src/views/marketplace/MarketplaceAppAnalytics.vue new file mode 100644 index 0000000..9c4943e --- /dev/null +++ b/dashboard/src/views/marketplace/MarketplaceAppAnalytics.vue @@ -0,0 +1,253 @@ + + + diff --git a/dashboard/src/views/marketplace/MarketplaceAppDeployment.vue b/dashboard/src/views/marketplace/MarketplaceAppDeployment.vue new file mode 100644 index 0000000..19e83d0 --- /dev/null +++ b/dashboard/src/views/marketplace/MarketplaceAppDeployment.vue @@ -0,0 +1,19 @@ + + + diff --git a/dashboard/src/views/marketplace/MarketplaceAppOverview.vue b/dashboard/src/views/marketplace/MarketplaceAppOverview.vue new file mode 100644 index 0000000..d13eebc --- /dev/null +++ b/dashboard/src/views/marketplace/MarketplaceAppOverview.vue @@ -0,0 +1,28 @@ + + + diff --git a/dashboard/src/views/marketplace/MarketplaceAppPricing.vue b/dashboard/src/views/marketplace/MarketplaceAppPricing.vue new file mode 100644 index 0000000..c8d599e --- /dev/null +++ b/dashboard/src/views/marketplace/MarketplaceAppPricing.vue @@ -0,0 +1,227 @@ + + + diff --git a/dashboard/src/views/marketplace/MarketplaceAppReview.vue b/dashboard/src/views/marketplace/MarketplaceAppReview.vue new file mode 100644 index 0000000..d23f366 --- /dev/null +++ b/dashboard/src/views/marketplace/MarketplaceAppReview.vue @@ -0,0 +1,31 @@ + + + diff --git a/dashboard/src/views/marketplace/MarketplaceAppSubscriptions.vue b/dashboard/src/views/marketplace/MarketplaceAppSubscriptions.vue new file mode 100644 index 0000000..0c25769 --- /dev/null +++ b/dashboard/src/views/marketplace/MarketplaceAppSubscriptions.vue @@ -0,0 +1,76 @@ + + + diff --git a/dashboard/src/views/marketplace/MarketplaceApps.vue b/dashboard/src/views/marketplace/MarketplaceApps.vue new file mode 100644 index 0000000..8b5b8dd --- /dev/null +++ 
b/dashboard/src/views/marketplace/MarketplaceApps.vue @@ -0,0 +1,71 @@ + + + diff --git a/dashboard/src/views/marketplace/MarketplacePayoutDetails.vue b/dashboard/src/views/marketplace/MarketplacePayoutDetails.vue new file mode 100644 index 0000000..676c1dd --- /dev/null +++ b/dashboard/src/views/marketplace/MarketplacePayoutDetails.vue @@ -0,0 +1,124 @@ + + diff --git a/dashboard/src/views/marketplace/MarketplacePayouts.vue b/dashboard/src/views/marketplace/MarketplacePayouts.vue new file mode 100644 index 0000000..9aa9fef --- /dev/null +++ b/dashboard/src/views/marketplace/MarketplacePayouts.vue @@ -0,0 +1,88 @@ + + + diff --git a/dashboard/src/views/marketplace/MarketplacePublisherProfile.vue b/dashboard/src/views/marketplace/MarketplacePublisherProfile.vue new file mode 100644 index 0000000..ce38279 --- /dev/null +++ b/dashboard/src/views/marketplace/MarketplacePublisherProfile.vue @@ -0,0 +1,61 @@ + + + diff --git a/dashboard/src/views/marketplace/NewMarketplaceApp.vue b/dashboard/src/views/marketplace/NewMarketplaceApp.vue new file mode 100644 index 0000000..cd7773a --- /dev/null +++ b/dashboard/src/views/marketplace/NewMarketplaceApp.vue @@ -0,0 +1,96 @@ + + + diff --git a/dashboard/src/views/notifications/Notifications.vue b/dashboard/src/views/notifications/Notifications.vue new file mode 100644 index 0000000..e16770c --- /dev/null +++ b/dashboard/src/views/notifications/Notifications.vue @@ -0,0 +1,114 @@ + + + diff --git a/dashboard/src/views/onboarding/ComingSoon.vue b/dashboard/src/views/onboarding/ComingSoon.vue new file mode 100644 index 0000000..91b8e82 --- /dev/null +++ b/dashboard/src/views/onboarding/ComingSoon.vue @@ -0,0 +1,3 @@ + diff --git a/dashboard/src/views/onboarding/OnboardingStepCreateAccount.vue b/dashboard/src/views/onboarding/OnboardingStepCreateAccount.vue new file mode 100644 index 0000000..aa5e17d --- /dev/null +++ b/dashboard/src/views/onboarding/OnboardingStepCreateAccount.vue @@ -0,0 +1,25 @@ + + diff --git a/dashboard/src/views/onboarding/OnboardingStepCreateSite.vue b/dashboard/src/views/onboarding/OnboardingStepCreateSite.vue new file mode 100644 index 0000000..da35fbc --- /dev/null +++ b/dashboard/src/views/onboarding/OnboardingStepCreateSite.vue @@ -0,0 +1,28 @@ + + diff --git a/dashboard/src/views/onboarding/OnboardingStepSelectSitePlan.vue b/dashboard/src/views/onboarding/OnboardingStepSelectSitePlan.vue new file mode 100644 index 0000000..c1fbd72 --- /dev/null +++ b/dashboard/src/views/onboarding/OnboardingStepSelectSitePlan.vue @@ -0,0 +1,33 @@ + + diff --git a/dashboard/src/views/onboarding/OnboardingStepSetupPayment.vue b/dashboard/src/views/onboarding/OnboardingStepSetupPayment.vue new file mode 100644 index 0000000..4da11dd --- /dev/null +++ b/dashboard/src/views/onboarding/OnboardingStepSetupPayment.vue @@ -0,0 +1,183 @@ + + diff --git a/dashboard/src/views/onboarding/Support.vue b/dashboard/src/views/onboarding/Support.vue new file mode 100644 index 0000000..93d352c --- /dev/null +++ b/dashboard/src/views/onboarding/Support.vue @@ -0,0 +1,19 @@ + + + diff --git a/dashboard/src/views/onboarding/UserPrompts.vue b/dashboard/src/views/onboarding/UserPrompts.vue new file mode 100644 index 0000000..3772efd --- /dev/null +++ b/dashboard/src/views/onboarding/UserPrompts.vue @@ -0,0 +1,50 @@ + + + diff --git a/dashboard/src/views/onboarding/Welcome.vue b/dashboard/src/views/onboarding/Welcome.vue new file mode 100644 index 0000000..fe4acc3 --- /dev/null +++ b/dashboard/src/views/onboarding/Welcome.vue @@ -0,0 +1,57 @@ + + + diff --git 
a/dashboard/src/views/partials/LoginBox.vue b/dashboard/src/views/partials/LoginBox.vue new file mode 100644 index 0000000..8a52080 --- /dev/null +++ b/dashboard/src/views/partials/LoginBox.vue @@ -0,0 +1,64 @@ + + + diff --git a/dashboard/src/views/security/FirewallOverview.vue b/dashboard/src/views/security/FirewallOverview.vue new file mode 100644 index 0000000..7445317 --- /dev/null +++ b/dashboard/src/views/security/FirewallOverview.vue @@ -0,0 +1,12 @@ + + + diff --git a/dashboard/src/views/security/InfoSection.vue b/dashboard/src/views/security/InfoSection.vue new file mode 100644 index 0000000..7e8f643 --- /dev/null +++ b/dashboard/src/views/security/InfoSection.vue @@ -0,0 +1,70 @@ + + + + diff --git a/dashboard/src/views/security/SSHSession.vue b/dashboard/src/views/security/SSHSession.vue new file mode 100644 index 0000000..6d1cb73 --- /dev/null +++ b/dashboard/src/views/security/SSHSession.vue @@ -0,0 +1,74 @@ + + + diff --git a/dashboard/src/views/security/SSHSessionActivity.vue b/dashboard/src/views/security/SSHSessionActivity.vue new file mode 100644 index 0000000..865313a --- /dev/null +++ b/dashboard/src/views/security/SSHSessionActivity.vue @@ -0,0 +1,74 @@ + + + diff --git a/dashboard/src/views/security/SSHSessionOverview.vue b/dashboard/src/views/security/SSHSessionOverview.vue new file mode 100644 index 0000000..1b3311c --- /dev/null +++ b/dashboard/src/views/security/SSHSessionOverview.vue @@ -0,0 +1,71 @@ + + + diff --git a/dashboard/src/views/security/Security.vue b/dashboard/src/views/security/Security.vue new file mode 100644 index 0000000..727ec28 --- /dev/null +++ b/dashboard/src/views/security/Security.vue @@ -0,0 +1,119 @@ + + + diff --git a/dashboard/src/views/security/SecurityOverview.vue b/dashboard/src/views/security/SecurityOverview.vue new file mode 100644 index 0000000..f5feb6b --- /dev/null +++ b/dashboard/src/views/security/SecurityOverview.vue @@ -0,0 +1,17 @@ + + + diff --git a/dashboard/src/views/security/SecurityUpdateInfo.vue b/dashboard/src/views/security/SecurityUpdateInfo.vue new file mode 100644 index 0000000..e2f15cb --- /dev/null +++ b/dashboard/src/views/security/SecurityUpdateInfo.vue @@ -0,0 +1,96 @@ + + diff --git a/dashboard/src/views/security/SecurityUpdates.vue b/dashboard/src/views/security/SecurityUpdates.vue new file mode 100644 index 0000000..c751ce5 --- /dev/null +++ b/dashboard/src/views/security/SecurityUpdates.vue @@ -0,0 +1,103 @@ + + diff --git a/dashboard/src/views/security/SecurityUpdatesOverview.vue b/dashboard/src/views/security/SecurityUpdatesOverview.vue new file mode 100644 index 0000000..cb30e1b --- /dev/null +++ b/dashboard/src/views/security/SecurityUpdatesOverview.vue @@ -0,0 +1,83 @@ + + diff --git a/dashboard/src/views/security/Servers.vue b/dashboard/src/views/security/Servers.vue new file mode 100644 index 0000000..4021d94 --- /dev/null +++ b/dashboard/src/views/security/Servers.vue @@ -0,0 +1,210 @@ + + + diff --git a/dashboard/src/views/server/EditServerTitleDialog.vue b/dashboard/src/views/server/EditServerTitleDialog.vue new file mode 100644 index 0000000..2fa628e --- /dev/null +++ b/dashboard/src/views/server/EditServerTitleDialog.vue @@ -0,0 +1,63 @@ + + diff --git a/dashboard/src/views/server/NewAppServerPlans.vue b/dashboard/src/views/server/NewAppServerPlans.vue new file mode 100644 index 0000000..83d0b9e --- /dev/null +++ b/dashboard/src/views/server/NewAppServerPlans.vue @@ -0,0 +1,40 @@ + + diff --git a/dashboard/src/views/server/NewDBServerPlans.vue 
b/dashboard/src/views/server/NewDBServerPlans.vue new file mode 100644 index 0000000..d7e6f4b --- /dev/null +++ b/dashboard/src/views/server/NewDBServerPlans.vue @@ -0,0 +1,40 @@ + + diff --git a/dashboard/src/views/server/NewSelfHostedServer.vue b/dashboard/src/views/server/NewSelfHostedServer.vue new file mode 100644 index 0000000..cd62982 --- /dev/null +++ b/dashboard/src/views/server/NewSelfHostedServer.vue @@ -0,0 +1,235 @@ + + + diff --git a/dashboard/src/views/server/NewSelfHostedServerForm.vue b/dashboard/src/views/server/NewSelfHostedServerForm.vue new file mode 100644 index 0000000..6321a18 --- /dev/null +++ b/dashboard/src/views/server/NewSelfHostedServerForm.vue @@ -0,0 +1,104 @@ + + diff --git a/dashboard/src/views/server/NewSelfHostedServerHostname.vue b/dashboard/src/views/server/NewSelfHostedServerHostname.vue new file mode 100644 index 0000000..c5dea36 --- /dev/null +++ b/dashboard/src/views/server/NewSelfHostedServerHostname.vue @@ -0,0 +1,46 @@ + + diff --git a/dashboard/src/views/server/NewServer.vue b/dashboard/src/views/server/NewServer.vue new file mode 100644 index 0000000..e13d066 --- /dev/null +++ b/dashboard/src/views/server/NewServer.vue @@ -0,0 +1,201 @@ + + + diff --git a/dashboard/src/views/server/NewServerHostname.vue b/dashboard/src/views/server/NewServerHostname.vue new file mode 100644 index 0000000..65bc11b --- /dev/null +++ b/dashboard/src/views/server/NewServerHostname.vue @@ -0,0 +1,80 @@ + + diff --git a/dashboard/src/views/server/NewVerifyServer.vue b/dashboard/src/views/server/NewVerifyServer.vue new file mode 100644 index 0000000..6f4d0ea --- /dev/null +++ b/dashboard/src/views/server/NewVerifyServer.vue @@ -0,0 +1,86 @@ + + diff --git a/dashboard/src/views/server/SelfHostedServerPlan.vue b/dashboard/src/views/server/SelfHostedServerPlan.vue new file mode 100644 index 0000000..52721e9 --- /dev/null +++ b/dashboard/src/views/server/SelfHostedServerPlan.vue @@ -0,0 +1,37 @@ + + diff --git a/dashboard/src/views/server/SelfHostedServerVerify.vue b/dashboard/src/views/server/SelfHostedServerVerify.vue new file mode 100644 index 0000000..554b362 --- /dev/null +++ b/dashboard/src/views/server/SelfHostedServerVerify.vue @@ -0,0 +1,23 @@ + + diff --git a/dashboard/src/views/server/Server.vue b/dashboard/src/views/server/Server.vue new file mode 100644 index 0000000..79b3a96 --- /dev/null +++ b/dashboard/src/views/server/Server.vue @@ -0,0 +1,282 @@ + + + diff --git a/dashboard/src/views/server/ServerAnalytics.vue b/dashboard/src/views/server/ServerAnalytics.vue new file mode 100644 index 0000000..bf148cc --- /dev/null +++ b/dashboard/src/views/server/ServerAnalytics.vue @@ -0,0 +1,317 @@ + + + diff --git a/dashboard/src/views/server/ServerBenches.vue b/dashboard/src/views/server/ServerBenches.vue new file mode 100644 index 0000000..92966e2 --- /dev/null +++ b/dashboard/src/views/server/ServerBenches.vue @@ -0,0 +1,147 @@ + + diff --git a/dashboard/src/views/server/ServerDrop.vue b/dashboard/src/views/server/ServerDrop.vue new file mode 100644 index 0000000..0493158 --- /dev/null +++ b/dashboard/src/views/server/ServerDrop.vue @@ -0,0 +1,71 @@ + + + diff --git a/dashboard/src/views/server/ServerInstall.vue b/dashboard/src/views/server/ServerInstall.vue new file mode 100644 index 0000000..1bb0c71 --- /dev/null +++ b/dashboard/src/views/server/ServerInstall.vue @@ -0,0 +1,97 @@ + + + + + diff --git a/dashboard/src/views/server/ServerJobs.vue b/dashboard/src/views/server/ServerJobs.vue new file mode 100644 index 0000000..0b5f601 --- /dev/null +++ 
b/dashboard/src/views/server/ServerJobs.vue @@ -0,0 +1,36 @@ + + diff --git a/dashboard/src/views/server/ServerOverview.vue b/dashboard/src/views/server/ServerOverview.vue new file mode 100644 index 0000000..b4eebe5 --- /dev/null +++ b/dashboard/src/views/server/ServerOverview.vue @@ -0,0 +1,30 @@ + + + diff --git a/dashboard/src/views/server/ServerOverviewInfo.vue b/dashboard/src/views/server/ServerOverviewInfo.vue new file mode 100644 index 0000000..6cfbb08 --- /dev/null +++ b/dashboard/src/views/server/ServerOverviewInfo.vue @@ -0,0 +1,89 @@ + + diff --git a/dashboard/src/views/server/ServerOverviewPlan.vue b/dashboard/src/views/server/ServerOverviewPlan.vue new file mode 100644 index 0000000..1068071 --- /dev/null +++ b/dashboard/src/views/server/ServerOverviewPlan.vue @@ -0,0 +1,198 @@ + + diff --git a/dashboard/src/views/server/ServerPlays.vue b/dashboard/src/views/server/ServerPlays.vue new file mode 100644 index 0000000..231756d --- /dev/null +++ b/dashboard/src/views/server/ServerPlays.vue @@ -0,0 +1,35 @@ + + diff --git a/dashboard/src/views/server/ServerSettings.vue b/dashboard/src/views/server/ServerSettings.vue new file mode 100644 index 0000000..2371190 --- /dev/null +++ b/dashboard/src/views/server/ServerSettings.vue @@ -0,0 +1,23 @@ + + + diff --git a/dashboard/src/views/server/Servers.vue b/dashboard/src/views/server/Servers.vue new file mode 100644 index 0000000..9d4de14 --- /dev/null +++ b/dashboard/src/views/server/Servers.vue @@ -0,0 +1,332 @@ + + diff --git a/dashboard/src/views/settings/AccountAPI.vue b/dashboard/src/views/settings/AccountAPI.vue new file mode 100644 index 0000000..f2774c9 --- /dev/null +++ b/dashboard/src/views/settings/AccountAPI.vue @@ -0,0 +1,117 @@ + + diff --git a/dashboard/src/views/settings/AccountEmails.vue b/dashboard/src/views/settings/AccountEmails.vue new file mode 100644 index 0000000..eb08c35 --- /dev/null +++ b/dashboard/src/views/settings/AccountEmails.vue @@ -0,0 +1,81 @@ + + diff --git a/dashboard/src/views/settings/AccountGroups.vue b/dashboard/src/views/settings/AccountGroups.vue new file mode 100644 index 0000000..ff5d05a --- /dev/null +++ b/dashboard/src/views/settings/AccountGroups.vue @@ -0,0 +1,193 @@ + + diff --git a/dashboard/src/views/settings/AccountMembers.vue b/dashboard/src/views/settings/AccountMembers.vue new file mode 100644 index 0000000..536e8f4 --- /dev/null +++ b/dashboard/src/views/settings/AccountMembers.vue @@ -0,0 +1,174 @@ + + diff --git a/dashboard/src/views/settings/AccountPartner.vue b/dashboard/src/views/settings/AccountPartner.vue new file mode 100644 index 0000000..5a96aea --- /dev/null +++ b/dashboard/src/views/settings/AccountPartner.vue @@ -0,0 +1,123 @@ + + diff --git a/dashboard/src/views/settings/AccountProfile.vue b/dashboard/src/views/settings/AccountProfile.vue new file mode 100644 index 0000000..968602a --- /dev/null +++ b/dashboard/src/views/settings/AccountProfile.vue @@ -0,0 +1,275 @@ + + diff --git a/dashboard/src/views/settings/AccountReferral.vue b/dashboard/src/views/settings/AccountReferral.vue new file mode 100644 index 0000000..759cd14 --- /dev/null +++ b/dashboard/src/views/settings/AccountReferral.vue @@ -0,0 +1,42 @@ + + diff --git a/dashboard/src/views/settings/AccountSSHKey.vue b/dashboard/src/views/settings/AccountSSHKey.vue new file mode 100644 index 0000000..4600cd0 --- /dev/null +++ b/dashboard/src/views/settings/AccountSSHKey.vue @@ -0,0 +1,90 @@ + + diff --git a/dashboard/src/views/settings/AccountSettings.vue b/dashboard/src/views/settings/AccountSettings.vue new file 
mode 100644 index 0000000..1c71724 --- /dev/null +++ b/dashboard/src/views/settings/AccountSettings.vue @@ -0,0 +1,89 @@ + + + diff --git a/dashboard/src/views/settings/AccountTeam.vue b/dashboard/src/views/settings/AccountTeam.vue new file mode 100644 index 0000000..211a827 --- /dev/null +++ b/dashboard/src/views/settings/AccountTeam.vue @@ -0,0 +1,187 @@ + + + diff --git a/dashboard/src/views/settings/DeveloperSettings.vue b/dashboard/src/views/settings/DeveloperSettings.vue new file mode 100644 index 0000000..ec89814 --- /dev/null +++ b/dashboard/src/views/settings/DeveloperSettings.vue @@ -0,0 +1,27 @@ + + + diff --git a/dashboard/src/views/settings/EditPermissions.vue b/dashboard/src/views/settings/EditPermissions.vue new file mode 100644 index 0000000..9665805 --- /dev/null +++ b/dashboard/src/views/settings/EditPermissions.vue @@ -0,0 +1,187 @@ + + + diff --git a/dashboard/src/views/settings/FeatureFlags.vue b/dashboard/src/views/settings/FeatureFlags.vue new file mode 100644 index 0000000..eaadb31 --- /dev/null +++ b/dashboard/src/views/settings/FeatureFlags.vue @@ -0,0 +1,75 @@ + + diff --git a/dashboard/src/views/settings/ManageGroupMembers.vue b/dashboard/src/views/settings/ManageGroupMembers.vue new file mode 100644 index 0000000..89e0349 --- /dev/null +++ b/dashboard/src/views/settings/ManageGroupMembers.vue @@ -0,0 +1,117 @@ + + + diff --git a/dashboard/src/views/settings/PartnerCustomers.vue b/dashboard/src/views/settings/PartnerCustomers.vue new file mode 100644 index 0000000..7113837 --- /dev/null +++ b/dashboard/src/views/settings/PartnerCustomers.vue @@ -0,0 +1,56 @@ + + diff --git a/dashboard/src/views/settings/PartnerReferral.vue b/dashboard/src/views/settings/PartnerReferral.vue new file mode 100644 index 0000000..23fd75e --- /dev/null +++ b/dashboard/src/views/settings/PartnerReferral.vue @@ -0,0 +1,37 @@ + + + diff --git a/dashboard/src/views/settings/PartnerRequestStatus.vue b/dashboard/src/views/settings/PartnerRequestStatus.vue new file mode 100644 index 0000000..0a83862 --- /dev/null +++ b/dashboard/src/views/settings/PartnerRequestStatus.vue @@ -0,0 +1,113 @@ + + diff --git a/dashboard/src/views/settings/PartnerSettings.vue b/dashboard/src/views/settings/PartnerSettings.vue new file mode 100644 index 0000000..0cfe176 --- /dev/null +++ b/dashboard/src/views/settings/PartnerSettings.vue @@ -0,0 +1,30 @@ + + + diff --git a/dashboard/src/views/settings/ProfileSettings.vue b/dashboard/src/views/settings/ProfileSettings.vue new file mode 100644 index 0000000..0245999 --- /dev/null +++ b/dashboard/src/views/settings/ProfileSettings.vue @@ -0,0 +1,27 @@ + + + diff --git a/dashboard/src/views/settings/TeamSettings.vue b/dashboard/src/views/settings/TeamSettings.vue new file mode 100644 index 0000000..7b10e25 --- /dev/null +++ b/dashboard/src/views/settings/TeamSettings.vue @@ -0,0 +1,30 @@ + + + diff --git a/dashboard/src/views/site/AppSiteSetup.vue b/dashboard/src/views/site/AppSiteSetup.vue new file mode 100644 index 0000000..ee6afe8 --- /dev/null +++ b/dashboard/src/views/site/AppSiteSetup.vue @@ -0,0 +1,155 @@ + + + diff --git a/dashboard/src/views/site/DatabaseAccessDialog.vue b/dashboard/src/views/site/DatabaseAccessDialog.vue new file mode 100644 index 0000000..4f9ad98 --- /dev/null +++ b/dashboard/src/views/site/DatabaseAccessDialog.vue @@ -0,0 +1,322 @@ + + + diff --git a/dashboard/src/views/site/NewSite.vue b/dashboard/src/views/site/NewSite.vue new file mode 100644 index 0000000..0a26562 --- /dev/null +++ b/dashboard/src/views/site/NewSite.vue @@ -0,0 +1,334 
@@ + + + diff --git a/dashboard/src/views/site/NewSiteApps.vue b/dashboard/src/views/site/NewSiteApps.vue new file mode 100644 index 0000000..ce000e3 --- /dev/null +++ b/dashboard/src/views/site/NewSiteApps.vue @@ -0,0 +1,241 @@ + + diff --git a/dashboard/src/views/site/NewSiteHostname.vue b/dashboard/src/views/site/NewSiteHostname.vue new file mode 100644 index 0000000..0198920 --- /dev/null +++ b/dashboard/src/views/site/NewSiteHostname.vue @@ -0,0 +1,84 @@ + + diff --git a/dashboard/src/views/site/NewSitePlans.vue b/dashboard/src/views/site/NewSitePlans.vue new file mode 100644 index 0000000..9098353 --- /dev/null +++ b/dashboard/src/views/site/NewSitePlans.vue @@ -0,0 +1,53 @@ + + diff --git a/dashboard/src/views/site/NewSiteRestore.vue b/dashboard/src/views/site/NewSiteRestore.vue new file mode 100644 index 0000000..edb98fb --- /dev/null +++ b/dashboard/src/views/site/NewSiteRestore.vue @@ -0,0 +1,267 @@ + + diff --git a/dashboard/src/views/site/Site.vue b/dashboard/src/views/site/Site.vue new file mode 100644 index 0000000..b4c41e0 --- /dev/null +++ b/dashboard/src/views/site/Site.vue @@ -0,0 +1,503 @@ + + + diff --git a/dashboard/src/views/site/SiteActivity.vue b/dashboard/src/views/site/SiteActivity.vue new file mode 100644 index 0000000..7911b7a --- /dev/null +++ b/dashboard/src/views/site/SiteActivity.vue @@ -0,0 +1,110 @@ + + + diff --git a/dashboard/src/views/site/SiteAlerts.vue b/dashboard/src/views/site/SiteAlerts.vue new file mode 100644 index 0000000..9b5d348 --- /dev/null +++ b/dashboard/src/views/site/SiteAlerts.vue @@ -0,0 +1,239 @@ + + + diff --git a/dashboard/src/views/site/SiteAnalyticsUptime.vue b/dashboard/src/views/site/SiteAnalyticsUptime.vue new file mode 100644 index 0000000..018155c --- /dev/null +++ b/dashboard/src/views/site/SiteAnalyticsUptime.vue @@ -0,0 +1,66 @@ + + diff --git a/dashboard/src/views/site/SiteAppsAndSubscriptions.vue b/dashboard/src/views/site/SiteAppsAndSubscriptions.vue new file mode 100644 index 0000000..39c0a93 --- /dev/null +++ b/dashboard/src/views/site/SiteAppsAndSubscriptions.vue @@ -0,0 +1,448 @@ + + diff --git a/dashboard/src/views/site/SiteAutoUpdate.vue b/dashboard/src/views/site/SiteAutoUpdate.vue new file mode 100644 index 0000000..e86bb37 --- /dev/null +++ b/dashboard/src/views/site/SiteAutoUpdate.vue @@ -0,0 +1,329 @@ + + + diff --git a/dashboard/src/views/site/SiteBinaryLogs.vue b/dashboard/src/views/site/SiteBinaryLogs.vue new file mode 100644 index 0000000..e9331dc --- /dev/null +++ b/dashboard/src/views/site/SiteBinaryLogs.vue @@ -0,0 +1,152 @@ + + + diff --git a/dashboard/src/views/site/SiteChangeGroupDialog.vue b/dashboard/src/views/site/SiteChangeGroupDialog.vue new file mode 100644 index 0000000..aad9a7d --- /dev/null +++ b/dashboard/src/views/site/SiteChangeGroupDialog.vue @@ -0,0 +1,177 @@ + + + diff --git a/dashboard/src/views/site/SiteChangeRegionDialog.vue b/dashboard/src/views/site/SiteChangeRegionDialog.vue new file mode 100644 index 0000000..13703b7 --- /dev/null +++ b/dashboard/src/views/site/SiteChangeRegionDialog.vue @@ -0,0 +1,154 @@ + + + diff --git a/dashboard/src/views/site/SiteChangeServerDialog.vue b/dashboard/src/views/site/SiteChangeServerDialog.vue new file mode 100644 index 0000000..f6ad894 --- /dev/null +++ b/dashboard/src/views/site/SiteChangeServerDialog.vue @@ -0,0 +1,200 @@ + + + diff --git a/dashboard/src/views/site/SiteCharts.vue b/dashboard/src/views/site/SiteCharts.vue new file mode 100644 index 0000000..c58c2ed --- /dev/null +++ b/dashboard/src/views/site/SiteCharts.vue @@ -0,0 +1,318 @@ 
+ + + diff --git a/dashboard/src/views/site/SiteConfig.vue b/dashboard/src/views/site/SiteConfig.vue new file mode 100644 index 0000000..660723e --- /dev/null +++ b/dashboard/src/views/site/SiteConfig.vue @@ -0,0 +1,47 @@ + + + diff --git a/dashboard/src/views/site/SiteConsole.vue b/dashboard/src/views/site/SiteConsole.vue new file mode 100644 index 0000000..92ebc2d --- /dev/null +++ b/dashboard/src/views/site/SiteConsole.vue @@ -0,0 +1,21 @@ + + + diff --git a/dashboard/src/views/site/SiteDatabase.vue b/dashboard/src/views/site/SiteDatabase.vue new file mode 100644 index 0000000..50f0a15 --- /dev/null +++ b/dashboard/src/views/site/SiteDatabase.vue @@ -0,0 +1,17 @@ + + + diff --git a/dashboard/src/views/site/SiteDatabaseBackups.vue b/dashboard/src/views/site/SiteDatabaseBackups.vue new file mode 100644 index 0000000..7547fd2 --- /dev/null +++ b/dashboard/src/views/site/SiteDatabaseBackups.vue @@ -0,0 +1,331 @@ + + diff --git a/dashboard/src/views/site/SiteDatabaseRestore.vue b/dashboard/src/views/site/SiteDatabaseRestore.vue new file mode 100644 index 0000000..b039d63 --- /dev/null +++ b/dashboard/src/views/site/SiteDatabaseRestore.vue @@ -0,0 +1,411 @@ + + + diff --git a/dashboard/src/views/site/SiteDeadlockReport.vue b/dashboard/src/views/site/SiteDeadlockReport.vue new file mode 100644 index 0000000..6988a15 --- /dev/null +++ b/dashboard/src/views/site/SiteDeadlockReport.vue @@ -0,0 +1,147 @@ + + diff --git a/dashboard/src/views/site/SiteDrop.vue b/dashboard/src/views/site/SiteDrop.vue new file mode 100644 index 0000000..c5fe3ba --- /dev/null +++ b/dashboard/src/views/site/SiteDrop.vue @@ -0,0 +1,82 @@ + + + diff --git a/dashboard/src/views/site/SiteInstalling.vue b/dashboard/src/views/site/SiteInstalling.vue new file mode 100644 index 0000000..9820698 --- /dev/null +++ b/dashboard/src/views/site/SiteInstalling.vue @@ -0,0 +1,88 @@ + + + + + diff --git a/dashboard/src/views/site/SiteJobs.vue b/dashboard/src/views/site/SiteJobs.vue new file mode 100644 index 0000000..bf7bbdc --- /dev/null +++ b/dashboard/src/views/site/SiteJobs.vue @@ -0,0 +1,36 @@ + + diff --git a/dashboard/src/views/site/SiteList.vue b/dashboard/src/views/site/SiteList.vue new file mode 100644 index 0000000..15944f5 --- /dev/null +++ b/dashboard/src/views/site/SiteList.vue @@ -0,0 +1,60 @@ + + + diff --git a/dashboard/src/views/site/SiteLogs.vue b/dashboard/src/views/site/SiteLogs.vue new file mode 100644 index 0000000..32b91be --- /dev/null +++ b/dashboard/src/views/site/SiteLogs.vue @@ -0,0 +1,55 @@ + + + diff --git a/dashboard/src/views/site/SiteLogsDetail.vue b/dashboard/src/views/site/SiteLogsDetail.vue new file mode 100644 index 0000000..dfbffdc --- /dev/null +++ b/dashboard/src/views/site/SiteLogsDetail.vue @@ -0,0 +1,71 @@ + + diff --git a/dashboard/src/views/site/SiteMariaDBProcessList.vue b/dashboard/src/views/site/SiteMariaDBProcessList.vue new file mode 100644 index 0000000..2bf1055 --- /dev/null +++ b/dashboard/src/views/site/SiteMariaDBProcessList.vue @@ -0,0 +1,110 @@ + + diff --git a/dashboard/src/views/site/SiteMariaDBSlowQueries.vue b/dashboard/src/views/site/SiteMariaDBSlowQueries.vue new file mode 100644 index 0000000..5b7fa79 --- /dev/null +++ b/dashboard/src/views/site/SiteMariaDBSlowQueries.vue @@ -0,0 +1,150 @@ + + diff --git a/dashboard/src/views/site/SiteMonitorsList.vue b/dashboard/src/views/site/SiteMonitorsList.vue new file mode 100644 index 0000000..4c92498 --- /dev/null +++ b/dashboard/src/views/site/SiteMonitorsList.vue @@ -0,0 +1,61 @@ + + diff --git 
a/dashboard/src/views/site/SiteOverview.vue b/dashboard/src/views/site/SiteOverview.vue new file mode 100644 index 0000000..fe2ae46 --- /dev/null +++ b/dashboard/src/views/site/SiteOverview.vue @@ -0,0 +1,25 @@ + + + diff --git a/dashboard/src/views/site/SiteOverviewCPUUsage.vue b/dashboard/src/views/site/SiteOverviewCPUUsage.vue new file mode 100644 index 0000000..c012647 --- /dev/null +++ b/dashboard/src/views/site/SiteOverviewCPUUsage.vue @@ -0,0 +1,71 @@ + + diff --git a/dashboard/src/views/site/SiteOverviewDomains.vue b/dashboard/src/views/site/SiteOverviewDomains.vue new file mode 100644 index 0000000..aaee96a --- /dev/null +++ b/dashboard/src/views/site/SiteOverviewDomains.vue @@ -0,0 +1,395 @@ + + + diff --git a/dashboard/src/views/site/SiteOverviewInfo.vue b/dashboard/src/views/site/SiteOverviewInfo.vue new file mode 100644 index 0000000..92c211a --- /dev/null +++ b/dashboard/src/views/site/SiteOverviewInfo.vue @@ -0,0 +1,212 @@ + + diff --git a/dashboard/src/views/site/SiteOverviewPlan.vue b/dashboard/src/views/site/SiteOverviewPlan.vue new file mode 100644 index 0000000..3986132 --- /dev/null +++ b/dashboard/src/views/site/SiteOverviewPlan.vue @@ -0,0 +1,284 @@ + + diff --git a/dashboard/src/views/site/SitePlansDialog.vue b/dashboard/src/views/site/SitePlansDialog.vue new file mode 100644 index 0000000..f96e115 --- /dev/null +++ b/dashboard/src/views/site/SitePlansDialog.vue @@ -0,0 +1,140 @@ + + + diff --git a/dashboard/src/views/site/SiteRequestLogs.vue b/dashboard/src/views/site/SiteRequestLogs.vue new file mode 100644 index 0000000..5ca9d4f --- /dev/null +++ b/dashboard/src/views/site/SiteRequestLogs.vue @@ -0,0 +1,155 @@ + + + diff --git a/dashboard/src/views/site/SiteSettings.vue b/dashboard/src/views/site/SiteSettings.vue new file mode 100644 index 0000000..5a71826 --- /dev/null +++ b/dashboard/src/views/site/SiteSettings.vue @@ -0,0 +1,23 @@ + + + diff --git a/dashboard/src/views/site/SiteTransferDialog.vue b/dashboard/src/views/site/SiteTransferDialog.vue new file mode 100644 index 0000000..e61e2c1 --- /dev/null +++ b/dashboard/src/views/site/SiteTransferDialog.vue @@ -0,0 +1,86 @@ + + + diff --git a/dashboard/src/views/site/SiteVersionUpgradeDialog.vue b/dashboard/src/views/site/SiteVersionUpgradeDialog.vue new file mode 100644 index 0000000..d8eab29 --- /dev/null +++ b/dashboard/src/views/site/SiteVersionUpgradeDialog.vue @@ -0,0 +1,237 @@ + + + diff --git a/dashboard/src/views/site/Sites.vue b/dashboard/src/views/site/Sites.vue new file mode 100644 index 0000000..34ff82d --- /dev/null +++ b/dashboard/src/views/site/Sites.vue @@ -0,0 +1,572 @@ + + diff --git a/dashboard/src/views/spaces/CodeServer.vue b/dashboard/src/views/spaces/CodeServer.vue new file mode 100644 index 0000000..345a6bf --- /dev/null +++ b/dashboard/src/views/spaces/CodeServer.vue @@ -0,0 +1,178 @@ + + + diff --git a/dashboard/src/views/spaces/CodeServerJobs.vue b/dashboard/src/views/spaces/CodeServerJobs.vue new file mode 100644 index 0000000..68bd95e --- /dev/null +++ b/dashboard/src/views/spaces/CodeServerJobs.vue @@ -0,0 +1,36 @@ + + diff --git a/dashboard/src/views/spaces/CodeServerOverview.vue b/dashboard/src/views/spaces/CodeServerOverview.vue new file mode 100644 index 0000000..cffef02 --- /dev/null +++ b/dashboard/src/views/spaces/CodeServerOverview.vue @@ -0,0 +1,21 @@ + + + diff --git a/dashboard/src/views/spaces/CodeServerOverviewInfo.vue b/dashboard/src/views/spaces/CodeServerOverviewInfo.vue new file mode 100644 index 0000000..5036a2b --- /dev/null +++ 
b/dashboard/src/views/spaces/CodeServerOverviewInfo.vue @@ -0,0 +1,167 @@ + + + diff --git a/dashboard/src/views/spaces/CodeServersList.vue b/dashboard/src/views/spaces/CodeServersList.vue new file mode 100644 index 0000000..9dd5ec7 --- /dev/null +++ b/dashboard/src/views/spaces/CodeServersList.vue @@ -0,0 +1,52 @@ + + + diff --git a/dashboard/src/views/spaces/CreateCodeServerDialog.vue b/dashboard/src/views/spaces/CreateCodeServerDialog.vue new file mode 100644 index 0000000..9138d42 --- /dev/null +++ b/dashboard/src/views/spaces/CreateCodeServerDialog.vue @@ -0,0 +1,122 @@ + + + diff --git a/dashboard/src/views/spaces/NewCodeServer.vue b/dashboard/src/views/spaces/NewCodeServer.vue new file mode 100644 index 0000000..ae6fdc2 --- /dev/null +++ b/dashboard/src/views/spaces/NewCodeServer.vue @@ -0,0 +1,131 @@ + + + diff --git a/dashboard/src/views/spaces/NewCodeServerBench.vue b/dashboard/src/views/spaces/NewCodeServerBench.vue new file mode 100644 index 0000000..66306ae --- /dev/null +++ b/dashboard/src/views/spaces/NewCodeServerBench.vue @@ -0,0 +1,71 @@ + + + diff --git a/dashboard/src/views/spaces/NewCodeServerGroup.vue b/dashboard/src/views/spaces/NewCodeServerGroup.vue new file mode 100644 index 0000000..aee1c59 --- /dev/null +++ b/dashboard/src/views/spaces/NewCodeServerGroup.vue @@ -0,0 +1,77 @@ + + + diff --git a/dashboard/src/views/spaces/NewCodeServerHostname.vue b/dashboard/src/views/spaces/NewCodeServerHostname.vue new file mode 100644 index 0000000..21a1853 --- /dev/null +++ b/dashboard/src/views/spaces/NewCodeServerHostname.vue @@ -0,0 +1,81 @@ + + diff --git a/dashboard/src/views/spaces/Spaces.vue b/dashboard/src/views/spaces/Spaces.vue new file mode 100644 index 0000000..6b9f5c8 --- /dev/null +++ b/dashboard/src/views/spaces/Spaces.vue @@ -0,0 +1,208 @@ + + + diff --git a/dashboard/src/views/spaces/SpacesList.vue b/dashboard/src/views/spaces/SpacesList.vue new file mode 100644 index 0000000..a86923e --- /dev/null +++ b/dashboard/src/views/spaces/SpacesList.vue @@ -0,0 +1,18 @@ + + + diff --git a/dashboard/src2/App.vue b/dashboard/src2/App.vue new file mode 100644 index 0000000..cf36025 --- /dev/null +++ b/dashboard/src2/App.vue @@ -0,0 +1,98 @@ + + + + + diff --git a/dashboard/src2/components/ActionButton.vue b/dashboard/src2/components/ActionButton.vue new file mode 100644 index 0000000..b7321dd --- /dev/null +++ b/dashboard/src2/components/ActionButton.vue @@ -0,0 +1,50 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/ActiveServersDialog.vue b/dashboard/src2/components/ActiveServersDialog.vue new file mode 100644 index 0000000..c0776ab --- /dev/null +++ b/dashboard/src2/components/ActiveServersDialog.vue @@ -0,0 +1,43 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/AddDomainDialog.vue b/dashboard/src2/components/AddDomainDialog.vue new file mode 100644 index 0000000..b714010 --- /dev/null +++ b/dashboard/src2/components/AddDomainDialog.vue @@ -0,0 +1,182 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/AddTagDialog.vue b/dashboard/src2/components/AddTagDialog.vue new file mode 100644 index 0000000..804a65a --- /dev/null +++ b/dashboard/src2/components/AddTagDialog.vue @@ -0,0 +1,91 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/AddressForm.vue b/dashboard/src2/components/AddressForm.vue new file mode 100644 index 0000000..bbaeadd --- /dev/null +++ b/dashboard/src2/components/AddressForm.vue @@ -0,0 +1,175 @@ + + + \ No newline at end of file diff --git 
a/dashboard/src2/components/AddressableErrorDialog.vue b/dashboard/src2/components/AddressableErrorDialog.vue new file mode 100644 index 0000000..b682970 --- /dev/null +++ b/dashboard/src2/components/AddressableErrorDialog.vue @@ -0,0 +1,117 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/AlertAddPaymentMode.vue b/dashboard/src2/components/AlertAddPaymentMode.vue new file mode 100644 index 0000000..e33e0a2 --- /dev/null +++ b/dashboard/src2/components/AlertAddPaymentMode.vue @@ -0,0 +1,34 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/AlertAddressDetails.vue b/dashboard/src2/components/AlertAddressDetails.vue new file mode 100644 index 0000000..ff039d9 --- /dev/null +++ b/dashboard/src2/components/AlertAddressDetails.vue @@ -0,0 +1,34 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/AlertAddressableError.vue b/dashboard/src2/components/AlertAddressableError.vue new file mode 100644 index 0000000..0090036 --- /dev/null +++ b/dashboard/src2/components/AlertAddressableError.vue @@ -0,0 +1,24 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/AlertBanner.vue b/dashboard/src2/components/AlertBanner.vue new file mode 100644 index 0000000..7db081c --- /dev/null +++ b/dashboard/src2/components/AlertBanner.vue @@ -0,0 +1,46 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/AlertCardExpired.vue b/dashboard/src2/components/AlertCardExpired.vue new file mode 100644 index 0000000..e1b29bf --- /dev/null +++ b/dashboard/src2/components/AlertCardExpired.vue @@ -0,0 +1,18 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/AlertMandateInfo.vue b/dashboard/src2/components/AlertMandateInfo.vue new file mode 100644 index 0000000..85bd0b4 --- /dev/null +++ b/dashboard/src2/components/AlertMandateInfo.vue @@ -0,0 +1,34 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/AlertUnpaidInvoices.vue b/dashboard/src2/components/AlertUnpaidInvoices.vue new file mode 100644 index 0000000..3a4f883 --- /dev/null +++ b/dashboard/src2/components/AlertUnpaidInvoices.vue @@ -0,0 +1,31 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/AppSidebar.vue b/dashboard/src2/components/AppSidebar.vue new file mode 100644 index 0000000..d0098d6 --- /dev/null +++ b/dashboard/src2/components/AppSidebar.vue @@ -0,0 +1,98 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/AppSidebarItem.vue b/dashboard/src2/components/AppSidebarItem.vue new file mode 100644 index 0000000..7a76e54 --- /dev/null +++ b/dashboard/src2/components/AppSidebarItem.vue @@ -0,0 +1,31 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/AppSidebarItemGroup.vue b/dashboard/src2/components/AppSidebarItemGroup.vue new file mode 100644 index 0000000..bcb5357 --- /dev/null +++ b/dashboard/src2/components/AppSidebarItemGroup.vue @@ -0,0 +1,55 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/AppTrialSubscriptionDialog.vue b/dashboard/src2/components/AppTrialSubscriptionDialog.vue new file mode 100644 index 0000000..ef1cb3a --- /dev/null +++ b/dashboard/src2/components/AppTrialSubscriptionDialog.vue @@ -0,0 +1,365 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/Autocomplete.vue b/dashboard/src2/components/Autocomplete.vue new file mode 100644 index 0000000..93121a1 --- /dev/null +++ b/dashboard/src2/components/Autocomplete.vue @@ -0,0 +1,214 @@ + + + \ No newline at end of file diff 
--git a/dashboard/src2/components/AvatarGroup.vue b/dashboard/src2/components/AvatarGroup.vue new file mode 100644 index 0000000..b7a48aa --- /dev/null +++ b/dashboard/src2/components/AvatarGroup.vue @@ -0,0 +1,18 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/BuyPrepaidCreditsAlipay.vue b/dashboard/src2/components/BuyPrepaidCreditsAlipay.vue new file mode 100644 index 0000000..37ea8e0 --- /dev/null +++ b/dashboard/src2/components/BuyPrepaidCreditsAlipay.vue @@ -0,0 +1,117 @@ + + + + + \ No newline at end of file diff --git a/dashboard/src2/components/BuyPrepaidCreditsForm.vue b/dashboard/src2/components/BuyPrepaidCreditsForm.vue new file mode 100644 index 0000000..503aec6 --- /dev/null +++ b/dashboard/src2/components/BuyPrepaidCreditsForm.vue @@ -0,0 +1,243 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/BuyPrepaidCreditsRazorpay.vue b/dashboard/src2/components/BuyPrepaidCreditsRazorpay.vue new file mode 100644 index 0000000..b1b3f10 --- /dev/null +++ b/dashboard/src2/components/BuyPrepaidCreditsRazorpay.vue @@ -0,0 +1,140 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/BuyPrepaidCreditsStripe.vue b/dashboard/src2/components/BuyPrepaidCreditsStripe.vue new file mode 100644 index 0000000..5cdaaba --- /dev/null +++ b/dashboard/src2/components/BuyPrepaidCreditsStripe.vue @@ -0,0 +1,182 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/BuyPrepaidCreditsWeChatPay.vue b/dashboard/src2/components/BuyPrepaidCreditsWeChatPay.vue new file mode 100644 index 0000000..517b442 --- /dev/null +++ b/dashboard/src2/components/BuyPrepaidCreditsWeChatPay.vue @@ -0,0 +1,267 @@ + + + + + \ No newline at end of file diff --git a/dashboard/src2/components/ChurnFeedbackDialog.vue b/dashboard/src2/components/ChurnFeedbackDialog.vue new file mode 100644 index 0000000..f02c761 --- /dev/null +++ b/dashboard/src2/components/ChurnFeedbackDialog.vue @@ -0,0 +1,134 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/ClickToCopyField.vue b/dashboard/src2/components/ClickToCopyField.vue new file mode 100644 index 0000000..cdd8809 --- /dev/null +++ b/dashboard/src2/components/ClickToCopyField.vue @@ -0,0 +1,59 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/ConfigEditorDialog.vue b/dashboard/src2/components/ConfigEditorDialog.vue new file mode 100644 index 0000000..172e870 --- /dev/null +++ b/dashboard/src2/components/ConfigEditorDialog.vue @@ -0,0 +1,196 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/ConfigPreviewDialog.vue b/dashboard/src2/components/ConfigPreviewDialog.vue new file mode 100644 index 0000000..71771e9 --- /dev/null +++ b/dashboard/src2/components/ConfigPreviewDialog.vue @@ -0,0 +1,45 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/DateTimeControl.vue b/dashboard/src2/components/DateTimeControl.vue new file mode 100644 index 0000000..f2463cd --- /dev/null +++ b/dashboard/src2/components/DateTimeControl.vue @@ -0,0 +1,127 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/DialogWrapper.vue b/dashboard/src2/components/DialogWrapper.vue new file mode 100644 index 0000000..e81aafe --- /dev/null +++ b/dashboard/src2/components/DialogWrapper.vue @@ -0,0 +1,33 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/DismissableBanner.vue b/dashboard/src2/components/DismissableBanner.vue new file mode 100644 index 0000000..138b874 --- /dev/null +++ 
b/dashboard/src2/components/DismissableBanner.vue @@ -0,0 +1,62 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/EnvironmentVariableEditorDialog.vue b/dashboard/src2/components/EnvironmentVariableEditorDialog.vue new file mode 100644 index 0000000..0ab3229 --- /dev/null +++ b/dashboard/src2/components/EnvironmentVariableEditorDialog.vue @@ -0,0 +1,91 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/FilterControl.vue b/dashboard/src2/components/FilterControl.vue new file mode 100644 index 0000000..7a360a3 --- /dev/null +++ b/dashboard/src2/components/FilterControl.vue @@ -0,0 +1,26 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/FoldStep.vue b/dashboard/src2/components/FoldStep.vue new file mode 100644 index 0000000..cb97cc5 --- /dev/null +++ b/dashboard/src2/components/FoldStep.vue @@ -0,0 +1,107 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/GenericDialog.vue b/dashboard/src2/components/GenericDialog.vue new file mode 100644 index 0000000..a655482 --- /dev/null +++ b/dashboard/src2/components/GenericDialog.vue @@ -0,0 +1,28 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/GenericDialogField.vue b/dashboard/src2/components/GenericDialogField.vue new file mode 100644 index 0000000..0cd655b --- /dev/null +++ b/dashboard/src2/components/GenericDialogField.vue @@ -0,0 +1,42 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/GenericList.vue b/dashboard/src2/components/GenericList.vue new file mode 100644 index 0000000..ca2290f --- /dev/null +++ b/dashboard/src2/components/GenericList.vue @@ -0,0 +1,111 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/GitHubAppSelector.vue b/dashboard/src2/components/GitHubAppSelector.vue new file mode 100644 index 0000000..2e87504 --- /dev/null +++ b/dashboard/src2/components/GitHubAppSelector.vue @@ -0,0 +1,212 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/Header.vue b/dashboard/src2/components/Header.vue new file mode 100644 index 0000000..7ef4457 --- /dev/null +++ b/dashboard/src2/components/Header.vue @@ -0,0 +1,19 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/HomeSummary.vue b/dashboard/src2/components/HomeSummary.vue new file mode 100644 index 0000000..496bc74 --- /dev/null +++ b/dashboard/src2/components/HomeSummary.vue @@ -0,0 +1,57 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/InvoiceTable.vue b/dashboard/src2/components/InvoiceTable.vue new file mode 100644 index 0000000..0dc41ab --- /dev/null +++ b/dashboard/src2/components/InvoiceTable.vue @@ -0,0 +1,190 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/JobStep.vue b/dashboard/src2/components/JobStep.vue new file mode 100644 index 0000000..c5b4f61 --- /dev/null +++ b/dashboard/src2/components/JobStep.vue @@ -0,0 +1,24 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/LinkControl.vue b/dashboard/src2/components/LinkControl.vue new file mode 100644 index 0000000..00272d7 --- /dev/null +++ b/dashboard/src2/components/LinkControl.vue @@ -0,0 +1,89 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/ListSelection.vue b/dashboard/src2/components/ListSelection.vue new file mode 100644 index 0000000..3eab40e --- /dev/null +++ b/dashboard/src2/components/ListSelection.vue @@ -0,0 +1,9 @@ + + \ No newline at end of file diff --git 
a/dashboard/src2/components/LoadingIndicator.vue b/dashboard/src2/components/LoadingIndicator.vue new file mode 100644 index 0000000..0f5bb72 --- /dev/null +++ b/dashboard/src2/components/LoadingIndicator.vue @@ -0,0 +1,30 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/ManageSitePlansDialog.vue b/dashboard/src2/components/ManageSitePlansDialog.vue new file mode 100644 index 0000000..db72a4e --- /dev/null +++ b/dashboard/src2/components/ManageSitePlansDialog.vue @@ -0,0 +1,253 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/MarketplaceAppListing.vue b/dashboard/src2/components/MarketplaceAppListing.vue new file mode 100644 index 0000000..2fdf699 --- /dev/null +++ b/dashboard/src2/components/MarketplaceAppListing.vue @@ -0,0 +1,326 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/MobileNav.vue b/dashboard/src2/components/MobileNav.vue new file mode 100644 index 0000000..7f1e52b --- /dev/null +++ b/dashboard/src2/components/MobileNav.vue @@ -0,0 +1,80 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/MobileNavItem.vue b/dashboard/src2/components/MobileNavItem.vue new file mode 100644 index 0000000..7c73b95 --- /dev/null +++ b/dashboard/src2/components/MobileNavItem.vue @@ -0,0 +1,29 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/MobileNavItemGroup.vue b/dashboard/src2/components/MobileNavItemGroup.vue new file mode 100644 index 0000000..776764c --- /dev/null +++ b/dashboard/src2/components/MobileNavItemGroup.vue @@ -0,0 +1,49 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/NavigationItems.vue b/dashboard/src2/components/NavigationItems.vue new file mode 100644 index 0000000..070b295 --- /dev/null +++ b/dashboard/src2/components/NavigationItems.vue @@ -0,0 +1,201 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/NewAppDialog.vue b/dashboard/src2/components/NewAppDialog.vue new file mode 100644 index 0000000..fca84c8 --- /dev/null +++ b/dashboard/src2/components/NewAppDialog.vue @@ -0,0 +1,283 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/ObjectList.vue b/dashboard/src2/components/ObjectList.vue new file mode 100644 index 0000000..6bc7978 --- /dev/null +++ b/dashboard/src2/components/ObjectList.vue @@ -0,0 +1,490 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/ObjectListCell.vue b/dashboard/src2/components/ObjectListCell.vue new file mode 100644 index 0000000..83931bf --- /dev/null +++ b/dashboard/src2/components/ObjectListCell.vue @@ -0,0 +1,143 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/ObjectListFilters.vue b/dashboard/src2/components/ObjectListFilters.vue new file mode 100644 index 0000000..7ae0c75 --- /dev/null +++ b/dashboard/src2/components/ObjectListFilters.vue @@ -0,0 +1,90 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/Onboarding.vue b/dashboard/src2/components/Onboarding.vue new file mode 100644 index 0000000..9bec67e --- /dev/null +++ b/dashboard/src2/components/Onboarding.vue @@ -0,0 +1,196 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/OnboardingAppSelector.vue b/dashboard/src2/components/OnboardingAppSelector.vue new file mode 100644 index 0000000..e2207a8 --- /dev/null +++ b/dashboard/src2/components/OnboardingAppSelector.vue @@ -0,0 +1,57 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/OnboardingWithoutPayment.vue 
b/dashboard/src2/components/OnboardingWithoutPayment.vue new file mode 100644 index 0000000..6c5d045 --- /dev/null +++ b/dashboard/src2/components/OnboardingWithoutPayment.vue @@ -0,0 +1,65 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/OrderCheckout.vue b/dashboard/src2/components/OrderCheckout.vue new file mode 100644 index 0000000..d7f5a3b --- /dev/null +++ b/dashboard/src2/components/OrderCheckout.vue @@ -0,0 +1,434 @@ + + + + + \ No newline at end of file diff --git a/dashboard/src2/components/PayoutTable.vue b/dashboard/src2/components/PayoutTable.vue new file mode 100644 index 0000000..a8273c0 --- /dev/null +++ b/dashboard/src2/components/PayoutTable.vue @@ -0,0 +1,91 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/PlansCards.vue b/dashboard/src2/components/PlansCards.vue new file mode 100644 index 0000000..0c22dbe --- /dev/null +++ b/dashboard/src2/components/PlansCards.vue @@ -0,0 +1,88 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/SaaSSignupFields.vue b/dashboard/src2/components/SaaSSignupFields.vue new file mode 100644 index 0000000..df9f802 --- /dev/null +++ b/dashboard/src2/components/SaaSSignupFields.vue @@ -0,0 +1,147 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/SiteActionCell.vue b/dashboard/src2/components/SiteActionCell.vue new file mode 100644 index 0000000..86a0c52 --- /dev/null +++ b/dashboard/src2/components/SiteActionCell.vue @@ -0,0 +1,312 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/SiteActions.vue b/dashboard/src2/components/SiteActions.vue new file mode 100644 index 0000000..a888e76 --- /dev/null +++ b/dashboard/src2/components/SiteActions.vue @@ -0,0 +1,55 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/SiteDailyUsage.vue b/dashboard/src2/components/SiteDailyUsage.vue new file mode 100644 index 0000000..ac01a5f --- /dev/null +++ b/dashboard/src2/components/SiteDailyUsage.vue @@ -0,0 +1,75 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/SiteDatabaseAccessDialog.vue b/dashboard/src2/components/SiteDatabaseAccessDialog.vue new file mode 100644 index 0000000..2ce1a92 --- /dev/null +++ b/dashboard/src2/components/SiteDatabaseAccessDialog.vue @@ -0,0 +1,263 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/SiteDatabaseRestoreDialog.vue b/dashboard/src2/components/SiteDatabaseRestoreDialog.vue new file mode 100644 index 0000000..11a8772 --- /dev/null +++ b/dashboard/src2/components/SiteDatabaseRestoreDialog.vue @@ -0,0 +1,104 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/SiteOverview.vue b/dashboard/src2/components/SiteOverview.vue new file mode 100644 index 0000000..ea0b4a2 --- /dev/null +++ b/dashboard/src2/components/SiteOverview.vue @@ -0,0 +1,389 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/SitePlansCards.vue b/dashboard/src2/components/SitePlansCards.vue new file mode 100644 index 0000000..4713b12 --- /dev/null +++ b/dashboard/src2/components/SitePlansCards.vue @@ -0,0 +1,123 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/SiteRenewalDialog.vue b/dashboard/src2/components/SiteRenewalDialog.vue new file mode 100644 index 0000000..3284ae2 --- /dev/null +++ b/dashboard/src2/components/SiteRenewalDialog.vue @@ -0,0 +1,574 @@ + + + + + \ No newline at end of file diff --git a/dashboard/src2/components/SiteUpdateDialog.vue 
b/dashboard/src2/components/SiteUpdateDialog.vue new file mode 100644 index 0000000..48e1f0a --- /dev/null +++ b/dashboard/src2/components/SiteUpdateDialog.vue @@ -0,0 +1,229 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/StatusIndicator.vue b/dashboard/src2/components/StatusIndicator.vue new file mode 100644 index 0000000..7491c00 --- /dev/null +++ b/dashboard/src2/components/StatusIndicator.vue @@ -0,0 +1,40 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/StripeCard.vue b/dashboard/src2/components/StripeCard.vue new file mode 100644 index 0000000..9bf38e9 --- /dev/null +++ b/dashboard/src2/components/StripeCard.vue @@ -0,0 +1,346 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/StripeCardDialog.vue b/dashboard/src2/components/StripeCardDialog.vue new file mode 100644 index 0000000..54cf406 --- /dev/null +++ b/dashboard/src2/components/StripeCardDialog.vue @@ -0,0 +1,40 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/Summary.vue b/dashboard/src2/components/Summary.vue new file mode 100644 index 0000000..c555e76 --- /dev/null +++ b/dashboard/src2/components/Summary.vue @@ -0,0 +1,29 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/SwitchTeamDialog.vue b/dashboard/src2/components/SwitchTeamDialog.vue new file mode 100644 index 0000000..986f05b --- /dev/null +++ b/dashboard/src2/components/SwitchTeamDialog.vue @@ -0,0 +1,93 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/TabsWithRouter.vue b/dashboard/src2/components/TabsWithRouter.vue new file mode 100644 index 0000000..9fce3d9 --- /dev/null +++ b/dashboard/src2/components/TabsWithRouter.vue @@ -0,0 +1,46 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/TextInsideCircle.vue b/dashboard/src2/components/TextInsideCircle.vue new file mode 100644 index 0000000..83613e9 --- /dev/null +++ b/dashboard/src2/components/TextInsideCircle.vue @@ -0,0 +1,7 @@ + \ No newline at end of file diff --git a/dashboard/src2/components/ToggleContent.vue b/dashboard/src2/components/ToggleContent.vue new file mode 100644 index 0000000..58113a3 --- /dev/null +++ b/dashboard/src2/components/ToggleContent.vue @@ -0,0 +1,51 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/UpdateBillingDetails.vue b/dashboard/src2/components/UpdateBillingDetails.vue new file mode 100644 index 0000000..05579f4 --- /dev/null +++ b/dashboard/src2/components/UpdateBillingDetails.vue @@ -0,0 +1,116 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/UpdateBillingDetailsForm.vue b/dashboard/src2/components/UpdateBillingDetailsForm.vue new file mode 100644 index 0000000..87aa06d --- /dev/null +++ b/dashboard/src2/components/UpdateBillingDetailsForm.vue @@ -0,0 +1,104 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/UserWithAvatarCell.vue b/dashboard/src2/components/UserWithAvatarCell.vue new file mode 100644 index 0000000..663838b --- /dev/null +++ b/dashboard/src2/components/UserWithAvatarCell.vue @@ -0,0 +1,24 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/auth/Configure2FA.vue b/dashboard/src2/components/auth/Configure2FA.vue new file mode 100644 index 0000000..31c0d18 --- /dev/null +++ b/dashboard/src2/components/auth/Configure2FA.vue @@ -0,0 +1,212 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/auth/LoginBox.vue b/dashboard/src2/components/auth/LoginBox.vue new file mode 
100644 index 0000000..584ab28 --- /dev/null +++ b/dashboard/src2/components/auth/LoginBox.vue @@ -0,0 +1,70 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/auth/SaaSLoginBox.vue b/dashboard/src2/components/auth/SaaSLoginBox.vue new file mode 100644 index 0000000..c465969 --- /dev/null +++ b/dashboard/src2/components/auth/SaaSLoginBox.vue @@ -0,0 +1,71 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/billing/AddCardDialog.vue b/dashboard/src2/components/billing/AddCardDialog.vue new file mode 100644 index 0000000..87fec14 --- /dev/null +++ b/dashboard/src2/components/billing/AddCardDialog.vue @@ -0,0 +1,35 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/billing/AddExchangeRate.vue b/dashboard/src2/components/billing/AddExchangeRate.vue new file mode 100644 index 0000000..9af2bd3 --- /dev/null +++ b/dashboard/src2/components/billing/AddExchangeRate.vue @@ -0,0 +1,109 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/billing/AddPrepaidCreditsDialog.vue b/dashboard/src2/components/billing/AddPrepaidCreditsDialog.vue new file mode 100644 index 0000000..67ee979 --- /dev/null +++ b/dashboard/src2/components/billing/AddPrepaidCreditsDialog.vue @@ -0,0 +1,37 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/billing/BillingDetails.vue b/dashboard/src2/components/billing/BillingDetails.vue new file mode 100644 index 0000000..a042257 --- /dev/null +++ b/dashboard/src2/components/billing/BillingDetails.vue @@ -0,0 +1,87 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/billing/BillingDetailsDialog.vue b/dashboard/src2/components/billing/BillingDetailsDialog.vue new file mode 100644 index 0000000..d0bf9ba --- /dev/null +++ b/dashboard/src2/components/billing/BillingDetailsDialog.vue @@ -0,0 +1,37 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/billing/BillingSummary.vue b/dashboard/src2/components/billing/BillingSummary.vue new file mode 100644 index 0000000..818eb74 --- /dev/null +++ b/dashboard/src2/components/billing/BillingSummary.vue @@ -0,0 +1,123 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/billing/BuyCreditsRazorpay.vue b/dashboard/src2/components/billing/BuyCreditsRazorpay.vue new file mode 100644 index 0000000..cb24897 --- /dev/null +++ b/dashboard/src2/components/billing/BuyCreditsRazorpay.vue @@ -0,0 +1,124 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/billing/BuyCreditsStripe.vue b/dashboard/src2/components/billing/BuyCreditsStripe.vue new file mode 100644 index 0000000..5c8f286 --- /dev/null +++ b/dashboard/src2/components/billing/BuyCreditsStripe.vue @@ -0,0 +1,168 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/billing/CardForm.vue b/dashboard/src2/components/billing/CardForm.vue new file mode 100644 index 0000000..4857d98 --- /dev/null +++ b/dashboard/src2/components/billing/CardForm.vue @@ -0,0 +1,355 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/billing/ChangeCardDialog.vue b/dashboard/src2/components/billing/ChangeCardDialog.vue new file mode 100644 index 0000000..779a433 --- /dev/null +++ b/dashboard/src2/components/billing/ChangeCardDialog.vue @@ -0,0 +1,138 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/billing/DropdownItem.vue b/dashboard/src2/components/billing/DropdownItem.vue new file mode 100644 index 0000000..c51f5fa --- /dev/null +++ 
b/dashboard/src2/components/billing/DropdownItem.vue @@ -0,0 +1,23 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/billing/FinalizeInvoicesDialog.vue b/dashboard/src2/components/billing/FinalizeInvoicesDialog.vue new file mode 100644 index 0000000..8fb8a29 --- /dev/null +++ b/dashboard/src2/components/billing/FinalizeInvoicesDialog.vue @@ -0,0 +1,72 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/billing/NewAddressForm.vue b/dashboard/src2/components/billing/NewAddressForm.vue new file mode 100644 index 0000000..f405012 --- /dev/null +++ b/dashboard/src2/components/billing/NewAddressForm.vue @@ -0,0 +1,214 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/billing/PaymentDetails.vue b/dashboard/src2/components/billing/PaymentDetails.vue new file mode 100644 index 0000000..045561b --- /dev/null +++ b/dashboard/src2/components/billing/PaymentDetails.vue @@ -0,0 +1,227 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/billing/PrepaidCreditsForm.vue b/dashboard/src2/components/billing/PrepaidCreditsForm.vue new file mode 100644 index 0000000..1ba4082 --- /dev/null +++ b/dashboard/src2/components/billing/PrepaidCreditsForm.vue @@ -0,0 +1,250 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/billing/UpcomingInvoiceDialog.vue b/dashboard/src2/components/billing/UpcomingInvoiceDialog.vue new file mode 100644 index 0000000..0a3e4ca --- /dev/null +++ b/dashboard/src2/components/billing/UpcomingInvoiceDialog.vue @@ -0,0 +1,37 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/billing/mpesa/AddMpesaCredentials.vue b/dashboard/src2/components/billing/mpesa/AddMpesaCredentials.vue new file mode 100644 index 0000000..d70a0b6 --- /dev/null +++ b/dashboard/src2/components/billing/mpesa/AddMpesaCredentials.vue @@ -0,0 +1,171 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/billing/mpesa/AddPaymentGateway.vue b/dashboard/src2/components/billing/mpesa/AddPaymentGateway.vue new file mode 100644 index 0000000..191d490 --- /dev/null +++ b/dashboard/src2/components/billing/mpesa/AddPaymentGateway.vue @@ -0,0 +1,172 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/billing/mpesa/BuyPrepaidCreditsMpesa.vue b/dashboard/src2/components/billing/mpesa/BuyPrepaidCreditsMpesa.vue new file mode 100644 index 0000000..36427fe --- /dev/null +++ b/dashboard/src2/components/billing/mpesa/BuyPrepaidCreditsMpesa.vue @@ -0,0 +1,238 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/billing/mpesa/PartnerPaymentPayout.vue b/dashboard/src2/components/billing/mpesa/PartnerPaymentPayout.vue new file mode 100644 index 0000000..692f2c5 --- /dev/null +++ b/dashboard/src2/components/billing/mpesa/PartnerPaymentPayout.vue @@ -0,0 +1,221 @@ + + + + + \ No newline at end of file diff --git a/dashboard/src2/components/devtools/database/DatabaseAddIndexButton.vue b/dashboard/src2/components/devtools/database/DatabaseAddIndexButton.vue new file mode 100644 index 0000000..df68f8f --- /dev/null +++ b/dashboard/src2/components/devtools/database/DatabaseAddIndexButton.vue @@ -0,0 +1,76 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/devtools/database/DatabasePerformanceSchemaDisabledNotice.vue b/dashboard/src2/components/devtools/database/DatabasePerformanceSchemaDisabledNotice.vue new file mode 100644 index 0000000..4900af1 --- /dev/null +++ 
b/dashboard/src2/components/devtools/database/DatabasePerformanceSchemaDisabledNotice.vue @@ -0,0 +1,19 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/devtools/database/DatabaseProcessKillButton.vue b/dashboard/src2/components/devtools/database/DatabaseProcessKillButton.vue new file mode 100644 index 0000000..073959a --- /dev/null +++ b/dashboard/src2/components/devtools/database/DatabaseProcessKillButton.vue @@ -0,0 +1,55 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/devtools/database/DatabaseSQLPlaygroundLog.vue b/dashboard/src2/components/devtools/database/DatabaseSQLPlaygroundLog.vue new file mode 100644 index 0000000..a2600ae --- /dev/null +++ b/dashboard/src2/components/devtools/database/DatabaseSQLPlaygroundLog.vue @@ -0,0 +1,112 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/devtools/database/DatabaseTableSchemaDialog.vue b/dashboard/src2/components/devtools/database/DatabaseTableSchemaDialog.vue new file mode 100644 index 0000000..122ce55 --- /dev/null +++ b/dashboard/src2/components/devtools/database/DatabaseTableSchemaDialog.vue @@ -0,0 +1,213 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/devtools/database/DatabaseTableSchemaInfoDialog.vue b/dashboard/src2/components/devtools/database/DatabaseTableSchemaInfoDialog.vue new file mode 100644 index 0000000..270c570 --- /dev/null +++ b/dashboard/src2/components/devtools/database/DatabaseTableSchemaInfoDialog.vue @@ -0,0 +1,60 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/devtools/database/DatabaseTableSchemaSizeDetailsDialog.vue b/dashboard/src2/components/devtools/database/DatabaseTableSchemaSizeDetailsDialog.vue new file mode 100644 index 0000000..1c3da14 --- /dev/null +++ b/dashboard/src2/components/devtools/database/DatabaseTableSchemaSizeDetailsDialog.vue @@ -0,0 +1,122 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/devtools/database/ResultTable.vue b/dashboard/src2/components/devtools/database/ResultTable.vue new file mode 100644 index 0000000..6b4a68c --- /dev/null +++ b/dashboard/src2/components/devtools/database/ResultTable.vue @@ -0,0 +1,314 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/devtools/database/SQLCodeEditor.vue b/dashboard/src2/components/devtools/database/SQLCodeEditor.vue new file mode 100644 index 0000000..2c01951 --- /dev/null +++ b/dashboard/src2/components/devtools/database/SQLCodeEditor.vue @@ -0,0 +1,104 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/devtools/database/SQLResult.vue b/dashboard/src2/components/devtools/database/SQLResult.vue new file mode 100644 index 0000000..0ec2d83 --- /dev/null +++ b/dashboard/src2/components/devtools/database/SQLResult.vue @@ -0,0 +1,54 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/group/AddAppDialog.vue b/dashboard/src2/components/group/AddAppDialog.vue new file mode 100644 index 0000000..dcf7b79 --- /dev/null +++ b/dashboard/src2/components/group/AddAppDialog.vue @@ -0,0 +1,377 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/group/AddRegionDialog.vue b/dashboard/src2/components/group/AddRegionDialog.vue new file mode 100644 index 0000000..2ca4175 --- /dev/null +++ b/dashboard/src2/components/group/AddRegionDialog.vue @@ -0,0 +1,85 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/group/BenchLogsDialog.vue b/dashboard/src2/components/group/BenchLogsDialog.vue new 
file mode 100644 index 0000000..f13fbbb --- /dev/null +++ b/dashboard/src2/components/group/BenchLogsDialog.vue @@ -0,0 +1,142 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/group/ChangeAppBranchDialog.vue b/dashboard/src2/components/group/ChangeAppBranchDialog.vue new file mode 100644 index 0000000..eee8164 --- /dev/null +++ b/dashboard/src2/components/group/ChangeAppBranchDialog.vue @@ -0,0 +1,94 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/group/DependencyEditorDialog.vue b/dashboard/src2/components/group/DependencyEditorDialog.vue new file mode 100644 index 0000000..38d07ee --- /dev/null +++ b/dashboard/src2/components/group/DependencyEditorDialog.vue @@ -0,0 +1,178 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/group/PatchAppDialog.vue b/dashboard/src2/components/group/PatchAppDialog.vue new file mode 100644 index 0000000..214472f --- /dev/null +++ b/dashboard/src2/components/group/PatchAppDialog.vue @@ -0,0 +1,312 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/group/ReleaseGroupActionCell.vue b/dashboard/src2/components/group/ReleaseGroupActionCell.vue new file mode 100644 index 0000000..e4b8c7b --- /dev/null +++ b/dashboard/src2/components/group/ReleaseGroupActionCell.vue @@ -0,0 +1,164 @@ + + + diff --git a/dashboard/src2/components/group/ReleaseGroupActions.vue b/dashboard/src2/components/group/ReleaseGroupActions.vue new file mode 100644 index 0000000..9e77e3c --- /dev/null +++ b/dashboard/src2/components/group/ReleaseGroupActions.vue @@ -0,0 +1,57 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/group/SSHCertificateDialog.vue b/dashboard/src2/components/group/SSHCertificateDialog.vue new file mode 100644 index 0000000..33ca819 --- /dev/null +++ b/dashboard/src2/components/group/SSHCertificateDialog.vue @@ -0,0 +1,151 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/group/SupervisorProcessesDialog.vue b/dashboard/src2/components/group/SupervisorProcessesDialog.vue new file mode 100644 index 0000000..9299bbf --- /dev/null +++ b/dashboard/src2/components/group/SupervisorProcessesDialog.vue @@ -0,0 +1,37 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/group/UpdateReleaseGroupDialog.vue b/dashboard/src2/components/group/UpdateReleaseGroupDialog.vue new file mode 100644 index 0000000..a23fde5 --- /dev/null +++ b/dashboard/src2/components/group/UpdateReleaseGroupDialog.vue @@ -0,0 +1,632 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/marketplace/AppListingStepsDialog.vue b/dashboard/src2/components/marketplace/AppListingStepsDialog.vue new file mode 100644 index 0000000..9bfd7c6 --- /dev/null +++ b/dashboard/src2/components/marketplace/AppListingStepsDialog.vue @@ -0,0 +1,101 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/marketplace/ChangeAppBranchDialog.vue b/dashboard/src2/components/marketplace/ChangeAppBranchDialog.vue new file mode 100644 index 0000000..bcb1081 --- /dev/null +++ b/dashboard/src2/components/marketplace/ChangeAppBranchDialog.vue @@ -0,0 +1,103 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/marketplace/CodeReview.vue b/dashboard/src2/components/marketplace/CodeReview.vue new file mode 100644 index 0000000..9b4f44c --- /dev/null +++ b/dashboard/src2/components/marketplace/CodeReview.vue @@ -0,0 +1,296 @@ + + + \ No newline at end of file diff --git 
a/dashboard/src2/components/marketplace/MarketplaceAppAnalytics.vue b/dashboard/src2/components/marketplace/MarketplaceAppAnalytics.vue new file mode 100644 index 0000000..65a2507 --- /dev/null +++ b/dashboard/src2/components/marketplace/MarketplaceAppAnalytics.vue @@ -0,0 +1,212 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/marketplace/NewComment.vue b/dashboard/src2/components/marketplace/NewComment.vue new file mode 100644 index 0000000..daa0afe --- /dev/null +++ b/dashboard/src2/components/marketplace/NewComment.vue @@ -0,0 +1,57 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/marketplace/NewMarketplaceAppDialog.vue b/dashboard/src2/components/marketplace/NewMarketplaceAppDialog.vue new file mode 100644 index 0000000..6ee8c36 --- /dev/null +++ b/dashboard/src2/components/marketplace/NewMarketplaceAppDialog.vue @@ -0,0 +1,186 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/marketplace/PlansDialog.vue b/dashboard/src2/components/marketplace/PlansDialog.vue new file mode 100644 index 0000000..a7d1acf --- /dev/null +++ b/dashboard/src2/components/marketplace/PlansDialog.vue @@ -0,0 +1,222 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/marketplace/ReplyMarketplaceApp.vue b/dashboard/src2/components/marketplace/ReplyMarketplaceApp.vue new file mode 100644 index 0000000..2f86ffa --- /dev/null +++ b/dashboard/src2/components/marketplace/ReplyMarketplaceApp.vue @@ -0,0 +1,78 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/partners/BuyPartnerCreditsRazorpay.vue b/dashboard/src2/components/partners/BuyPartnerCreditsRazorpay.vue new file mode 100644 index 0000000..e355977 --- /dev/null +++ b/dashboard/src2/components/partners/BuyPartnerCreditsRazorpay.vue @@ -0,0 +1,132 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/partners/BuyPartnerCreditsStripe.vue b/dashboard/src2/components/partners/BuyPartnerCreditsStripe.vue new file mode 100644 index 0000000..983040e --- /dev/null +++ b/dashboard/src2/components/partners/BuyPartnerCreditsStripe.vue @@ -0,0 +1,168 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/partners/PartnerApprovalRequests.vue b/dashboard/src2/components/partners/PartnerApprovalRequests.vue new file mode 100644 index 0000000..017a806 --- /dev/null +++ b/dashboard/src2/components/partners/PartnerApprovalRequests.vue @@ -0,0 +1,156 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/partners/PartnerContribution.vue b/dashboard/src2/components/partners/PartnerContribution.vue new file mode 100644 index 0000000..b29b215 --- /dev/null +++ b/dashboard/src2/components/partners/PartnerContribution.vue @@ -0,0 +1,102 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/partners/PartnerCreditsForm.vue b/dashboard/src2/components/partners/PartnerCreditsForm.vue new file mode 100644 index 0000000..cd7835a --- /dev/null +++ b/dashboard/src2/components/partners/PartnerCreditsForm.vue @@ -0,0 +1,125 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/partners/PartnerCustomerInvoices.vue b/dashboard/src2/components/partners/PartnerCustomerInvoices.vue new file mode 100644 index 0000000..d81f7f0 --- /dev/null +++ b/dashboard/src2/components/partners/PartnerCustomerInvoices.vue @@ -0,0 +1,69 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/partners/PartnerCustomers.vue 
b/dashboard/src2/components/partners/PartnerCustomers.vue new file mode 100644 index 0000000..e7c4097 --- /dev/null +++ b/dashboard/src2/components/partners/PartnerCustomers.vue @@ -0,0 +1,204 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/partners/PartnerLocalPaymentSetup.vue b/dashboard/src2/components/partners/PartnerLocalPaymentSetup.vue new file mode 100644 index 0000000..b8dd423 --- /dev/null +++ b/dashboard/src2/components/partners/PartnerLocalPaymentSetup.vue @@ -0,0 +1,109 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/partners/PartnerMembers.vue b/dashboard/src2/components/partners/PartnerMembers.vue new file mode 100644 index 0000000..a908d0d --- /dev/null +++ b/dashboard/src2/components/partners/PartnerMembers.vue @@ -0,0 +1,49 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/partners/PartnerOverview.vue b/dashboard/src2/components/partners/PartnerOverview.vue new file mode 100644 index 0000000..7c6751a --- /dev/null +++ b/dashboard/src2/components/partners/PartnerOverview.vue @@ -0,0 +1,294 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/server/ServerActionCell.vue b/dashboard/src2/components/server/ServerActionCell.vue new file mode 100644 index 0000000..bffb4d1 --- /dev/null +++ b/dashboard/src2/components/server/ServerActionCell.vue @@ -0,0 +1,164 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/server/ServerActions.vue b/dashboard/src2/components/server/ServerActions.vue new file mode 100644 index 0000000..1e6dbeb --- /dev/null +++ b/dashboard/src2/components/server/ServerActions.vue @@ -0,0 +1,101 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/server/ServerCharts.vue b/dashboard/src2/components/server/ServerCharts.vue new file mode 100644 index 0000000..7f8267b --- /dev/null +++ b/dashboard/src2/components/server/ServerCharts.vue @@ -0,0 +1,830 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/server/ServerLoadAverage.vue b/dashboard/src2/components/server/ServerLoadAverage.vue new file mode 100644 index 0000000..ba46ef4 --- /dev/null +++ b/dashboard/src2/components/server/ServerLoadAverage.vue @@ -0,0 +1,99 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/server/ServerOverview.vue b/dashboard/src2/components/server/ServerOverview.vue new file mode 100644 index 0000000..d4a2b13 --- /dev/null +++ b/dashboard/src2/components/server/ServerOverview.vue @@ -0,0 +1,453 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/server/ServerPlansCards.vue b/dashboard/src2/components/server/ServerPlansCards.vue new file mode 100644 index 0000000..df53ae3 --- /dev/null +++ b/dashboard/src2/components/server/ServerPlansCards.vue @@ -0,0 +1,50 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/server/ServerPlansDialog.vue b/dashboard/src2/components/server/ServerPlansDialog.vue new file mode 100644 index 0000000..bc9f5e8 --- /dev/null +++ b/dashboard/src2/components/server/ServerPlansDialog.vue @@ -0,0 +1,140 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/settings/ActivateWebhookDialog.vue b/dashboard/src2/components/settings/ActivateWebhookDialog.vue new file mode 100644 index 0000000..806f729 --- /dev/null +++ b/dashboard/src2/components/settings/ActivateWebhookDialog.vue @@ -0,0 +1,135 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/settings/AddNewWebhookDialog.vue 
b/dashboard/src2/components/settings/AddNewWebhookDialog.vue new file mode 100644 index 0000000..a0b56fb --- /dev/null +++ b/dashboard/src2/components/settings/AddNewWebhookDialog.vue @@ -0,0 +1,166 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/settings/DeveloperSettings.vue b/dashboard/src2/components/settings/DeveloperSettings.vue new file mode 100644 index 0000000..71d8aef --- /dev/null +++ b/dashboard/src2/components/settings/DeveloperSettings.vue @@ -0,0 +1,474 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/settings/EditWebhookDialog.vue b/dashboard/src2/components/settings/EditWebhookDialog.vue new file mode 100644 index 0000000..f8b9142 --- /dev/null +++ b/dashboard/src2/components/settings/EditWebhookDialog.vue @@ -0,0 +1,189 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/settings/InviteTeamMemberDialog.vue b/dashboard/src2/components/settings/InviteTeamMemberDialog.vue new file mode 100644 index 0000000..5e6f3a0 --- /dev/null +++ b/dashboard/src2/components/settings/InviteTeamMemberDialog.vue @@ -0,0 +1,139 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/settings/RoleConfigureDialog.vue b/dashboard/src2/components/settings/RoleConfigureDialog.vue new file mode 100644 index 0000000..334a960 --- /dev/null +++ b/dashboard/src2/components/settings/RoleConfigureDialog.vue @@ -0,0 +1,309 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/settings/RoleList.vue b/dashboard/src2/components/settings/RoleList.vue new file mode 100644 index 0000000..f5c5797 --- /dev/null +++ b/dashboard/src2/components/settings/RoleList.vue @@ -0,0 +1,136 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/settings/RolePermissions.vue b/dashboard/src2/components/settings/RolePermissions.vue new file mode 100644 index 0000000..716bcb0 --- /dev/null +++ b/dashboard/src2/components/settings/RolePermissions.vue @@ -0,0 +1,233 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/settings/SettingsPermissions.vue b/dashboard/src2/components/settings/SettingsPermissions.vue new file mode 100644 index 0000000..0c7b422 --- /dev/null +++ b/dashboard/src2/components/settings/SettingsPermissions.vue @@ -0,0 +1,7 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/settings/TeamSettings.vue b/dashboard/src2/components/settings/TeamSettings.vue new file mode 100644 index 0000000..0ef1d69 --- /dev/null +++ b/dashboard/src2/components/settings/TeamSettings.vue @@ -0,0 +1,95 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/settings/TeamSettingsDialog.vue b/dashboard/src2/components/settings/TeamSettingsDialog.vue new file mode 100644 index 0000000..d5359d5 --- /dev/null +++ b/dashboard/src2/components/settings/TeamSettingsDialog.vue @@ -0,0 +1,41 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/settings/WebhookAttemptDetails.vue b/dashboard/src2/components/settings/WebhookAttemptDetails.vue new file mode 100644 index 0000000..22118a5 --- /dev/null +++ b/dashboard/src2/components/settings/WebhookAttemptDetails.vue @@ -0,0 +1,85 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/settings/WebhookAttemptsDialog.vue b/dashboard/src2/components/settings/WebhookAttemptsDialog.vue new file mode 100644 index 0000000..715a475 --- /dev/null +++ b/dashboard/src2/components/settings/WebhookAttemptsDialog.vue @@ -0,0 +1,122 @@ + + \ No newline at end of 
file diff --git a/dashboard/src2/components/settings/profile/AccountEmails.vue b/dashboard/src2/components/settings/profile/AccountEmails.vue new file mode 100644 index 0000000..cea2dda --- /dev/null +++ b/dashboard/src2/components/settings/profile/AccountEmails.vue @@ -0,0 +1,91 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/settings/profile/AccountPartner.vue b/dashboard/src2/components/settings/profile/AccountPartner.vue new file mode 100644 index 0000000..2ebd59e --- /dev/null +++ b/dashboard/src2/components/settings/profile/AccountPartner.vue @@ -0,0 +1,190 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/settings/profile/AccountProfile.vue b/dashboard/src2/components/settings/profile/AccountProfile.vue new file mode 100644 index 0000000..87f4f42 --- /dev/null +++ b/dashboard/src2/components/settings/profile/AccountProfile.vue @@ -0,0 +1,427 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/settings/profile/AccountReferral.vue b/dashboard/src2/components/settings/profile/AccountReferral.vue new file mode 100644 index 0000000..d6d8c92 --- /dev/null +++ b/dashboard/src2/components/settings/profile/AccountReferral.vue @@ -0,0 +1,37 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/settings/profile/ProfileSettings.vue b/dashboard/src2/components/settings/profile/ProfileSettings.vue new file mode 100644 index 0000000..aa0d575 --- /dev/null +++ b/dashboard/src2/components/settings/profile/ProfileSettings.vue @@ -0,0 +1,17 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/settings/profile/TFADialog.vue b/dashboard/src2/components/settings/profile/TFADialog.vue new file mode 100644 index 0000000..c60ec9b --- /dev/null +++ b/dashboard/src2/components/settings/profile/TFADialog.vue @@ -0,0 +1,48 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/site/AnalyticsCard.vue b/dashboard/src2/components/site/AnalyticsCard.vue new file mode 100644 index 0000000..4e1ac03 --- /dev/null +++ b/dashboard/src2/components/site/AnalyticsCard.vue @@ -0,0 +1,16 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/site/ConfigureAutoUpdateDialog.vue b/dashboard/src2/components/site/ConfigureAutoUpdateDialog.vue new file mode 100644 index 0000000..55454cf --- /dev/null +++ b/dashboard/src2/components/site/ConfigureAutoUpdateDialog.vue @@ -0,0 +1,45 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/site/InstallAppDialog.vue b/dashboard/src2/components/site/InstallAppDialog.vue new file mode 100644 index 0000000..201cb59 --- /dev/null +++ b/dashboard/src2/components/site/InstallAppDialog.vue @@ -0,0 +1,167 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/site/NewSiteAppSelector.vue b/dashboard/src2/components/site/NewSiteAppSelector.vue new file mode 100644 index 0000000..1c31fe3 --- /dev/null +++ b/dashboard/src2/components/site/NewSiteAppSelector.vue @@ -0,0 +1,220 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/site/SelectSiteForRestore.vue b/dashboard/src2/components/site/SelectSiteForRestore.vue new file mode 100644 index 0000000..6bac8a0 --- /dev/null +++ b/dashboard/src2/components/site/SelectSiteForRestore.vue @@ -0,0 +1,74 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/site/SiteAnalytics.vue b/dashboard/src2/components/site/SiteAnalytics.vue new file mode 100644 index 0000000..3a443b9 --- /dev/null +++ 
b/dashboard/src2/components/site/SiteAnalytics.vue @@ -0,0 +1,482 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/site/SiteAppPlanSelectDialog.vue b/dashboard/src2/components/site/SiteAppPlanSelectDialog.vue new file mode 100644 index 0000000..c185d35 --- /dev/null +++ b/dashboard/src2/components/site/SiteAppPlanSelectDialog.vue @@ -0,0 +1,54 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/site/SiteAppPlanSelectorDialog.vue b/dashboard/src2/components/site/SiteAppPlanSelectorDialog.vue new file mode 100644 index 0000000..719a8b4 --- /dev/null +++ b/dashboard/src2/components/site/SiteAppPlanSelectorDialog.vue @@ -0,0 +1,72 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/site/SiteChangeGroupDialog.vue b/dashboard/src2/components/site/SiteChangeGroupDialog.vue new file mode 100644 index 0000000..03fa988 --- /dev/null +++ b/dashboard/src2/components/site/SiteChangeGroupDialog.vue @@ -0,0 +1,202 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/site/SiteChangeRegionDialog.vue b/dashboard/src2/components/site/SiteChangeRegionDialog.vue new file mode 100644 index 0000000..b1b3e2a --- /dev/null +++ b/dashboard/src2/components/site/SiteChangeRegionDialog.vue @@ -0,0 +1,173 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/site/SiteChangeServerDialog.vue b/dashboard/src2/components/site/SiteChangeServerDialog.vue new file mode 100644 index 0000000..29dca5b --- /dev/null +++ b/dashboard/src2/components/site/SiteChangeServerDialog.vue @@ -0,0 +1,196 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/site/SiteDatabaseRestoreFromURLDialog.vue b/dashboard/src2/components/site/SiteDatabaseRestoreFromURLDialog.vue new file mode 100644 index 0000000..ac7948b --- /dev/null +++ b/dashboard/src2/components/site/SiteDatabaseRestoreFromURLDialog.vue @@ -0,0 +1,156 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/site/SiteInsights.vue b/dashboard/src2/components/site/SiteInsights.vue new file mode 100644 index 0000000..0cde752 --- /dev/null +++ b/dashboard/src2/components/site/SiteInsights.vue @@ -0,0 +1,90 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/site/SiteJobs.vue b/dashboard/src2/components/site/SiteJobs.vue new file mode 100644 index 0000000..318d830 --- /dev/null +++ b/dashboard/src2/components/site/SiteJobs.vue @@ -0,0 +1,112 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/site/SiteLogs.vue b/dashboard/src2/components/site/SiteLogs.vue new file mode 100644 index 0000000..b9555ce --- /dev/null +++ b/dashboard/src2/components/site/SiteLogs.vue @@ -0,0 +1,229 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/site/SiteUptime.vue b/dashboard/src2/components/site/SiteUptime.vue new file mode 100644 index 0000000..d8573a7 --- /dev/null +++ b/dashboard/src2/components/site/SiteUptime.vue @@ -0,0 +1,65 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/site/SiteVersionUpgradeDialog.vue b/dashboard/src2/components/site/SiteVersionUpgradeDialog.vue new file mode 100644 index 0000000..b3144c5 --- /dev/null +++ b/dashboard/src2/components/site/SiteVersionUpgradeDialog.vue @@ -0,0 +1,257 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/site/performance/PerformanceReport.vue b/dashboard/src2/components/site/performance/PerformanceReport.vue new file mode 100644 index 0000000..b682c30 --- /dev/null +++ 
b/dashboard/src2/components/site/performance/PerformanceReport.vue @@ -0,0 +1,64 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/site/performance/SiteBinaryLogs.vue b/dashboard/src2/components/site/performance/SiteBinaryLogs.vue new file mode 100644 index 0000000..eb8cee1 --- /dev/null +++ b/dashboard/src2/components/site/performance/SiteBinaryLogs.vue @@ -0,0 +1,133 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/site/performance/SiteDatabaseProcess.vue b/dashboard/src2/components/site/performance/SiteDatabaseProcess.vue new file mode 100644 index 0000000..9a59e6b --- /dev/null +++ b/dashboard/src2/components/site/performance/SiteDatabaseProcess.vue @@ -0,0 +1,78 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/site/performance/SiteDeadlockReport.vue b/dashboard/src2/components/site/performance/SiteDeadlockReport.vue new file mode 100644 index 0000000..5b7c386 --- /dev/null +++ b/dashboard/src2/components/site/performance/SiteDeadlockReport.vue @@ -0,0 +1,114 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/site/performance/SitePerformance.vue b/dashboard/src2/components/site/performance/SitePerformance.vue new file mode 100644 index 0000000..ff01009 --- /dev/null +++ b/dashboard/src2/components/site/performance/SitePerformance.vue @@ -0,0 +1,79 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/site/performance/SiteProcessList.vue b/dashboard/src2/components/site/performance/SiteProcessList.vue new file mode 100644 index 0000000..395443d --- /dev/null +++ b/dashboard/src2/components/site/performance/SiteProcessList.vue @@ -0,0 +1,108 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/site/performance/SiteRequestLogs.vue b/dashboard/src2/components/site/performance/SiteRequestLogs.vue new file mode 100644 index 0000000..006c342 --- /dev/null +++ b/dashboard/src2/components/site/performance/SiteRequestLogs.vue @@ -0,0 +1,104 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/site/performance/SiteSlowQueries.vue b/dashboard/src2/components/site/performance/SiteSlowQueries.vue new file mode 100644 index 0000000..3803f5e --- /dev/null +++ b/dashboard/src2/components/site/performance/SiteSlowQueries.vue @@ -0,0 +1,133 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/site/performance/SiteSlowQueryDialog.vue b/dashboard/src2/components/site/performance/SiteSlowQueryDialog.vue new file mode 100644 index 0000000..de13a2c --- /dev/null +++ b/dashboard/src2/components/site/performance/SiteSlowQueryDialog.vue @@ -0,0 +1,25 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/components/site_database_user/SiteDatabaseAddEditUserDialog.vue b/dashboard/src2/components/site_database_user/SiteDatabaseAddEditUserDialog.vue new file mode 100644 index 0000000..e0278fe --- /dev/null +++ b/dashboard/src2/components/site_database_user/SiteDatabaseAddEditUserDialog.vue @@ -0,0 +1,434 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/site_database_user/SiteDatabaseColumnsSelector.vue b/dashboard/src2/components/site_database_user/SiteDatabaseColumnsSelector.vue new file mode 100644 index 0000000..8e8d85d --- /dev/null +++ b/dashboard/src2/components/site_database_user/SiteDatabaseColumnsSelector.vue @@ -0,0 +1,92 @@ + + \ No newline at end of file diff --git a/dashboard/src2/components/site_database_user/SiteDatabaseUserCredentialDialog.vue 
b/dashboard/src2/components/site_database_user/SiteDatabaseUserCredentialDialog.vue new file mode 100644 index 0000000..ecaab44 --- /dev/null +++ b/dashboard/src2/components/site_database_user/SiteDatabaseUserCredentialDialog.vue @@ -0,0 +1,158 @@ + + \ No newline at end of file diff --git a/dashboard/src2/data/notifications.js b/dashboard/src2/data/notifications.js new file mode 100644 index 0000000..beb0ff0 --- /dev/null +++ b/dashboard/src2/data/notifications.js @@ -0,0 +1,7 @@ +import { createResource } from 'jingrow-ui'; + +export const unreadNotificationsCount = createResource({ + cache: 'Unread Notifications Count', + url: 'jcloud.api.notifications.get_unread_count', + initialData: 0 +}); diff --git a/dashboard/src2/data/plans.js b/dashboard/src2/data/plans.js new file mode 100644 index 0000000..83f71f7 --- /dev/null +++ b/dashboard/src2/data/plans.js @@ -0,0 +1,23 @@ +import { createResource } from 'jingrow-ui'; + +export let plans = createResource({ + url: 'jcloud.api.site.get_site_plans', + cache: 'site.plans', + initialData: [] +}); + +export function fetchPlans() { + plans.fetch(); +} + +/** + * Get plans + * @returns {Array} List of plans +*/ +export function getPlans() { + return plans.data || []; +} + +export function getPlan(planName) { + return getPlans().find(plan => plan.name === planName); +} diff --git a/dashboard/src2/data/session.js b/dashboard/src2/data/session.js new file mode 100644 index 0000000..1a49ab1 --- /dev/null +++ b/dashboard/src2/data/session.js @@ -0,0 +1,110 @@ +import { computed, reactive } from 'vue'; +import { createResource } from 'jingrow-ui'; +import { clear } from 'idb-keyval'; +import router from '../router'; + +export let session = reactive({ + login: createResource({ + url: 'login', + makeParams({ email, password }) { + return { + usr: email, + pwd: password + }; + } + }), + logout: createResource({ + url: 'logout', + async onSuccess() { + session.user = getSessionUser(); + await router.replace({ name: 'Login' }); + localStorage.removeItem('current_team'); + // On logout, reset posthog user identity and device id + if (window.posthog?.__loaded) { + posthog.reset(true); + } + + // clear all cache from the session + clear(); + + window.location.reload(); + } + }), + logoutWithoutReload: createResource({ + url: 'logout', + async onSuccess() { + session.user = getSessionUser(); + localStorage.removeItem('current_team'); + // On logout, reset posthog user identity and device id + if (window.posthog?.__loaded) { + posthog.reset(true); + } + + clear(); + } + }), + roles: createResource({ + url: 'jcloud.api.account.get_permission_roles', + cache: ['roles', localStorage.getItem('current_team')], + initialData: [] + }), + isTeamAdmin: computed( + () => + session.roles.data.length + ? session.roles.data.some(role => role.admin_access) + : false // if no roles, assume not admin and has member access + ), + hasBillingAccess: computed(() => + session.roles.data.length + ? session.roles.data.some(role => role.allow_billing) + : true + ), + hasWebhookConfigurationAccess: computed(() => + session.roles.data.length + ? session.roles.data.some(role => role.allow_webhook_configuration) + : true + ), + hasAppsAccess: computed(() => + session.roles.data.length + ? session.roles.data.some(role => role.allow_apps) + : true + ), + hasPartnerAccess: computed(() => + session.roles.data.length + ? session.roles.data.some(role => role.allow_partner) + : true + ), + hasSiteCreationAccess: computed(() => + session.roles.data.length + ? 
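// roles are configured for this team, so site creation needs at least one role that allows it;
// an empty role list falls through to the `: true` branch below and leaves access unrestricted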
session.roles.data.some(role => role.allow_site_creation) + : true + ), + hasBenchCreationAccess: computed(() => + session.roles.data.length + ? session.roles.data.some(role => role.allow_bench_creation) + : true + ), + hasServerCreationAccess: computed(() => + session.roles.data.length + ? session.roles.data.some(role => role.allow_server_creation) + : true + ), + user: getSessionUser(), + isLoggedIn: computed(() => !!session.user), + isSystemUser: getSessionCookies().get('system_user') === 'yes' +}); + +export default session; + +export function getSessionUser() { + let cookies = getSessionCookies(); + let sessionUser = cookies.get('user_id'); + if (!sessionUser || sessionUser === 'Guest') { + sessionUser = null; + } + return sessionUser; +} + +function getSessionCookies() { + return new URLSearchParams(document.cookie.split('; ').join('&')); +} diff --git a/dashboard/src2/data/team.js b/dashboard/src2/data/team.js new file mode 100644 index 0000000..ca70e08 --- /dev/null +++ b/dashboard/src2/data/team.js @@ -0,0 +1,71 @@ +import { createDocumentResource, jingrowRequest } from 'jingrow-ui'; +import { clear } from 'idb-keyval'; + +let team; + +export function getTeam() { + if (!team) { + team = createDocumentResource({ + pagetype: 'Team', + name: getCurrentTeam(), + whitelistedMethods: { + getTeamMembers: 'get_team_members', + inviteTeamMember: 'invite_team_member', + removeTeamMember: 'remove_team_member' + } + }); + } + return team; +} + +function getCurrentTeam() { + if ( + document.cookie.includes('user_id=Guest') || + !document.cookie.includes('user_id') + ) { + return null; + } + let currentTeam = localStorage.getItem('current_team'); + if ( + !currentTeam || + (currentTeam !== window.default_team && + !window.valid_teams.map(t => t.name).includes(currentTeam) && + !window.is_system_user) + ) { + currentTeam = window.default_team; + if (currentTeam) localStorage.setItem('current_team', currentTeam); + } + return currentTeam; +} + +export async function switchToTeam(team) { + let canSwitch = false; + try { + canSwitch = await jingrowRequest({ + url: '/api/method/jcloud.api.account.can_switch_to_team', + params: { team } + }); + } catch (error) { + console.log(error); + canSwitch = false; + } + if (canSwitch) { + localStorage.setItem('current_team', team); + + // clear all cache from previous team session + clear(); + + window.location.reload(); + } +} + +export async function isLastSite(team) { + let count = 0; + count = await jingrowRequest({ + url: '/api/method/jcloud.api.account.get_site_count', + params: { team } + }); + return Boolean(count === 1); +} + +window.switchToTeam = switchToTeam; diff --git a/dashboard/src2/dialogs/ConfirmDialog.vue b/dashboard/src2/dialogs/ConfirmDialog.vue new file mode 100644 index 0000000..89eb442 --- /dev/null +++ b/dashboard/src2/dialogs/ConfirmDialog.vue @@ -0,0 +1,93 @@ + + diff --git a/dashboard/src2/dialogs/TagsDialog.vue b/dashboard/src2/dialogs/TagsDialog.vue new file mode 100644 index 0000000..ec18015 --- /dev/null +++ b/dashboard/src2/dialogs/TagsDialog.vue @@ -0,0 +1,84 @@ + + + diff --git a/dashboard/src2/globals.ts b/dashboard/src2/globals.ts new file mode 100644 index 0000000..0458054 --- /dev/null +++ b/dashboard/src2/globals.ts @@ -0,0 +1,27 @@ +import { toast } from 'vue-sonner'; +import { dayjsLocal } from './utils/dayjs'; +import { session } from './data/session'; +import theme from '../tailwind.theme.json'; +import { debounce } from 'jingrow-ui'; +import { getTeam } from './data/team'; +import * as formatters from 
'./utils/format'; +import { getPlatform, isMobile } from './utils/device'; + +export default function globals(app) { + app.config.globalProperties.$session = session; + app.config.globalProperties.$team = session.user ? getTeam() : null; + app.config.globalProperties.$toast = toast; + app.config.globalProperties.$dayjs = dayjsLocal; + app.config.globalProperties.$theme = theme; + app.config.globalProperties.$platform = getPlatform(); + app.config.globalProperties.$format = formatters; + app.config.globalProperties.$log = console.log; + app.config.globalProperties.$debounce = debounce; + app.config.globalProperties.$isMobile = isMobile(); + + // legacy globals for old dashboard + // TODO: remove later + app.config.globalProperties.formatBytes = formatters.bytes; + app.config.globalProperties.$planTitle = formatters.planTitle; + app.config.globalProperties.$plural = formatters.plural; +} diff --git a/dashboard/src2/logo/AlipayLogo.vue b/dashboard/src2/logo/AlipayLogo.vue new file mode 100644 index 0000000..61113c5 --- /dev/null +++ b/dashboard/src2/logo/AlipayLogo.vue @@ -0,0 +1,45 @@ + + + + + \ No newline at end of file diff --git a/dashboard/src2/logo/PoweredByStripeLogo.vue b/dashboard/src2/logo/PoweredByStripeLogo.vue new file mode 100644 index 0000000..5c037cd --- /dev/null +++ b/dashboard/src2/logo/PoweredByStripeLogo.vue @@ -0,0 +1,27 @@ + diff --git a/dashboard/src2/logo/RazorpayLogo.vue b/dashboard/src2/logo/RazorpayLogo.vue new file mode 100644 index 0000000..8d2d20a --- /dev/null +++ b/dashboard/src2/logo/RazorpayLogo.vue @@ -0,0 +1,50 @@ + diff --git a/dashboard/src2/logo/StripeLogo.vue b/dashboard/src2/logo/StripeLogo.vue new file mode 100644 index 0000000..7258650 --- /dev/null +++ b/dashboard/src2/logo/StripeLogo.vue @@ -0,0 +1,48 @@ + diff --git a/dashboard/src2/logo/WeChatPayLogo.vue b/dashboard/src2/logo/WeChatPayLogo.vue new file mode 100644 index 0000000..59bb339 --- /dev/null +++ b/dashboard/src2/logo/WeChatPayLogo.vue @@ -0,0 +1,48 @@ + + + + + \ No newline at end of file diff --git a/dashboard/src2/main.js b/dashboard/src2/main.js new file mode 100644 index 0000000..ea69858 --- /dev/null +++ b/dashboard/src2/main.js @@ -0,0 +1,168 @@ +import { createApp } from 'vue'; +import { + setConfig, + jingrowRequest, + pageMetaPlugin, + resourcesPlugin, +} from 'jingrow-ui'; +import App from './App.vue'; +import router from './router'; +import { initSocket } from './socket'; +import { subscribeToJobUpdates } from './utils/agentJob'; +import { fetchPlans } from './data/plans.js'; +import * as Sentry from '@sentry/vue'; +import { session } from './data/session.js'; +import { unreadNotificationsCount } from './data/notifications.js'; +import './vendor/posthog.js'; + +const request = (options) => { + const _options = options || {}; + _options.headers = options.headers || {}; + const currentTeam = + localStorage.getItem('current_team') || window.default_team; + if (currentTeam) { + _options.headers['X-Jcloud-Team'] = currentTeam; + } + return jingrowRequest(_options); +}; +setConfig('resourceFetcher', request); +setConfig('defaultListUrl', 'jcloud.api.client.get_list'); +setConfig('defaultDocGetUrl', 'jcloud.api.client.get'); +setConfig('defaultDocInsertUrl', 'jcloud.api.client.insert'); +setConfig('defaultRunDocMethodUrl', 'jcloud.api.client.run_pg_method'); +setConfig('defaultDocUpdateUrl', 'jcloud.api.client.set_value'); +setConfig('defaultDocDeleteUrl', 'jcloud.api.client.delete'); + +let app; +let socket; + +getInitialData().then(() => { + app = createApp(App); + 
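// Bootstrap sequence (continues below): install the router, resources and
// page-meta plugins, open the socket connection and subscribe to agent job
// updates, then, for logged-in sessions, prefetch plans, permission roles and
// the unread notification count before conditionally wiring up Sentry and PostHog.
// Every jingrow-ui resource call goes through the `request` wrapper defined above,
// which adds the X-Jcloud-Team header so requests stay scoped to the active team.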
app.use(router); + app.use(resourcesPlugin); + app.use(pageMetaPlugin); + + socket = initSocket(); + app.config.globalProperties.$socket = socket; + window.$socket = socket; + subscribeToJobUpdates(socket); + if (session.isLoggedIn) { + fetchPlans(); + session.roles.fetch(); + unreadNotificationsCount.fetch(); + } + + if (window.jcloud_dashboard_sentry_dsn.includes('https://')) { + Sentry.init({ + app, + dsn: window.jcloud_dashboard_sentry_dsn, + integrations: [ + Sentry.browserTracingIntegration({ router }), + Sentry.replayIntegration({ + maskAllText: false, + blockAllMedia: false, + }), + Sentry.thirdPartyErrorFilterIntegration({ + // Specify the application keys that you specified in the Sentry bundler plugin + filterKeys: ['jcloud-dashboard'], + + // Defines how to handle errors that contain third party stack frames. + // Possible values are: + // - 'drop-error-if-contains-third-party-frames' + // - 'drop-error-if-exclusively-contains-third-party-frames' + // - 'apply-tag-if-contains-third-party-frames' + // - 'apply-tag-if-exclusively-contains-third-party-frames' + behaviour: 'apply-tag-if-contains-third-party-frames', + }), + ], + replaysSessionSampleRate: 0.1, + replaysOnErrorSampleRate: 1.0, + beforeSend(event, hint) { + const ignoreErrors = [ + /api\/method\/jcloud.api.client/, + /dynamically imported module/, + /NetworkError when attempting to fetch resource/, + /Failed to fetch/, + /Load failed/, + /jingrow is not defined/, + /Cannot read properties of undefined \(reading 'exc_type'\)/, + /Failed to execute 'transaction' on 'IDBDatabase': The database connection is closing/, + /Importing a module script failed./, + /o is undefined/, + /undefined is not an object \(evaluating 'o.exc_type'\)/, + /e is not defined/, + /Cannot set property ethereum of # which has only a getter/, + /Can't find variable: ResizeObserver/, + /Method not found/, + /Menu caption text is required/, + /Internal error opening backing store for indexedDB.open/, + ]; + const ignoreErrorTypes = [ + 'BuildValidationError', + 'ValidationError', + 'PermissionError', + 'SecurityException', + 'AAAARecordExists', + 'AuthenticationError', + 'RateLimitExceededError', + 'InsufficientSpaceOnServer', + 'ConflictingDNSRecord', + 'MultipleARecords', + ]; + const error = hint.originalException; + + if ( + error?.name === 'DashboardError' || + ignoreErrorTypes.includes(error?.exc_type) || + (error?.message && ignoreErrors.some((re) => re.test(error.message))) + ) { + return null; + } + + return event; + }, + logErrors: true, + }); + + Sentry.setTag('team', localStorage.getItem('current_team')); + } + + if ( + window.jcloud_frontend_posthog_project_id && + window.jcloud_frontend_posthog_host && + window.posthog + ) { + window.posthog.init(window.jcloud_frontend_posthog_project_id, { + api_host: window.jcloud_frontend_posthog_host, + person_profiles: 'identified_only', + autocapture: false, + disable_session_recording: true, + session_recording: { + maskAllInputs: true, + }, + }); + } else { + // unset posthog if not configured + window.posthog = undefined; + } + + importGlobals().then(() => { + app.mount('#app'); + }); +}); + +function getInitialData() { + if (import.meta.env.DEV) { + return jingrowRequest({ + url: '/api/method/jcloud.www.dashboard.get_context_for_dev', + }).then((values) => Object.assign(window, values)); + } else { + return Promise.resolve(); + } +} + +function importGlobals() { + return import('./globals.ts').then((globals) => { + app.use(globals.default); + }); +} diff --git 
a/dashboard/src2/objects/bench.ts b/dashboard/src2/objects/bench.ts new file mode 100644 index 0000000..0cd3003 --- /dev/null +++ b/dashboard/src2/objects/bench.ts @@ -0,0 +1,390 @@ +import Tooltip from 'jingrow-ui/src/components/Tooltip/Tooltip.vue'; +import LucideAppWindow from '~icons/lucide/app-window'; +import type { VNode } from 'vue'; +import { defineAsyncComponent, h } from 'vue'; +import { getTeam, switchToTeam } from '../data/team'; +import { icon } from '../utils/components'; +import { + clusterOptions, + getSitesTabColumns, + sitesTabRoute, + siteTabFilterControls +} from './common'; +import { getAppsTab } from './common/apps'; +import { getJobsTab } from './common/jobs'; +import type { + Breadcrumb, + BreadcrumbArgs, + ColumnField, + DashboardObject, + Detail, + FilterField, + List, + RouteDetail, + Row, + Tab +} from './common/types'; +import { getLogsTab } from './tabs/site/logs'; +import { getPatchesTab } from './common/patches'; + +export default { + pagetype: 'Bench', + whitelistedMethods: {}, + detail: getDetail(), + list: getList(), + routes: getRoutes() +} satisfies DashboardObject as DashboardObject; + +function getDetail() { + return { + titleField: 'name', + statusBadge: ({ documentResource: bench }) => ({ label: bench.pg.status }), + route: '/benches/:name', + tabs: getTabs(), + actions: ({ documentResource: res }) => { + const team = getTeam(); + return [ + { + label: '选项', + condition: () => team.pg?.is_desk_user ?? false, + options: [ + { + label: '在 Desk 中查看', + icon: icon('external-link'), + condition: () => team.pg?.is_desk_user, + onClick() { + window.open( + `${window.location.protocol}//${window.location.host}/app/bench/${res.name}`, + '_blank' + ); + } + }, + { + label: '模拟团队', + icon: defineAsyncComponent( + () => import('~icons/lucide/venetian-mask') + ), + condition: () => window.is_system_user ?? 
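// '模拟团队' (impersonate team): a missing window.is_system_user flag is treated as false,
// so the option stays hidden for non-system users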
false, + onClick() { + switchToTeam(res.pg.team); + } + } + ] + } + ]; + } + // breadcrumbs // use default breadcrumbs + } satisfies Detail as Detail; +} + +function getTabs() { + return [ + getSitesTab(), + getAppsTab(false), + getJobsTab('Bench'), + getProcessesTab(), + getLogsTab(false), + getPatchesTab(true) + ] satisfies Tab[] as Tab[]; +} + +function getRoutes() { + return [ + { + name: 'Bench Job', + path: 'jobs/:id', + component: () => import('../pages/JobPage.vue') + }, + { + name: 'Bench Log', + path: 'logs/:logName', + component: () => import('../pages/LogPage.vue') + } + ] satisfies RouteDetail[] as RouteDetail[]; +} + +function getList() { + return { + route: '/benches', + title: '工作台', + fields: [ + 'group.title as group_title', + 'cluster.name as cluster_name', + 'cluster.image as cluster_image', + 'cluster.title as cluster_title' + ], + orderBy: 'creation desc', + searchField: 'name', + columns: [ + { + label: '工作台', + fieldname: 'name', + class: 'font-medium', + suffix: getBenchTitleSuffix + }, + { + label: '状态', + fieldname: 'status', + type: 'Badge', + width: '100px' + }, + { + label: '站点', + fieldname: 'site_count', + type: 'Number', + width: '100px', + align: 'right' + }, + { + label: '区域', + fieldname: 'cluster', + width: 0.75, + format: (value, row) => String(row.cluster_title || value || ''), + prefix: getClusterImagePrefix + }, + { label: '站点分组', fieldname: 'group_title', width: '350px' } + ], + filterControls + } satisfies List as List; +} + +function getBenchTitleSuffix(row: Row) { + const ch: VNode[] = []; + if (row.inplace_update_docker_image) ch.push(getInPlaceUpdatesSuffix(row)); + if (row.has_app_patch_applied) ch.push(getAppPatchSuffix(row)); + if (!ch.length) return; + + return h( + 'div', + { + class: 'flex flex-row gap-2' + }, + ch + ); +} +function getInPlaceUpdatesSuffix(row: Row) { + const count = Number( + String(row.inplace_update_docker_image).split('-').at(-1) + ); + + let title = '工作台已就地更新'; + if (!Number.isNaN(count) && count > 1) { + title += ` ${count} 次`; + } + + return h( + 'div', + { + title, + class: 'rounded-full bg-gray-100 p-1' + }, + h(icon('star', 'w-3 h-3')) + ); +} + +function getAppPatchSuffix(row: Row) { + return h( + 'div', + { + title: '此工作台中的应用可能已打补丁', + class: 'rounded-full bg-gray-100 p-1' + }, + h(icon('hash', 'w-3 h-3')) + ); +} + +function getClusterImagePrefix(row: Row) { + if (!row.cluster_image) return; + + return h('img', { + src: row.cluster_image, + class: 'w-4 h-4', + alt: row.cluster_title + }); +} + +function filterControls() { + return [ + { + type: 'select', + label: '状态', + fieldname: 'status', + options: [ + { label: '', value: '' }, + { label: '激活', value: 'Active' }, + { label: '待定', value: 'Pending' }, + { label: '安装中', value: 'Installing' }, + { label: '更新中', value: 'Updating' }, + { label: '损坏', value: 'Broken' }, + { label: '已归档', value: 'Archived' } + ] + }, + { + type: 'link', + label: '站点分组', + fieldname: 'group', + options: { + pagetype: 'Release Group' + } + }, + { + type: 'select', + label: '区域', + fieldname: 'cluster', + options: clusterOptions + } + ] satisfies FilterField[] as FilterField[]; +} + +export function getSitesTab() { + return { + label: '站点', + icon: icon(LucideAppWindow), + route: 'sites', + type: 'list', + list: { + pagetype: 'Site', + filters: r => ({ + group: r.pg.group, + bench: r.name, + skip_team_filter_for_system_user_and_support_agent: true + }), + fields: [ + 'name', + 'status', + 'host_name', + 'plan.plan_title as plan_title', + 'plan.price_usd as price_usd', + 
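// both USD and CNY plan prices are fetched here; presumably the plan column renders whichever matches the team's billing currency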
'plan.price_cny as price_cny', + 'cluster.image as cluster_image', + 'cluster.title as cluster_title' + ], + orderBy: 'creation desc, bench desc', + pageLength: 99999, + columns: getSitesTabColumns(true), + filterControls: siteTabFilterControls, + route: sitesTabRoute, + primaryAction: r => { + return { + label: '新建站点', + slots: { + prefix: icon('plus', 'w-4 h-4') + }, + route: { + name: 'Release Group New Site', + params: { bench: r.documentResource.pg.group } + } + }; + }, + rowActions: ({ row }) => [ + { + label: '在桌面查看', + condition: () => getTeam()?.pg?.is_desk_user, + onClick() { + window.open( + `${window.location.protocol}//${window.location.host}/app/site/${row.name}`, + '_blank' + ); + } + } + ] + } + } satisfies Tab; +} + +export function getProcessesTab() { + const url = 'jcloud.api.bench.get_processes'; + return { + label: '进程', + icon: icon('cpu'), + route: 'processes', + type: 'list', + list: { + resource({ documentResource: res }) { + return { + params: { + name: res.name + }, + url, + auto: true, + cache: ['ObjectList', url, res.name] + }; + }, + columns: getProcessesColumns(), + rowActions: () => [] // TODO: 允许发出 supectl 命令 + } + } satisfies Tab as Tab; +} + +export function getProcessesColumns() { + const processStatusColorMap = { + Starting: 'blue', + Backoff: 'yellow', + Running: 'green', + Stopping: 'yellow', + Stopped: 'gray', + Exited: 'gray', + Unknown: 'gray', + Fatal: 'red' + }; + + type Status = keyof typeof processStatusColorMap; + return [ + { + label: '名称', + width: 2, + fieldname: 'name' + }, + { + label: '组', +width: 1.5, + fieldname: 'group', + format: v => String(v ?? '') + }, + { + label: '状态', + type: 'Badge', + width: 0.7, + fieldname: 'status', + theme: value => processStatusColorMap[value as Status] ?? 'gray', + suffix: ({ message }) => { + if (!message) { + return; + } + + return h( + Tooltip, + { + text: message, + placement: 'top' + }, + () => h(icon('alert-circle', 'w-3 h-3')) + ); + } + }, + { + label: '运行时间', + fieldname: 'uptime_string' + } + ] satisfies ColumnField[] as ColumnField[]; +} + +function breadcrumbs({ items, documentResource: bench }: BreadcrumbArgs) { + const $team = getTeam(); + const benchCrumb = { + label: bench.pg?.name, + route: `/benches/${bench.pg?.name}` + }; + + if (bench.pg.group_team == $team.pg?.name || $team.pg?.is_desk_user) { + return [ + { + label: bench.pg?.group_title, + route: `/groups/${bench.pg?.group}` + }, + benchCrumb + ] satisfies Breadcrumb[]; + } + + return [...items.slice(0, -1), benchCrumb] satisfies Breadcrumb[]; +} \ No newline at end of file diff --git a/dashboard/src2/objects/common/apps.ts b/dashboard/src2/objects/common/apps.ts new file mode 100644 index 0000000..3308307 --- /dev/null +++ b/dashboard/src2/objects/common/apps.ts @@ -0,0 +1,256 @@ +import { defineAsyncComponent, h } from 'vue'; +import { toast } from 'vue-sonner'; +import { getTeam } from '../../data/team'; +import router from '../../router'; +import { confirmDialog, icon, renderDialog } from '../../utils/components'; +import { planTitle } from '../../utils/format'; +import type { + ColumnField, + DialogConfig, + FilterField, + Tab, + TabList +} from './types'; +import { getUpsellBanner } from '.'; +import { isMobile } from '../../utils/device'; +import { getToastErrorMessage } from '../../utils/toast'; + +export function getAppsTab(forSite: boolean) { + return { + label: '应用', + icon: icon('grid'), + route: 'apps', + type: 'list', + condition: docResource => forSite && docResource.pg?.status !== 'Archived', + list: 
getAppsTabList(forSite) + } satisfies Tab as Tab; +} + +function getAppsTabList(forSite: boolean) { + const options = forSite ? siteAppListOptions : benchAppListOptions; + const list: TabList = { + pagetype: '', + filters: () => ({}), + ...options, + columns: getAppsTabColumns(forSite), + searchField: !forSite ? 'title' : undefined, + filterControls: r => { + if (forSite) return []; + else + return [ + { + type: 'select', + label: '分支', + class: !isMobile() ? 'w-24' : '', + fieldname: 'branch', + options: [ + '', + ...new Set(r.listResource.data?.map(i => String(i.branch)) || []) + ] + }, + { + type: 'select', + label: '所有者', + class: !isMobile() ? 'w-24' : '', + fieldname: 'repository_owner', + options: [ + '', + ...new Set( + r.listResource.data?.map( + i => String(i.repository_url).split('/').at(-2) || '' + ) || [] + ) + ] + } + ] satisfies FilterField[]; + } + }; + + return list; +} + +function getAppsTabColumns(forSite: boolean) { + const appTabColumns: ColumnField[] = [ + { + label: '应用', + fieldname: 'title', + width: 1, + suffix(row) { + if (!row.is_app_patched) { + return; + } + + return h( + 'div', + { + title: '应用已打补丁', + class: 'rounded-full bg-gray-100 p-1' + }, + h(icon('hash', 'w-3 h-3')) + ); + }, + format: (value, row) => value || row.app_title + }, + { + label: '计划', + width: 0.75, + class: 'text-gray-600 text-sm', + format(_, row) { + const planText = planTitle(row.plan_info); + if (planText) return `${planText}/月`; + else return '免费'; + } + }, + { + label: '仓库', + fieldname: 'repository_url', + format: value => String(value).split('/').slice(-2).join('/'), + link: value => String(value) + }, + { + label: '分支', + fieldname: 'branch', + type: 'Badge', + width: 1, + link: (value, row) => { + return `${row.repository_url}/tree/${value}`; + } + }, + { + label: '提交', + fieldname: 'hash', + type: 'Badge', + width: 1, + link: (value, row) => { + return `${row.repository_url}/commit/${value}`; + }, + format(value) { + return String(value).slice(0, 7); + } + }, + { + label: '提交信息', + fieldname: 'commit_message', + width: '30rem' + } + ]; + + if (forSite) return appTabColumns; + return appTabColumns.filter(c => c.label !== '计划'); +} + +const siteAppListOptions: Partial = { + pagetype: '站点应用', + filters: res => { + return { parenttype: 'Site', parent: res.pg?.name }; + }, + primaryAction({ listResource: apps, documentResource: site }) { + return { + label: '安装应用', + slots: { + prefix: icon('plus') + }, + onClick() { + const InstallAppDialog = defineAsyncComponent( + () => import('../../components/site/InstallAppDialog.vue') + ); + + renderDialog( + h(InstallAppDialog, { + site: site.name, + onInstalled() { + apps.reload(); + } + }) + ); + } + }; + }, + rowActions({ row, listResource: apps, documentResource: site }) { + let $team = getTeam(); + + return [ + { + label: '在 Desk 中查看', + condition: () => $team.pg?.is_desk_user, + onClick() { + window.open(`/app/app-source/${row.name}`, '_blank'); + } + }, + { + label: '更改计划', + condition: () => row.plan_info && row.plans.length > 1, + onClick() { + let SiteAppPlanChangeDialog = defineAsyncComponent( + () => import('../../components/site/SiteAppPlanSelectDialog.vue') + ); + renderDialog( + h(SiteAppPlanChangeDialog, { + app: row, + currentPlan: row.plans.find( + (plan: Record) => plan.name === row.plan_info.name + ), + onPlanChanged() { + apps.reload(); + } + }) + ); + } + }, + { + label: '卸载', + condition: () => row.app !== 'jingrow', + onClick() { + const dialogConfig: DialogConfig = { + title: `卸载应用`, + message: `您确定要从站点 
${site.pg?.name} 卸载应用 ${row.title} 吗?
+ 所有与此应用相关的文档类型和模块将被移除。`, + onSuccess({ hide }) { + if (site.uninstallApp.loading) return; + toast.promise( + site.uninstallApp.submit({ + app: row.app + }), + { + loading: '正在安排应用卸载...', + success: (jobId: string) => { + hide(); + router.push({ + name: 'Site Job', + params: { + name: site.name, + id: jobId + } + }); + return '应用卸载已安排'; + }, + error: (e: Error) => getToastErrorMessage(e) + } + ); + } + }; + confirmDialog(dialogConfig); + } + } + ]; + } +}; + +const benchAppListOptions: Partial = { + pagetype: 'Bench App', + filters: res => { + return { parenttype: 'Bench', parent: res.pg?.name }; + }, + rowActions({ row }) { + let $team = getTeam(); + return [ + { + label: '在 Desk 中查看', + condition: () => $team.pg?.is_desk_user, + onClick() { + window.open(`/app/app-release/${row.release}`, '_blank'); + } + } + ]; + } +}; \ No newline at end of file diff --git a/dashboard/src2/objects/common/index.ts b/dashboard/src2/objects/common/index.ts new file mode 100644 index 0000000..efbffb4 --- /dev/null +++ b/dashboard/src2/objects/common/index.ts @@ -0,0 +1,135 @@ +import { defineAsyncComponent, h } from 'vue'; +import { renderDialog } from '../../utils/components'; +import type { + BannerConfig, + ColumnField, + DocumentResource, + Route, + Row, +} from './types'; +import { trialDays } from '../../utils/site'; +import { planTitle } from '../../utils/format'; + +export const unreachable = Error('unreachable'); // used to indicate that a codepath is unreachable + +export const clusterOptions = [ + '', + '中国大陆', + '中国香港', + '美国-洛杉矶', + '新加坡', + '英国-伦敦', + '德国-法兰克福', + '阿联酋-迪拜', +]; + +export function getUpsellBanner(site: DocumentResource, title: string) { + if ( + !site.pg.current_plan || + site.pg.current_plan?.private_benches || + site.pg.current_plan?.is_trial_plan || + !site.pg.group_public + ) + return; + + return { + title: title, + dismissable: true, + id: site.name, + type: 'gray', + button: { + label: '升级计划', + variant: 'outline', + onClick() { + let SitePlansDialog = defineAsyncComponent( + () => import('../../components/ManageSitePlansDialog.vue') + ); + renderDialog(h(SitePlansDialog, { site: site.name })); + }, + }, + } satisfies BannerConfig as BannerConfig; +} + +export function getSitesTabColumns(forBenchTab: boolean) { + return [ + { + label: '站点', + fieldname: 'host_name', + format(value, row) { + return value || row.name; + }, + prefix: () => { + if (forBenchTab) return; + return h('div', { class: 'ml-2 w-3.5 h-3.5' }); + }, + }, + { + label: '状态', + fieldname: 'status', + type: 'Badge', + width: 0.5, + }, + { + label: '区域', + fieldname: 'cluster_title', + width: 0.5, + prefix(row) { + if (row.cluster_title) + return h('img', { + src: row.cluster_image, + class: 'w-4 h-4', + alt: row.cluster_title, + }); + }, + }, + { + label: '计划', + width: 0.5, + format(value, row) { + if (row.trial_end_date) { + return trialDays(row.trial_end_date); + } + return planTitle(row); + }, + }, + ] satisfies ColumnField[] as ColumnField[]; +} + +export function siteTabFilterControls() { + return [ + { + type: 'select', + label: '状态', + fieldname: 'status', + options: [ + { label: '', value: '' }, + { label: '激活', value: 'Active' }, + { label: '未激活', value: 'Inactive' }, + { label: '已暂停', value: 'Suspended' }, + { label: '已损坏', value: 'Broken' } + ], + }, + { + type: 'select', + label: '区域', + fieldname: 'cluster', + options: [ + '', + '中国大陆', + '中国香港', + '美国-洛杉矶', + '新加坡', + '英国-伦敦', + '德国-法兰克福', + '阿联酋-迪拜', + ], + }, + ]; +} + +export function sitesTabRoute(r: Row) { + return { + name: '站点详情', + 
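// '站点详情' (Site Detail) is presumably the registered route name for the site detail page;
// the clicked row's name is passed as its route param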
params: { name: r.name }, + } satisfies Route as Route; +} \ No newline at end of file diff --git a/dashboard/src2/objects/common/jobs.ts b/dashboard/src2/objects/common/jobs.ts new file mode 100644 index 0000000..4e9153f --- /dev/null +++ b/dashboard/src2/objects/common/jobs.ts @@ -0,0 +1,125 @@ +import { unreachable } from '.'; +import { getTeam } from '../../data/team'; +import { icon } from '../../utils/components'; +import { isMobile } from '../../utils/device'; +import { duration } from '../../utils/format'; +import { ColumnField, Tab } from './types'; + +type JobDocTypes = 'Site' | 'Bench' | 'Server' | 'Release Group'; + +export function getJobsTab(pagetype: JobDocTypes) { + const jobRoute = getJobRoute(pagetype); + + return { + label: '任务', + icon: icon('truck'), + childrenRoutes: [jobRoute], + route: 'jobs', + type: 'list', + list: { + pagetype: 'Agent Job', + filters: res => { + if (pagetype === 'Site') return { site: res.name }; + else if (pagetype === 'Bench') return { bench: res.name }; + else if (pagetype === 'Server') return { server: res.name }; + else if (pagetype === 'Release Group') return { group: res.name }; + throw unreachable; + }, + route(row) { + return { + name: jobRoute, + params: { id: row.name } + }; + }, + orderBy: 'creation desc', + searchField: 'job_type', + fields: ['end', 'job_id'], + filterControls() { + return [ + { + type: 'select', + label: '状态', + fieldname: 'status', + class: !isMobile() ? 'w-24' : '', + options: ['', 'Pending', 'Running', 'Success', 'Failure'] + }, + { + type: 'link', + label: '类型', + fieldname: 'job_type', + options: { + pagetype: 'Agent Job Type', + orderBy: 'name asc', + pageLength: 100 + } + } + ]; + }, + rowActions: ({ row }) => [ + { + label: '在桌面查看', + condition: () => getTeam()?.pg?.is_desk_user, + onClick() { + window.open( + `${window.location.protocol}//${window.location.host}/app/agent-job/${row.name}`, + '_blank' + ); + } + } + ], + columns: getJobTabColumns(pagetype) + } + } satisfies Tab as Tab; +} + +function getJobRoute(pagetype: JobDocTypes) { + if (pagetype === 'Site') return 'Site Job'; + else if (pagetype === 'Bench') return 'Bench Job'; + else if (pagetype === 'Server') return 'Server Job'; + else if (pagetype === 'Release Group') return 'Release Group Job'; + throw unreachable; +} + +function getJobTabColumns(pagetype: JobDocTypes) { + const columns: ColumnField[] = [ + { + label: '任务类型', + fieldname: 'job_type', + class: 'font-medium' + }, + { + label: '状态', + fieldname: 'status', + type: 'Badge', + width: 0.5 + }, + { + label: '站点', + fieldname: 'site', + width: 1.2 + }, + { + label: '持续时间', + fieldname: 'duration', + width: 0.35, + format: (value, row) => { + if (row.job_id === 0 || !row.end) return; + return duration(value); + } + }, + { + label: '创建者', + fieldname: 'owner' + }, + { + label: '', + fieldname: 'creation', + type: 'Timestamp', + width: 0.75, + align: 'right' + } + ]; + + if (pagetype !== 'Site') return columns; + return columns.filter(c => c.fieldname !== 'site'); +} \ No newline at end of file diff --git a/dashboard/src2/objects/common/patches.ts b/dashboard/src2/objects/common/patches.ts new file mode 100644 index 0000000..fab7e56 --- /dev/null +++ b/dashboard/src2/objects/common/patches.ts @@ -0,0 +1,180 @@ +import { toast } from 'vue-sonner'; +import { getTeam } from '../../data/team'; +import { confirmDialog, icon, renderDialog } from '../../utils/components'; +import { h } from 'vue'; +import PatchAppDialog from '../../components/group/PatchAppDialog.vue'; +import { ColumnField, 
FilterField, Tab } from './types'; +import { isMobile } from '../../utils/device'; + +const statusTheme = { + Applied: 'green', + 'Not Applied': 'gray', + 'In Process': 'orange', + Failure: 'red' +}; + +type Status = keyof typeof statusTheme; + +export function getPatchesTab(forBench: boolean) { + return { + label: '补丁', + icon: icon('hash'), + route: 'patches', + type: 'list', + list: { + experimental: true, // If removing this, uncheck App Patch pagetype beta flag. + documentation: 'https://jingrow.com/docs/benches/app-patches', + pagetype: 'App Patch', + filters: res => ({ [forBench ? 'bench' : 'group']: res.name }), + searchField: 'filename', + filterControls: r => + [ + { + type: 'select', + label: '状态', + fieldname: 'status', + options: ['', 'Not Applied', 'In Process', 'Failed', 'Applied'] + }, + { + type: 'select', + label: '应用', + fieldname: 'app', + class: !isMobile() ? 'w-24' : '', + options: [ + '', + ...new Set(r.listResource.data?.map(i => String(i.app)) || []) + ] + } + ] satisfies FilterField[], + columns: getPatchesTabColumns(forBench), + primaryAction({ listResource: apps, documentResource: pg }) { + return { + label: '应用补丁', + slots: { + prefix: icon('plus') + }, + onClick() { + const group = pg.pagetype === 'Bench' ? pg.pg.group : pg.name; + + renderDialog(h(PatchAppDialog, { group: group, app: '' })); + } + }; + }, + rowActions({ row, listResource }) { + let team = getTeam(); + return [ + { + label: '在桌面查看', + condition: () => team.pg?.is_desk_user, + onClick() { + window.open( + `${window.location.protocol}//${window.location.host}/app/app-patch/${row.name}`, + '_blank' + ); + } + }, + { + label: '应用补丁', + condition: () => row.status !== 'In Process', + onClick: () => { + toast.promise( + listResource.runDocMethod.submit({ + method: 'apply_patch', + name: String(row.name) + }), + { + loading: '正在创建应用补丁的任务', + success: () => '补丁应用正在进行中', + error: () => '应用补丁失败' + } + ); + } + }, + { + label: '回滚补丁', + condition: () => row.status !== 'In Process', + onClick: () => { + toast.promise( + listResource.runDocMethod.submit({ + method: 'revert_patch', + name: String(row.name) + }), + { + loading: '正在创建回滚补丁的任务', + success: () => '补丁回滚正在进行中', + error: () => '回滚补丁失败' + } + ); + } + }, + { + label: '删除', + condition: () => row.status === 'Not Applied', + onClick: () => { + confirmDialog({ + title: '删除补丁', + message: '确定要删除此补丁吗?', + onSuccess: ({ hide }) => { + toast.promise( + listResource.delete.submit(row.name, { + onSuccess: () => hide() + }), + { + loading: '正在删除...', + success: () => '补丁已删除', + error: () => '删除补丁失败' + } + ); + } + }); + } + } + ]; + } + } + } satisfies Tab; +} + +function getPatchesTabColumns(forBench: boolean) { + const columns: ColumnField[] = [ + { + label: '文件名', + fieldname: 'filename', + width: forBench ? '400px' : '300px' + }, + { + label: '应用', + fieldname: 'app', + width: 0.4 + }, + { + label: '状态', + type: 'Badge', + fieldname: 'status', + theme: value => statusTheme[value as Status], + width: 0.4 + }, + { + label: '工作台', + fieldname: 'bench', + width: 0.8 + }, + { + label: '补丁链接', + fieldname: 'url', + width: forBench ? 
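+ // auto width when shown on a bench's patches tab, fixed 300px on the release group tab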
undefined : '300px', + format(value) { + if (!value) { + return '-'; + } + + const url = new URL(value); + return url.hostname + url.pathname; + }, + link: value => String(value) + } + ]; + + if (forBench) return columns.filter(f => f.fieldname !== 'bench'); + return columns; +} \ No newline at end of file diff --git a/dashboard/src2/objects/common/tags.js b/dashboard/src2/objects/common/tags.js new file mode 100644 index 0000000..8847fa6 --- /dev/null +++ b/dashboard/src2/objects/common/tags.js @@ -0,0 +1,83 @@ +import { h, defineAsyncComponent } from 'vue'; +import { confirmDialog, icon, renderDialog } from '../../utils/components'; +import { toast } from 'vue-sonner'; +import { getToastErrorMessage } from '../../utils/toast'; + +export function tagTab() { + return { + label: '标签', + icon: icon('tag'), + route: 'tags', + type: 'list', + list: { + pagetype: 'Resource Tag', + filters: documentResource => { + return { + parent: documentResource.name, + parenttype: documentResource.pagetype + }; + }, + orderBy: 'creation desc', + columns: [ + { + label: '标签', + fieldname: 'tag_name' + } + ], + primaryAction({ listResource: tags, documentResource }) { + return { + label: '添加标签', + slots: { + prefix: icon('plus') + }, + onClick() { + let AddTagDialog = defineAsyncComponent(() => + import('../../components/AddTagDialog.vue') + ); + renderDialog( + h(AddTagDialog, { + pagetype: documentResource.pagetype, + docname: documentResource.name, + onAdded() { + tags.reload(); + } + }) + ); + } + }; + }, + rowActions({ row, listResource: tags, documentResource }) { + return [ + { + label: '移除', + onClick() { + if (documentResource.removeTag.loading) return; + confirmDialog({ + title: '移除标签', + message: `确定要移除标签 ${row.tag_name} 吗?`, + onSuccess({ hide }) { + documentResource.removeTag.submit( + { + tag: row.tag_name + }, + { + onSuccess() { + tags.reload(); + hide(); + } + } + ); + toast.promise(documentResource.removeTag.promise, { + loading: '正在移除标签...', + success: () => `标签 ${row.tag_name} 已移除`, + error: e => getToastErrorMessage(e, '移除标签失败') + }); + } + }); + } + } + ]; + } + } + }; +} \ No newline at end of file diff --git a/dashboard/src2/objects/common/types.ts b/dashboard/src2/objects/common/types.ts new file mode 100644 index 0000000..613f5a1 --- /dev/null +++ b/dashboard/src2/objects/common/types.ts @@ -0,0 +1,216 @@ +import type { defineAsyncComponent, h, Component } from 'vue'; +import type { icon } from '../../utils/components'; + +type ListResource = { + data: Record[]; + reload: () => void; + runDocMethod: { + submit: (r: { method: string; [key: string]: any }) => Promise; + }; + delete: { + submit: (name: string, cb: { onSuccess: () => void }) => Promise; + }; +}; +export interface ResourceBase { + url: string; + auto: boolean; + cache: string[]; +} + +export interface ResourceWithParams extends ResourceBase { + params: Record; +} + +export interface ResourceWithMakeParams extends ResourceBase { + makeParams: () => Record; +} + +export type Resource = ResourceWithParams | ResourceWithMakeParams; + +export interface DocumentResource { + name: string; + pg: Record; + [key: string]: any; +} + +type Icon = ReturnType; +type AsyncComponent = ReturnType; + +export interface DashboardObject { + pagetype: string; + whitelistedMethods: Record; + list: List; + detail: Detail; + routes: RouteDetail[]; +} + +export interface Detail { + titleField: string; + statusBadge: StatusBadge; + breadcrumbs?: Breadcrumbs; + route: string; + tabs: Tab[]; + actions: (r: { documentResource: DocumentResource }) => 
Action[];
+}
+
+export interface List {
+  route: string;
+  title: string;
+  fields: string[]; // TODO: Incomplete
+  searchField: string;
+  columns: ColumnField[];
+  orderBy: string;
+  filterControls: FilterControls;
+  primaryAction?: PrimaryAction;
+}
+type R = {
+  listResource: ListResource;
+  documentResource: DocumentResource;
+};
+type FilterControls = (r: R) => FilterField[];
+type PrimaryAction = (r: R) => {
+  label: string;
+  variant?: string;
+  slots: {
+    prefix: Icon;
+  };
+  onClick?: () => void;
+};
+type StatusBadge = (r: { documentResource: DocumentResource }) => {
+  label: string;
+};
+export type Breadcrumb = { label: string; route: string };
+export type BreadcrumbArgs = {
+  documentResource: DocumentResource;
+  items: Breadcrumb[];
+};
+export type Breadcrumbs = (r: BreadcrumbArgs) => Breadcrumb[];
+
+export interface FilterField {
+  label: string;
+  fieldname: string;
+  type: string;
+  class?: string;
+  options?:
+    | {
+        pagetype: string;
+        filters?: {
+          pagetype_name?: string;
+        };
+      }
+    | string[];
+}
+
+export interface ColumnField {
+  label: string;
+  fieldname?: string;
+  class?: string;
+  width?: string | number;
+  type?: string;
+  format?: (value: any, row: Row) => string | undefined;
+  link?: (value: unknown, row: Row) => string;
+  prefix?: (row: Row) => Component | undefined;
+  suffix?: (row: Row) => Component | undefined;
+  theme?: (value: unknown) => string;
+  align?: 'left' | 'right';
+}
+
+export type Row = Record;
+
+export interface Tab {
+  label: string;
+  icon: Icon;
+  route: string;
+  type: string;
+  condition?: (r: DocumentResource) => boolean;
+  childrenRoutes?: string[];
+  component?: AsyncComponent;
+  props?: (r: DocumentResource) => Record;
+  list?: TabList;
+}
+
+export interface TabList {
+  pagetype?: string;
+  orderBy?: string;
+  filters?: (r: DocumentResource) => Record;
+  route?: (row: Row) => Route;
+  pageLength?: number;
+  columns: ColumnField[];
+  fields?: Record[] | string[];
+  rowActions?: (r: {
+    row: Row;
+    listResource: ListResource;
+    documentResource: DocumentResource;
+  }) => Action[];
+  primaryAction?: PrimaryAction;
+  filterControls?: FilterControls;
+  banner?: (r: {
+    documentResource: DocumentResource;
+  }) => BannerConfig | undefined;
+  searchField?: string;
+  experimental?: boolean;
+  documentation?: string;
+  resource?: (r: { documentResource: DocumentResource }) => Resource;
+}
+
+interface Action {
+  label: string;
+  slots?: {
+    prefix?: Icon;
+  };
+  theme?: string;
+  variant?: string;
+  onClick?: () => void;
+  condition?: () => boolean;
+  route?: Route;
+  options?: Option[];
+}
+
+export interface Route {
+  name: string;
+  params: Record;
+}
+
+export interface RouteDetail {
+  name: string;
+  path: string;
+  component: Component;
+}
+
+interface Option {
+  label: string;
+  icon: Icon | AsyncComponent;
+  condition: () => boolean;
+  onClick: () => void;
+}
+
+export interface BannerConfig {
+  title: string;
+  dismissable: boolean;
+  id: string;
+  type?: string;
+  button?: {
+    label: string;
+    variant: string;
+    onClick?: () => void;
+  };
+}
+
+export interface DialogConfig {
+  title: string;
+  message: string;
+  primaryAction?: { onClick: () => void };
+  onSuccess?: (o: { hide: () => void }) => void;
+}
+
+export interface Process {
+  program: string;
+  name: string;
+  status: string;
+  uptime?: number;
+  uptime_string?: string;
+  message?: string;
+  group?: string;
+  pid?: number;
+}
\ No newline at end of file
diff --git a/dashboard/src2/objects/generateRoutes.js new file mode
100644 index 0000000..838d734 --- /dev/null +++ b/dashboard/src2/objects/generateRoutes.js @@ -0,0 +1,71 @@ +import objects from './index.js'; + +export default function generateRoutes() { + let routes = []; + for (let objectType in objects) { + let object = objects[objectType]; + if (object.list) { + let routeName = `${object.pagetype} List`; + object.list.routeName = routeName; + routes.push({ + name: routeName, + path: object.list.route, + component: () => import('../pages/ListPage.vue'), + props: route => { + return { objectType, ...route.params }; + } + }); + } + if (object.detail) { + let children = object.detail.tabs.map(tab => { + const routeName = `${object.pagetype} Detail ${tab.label}`; + tab.routeName = routeName; + const nestedChildren = []; + + // nested children shouldn't be added to the main children array + for (let route of tab.nestedChildrenRoutes || []) { + nestedChildren.push({ + ...route, + props: route => { + return { objectType, ...route.params }; + } + }); + } + + return { + name: routeName, + path: tab.route, + component: () => import('../pages/DetailTab.vue'), + props: route => { + return { ...route.params }; + }, + redirect: nestedChildren.length ? { name: tab.redirectTo } : null, + children: nestedChildren + }; + }); + if (object.routes) { + for (let route of object.routes) { + children.push({ + ...route, + props: route => { + return { objectType, ...route.params }; + } + }); + } + } + + object.detail.routeName = `${object.pagetype} Detail`; + routes.push({ + name: object.detail.routeName, + path: object.detail.route, + component: () => import('../pages/DetailPage.vue'), + props: route => { + return { objectType, ...route.params }; + }, + redirect: children.length ? { name: children[0].name } : null, + children + }); + } + } + return routes; +} diff --git a/dashboard/src2/objects/group.js b/dashboard/src2/objects/group.js new file mode 100644 index 0000000..e5582a1 --- /dev/null +++ b/dashboard/src2/objects/group.js @@ -0,0 +1,1039 @@ +import { LoadingIndicator, Tooltip } from 'jingrow-ui'; +import { defineAsyncComponent, h } from 'vue'; +import { toast } from 'vue-sonner'; +import LucideAppWindow from '~icons/lucide/app-window'; +import LucideHardDriveDownload from '~icons/lucide/hard-drive-download'; +import LucideRocket from '~icons/lucide/rocket'; +import AddAppDialog from '../components/group/AddAppDialog.vue'; +import ChangeAppBranchDialog from '../components/group/ChangeAppBranchDialog.vue'; +import PatchAppDialog from '../components/group/PatchAppDialog.vue'; +import { getTeam, switchToTeam } from '../data/team'; +import router from '../router'; +import { confirmDialog, icon, renderDialog } from '../utils/components'; +import { getToastErrorMessage } from '../utils/toast'; +import { date, duration } from '../utils/format'; +import { getJobsTab } from './common/jobs'; +import { getPatchesTab } from './common/patches'; +import { tagTab } from './common/tags'; + +export default { + pagetype: 'Release Group', + whitelistedMethods: { + addApp: 'add_app', + removeApp: 'remove_app', + changeAppBranch: 'change_app_branch', + fetchLatestAppUpdates: 'fetch_latest_app_update', + deleteConfig: 'delete_config', + updateConfig: 'update_config', + updateEnvironmentVariable: 'update_environment_variable', + deleteEnvironmentVariable: 'delete_environment_variable', + updateDependency: 'update_dependency', + addRegion: 'add_region', + deployedVersions: 'deployed_versions', + getAppVersions: 'get_app_versions', + getCertificate: 'get_certificate', + generateCertificate: 
'generate_certificate', + sendTransferRequest: 'send_change_team_request', + addTag: 'add_resource_tag', + removeTag: 'remove_resource_tag', + redeploy: 'redeploy', + initialDeploy: 'initial_deploy' + }, + list: { + route: '/groups', + title: '站点分组', + fields: [{ apps: ['app'] }], + searchField: 'title', + filterControls() { + return [ + { + type: 'link', + label: '版本', + fieldname: 'version', + options: { + pagetype: 'Jingrow Version' + } + }, + { + type: 'link', + label: '标签', + fieldname: 'tags.tag', + options: { + pagetype: 'Jcloud Tag', + filters: { + pagetype_name: 'Release Group' + } + } + } + ]; + }, + columns: [ + { label: '标题', fieldname: 'title', class: 'font-medium' }, + { + label: '状态', + fieldname: 'active_benches', + type: 'Badge', + width: 0.5, + format: (value, row) => { + if (!value) return '等待部署'; + else return '激活'; + } + }, + { + label: '版本', + fieldname: 'version', + width: 0.5 + }, + { + label: '应用', + fieldname: 'app', + format: (value, row) => { + return (row.apps || []).map(d => d.app).join(', '); + }, + width: '25rem' + }, + { + label: '站点', + fieldname: 'site_count', + class: 'text-gray-600', + width: 0.25 + } + ], + primaryAction() { + return { + label: '新建站点分组', + variant: 'solid', + slots: { + prefix: icon('plus') + }, + onClick() { + router.push({ name: 'New Release Group' }); + } + }; + } + }, + detail: { + titleField: 'title', + statusBadge({ documentResource: releaseGroup }) { + return { label: releaseGroup.pg.status }; + }, + breadcrumbs({ items, documentResource: releaseGroup }) { + if (!releaseGroup.pg.server_team) return items; + + let breadcrumbs = []; + let $team = getTeam(); + + if ( + releaseGroup.pg.server_team == $team.pg?.name || + $team.pg?.is_desk_user + ) { + breadcrumbs.push( + { + label: releaseGroup.pg?.server_title || releaseGroup.pg?.server, + route: `/servers/${releaseGroup.pg?.server}` + }, + items[1] + ); + } else { + breadcrumbs.push(...items); + } + return breadcrumbs; + }, + route: '/groups/:name', + tabs: [ + { + label: '站点', + icon: icon(LucideAppWindow), + route: 'sites', + type: 'Component', + component: defineAsyncComponent(() => + import('../pages/ReleaseGroupBenchSites.vue') + ), + props: releaseGroup => { + return { releaseGroup: releaseGroup.pg.name }; + } + }, + { + label: '应用', + icon: icon('grid'), + route: 'apps', + type: 'list', + list: { + pagetype: 'Release Group App', + filters: releaseGroup => { + return { + parenttype: 'Release Group', + parent: releaseGroup.pg.name + }; + }, + pageLength: 99999, + columns: [ + { + label: '应用', + fieldname: 'title', + width: 1 + }, + { + label: '仓库', + width: 1, + format(value, row) { + return `${row.repository_owner}/${row.repository}`; + }, + link(value, row) { + return row.repository_url; + } + }, + { + label: '分支', + fieldname: 'branch', + type: 'Badge', + width: 0.5, + link(value, row) { + return `${row.repository_url}/tree/${value}`; + } + }, + { + label: '版本', + type: 'Badge', + fieldname: 'tag', + width: 0.5, + format(value, row) { + return value || row.hash?.slice(0, 7); + } + }, + { + label: '状态', + type: 'Badge', + suffix(row) { + if (!row.last_github_poll_failed) return; + + return h( + Tooltip, + { + text: "这是什么?", + placement: 'top', + class: 'rounded-full bg-gray-100 p-1' + }, + () => [ + h( + 'a', + { + href: 'https://jingrow.com/docs/faq/app-installation-issue', + target: '_blank' + }, + [h(icon('help-circle', 'w-3 h-3'), {})] + ) + ] + ); + }, + format(value, row) { + let { update_available, deployed, last_github_poll_failed } = + row; + + return 
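+ // status precedence: failed GitHub poll → 需要操作; not yet deployed → 未部署; update available → 有可用更新; otherwise 最新版本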
last_github_poll_failed + ? '需要操作' + : !deployed + ? '未部署' + : update_available + ? '有可用更新' + : '最新版本'; + }, + width: 0.5 + } + ], + rowActions({ + row, + listResource: apps, + documentResource: releaseGroup + }) { + let team = getTeam(); + return [ + { + label: '在桌面查看', + condition: () => team.pg?.is_desk_user, + onClick() { + window.open( + `${window.location.protocol}//${window.location.host}/app/app/${row.name}`, + '_blank' + ); + } + }, + { + label: '获取最新更新', + onClick() { + toast.promise( + releaseGroup.fetchLatestAppUpdates.submit({ + app: row.name + }), + { + loading: `正在为 ${row.title} 获取最新更新...`, + success: () => { + apps.reload(); + return `已为 ${row.title} 获取最新更新`; + }, + error: e => getToastErrorMessage(e) + } + ); + } + }, + { + label: '更改分支', + onClick() { + renderDialog( + h(ChangeAppBranchDialog, { + bench: releaseGroup.name, + app: row, + onBranchChange() { + apps.reload(); + } + }) + ); + } + }, + { + label: '移除应用', + condition: () => row.name !== 'jingrow', + onClick() { + if (releaseGroup.removeApp.loading) return; + confirmDialog({ + title: '移除应用', + message: `确定要移除应用 ${row.title} 吗?`, + onSuccess: ({ hide }) => { + toast.promise( + releaseGroup.removeApp.submit({ + app: row.name + }), + { + loading: '正在移除应用...', + success: () => { + hide(); + apps.reload(); + return '应用已移除'; + }, + error: e => getToastErrorMessage(e) + } + ); + } + }); + } + }, + { + label: '访问仓库', + onClick() { + window.open( + `${row.repository_url}/tree/${row.branch}`, + '_blank' + ); + } + }, + { + label: '应用补丁', + onClick: () => { + renderDialog( + h(PatchAppDialog, { + group: releaseGroup.name, + app: row.name + }) + ); + } + } + ]; + }, + primaryAction({ + listResource: apps, + documentResource: releaseGroup + }) { + return { + label: '添加应用', + slots: { + prefix: icon('plus') + }, + onClick() { + renderDialog( + h(AddAppDialog, { + group: releaseGroup.pg, + onAppAdd() { + apps.reload(); + releaseGroup.reload(); + }, + onNewApp(app, isUpdate) { + const loading = isUpdate + ? '替换应用中...' 
+ : '添加应用中...'; + + toast.promise( + releaseGroup.addApp.submit({ + app, + is_update: isUpdate + }), + { + loading, + success: () => { + apps.reload(); + releaseGroup.reload(); + + if (isUpdate) { + return `应用 ${app.title} 已更新`; + } + + return `应用 ${app.title} 已添加`; + }, + error: e => getToastErrorMessage(e) + } + ); + } + }) + ); + } + }; + } + } + }, + { + label: '部署', + route: 'deploys', + icon: icon('package'), + childrenRoutes: ['Deploy Candidate'], + type: 'list', + list: { + pagetype: 'Deploy Candidate', + route: row => ({ + name: 'Deploy Candidate', + params: { id: row.name } + }), + filters: releaseGroup => { + return { + group: releaseGroup.name + }; + }, + orderBy: 'creation desc', + fields: [{ apps: ['app'] }], + filterControls() { + return [ + { + type: 'select', + label: '状态', + fieldname: 'status', + options: [ + '', + 'Draft', + 'Scheduled', + 'Pending', + 'Preparing', + 'Running', + 'Success', + 'Failure' + ] + } + ]; + }, + banner({ documentResource: releaseGroup }) { + if (releaseGroup.pg.are_builds_suspended) { + return { + title: + '构建已暂停:更新将在构建恢复后计划运行。', + type: 'warning' + }; + } else { + return null; + } + }, + columns: [ + { + label: '部署', + fieldname: 'creation', + format(value) { + return `部署于 ${date(value, 'llll')}`; + }, + width: '20rem' + }, + { + label: '状态', + fieldname: 'status', + type: 'Badge', + width: 0.5, + suffix(row) { + if (!row.addressable_notification) { + return; + } + + return h( + Tooltip, + { + text: '需要关注!', + placement: 'top', + class: 'rounded-full bg-gray-100 p-1' + }, + () => h(icon('alert-circle', 'w-3 h-3'), {}) + ); + } + }, + { + label: '应用', + format(value, row) { + return (row.apps || []).map(d => d.app).join(', '); + }, + width: '20rem' + }, + { + label: '持续时间', + fieldname: 'build_duration', + format: duration, + class: 'text-gray-600', + width: 1 + }, + { + label: '部署者', + fieldname: 'owner', + width: 1 + } + ], + primaryAction({ listResource: deploys, documentResource: group }) { + return { + label: '部署', + slots: { + prefix: icon(LucideRocket) + }, + onClick() { + if (group.pg.deploy_information.deploy_in_progress) { + return toast.error( + '部署正在进行中。请等待其完成。' + ); + } else if (group.pg.deploy_information.update_available) { + let UpdateReleaseGroupDialog = defineAsyncComponent(() => + import('../components/group/UpdateReleaseGroupDialog.vue') + ); + renderDialog( + h(UpdateReleaseGroupDialog, { + bench: group.name, + onSuccess(candidate) { + group.pg.deploy_information.deploy_in_progress = true; + if (candidate) { + group.pg.deploy_information.last_deploy.name = + candidate; + } + } + }) + ); + } else { + confirmDialog({ + title: '无需应用更新即可部署?', + message: + '未检测到应用更新。部署时将应用依赖项和环境变量的更改。', + onSuccess: ({ hide }) => { + toast.promise(group.redeploy.submit(), { + loading: '正在部署...', + success: () => { + hide(); + deploys.reload(); + return '更改已部署'; + }, + error: e => getToastErrorMessage(e) + }); + } + }); + } + } + }; + } + } + }, + getJobsTab('Release Group'), + { + label: '配置', + icon: icon('settings'), + route: 'bench-config', + type: 'list', + list: { + pagetype: 'Common Site Config', + filters: releaseGroup => { + return { + parenttype: 'Release Group', + parent: releaseGroup.name + }; + }, + orderBy: 'creation desc', + fields: ['name'], + pageLength: 999, + columns: [ + { + label: '配置名称', + fieldname: 'key', + format(value, row) { + if (row.title) { + return `${row.title} (${row.key})`; + } + return row.key; + } + }, + { + label: '配置值', + fieldname: 'value', + class: 'font-mono' + }, + { + label: '类型', + fieldname: 'type', + 
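+ // data type of the config value, rendered as a badge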
type: 'Badge', + width: '100px' + } + ], + primaryAction({ + listResource: configs, + documentResource: releaseGroup + }) { + return { + label: '添加配置', + slots: { + prefix: icon('plus') + }, + onClick() { + let ConfigEditorDialog = defineAsyncComponent(() => + import('../components/ConfigEditorDialog.vue') + ); + renderDialog( + h(ConfigEditorDialog, { + group: releaseGroup.pg.name, + onSuccess() { + configs.reload(); + } + }) + ); + } + }; + }, + secondaryAction({ listResource: configs }) { + return { + label: '预览', + slots: { + prefix: icon('eye') + }, + onClick() { + let ConfigPreviewDialog = defineAsyncComponent(() => + import('../components/ConfigPreviewDialog.vue') + ); + renderDialog( + h(ConfigPreviewDialog, { + configs: configs.data + }) + ); + } + }; + }, + rowActions({ + row, + listResource: configs, + documentResource: releaseGroup + }) { + return [ + { + label: '编辑', + onClick() { + let ConfigEditorDialog = defineAsyncComponent(() => + import('../components/ConfigEditorDialog.vue') + ); + renderDialog( + h(ConfigEditorDialog, { + group: releaseGroup.pg.name, + config: row, + onSuccess() { + configs.reload(); + } + }) + ); + } + }, + { + label: '删除', + onClick() { + confirmDialog({ + title: '删除配置', + message: `确定要删除配置 ${row.key} 吗?`, + onSuccess({ hide }) { + if (releaseGroup.deleteConfig.loading) return; + toast.promise( + releaseGroup.deleteConfig.submit( + { key: row.key }, + { + onSuccess: () => { + configs.reload(); + hide(); + } + } + ), + { + loading: '正在删除配置...', + success: () => `配置 ${row.key} 已删除`, + error: e => getToastErrorMessage(e) + } + ); + } + }); + } + } + ]; + } + } + }, + { + label: '操作', + icon: icon('sliders'), + route: 'actions', + type: 'Component', + component: defineAsyncComponent(() => + import('../components/group/ReleaseGroupActions.vue') + ), + props: releaseGroup => { + return { releaseGroup: releaseGroup.name }; + } + }, + { + label: '区域', + icon: icon('globe'), + route: 'regions', + type: 'list', + list: { + pagetype: 'Cluster', + filters: releaseGroup => { + return { group: releaseGroup.name }; + }, + columns: [ + { + label: '区域', + fieldname: 'title' + }, + { + label: '国家', + fieldname: 'image', + format(value, row) { + return ''; + }, + prefix(row) { + return h('img', { + src: row.image, + class: 'w-4 h-4', + alt: row.title + }); + } + } + ], + primaryAction({ + listResource: clusters, + documentResource: releaseGroup + }) { + return { + label: '添加区域', + slots: { + prefix: icon('plus') + }, + onClick() { + let AddRegionDialog = defineAsyncComponent(() => + import('../components/group/AddRegionDialog.vue') + ); + renderDialog( + h(AddRegionDialog, { + group: releaseGroup.pg.name, + onSuccess() { + clusters.reload(); + } + }) + ); + } + }; + } + } + }, + getPatchesTab(false), + { + label: '依赖项', + icon: icon('box'), + route: 'bench-dependencies', + type: 'list', + list: { + pagetype: 'Release Group Dependency', + filters: releaseGroup => { + return { + parenttype: 'Release Group', + parent: releaseGroup.name + }; + }, + columns: [ + { + label: '依赖项', + fieldname: 'dependency', + format(value, row) { + return row.title; + } + }, + { + label: '版本', + fieldname: 'version', + suffix(row) { + if (!row.is_custom) { + return; + } + + return h( + Tooltip, + { + text: '自定义版本', + placement: 'top', + class: 'rounded-full bg-gray-100 p-1' + }, + () => h(icon('alert-circle', 'w-3 h-3'), {}) + ); + } + } + ], + rowActions({ + row, + listResource: dependencies, + documentResource: releaseGroup + }) { + return [ + { + label: '编辑', + onClick() { + let 
DependencyEditorDialog = defineAsyncComponent(() => + import('../components/group/DependencyEditorDialog.vue') + ); + renderDialog( + h(DependencyEditorDialog, { + group: releaseGroup.pg, + dependency: row, + onSuccess() { + dependencies.reload(); + } + }) + ); + } + } + ]; + } + } + }, + { + label: '环境', + icon: icon('tool'), + route: 'bench-environment-variable', + type: 'list', + list: { + pagetype: 'Release Group Variable', + filters: releaseGroup => { + return { + parenttype: 'Release Group', + parent: releaseGroup.name + }; + }, + orderBy: 'creation desc', + fields: ['name'], + columns: [ + { + label: '环境变量名称', + fieldname: 'key' + }, + { + label: '环境变量值', + fieldname: 'value' + } + ], + primaryAction({ + listResource: environmentVariables, + documentResource: releaseGroup + }) { + return { + label: '添加环境变量', + slots: { + prefix: icon('plus') + }, + onClick() { + let EnvironmentVariableEditorDialog = defineAsyncComponent(() => + import('../components/EnvironmentVariableEditorDialog.vue') + ); + renderDialog( + h(EnvironmentVariableEditorDialog, { + group: releaseGroup.pg.name, + onSuccess() { + environmentVariables.reload(); + } + }) + ); + } + }; + }, + rowActions({ + row, + listResource: environmentVariables, + documentResource: releaseGroup + }) { + return [ + { + label: '编辑', + onClick() { + let ConfigEditorDialog = defineAsyncComponent(() => + import('../components/EnvironmentVariableEditorDialog.vue') + ); + renderDialog( + h(ConfigEditorDialog, { + group: releaseGroup.pg.name, + environment_variable: row, + onSuccess() { + environmentVariables.reload(); + } + }) + ); + } + }, + { + label: '删除', + onClick() { + confirmDialog({ + title: '删除环境变量', + message: `确定要删除环境变量 ${row.key} 吗?`, + onSuccess({ hide }) { + if (releaseGroup.deleteEnvironmentVariable.loading) + return; + toast.promise( + releaseGroup.deleteEnvironmentVariable.submit( + { key: row.key }, + { + onSuccess: () => { + environmentVariables.reload(); + hide(); + } + } + ), + { + loading: '正在删除环境变量...', + success: () => + `环境变量 ${row.key} 已删除`, + error: e => getToastErrorMessage(e) + } + ); + } + }); + } + } + ]; + } + } + }, + tagTab() + ], + actions(context) { + let { documentResource: group } = context; + let team = getTeam(); + + return [ + { + label: '模拟组所有者', + title: '模拟组所有者', + slots: { + icon: defineAsyncComponent(() => + import('~icons/lucide/venetian-mask') + ) + }, + condition: () => + team.pg?.is_desk_user && group.pg.team !== team.name, + onClick() { + switchToTeam(group.pg.team); + } + }, + { + label: group.pg?.deploy_information?.last_deploy + ? '有可用更新' + : '立即部署', + slots: { + prefix: group.pg?.deploy_information?.last_deploy + ? 
icon(LucideHardDriveDownload) + : icon(LucideRocket) + }, + variant: 'solid', + condition: () => + !group.pg.deploy_information.deploy_in_progress && + group.pg.deploy_information.update_available && + ['Awaiting Deploy', 'Active'].includes(group.pg.status), + onClick() { + if (group.pg?.deploy_information?.last_deploy) { + let UpdateReleaseGroupDialog = defineAsyncComponent(() => + import('../components/group/UpdateReleaseGroupDialog.vue') + ); + renderDialog( + h(UpdateReleaseGroupDialog, { + bench: group.name, + onSuccess(candidate) { + group.pg.deploy_information.deploy_in_progress = true; + if (candidate) { + group.pg.deploy_information.last_deploy.name = candidate; + } + } + }) + ); + } else { + confirmDialog({ + title: '部署', + message: "立即部署吗?", + onSuccess({ hide }) { + toast.promise( + group.initialDeploy.submit(null, { + onSuccess: () => { + group.reload(); + hide(); + } + }), + { + success: '部署计划成功', + error: '部署计划失败', + loading: '正在计划部署...' + } + ); + } + }); + } + } + }, + { + label: '部署进行中', + slots: { + prefix: () => h(LoadingIndicator, { class: 'w-4 h-4' }) + }, + theme: 'green', + condition: () => group.pg.deploy_information.deploy_in_progress, + route: { + name: 'Deploy Candidate', + params: { id: group.pg?.deploy_information?.last_deploy?.name } + } + }, + { + label: '选项', + condition: () => team.pg?.is_desk_user, + options: [ + { + label: '在 Desk 中查看', + icon: icon('external-link'), + condition: () => team.pg?.is_desk_user, + onClick() { + window.open( + `${window.location.protocol}//${window.location.host}/app/release-group/${group.name}`, + '_blank' + ); + } + } + ] + } + ]; + } + }, + routes: [ + { + name: 'Deploy Candidate', + path: 'deploys/:id', + component: () => import('../pages/DeployCandidate.vue') + }, + { + name: 'Release Group Job', + path: 'jobs/:id', + component: () => import('../pages/JobPage.vue') + } + ] +}; \ No newline at end of file diff --git a/dashboard/src2/objects/index.js b/dashboard/src2/objects/index.js new file mode 100644 index 0000000..06e60c6 --- /dev/null +++ b/dashboard/src2/objects/index.js @@ -0,0 +1,21 @@ +import site from './site'; +import group from './group'; +import bench from './bench'; +import marketplace from './marketplace'; +import server from './server'; +import notification from './notification'; + +let objects = { + Site: site, + Group: group, + Bench: bench, + Marketplace: marketplace, + Server: server, + Notification: notification +}; + +export function getObject(name) { + return objects[name]; +} + +export default objects; \ No newline at end of file diff --git a/dashboard/src2/objects/marketplace.js b/dashboard/src2/objects/marketplace.js new file mode 100644 index 0000000..f47f314 --- /dev/null +++ b/dashboard/src2/objects/marketplace.js @@ -0,0 +1,658 @@ +import { defineAsyncComponent, h } from 'vue'; +import { Button, Badge } from 'jingrow-ui'; +import { toast } from 'vue-sonner'; +import ChangeAppBranchDialog from '../components/marketplace/ChangeAppBranchDialog.vue'; +import { confirmDialog, icon, renderDialog } from '../utils/components'; +import PlansDialog from '../components/marketplace/PlansDialog.vue'; +import CodeReview from '../components/marketplace/CodeReview.vue'; +import GenericDialog from '../components/GenericDialog.vue'; +import ObjectList from '../components/ObjectList.vue'; +import { userCurrency, currency } from '../utils/format'; +import { getToastErrorMessage } from '../utils/toast'; +import { isMobile } from '../utils/device'; +import router from '../router'; + +export default { + pagetype: 
'Marketplace App', + whitelistedMethods: { + removeVersion: 'remove_version', + addVersion: 'add_version', + siteInstalls: 'site_installs', + createApprovalRequest: 'create_approval_request', + cancelApprovalRequest: 'cancel_approval_request', + updateListing: 'update_listing', + markAppReadyForReview: 'mark_app_ready_for_review' + }, + list: { + route: '/apps', + title: '应用市场', + fields: ['image', 'title', 'status', 'description'], + columns: [ + { + label: '应用', + fieldname: 'title', + class: 'font-medium', + width: 0.3, + prefix(row) { + return row.image + ? h('img', { + src: row.image, + class: 'w-6 h-6 rounded-sm', + alt: row.title + }) + : h( + 'div', + { + class: + 'w-6 h-6 rounded bg-gray-300 text-gray-600 flex items-center justify-center' + }, + row.title[0].toUpperCase() + ); + } + }, + { + label: '状态', + type: 'Badge', + fieldname: 'status', + width: 0.3 + }, + { + label: '描述', + fieldname: 'description', + width: 1.0 + } + ], + primaryAction() { + return { + label: '新建应用', + variant: 'solid', + slots: { + prefix: icon('plus') + }, + onClick() { + const NewMarketplaceAppDialog = defineAsyncComponent(() => + import('../components/marketplace/NewMarketplaceAppDialog.vue') + ); + + renderDialog(h(NewMarketplaceAppDialog)); + } + }; + } + }, + detail: { + titleField: 'name', + route: '/apps/:name', + statusBadge({ documentResource: app }) { + return { label: app.pg.status }; + }, + breadcrumbs({ items, documentResource: app }) { + return [ + items[0], + { + label: app.pg.title, + route: `/apps/${app.pg.name}` + } + ]; + }, + tabs: [ + { + label: '分析', + icon: icon('bar-chart-2'), + route: 'analytics', + type: 'Component', + component: defineAsyncComponent(() => + import('../components/marketplace/MarketplaceAppAnalytics.vue') + ), + props: app => { + return { app: app.pg.name }; + } + }, + { + label: '列表', + icon: icon('shopping-cart'), + route: 'listing', + type: 'Component', + component: defineAsyncComponent(() => + import('../components/MarketplaceAppListing.vue') + ), + props: app => { + return { app: app }; + } + }, + { + label: '版本', + icon: icon('package'), + route: 'versions', + type: 'list', + list: { + pagetype: 'Marketplace App Version', + filters: app => { + return { parent: app.pg.name, parenttype: 'Marketplace App' }; + }, + onRowClick: (row, context) => { + const { listResource: versions, documentResource: app } = context; + showReleases(row, app); + }, + fields: [ + 'source.repository_owner as repository_owner', + 'source.repository as repository', + 'source.branch as branch' + ], + columns: [ + { + label: '版本', + fieldname: 'version', + width: 0.5 + }, + { + label: '来源', + fieldname: 'source', + width: 0.5 + }, + { + label: '仓库', + width: 0.5, + format: (value, row) => { + return `${row.repository_owner}/${row.repository}`; + } + }, + { + label: '分支', + fieldname: 'branch', + type: 'Badge', + width: 0.5 + } + ], + primaryAction({ listResource: versions, documentResource: app }) { + return { + label: '新建版本', + slots: { + prefix: icon('plus') + }, + onClick() { + renderDialog( + h( + GenericDialog, + { + options: { + title: `为 ${app.pg.title} 添加版本支持`, + size: '4xl' + } + }, + { + default: () => + h(ObjectList, { + options: { + label: '版本', + fieldname: 'version', + fieldtype: 'ListSelection', + columns: [ + { + label: '版本', + fieldname: 'version' + }, + { + label: '分支', + type: 'Select', + fieldname: 'branch', + format: (value, row) => { + row.selectedOption = value[0]; + return value.map(v => ({ + label: v, + value: v, + onClick: () => { + row.selectedOption = v; + } 
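+ // the selected branch is kept on row.selectedOption and read by the 添加 button below when calling addVersion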
+ })); + } + }, + { + label: '', + fieldname: '', + align: 'right', + type: 'Button', + width: '5rem', + Button({ row, listResource: versionsOptions }) { + return { + label: '添加', + onClick() { + if (app.addVersion.loading) return; + toast.promise( + app.addVersion.submit({ + version: row.version, + branch: row.selectedOption + }), + { + loading: '正在添加新版本...', + success: () => { + versions.reload(); + versionsOptions.reload(); + return '新版本已添加'; + }, + error: e => getToastErrorMessage(e) + } + ); + } + }; + } + } + ], + resource() { + return { + url: 'jcloud.api.marketplace.options_for_version', + params: { + name: app.pg.name + }, + auto: true + }; + } + } + }) + } + ) + ); + } + }; + }, + rowActions({ row, listResource: versions, documentResource: app }) { + return [ + { + label: '显示发布', + slots: { + prefix: icon('plus') + }, + onClick() { + showReleases(row, app); + } + }, + { + label: '更改分支', + onClick() { + renderDialog( + h(ChangeAppBranchDialog, { + app: app.pg.name, + source: row.source, + version: row.version, + activeBranch: row.branch, + onBranchChanged() { + versions.reload(); + } + }) + ); + } + }, + { + label: '移除版本', + onClick() { + toast.promise( + app.removeVersion.submit({ version: row.version }), + { + loading: '正在移除版本...', + success: () => { + versions.reload(); + return '版本已成功移除'; + }, + error: e => getToastErrorMessage(e) + } + ); + } + } + ]; + } + } + }, + { + label: '定价', + icon: icon('dollar-sign'), + route: 'pricing', + type: 'list', + list: { + pagetype: 'Marketplace App Plan', + filters: app => { + return { app: app.pg.name }; + }, + fields: ['name', 'title', 'price_cny', 'price_usd', 'enabled'], + columns: [ + { + label: '标题', + fieldname: 'title' + }, + { + label: '启用', + type: 'Badge', + fieldname: 'enabled', + format: value => { + return value == 1 ? '已启用' : '已禁用'; + } + }, + { + label: '价格 (CNY)', + fieldname: 'price_cny', + format: value => { + return currency(value, 'CNY'); + } + }, + { + label: '价格 (USD)', + fieldname: 'price_usd', + format: value => { + return currency(value, 'USD'); + } + } + ], + primaryAction({ listResource: plans, documentResource: app }) { + return { + label: '新建计划', + slots: { + prefix: icon('plus') + }, + onClick() { + renderDialog( + h(PlansDialog, { + app: app.pg.name, + onPlanCreated() { + plans.reload(); + } + }) + ); + } + }; + }, + rowActions({ row, listResource: plans, documentResource: app }) { + return [ + { + label: '编辑', + onClick() { + renderDialog( + h(PlansDialog, { + app: app.pg.name, + plan: row, + onPlanUpdated() { + plans.reload(); + } + }) + ); + } + } + ]; + } + } + }, + { + label: '订阅', + icon: icon('users'), + route: 'subscription', + type: 'list', + list: { + pagetype: 'Subscription', + filters: app => { + return { + document_type: 'Marketplace App', + document_name: app.name + }; + }, + fields: ['site', 'enabled', 'team'], + filterControls() { + return [ + { + type: 'select', + label: '状态', + class: !isMobile() ? 'w-24' : '', + fieldname: 'enabled', + options: ['', '激活', '禁用'] + } + ]; + }, + columns: [ + { + label: '站点', + fieldname: 'site', + width: 0.6 + }, + { + label: '状态', + type: 'Badge', + fieldname: 'enabled', + width: 0.3, + format: value => { + return value ? '激活' : '禁用'; + } + }, + { + label: '价格', + fieldname: 'price', + width: 0.3, + format: value => { + return userCurrency(value); + } + }, + { + label: '已激活天数', + fieldname: 'active_for', + width: 0.3, + format: value => { + return value + (value == 1 ? 
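+ // both branches render ' 天': Chinese has no singular/plural form, so the value == 1 check has no visible effect here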
' 天' : ' 天'); + } + } + ] + } + } + ], + actions({ documentResource: app }) { + return [ + { + label: '在市场中查看', + slots: { + prefix: icon('external-link') + }, + condition: () => app.pg.status === 'Published', + onClick() { + window.open( + `${window.location.origin}/marketplace/apps/${app.name}`, + '_blank' + ); + } + }, + { + label: '指南', + slots: { + icon: icon('help-circle') + }, + condition: () => app.pg.status === 'Draft', + onClick() { + window.open( + 'https://jingrow.com/docs/marketplace/marketplace-guidelines', + '_blank' + ); + } + }, + { + label: '完成列表', + variant: 'solid', + slots: { + prefix: icon('alert-circle') + }, + condition: () => app.pg.status === 'Draft', + onClick() { + let AppListingStepsDialog = defineAsyncComponent(() => + import('../components/marketplace/AppListingStepsDialog.vue') + ); + + renderDialog( + h(AppListingStepsDialog, { + app: app.pg.name + }) + ); + } + }, + { + label: '选项', + condition: () => app.pg.status === 'Draft', + options: [ + { + label: '删除', + icon: icon('trash-2'), + condition: () => app.pg.status === 'Draft', + onClick() { + confirmDialog({ + title: `删除应用 ${app.pg.title}`, + message: '您确定要删除此应用吗?', + onSuccess({ hide }) { + toast.promise(app.delete.submit(), { + loading: '正在删除应用...', + success: () => { + hide(); + router.push({ name: 'Marketplace App List' }); + return '应用删除成功'; + }, + error: e => getToastErrorMessage(e) + }); + } + }); + } + } + ] + } + ]; + } + } +}; + +function showReleases(row, app) { + renderDialog( + h( + GenericDialog, + { + options: { + title: `${app.pg.name} 在 ${row.branch} 分支的发布`, + size: '6xl' + } + }, + { + default: () => + h(ObjectList, { + options: { + label: '版本', + type: 'list', + pagetype: 'App Release', + filters: { + app: app.pg.name, + source: row.source + }, + fields: ['message', 'tag', 'author', 'status'], + orderBy: 'creation desc', + columns: [ + { + label: '提交信息', + fieldname: 'message', + class: 'w-64', + width: 0.5 + }, + { + label: '哈希值', + fieldname: 'hash', + class: 'w-24', + type: 'Badge', + width: 0.2, + format: value => { + return value.slice(0, 7); + } + }, + { + label: '作者', + fieldname: 'author', + width: 0.2 + }, + { + label: '状态', + fieldname: 'status', + type: 'Badge', + width: 0.3 + }, + { + label: '代码审查', + type: 'Component', + width: 0.2, + component: ({ row, listResource: releases, app }) => { + if ( + (row.status === 'Awaiting Approval' || + row.status === 'Rejected') && + row.screening_status === 'Complete' + ) { + return h(Button, { + label: '代码审查', + variant: 'subtle', + theme: 'blue', + size: 'sm', + onClick: () => + codeReview(row, app, window.is_system_user) + }); + } + return h(Badge, { + label: row.screening_status || '未开始' + }); + } + }, + { + label: '', + fieldname: '', + align: 'right', + type: 'Button', + width: 0.2, + Button({ row, listResource: releases }) { + let label = ''; + let successMessage = ''; + let loadingMessage = ''; + + if (row.status === 'Awaiting Approval') { + label = '取消'; + successMessage = '发布已取消'; + loadingMessage = '正在取消发布...'; + } else if (row.status === 'Draft') { + label = '提交'; + loadingMessage = '正在提交发布以供审批...'; + successMessage = + '发布已提交以供审批'; + } + + return { + label: label, + onClick() { + toast.promise( + row.status === 'Awaiting Approval' + ? 
app.cancelApprovalRequest.submit({ + app_release: row.name + }) + : app.createApprovalRequest.submit({ + app_release: row.name + }), + { + loading: loadingMessage, + success: () => { + releases.reload(); + return successMessage; + }, + error: e => getToastErrorMessage(e) + } + ); + } + }; + } + } + ] + } + }) + } + ) + ); +} + +function codeReview(row, app, isSystemUser) { + renderDialog( + h(CodeReview, { + row: row, + app: app, + isSystemUser: isSystemUser + }) + ); +} \ No newline at end of file diff --git a/dashboard/src2/objects/notification.js b/dashboard/src2/objects/notification.js new file mode 100644 index 0000000..5958391 --- /dev/null +++ b/dashboard/src2/objects/notification.js @@ -0,0 +1,141 @@ +import { h } from 'vue'; +import router from '../router'; +import { getDocResource } from '../utils/resource'; +import { unreadNotificationsCount } from '../data/notifications'; +import { Tooltip, jingrowRequest } from 'jingrow-ui'; +import { icon } from '../utils/components'; +import { getTeam } from '../data/team'; +import { toast } from 'vue-sonner'; + +const getNotification = (name) => { + return getDocResource({ + pagetype: 'Jcloud Notification', + name: name, + whitelistedMethods: { + markNotificationAsRead: 'mark_as_read', + }, + }); +}; + +export default { + pagetype: 'Jcloud Notification', + whitelistedMethods: {}, + list: { + resource() { + let $team = getTeam(); + return { + type: 'list', + pagetype: 'Jcloud Notification', + url: 'jcloud.api.notifications.get_notifications', + auto: true, + filters: { + read: 'Unread', + }, + cache: ['Notifications'], + }; + }, + route: '/notifications', + title: '通知', + orderBy: 'creation desc', + filterControls() { + return [ + { + type: 'tab', + label: '已读', + fieldname: 'read', + options: ['全部', '未读'], + default: '未读', + }, + ]; + }, + onRowClick(row) { + const notification = getNotification(row.name); + + notification.markNotificationAsRead.submit().then(() => { + unreadNotificationsCount.setData((data) => data - 1); + if (row.route) router.push(row.route); + }); + }, + actions({ listResource: notifications }) { + return [ + { + label: '全部标记为已读', + slots: { + prefix: icon('check-circle'), + }, + async onClick() { + toast.promise( + jingrowRequest({ + url: '/api/method/jcloud.api.notifications.mark_all_notifications_as_read', + }), + { + success: () => { + notifications.reload(); + return '所有通知已标记为已读'; + }, + loading: '正在将所有通知标记为已读...', + error: (error) => + error.messages?.length + ? 
error.messages.join('\n') + : error.message, + }, + ); + }, + }, + ]; + }, + columns: [ + { + label: '标题', + fieldname: 'title', + width: '20rem', + format(value, row) { + return value || row.type; + }, + suffix(row) { + if (row.is_actionable && !row.is_addressed) { + let AlertIcon = icon('alert-circle'); + return h( + Tooltip, + { + text: '此通知需要您的关注', + }, + { + default: () => + h( + 'div', + { + class: 'ml-2 text-red-500', + }, + h(AlertIcon), + ), + }, + ); + } + }, + }, + { + label: '消息', + fieldname: 'message', + type: 'Component', + width: '40rem', + component({ row }) { + return h('div', { + class: 'truncate text-base text-gray-600', + // replace all html tags except + innerHTML: row.message + .replace(/<(?!\/?b\b)[^>]*>/g, '') + .split('\n')[0], + }); + }, + }, + { + label: '', + fieldname: 'creation', + type: 'Timestamp', + align: 'right', + }, + ], + }, + routes: [], +}; \ No newline at end of file diff --git a/dashboard/src2/objects/server.js b/dashboard/src2/objects/server.js new file mode 100644 index 0000000..17c36d2 --- /dev/null +++ b/dashboard/src2/objects/server.js @@ -0,0 +1,523 @@ +import { defineAsyncComponent, h } from 'vue'; +import LucideAppWindow from '~icons/lucide/app-window'; +import ServerActions from '../components/server/ServerActions.vue'; +import { getTeam } from '../data/team'; +import router from '../router'; +import { icon } from '../utils/components'; +import { duration, planTitle, userCurrency } from '../utils/format'; +import { trialDays } from '../utils/site'; +import { getJobsTab } from './common/jobs'; +import { tagTab } from './common/tags'; + +export default { + pagetype: 'Server', + whitelistedMethods: { + increaseDiskSize: 'increase_disk_size_for_server', + configureAutoAddStorage: 'configure_auto_add_storage', + changePlan: 'change_plan', + reboot: 'reboot', + rename: 'rename', + dropServer: 'drop_server', + addTag: 'add_resource_tag', + removeTag: 'remove_resource_tag' + }, + list: { + route: '/servers', + title: '服务器', + fields: [ + 'title', + 'database_server', + 'plan.title as plan_title', + 'plan.price_usd as price_usd', + 'plan.price_cny as price_cny', + 'cluster.image as cluster_image', + 'cluster.title as cluster_title' + ], + filterControls() { + return [ + { + type: 'select', + label: '状态', + fieldname: 'status', + options: [ + { label: '', value: '' }, + { label: '激活', value: 'Active' }, + { label: '待定', value: 'Pending' } + ] + }, + { + type: 'select', + label: '区域', + fieldname: 'cluster', + options: [ + '', + '中国大陆', + '中国香港', + '美国-洛杉矶', + '新加坡', + '英国-伦敦', + '德国-法兰克福', + '阿联酋-迪拜', + ] + } + ]; + }, + orderBy: 'creation desc', + columns: [ + { + label: '服务器', + fieldname: 'name', + width: 1.5, + class: 'font-medium', + format(value, row) { + return row.title || value; + } + }, + { label: '状态', fieldname: 'status', type: 'Badge', width: 0.8 }, + { + label: '应用服务器计划', + format(value, row) { + return planTitle(row); + } + }, + { + label: '数据库服务器计划', + fieldname: 'db_plan', + format(value) { + if (!value) return ''; + return planTitle(value); + } + }, + { + label: '区域', + fieldname: 'cluster', + format(value, row) { + return row.cluster_title || value; + }, + prefix(row) { + return h('img', { + src: row.cluster_image, + class: 'w-4 h-4', + alt: row.cluster_title + }); + } + } + ], + primaryAction({ listResource: servers }) { + return { + label: '新建服务器', + variant: 'solid', + slots: { + prefix: icon('plus') + }, + onClick() { + router.push({ name: 'New Server' }); + } + }; + } + }, + detail: { + titleField: 'name', + route: 
'/servers/:name', + statusBadge({ documentResource: server }) { + return { + label: server.pg.status + }; + }, + breadcrumbs({ documentResource: server }) { + return [ + { + label: '服务器', + route: '/servers' + }, + { + label: server.pg.title || server.pg.name, + route: `/servers/${server.pg.name}` + } + ]; + }, + actions({ documentResource: server }) { + let $team = getTeam(); + + return [ + { + label: '模拟服务器所有者', + title: '模拟服务器所有者', // for label to pop-up on hover + slots: { + icon: defineAsyncComponent(() => + import('~icons/lucide/venetian-mask') + ) + }, + condition: () => + $team.pg?.is_desk_user && server.pg.team !== $team.name, + onClick() { + switchToTeam(server.pg.team); + } + }, + { + label: '选项', + button: { + label: '选项', + slots: { + icon: icon('more-horizontal') + } + }, + options: [ + { + label: '在 Desk 中查看', + icon: icon('external-link'), + condition: () => $team.pg?.is_desk_user, + onClick() { + window.open( + `${window.location.protocol}//${ + window.location.host + }/app/${server.pagetype.replace(' ', '-').toLowerCase()}/${ + server.pg.name + }`, + '_blank' + ); + } + }, + { + label: '访问服务器', + icon: icon('external-link'), + condition: () => + server.pg.status === 'Active' && $team.pg?.is_desk_user, + onClick() { + window.open(`https://${server.pg.name}`, '_blank'); + } + } + ] + } + ]; + }, + tabs: [ + { + label: '概览', + icon: icon('home'), + route: 'overview', + type: 'Component', + component: defineAsyncComponent(() => + import('../components/server/ServerOverview.vue') + ), + props: server => { + return { server: server.pg.name }; + } + }, + { + label: '分析', + icon: icon('bar-chart-2'), + route: 'analytics', + type: 'Component', + component: defineAsyncComponent(() => + import('../components/server/ServerCharts.vue') + ), + props: server => { + return { + serverName: server.pg.name + }; + } + }, + { + label: '站点', + icon: icon(LucideAppWindow), + route: 'sites', + type: 'list', + list: { + pagetype: 'Site', + filters: server => { + return { server: server.pg.name }; + }, + fields: [ + 'plan.plan_title as plan_title', + 'plan.price_usd as price_usd', + 'plan.price_cny as price_cny', + 'group.title as group_title', + 'group.public as group_public', + 'group.team as group_team', + 'group.version as version', + 'trial_end_date' + ], + orderBy: 'creation desc', + searchField: 'host_name', + route(row) { + return { name: '站点详情', params: { name: row.name } }; + }, + filterControls() { + return [ + { + type: 'select', + label: '状态', + fieldname: 'status', + options: [ + { label: '', value: '' }, + { label: '激活', value: 'Active' }, + { label: '未激活', value: 'Inactive' }, + { label: '已暂停', value: 'Suspended' }, + { label: '损坏', value: 'Broken' } + ] + }, + { + type: 'link', + label: '版本', + fieldname: 'group.version', + options: { + pagetype: 'Jingrow Version' + } + }, + { + type: 'link', + label: '站点分组', + fieldname: 'group', + options: { + pagetype: 'Release Group' + } + }, + { + type: 'link', + label: '标签', + fieldname: 'tags.tag', + options: { + pagetype: 'Jcloud Tag', + filters: { + pagetype_name: 'Site' + } + } + } + ]; + }, + columns: [ + { + label: '站点', + fieldname: 'host_name', + width: 1.5, + class: 'font-medium', + format(value, row) { + return value || row.name; + } + }, + { label: '状态', fieldname: 'status', type: 'Badge', width: 0.6 }, + { + label: '计划', + fieldname: 'plan', + width: 0.85, + format(value, row) { + if (row.trial_end_date) { + return trialDays(row.trial_end_date); + } + let $team = getTeam(); + if (row.price_usd > 0) { + let china = 
$team.pg.country == 'china'; + let formattedValue = userCurrency( + china ? row.price_cny : row.price_usd, + 0 + ); + return `${formattedValue}/月`; + } + return row.plan_title; + } + }, + { + label: '站点分组', + fieldname: 'group_title', + width: '15rem' + }, + { + label: '版本', + fieldname: 'version', + width: 0.5 + } + ] + } + }, + { + label: '站点分组', + icon: icon('package'), + route: 'groups', + type: 'list', + list: { + pagetype: 'Release Group', + filters: server => { + return { server: server.pg.name }; + }, + fields: [{ apps: ['app'] }, { servers: ['server'] }], + columns: [ + { label: '标题', fieldname: 'title' }, + { + label: '状态', + fieldname: 'active_benches', + type: 'Badge', + width: 0.5, + format: (value, row) => { + if (!value) return '等待部署'; + else return '激活'; + } + }, + { + label: '版本', + fieldname: 'version', + width: 0.5 + }, + { + label: '应用', + fieldname: 'app', + format: (value, row) => { + return (row.apps || []).map(d => d.app).join(', '); + }, + width: '25rem' + }, + { + label: '站点', + fieldname: 'site_count', + width: 0.25 + } + ], + filterControls() { + return [ + { + type: 'link', + label: '版本', + fieldname: 'version', + options: { + pagetype: 'Jingrow Version' + } + }, + { + type: 'link', + label: '标签', + fieldname: 'tags.tag', + options: { + pagetype: 'Jcloud Tag', + filters: { + pagetype_name: 'Release Group' + } + } + } + ]; + }, + route(row) { + return { + name: 'Release Group Detail', + params: { name: row.name } + }; + }, + primaryAction({ listResource: benches, documentResource: server }) { + return { + label: '新建站点分组', + slots: { + prefix: icon('plus') + }, + onClick() { + router.push({ + name: 'Server New Release Group', + params: { server: server.pg.name } + }); + } + }; + } + } + }, + getJobsTab('Server'), + { + label: '执行', + icon: icon('play'), + childrenRoutes: ['Server Play'], + route: 'plays', + type: 'list', + list: { + pagetype: 'Ansible Play', + filterControls({ documentResource: server }) { + return [ + { + type: 'select', + label: '服务器', + fieldname: 'server', + options: [ + server.pg.name, + server.pg.database_server, + server.pg.replication_server + ].filter(Boolean) + } + ]; + }, + filters: server => { + return { + server: [ + 'in', + [ + server.pg.name, + server.pg.database_server, + server.pg.replication_server + ].filter(Boolean) + ] + }; + }, + route(row) { + return { + name: 'Server Play', + params: { id: row.name } + }; + }, + orderBy: 'creation desc', + fields: ['server', 'end'], + columns: [ + { + label: '执行', + fieldname: 'play', + width: 2 + }, + { + label: '状态', + fieldname: 'status', + type: 'Badge', + width: 0.5 + }, + { + label: '服务器', + fieldname: 'server', + width: 2 + }, + { + label: '持续时间', + fieldname: 'duration', + width: 0.5, + format(value, row) { + if (row.job_id === 0 || !row.end) return; + return duration(value); + } + }, + { + label: '', + fieldname: 'creation', + type: 'Timestamp', + align: 'right' + } + ] + } + }, + { + label: '操作', + icon: icon('sliders'), + route: 'actions', + type: 'Component', + component: ServerActions, + props: server => { + return { server: server.pg.name }; + } + }, + tagTab() + ] + }, + routes: [ + { + name: 'Server Job', + path: 'jobs/:id', + component: () => import('../pages/JobPage.vue') + }, + { + name: 'Server Play', + path: 'plays/:id', +component: () => import('../pages/PlayPage.vue') + } + ] +}; \ No newline at end of file diff --git a/dashboard/src2/objects/site.js b/dashboard/src2/objects/site.js new file mode 100644 index 0000000..6b8c230 --- /dev/null +++ 
b/dashboard/src2/objects/site.js @@ -0,0 +1,1334 @@ +import { + createListResource, + createResource, + LoadingIndicator, +} from 'jingrow-ui'; +import { defineAsyncComponent, h } from 'vue'; +import { unparse } from 'papaparse'; +import { toast } from 'vue-sonner'; +import AddDomainDialog from '../components/AddDomainDialog.vue'; +import GenericDialog from '../components/GenericDialog.vue'; +import ObjectList from '../components/ObjectList.vue'; +import SiteActions from '../components/SiteActions.vue'; +import { getTeam, switchToTeam } from '../data/team'; +import router from '../router'; +import { getRunningJobs } from '../utils/agentJob'; +import { confirmDialog, icon, renderDialog } from '../utils/components'; +import dayjs from '../utils/dayjs'; +import { bytes, date, userCurrency } from '../utils/format'; +import { getToastErrorMessage } from '../utils/toast'; +import { getDocResource } from '../utils/resource'; +import { trialDays } from '../utils/site'; +import { clusterOptions, getUpsellBanner } from './common'; +import { getAppsTab } from './common/apps'; +import { isMobile } from '../utils/device'; + +export default { + pagetype: 'Site', + whitelistedMethods: { + activate: 'activate', + addDomain: 'add_domain', + archive: 'archive', + backup: 'backup', + clearSiteCache: 'clear_site_cache', + deactivate: 'deactivate', + disableReadWrite: 'disable_read_write', + enableReadWrite: 'enable_read_write', + installApp: 'install_app', + uninstallApp: 'uninstall_app', + migrate: 'migrate', + moveToBench: 'move_to_bench', + moveToGroup: 'move_to_group', + loginAsAdmin: 'login_as_admin', + loginAsTeam: 'login_as_team', + isSetupWizardComplete: 'is_setup_wizard_complete', + reinstall: 'reinstall', + removeDomain: 'remove_domain', + redirectToPrimary: 'set_redirect', + removeRedirect: 'unset_redirect', + setPrimaryDomain: 'set_host_name', + restoreSite: 'restore_site', + restoreSiteFromFiles: 'restore_site_from_files', + scheduleUpdate: 'schedule_update', + editScheduledUpdate: 'edit_scheduled_update', + cancelUpdate: 'cancel_scheduled_update', + setPlan: 'set_plan', + updateConfig: 'update_config', + deleteConfig: 'delete_config', + sendTransferRequest: 'send_change_team_request', + addTag: 'add_resource_tag', + removeTag: 'remove_resource_tag', + getBackupDownloadLink: 'get_backup_download_link', + fetchDatabaseTableSchemas: 'fetch_database_table_schemas', + }, + list: { + route: '/sites', + title: '站点', + fields: [ + 'plan.plan_title as plan_title', + 'plan.price_usd as price_usd', + 'plan.price_cny as price_cny', + 'group.title as group_title', + 'group.public as group_public', + 'group.team as group_team', + 'group.version as version', + 'cluster.image as cluster_image', + 'cluster.title as cluster_title', + 'trial_end_date', + 'site_end_date', + ], + orderBy: 'creation desc', + searchField: 'host_name', + filterControls() { + return [ + { + type: 'select', + label: '状态', + fieldname: 'status', + options: [ + { label: '', value: '' }, + { label: '激活', value: 'Active' }, + { label: '未激活', value: 'Inactive' }, + { label: '已暂停', value: 'Suspended' }, + { label: '损坏', value: 'Broken' }, + { label: '已归档', value: 'Archived' }, + ], + }, + { + type: 'link', + label: '版本', + fieldname: 'group.version', + options: { + pagetype: 'Jingrow Version', + }, + }, + { + type: 'link', + label: '站点分组', + fieldname: 'group', + options: { + pagetype: 'Release Group', + }, + }, + { + type: 'select', + label: '区域', + fieldname: 'cluster', + options: clusterOptions, + }, + { + type: 'link', + label: '标签', + 
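+ // filters sites on the child table field tags.tag; the link options below limit choices to Jcloud Tags whose pagetype_name is Site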
fieldname: 'tags.tag', + options: { + pagetype: 'Jcloud Tag', + filters: { + pagetype_name: 'Site', + }, + }, + }, + ]; + }, + columns: [ + { + label: '站点', + fieldname: 'host_name', + width: 1.5, + class: 'font-medium', + format(value, row) { + return value || row.name; + }, + }, + { + label: '状态', + fieldname: 'status', + type: 'Badge', + width: '140px', + format(value) { + const statusMap = { + 'Active': '激活', + 'Inactive': '未激活', + 'Suspended': '已暂停', + 'Broken': '损坏', + 'Archived': '已归档', + 'Pending': '待处理', + 'Running': '运行中', + 'Success': '成功', + 'Failure': '失败' + }; + return statusMap[value] || value; + } + }, + { + label: '计划', + fieldname: 'plan', + width: 0.85, + format(value, row) { + if (row.trial_end_date) { + return trialDays(row.trial_end_date); + } + const $team = getTeam(); + if (row.price_usd > 0) { + const china = $team.pg?.currency === 'CNY'; + const formattedValue = userCurrency( + china ? row.price_cny : row.price_usd, + 0, + ); + return `${formattedValue}/月`; + } + return row.plan_title; + }, + }, + { + label: '区域', + fieldname: 'cluster', + width: 1, + format(value, row) { + return row.cluster_title || value; + }, + prefix(row) { + return h('img', { + src: row.cluster_image, + class: 'w-4 h-4', + alt: row.cluster_title, + }); + }, + }, + { + label: '站点分组', + fieldname: 'group', + width: '15rem', + format(value, row) { + return row.group_public ? '公域' : row.group_title || value; + }, + }, + { + label: '版本', + fieldname: 'version', + width: 0.5, + }, + { + label: '到期时间', + fieldname: 'site_end_date', + width: 1, + format(value) { + return value ? date(value, 'YYYY-MM-DD') : ''; + }, + }, + ], + primaryAction({ listResource: sites }) { + return { + label: '新建站点', + variant: 'solid', + slots: { + prefix: icon('plus'), + }, + onClick() { + router.push({ name: 'New Site' }); + }, + }; + }, + moreActions({ listResource: sites }) { + return [ + { + label: '导出为CSV', + icon: 'download', + onClick() { + const fields = [ + 'host_name', + 'plan_title', + 'cluster_title', + 'group_title', + 'version', + ]; + + const data = sites.data.map((site) => { + const row = {}; + fields.forEach((field) => { + row[field] = site[field]; + }); + return row; + }); + + let csv = unparse({ + fields, + data, + }); + csv = '\uFEFF' + csv; // for utf-8 + + // create a blob and trigger a download + const blob = new Blob([csv], { type: 'text/csv;charset=utf-8' }); + const today = new Date().toISOString().split('T')[0]; + const filename = `sites-${today}.csv`; + const link = document.createElement('a'); + link.href = URL.createObjectURL(blob); + link.download = filename; + link.click(); + URL.revokeObjectURL(link.href); + }, + }, + ]; + }, + }, + detail: { + titleField: 'name', + route: '/sites/:name', + statusBadge({ documentResource: site }) { + const statusMap = { + 'Active': '激活', + 'Inactive': '未激活', + 'Suspended': '已暂停', + 'Broken': '损坏', + 'Archived': '已归档', + 'Pending': '待处理', + 'Running': '运行中', + 'Success': '成功', + 'Failure': '失败' + }; + return { label: statusMap[site.pg.status] || site.pg.status }; + }, + breadcrumbs({ items, documentResource: site }) { + let breadcrumbs = []; + let $team = getTeam(); + let siteCrumb = { + label: site.pg.host_name || site.pg?.name, + route: `/sites/${site.pg?.name}`, + }; + + if ( + (site.pg.server_team == $team.pg?.name && + site.pg.group_team == $team.pg?.name) || + $team.pg?.is_desk_user + ) { + breadcrumbs.push({ + label: site.pg?.server_title || site.pg?.server, + route: `/servers/${site.pg?.server}`, + }); + } + if ( + site.pg.group_team == 
$team.pg?.name || + $team.pg?.is_desk_user || + $team.pg?.is_support_agent + ) { + breadcrumbs.push( + { + label: site.pg?.group_title, + route: `/groups/${site.pg?.group}`, + }, + siteCrumb, + ); + } else { + breadcrumbs.push(...items.slice(0, -1), siteCrumb); + } + return breadcrumbs; + }, + tabs: [ + { + label: '概览', + icon: icon('home'), + route: 'overview', + type: 'Component', + condition: (site) => site.pg?.status !== 'Archived', + component: defineAsyncComponent( + () => import('../components/SiteOverview.vue'), + ), + props: (site) => { + return { site: site.pg?.name }; + }, + }, + getAppsTab(true), + { + label: '域名', + icon: icon('external-link'), + route: 'domains', + type: 'list', + condition: (site) => site.pg?.status !== 'Archived', + list: { + pagetype: 'Site Domain', + fields: ['redirect_to_primary'], + filters: (site) => { + return { site: site.pg?.name }; + }, + columns: [ + { + label: '域名', + fieldname: 'domain', + }, + { + label: '状态', + fieldname: 'status', + type: 'Badge', + format(value) { + const statusMap = { + 'Active': '激活', + 'Inactive': '未激活', + 'Suspended': '已暂停', + 'Broken': '损坏', + 'Archived': '已归档', + 'Pending': '待处理', + 'Running': '运行中', + 'Success': '成功', + 'Failure': '失败' + }; + return statusMap[value] || value; + } + }, + { + label: '主域名', + fieldname: 'primary', + type: 'Icon', + Icon(value) { + return value ? 'check' : ''; + }, + }, + { + label: 'DNS 类型', + fieldname: 'dns_type', + type: 'Badge', + }, + ], + banner({ documentResource: site }) { + if (site.pg.broken_domain_error) { + return { + title: + '获取您的域名的 HTTPS 证书时出错。', + type: 'error', + button: { + label: '查看错误', + variant: 'outline', + onClick() { + renderDialog( + h( + GenericDialog, + { + options: { + title: '获取证书时出错', + size: 'xl', + }, + }, + { + default: () => { + return h('pre', { + class: + 'whitespace-pre-wrap text-sm rounded border-2 p-3 border-gray-200 bg-gray-100', + innerHTML: site.pg.broken_domain_error, + }); + }, + }, + ), + ); + }, + }, + }; + } else { + return null; + } + }, + primaryAction({ listResource: domains, documentResource: site }) { + return { + label: '添加域名', + slots: { + prefix: icon('plus'), + }, + onClick() { + renderDialog( + h(AddDomainDialog, { + site: site.pg, + onDomainAdded() { + domains.reload(); + }, + }), + ); + }, + }; + }, + rowActions({ row, listResource: domains, documentResource: site }) { + return [ + { + label: '移除', + condition: () => row.domain !== site.pg?.name, + onClick() { + confirmDialog({ + title: `移除域名`, + message: `您确定要从站点 ${site.pg?.name} 中移除域名 ${row.domain} 吗?`, + onSuccess({ hide }) { + if (site.removeDomain.loading) return; + toast.promise( + site.removeDomain.submit({ + domain: row.domain, + }), + { + loading: '正在移除域名...', + success: () => { + hide(); + return '域名已移除'; + }, + error: (e) => getToastErrorMessage(e), + }, + ); + }, + }); + }, + }, + { + label: '设为主域名', + condition: () => !row.primary && row.status === 'Active', + onClick() { + confirmDialog({ + title: `设为主域名`, + message: `您确定要将域名 ${row.domain} 设为站点 ${site.pg?.name} 的主域名吗?`, + onSuccess({ hide }) { + if (site.setPrimaryDomain.loading) return; + toast.promise( + site.setPrimaryDomain.submit({ + domain: row.domain, + }), + { + loading: '正在设置主域名...', + success: () => { + hide(); + return '主域名已设置'; + }, + error: (e) => getToastErrorMessage(e), + }, + ); + }, + }); + }, + }, + { + label: '重定向到主域名', + condition: () => + !row.primary && + !row.redirect_to_primary && + row.status === 'Active', + onClick() { + confirmDialog({ + title: `重定向域名`, + message: `您确定要将域名 ${row.domain} 
重定向到站点 ${site.pg?.name} 的主域名吗?`, + onSuccess({ hide }) { + if (site.redirectToPrimary.loading) return; + toast.promise( + site.redirectToPrimary.submit({ + domain: row.domain, + }), + { + loading: '正在重定向域名...', + success: () => { + hide(); + return '域名已重定向'; + }, + error: (e) => getToastErrorMessage(e), + }, + ); + }, + }); + }, + }, + { + label: '移除重定向', + condition: () => + !row.primary && + row.redirect_to_primary && + row.status === 'Active', + onClick() { + confirmDialog({ + title: `移除重定向`, + message: `您确定要移除从域名 ${row.domain} 到站点 ${site.pg?.name} 主域名的重定向吗?`, + onSuccess({ hide }) { + if (site.removeRedirect.loading) return; + toast.promise( + site.removeRedirect.submit({ + domain: row.domain, + }), + { + loading: '正在移除重定向...', + success: () => { + hide(); + return '重定向已移除'; + }, + error: (e) => getToastErrorMessage(e), + }, + ); + }, + }); + }, + }, + ]; + }, + }, + }, + { + label: '备份', + icon: icon('archive'), + route: 'backups', + type: 'list', + list: { + pagetype: 'Site Backup', + filters: (site) => { + return { + site: site.pg?.name, + files_availability: 'Available', + status: ['in', ['Pending', 'Running', 'Success']], + }; + }, + orderBy: 'creation desc', + fields: [ + 'job', + 'status', + 'database_url', + 'public_url', + 'private_url', + 'config_file_url', + 'site', + 'remote_database_file', + 'remote_public_file', + 'remote_private_file', + 'remote_config_file', + 'physical', + ], + columns: [ + { + label: '时间戳', + fieldname: 'creation', + width: 1, + format(value) { + return `备份于 ${date(value, 'llll')}`; + }, + }, + + { + label: '数据库', + fieldname: 'database_size', + width: 0.5, + format(value) { + return value ? bytes(value) : ''; + }, + }, + { + label: '公共文件', + fieldname: 'public_size', + width: 0.5, + format(value) { + return value ? bytes(value) : ''; + }, + }, + { + label: '私有文件', + fieldname: 'private_size', + width: 0.5, + format(value) { + return value ? bytes(value) : ''; + }, + }, + { + label: '包含文件的备份', + fieldname: 'with_files', + type: 'Icon', + width: 0.5, + Icon(value) { + return value ? 'check' : ''; + }, + }, + { + label: '异地备份', + fieldname: 'offsite', + width: 0.5, + type: 'Icon', + Icon(value) { + return value ? 'check' : ''; + }, + }, + ], + filterControls() { + return [ + { + type: 'checkbox', + label: '异地备份', + fieldname: 'offsite', + }, + ]; + }, + rowActions({ row, documentResource: site }) { + if (row.status != 'Success') return; + + function getFileName(file) { + if (file == 'database') return 'database'; + if (file == 'public') return 'public files'; + if (file == 'private') return 'private files'; + if (file == 'config') return 'config file'; + } + + function confirmDownload(backup, file) { + confirmDialog({ + title: '下载备份', + message: `您将下载站点 ${ + site.pg?.host_name || site.pg?.name + } 的 ${getFileName(file)} 备份,该备份创建于 ${date(backup.creation, 'llll')}。${ + !backup.offsite + ? '
您需要以 系统管理员 身份登录 您的站点 才能下载备份。
' + : '' + }`, + onSuccess() { + downloadBackup(backup, file); + }, + }); + } + + async function downloadBackup(backup, file) { + // file: database, public, or private + if (backup.offsite) { + site.getBackupDownloadLink.submit( + { backup: backup.name, file }, + { + onSuccess(r) { + // TODO: fix this in documentResource, it should return message directly + if (r.message) { + window.open(r.message); + } + }, + }, + ); + } else { + const url = + file == 'config' + ? backup.config_file_url + : backup[file + '_url']; + + const domainRegex = /^(https?:\/\/)?([^/]+)\/?/; + const newUrl = url.replace( + domainRegex, + `$1${site.pg.host_name}/`, + ); + window.open(newUrl); + } + } + + return [ + { + group: '详情', + items: [ + { + label: '查看任务', + onClick() { + router.push({ + name: 'Site Job', + params: { name: site.name, id: row.job }, + }); + }, + }, + ], + }, + { + group: '下载', + items: [ + { + label: '下载数据库', + onClick() { + return confirmDownload(row, 'database'); + }, + }, + { + label: '下载公共文件', + onClick() { + return confirmDownload(row, 'public'); + }, + condition: () => row.public_url, + }, + { + label: '下载私有文件', + onClick() { + return confirmDownload(row, 'private'); + }, + condition: () => row.private_url, + }, + { + label: '下载配置文件', + onClick() { + return confirmDownload(row, 'config'); + }, + condition: () => row.config_file_url, + }, + ], + }, + { + group: '恢复', + condition: () => row.offsite, + items: [ + { + label: '恢复备份', + condition: () => site.pg.status !== 'Archived', + onClick() { + confirmDialog({ + title: '恢复备份', + message: `您确定要将您的站点恢复到${dayjs( + row.creation, + ).format('lll')}的异地备份吗?`, + onSuccess({ hide }) { + toast.promise( + site.restoreSiteFromFiles.submit({ + files: { + database: row.remote_database_file, + public: row.remote_public_file, + private: row.remote_private_file, + config: row.remote_config_file, + }, + }), + { + loading: '正在安排备份恢复...', + success: (jobId) => { + hide(); + router.push({ + name: 'Site Job', + params: { + name: site.name, + id: jobId, + }, + }); + return '备份恢复已成功安排。'; + }, + error: (e) => getToastErrorMessage(e), + }, + ); + }, + }); + }, + }, + { + label: '在另一个站点上恢复备份', + onClick() { + let SelectSiteForRestore = defineAsyncComponent( + () => + import('../components/site/SelectSiteForRestore.vue'), + ); + renderDialog( + h(SelectSiteForRestore, { + site: site.name, + onRestore(siteName) { + const restoreSite = createResource({ + url: 'jcloud.api.site.restore', + }); + + return toast.promise( + restoreSite.submit({ + name: siteName, + files: { + database: row.remote_database_file, + public: row.remote_public_file, + private: row.remote_private_file, + config: row.remote_config_file, + }, + }), + { + loading: '正在安排备份恢复...', + success: (jobId) => { + router.push({ + name: 'Site Job', + params: { name: siteName, id: jobId }, + }); + return '备份恢复已成功安排。'; + }, + error: (e) => getToastErrorMessage(e), + }, + ); + }, + }), + ); + }, + }, + ], + }, + ].filter((d) => (d.condition ? 
d.condition() : true)); + }, + primaryAction({ listResource: backups, documentResource: site }) { + return { + label: '安排备份', + slots: { + prefix: icon('upload-cloud'), + }, + loading: site.backup.loading, + onClick() { + confirmDialog({ + title: '安排备份', + message: + '您确定要安排备份吗?这将创建一个本地备份。', + onSuccess({ hide }) { + toast.promise( + site.backup.submit({ + with_files: true, + }), + { + loading: '正在安排备份...', + success: () => { + hide(); + router.push({ + name: 'Site Jobs', + params: { name: site.name }, + }); + return '备份已成功安排。'; + }, + error: (e) => getToastErrorMessage(e), + }, + ); + }, + }); + }, + }; + }, + }, + }, + { + label: '操作', + icon: icon('sliders'), + route: 'actions', + type: 'Component', + condition: (site) => site.pg?.status !== 'Archived', + component: SiteActions, + props: (site) => { + return { site: site.pg?.name }; + }, + }, + { + label: '更新', + icon: icon('arrow-up-circle'), + route: 'updates', + type: 'list', + condition: (site) => site.pg?.status !== 'Archived', + childrenRoutes: ['Site Update'], + list: { + pagetype: 'Site Update', + filters: (site) => { + return { site: site.pg?.name }; + }, + orderBy: 'creation', + fields: [ + 'difference', + 'update_job.end as updated_on', + 'update_job', + 'backup_type', + 'recover_job', + ], + columns: [ + { + label: '类型', + fieldname: 'deploy_type', + width: 0.3, + }, + { + label: '状态', + fieldname: 'status', + type: 'Badge', + width: 0.5, + }, + // { + // label: '备份', + // width: 0.4, + // type: 'Component', + // component({ row }) { + // return h( + // 'div', + // { + // class: 'truncate text-base', + // }, + // row.skipped_backups + // ? '跳过' + // : row.backup_type || '逻辑', + // ); + // }, + // }, + { + label: '创建者', + fieldname: 'owner', + }, + { + label: '计划时间', + fieldname: 'scheduled_time', + format(value) { + return date(value, 'lll'); + }, + }, + { + label: '更新时间', + fieldname: 'updated_on', + format(value) { + return date(value, 'lll'); + }, + }, + ], + rowActions({ row, documentResource: site }) { + return [ + { + label: '编辑', + condition: () => row.status === 'Scheduled', + onClick() { + let SiteUpdateDialog = defineAsyncComponent( + () => import('../components/SiteUpdateDialog.vue'), + ); + renderDialog( + h(SiteUpdateDialog, { + site: site.pg?.name, + existingUpdate: row.name, + }), + ); + }, + }, + { + label: '取消', + condition: () => row.status === 'Scheduled', + onClick() { + confirmDialog({ + title: '取消更新', + message: `您确定要取消计划的更新吗?`, + onSuccess({ hide }) { + if (site.cancelUpdate.loading) return; + toast.promise( + site.cancelUpdate.submit({ site_update: row.name }), + { + loading: '正在取消更新...', + success: () => { + hide(); + site.reload(); + return '更新已取消'; + }, + error: (e) => getToastErrorMessage(e), + }, + ); + }, + }); + }, + }, + { + label: '查看任务', + condition: () => row.status !== 'Scheduled', + onClick() { + router.push({ + name: 'Site Update', + params: { id: row.name }, + }); + }, + }, + { + label: '立即更新', + condition: () => row.status === 'Scheduled', + onClick() { + let siteUpdate = getDocResource({ + pagetype: 'Site Update', + name: row.name, + whitelistedMethods: { + updateNow: 'start', + }, + }); + + toast.promise(siteUpdate.updateNow.submit(), { + loading: '正在更新站点...', + success: () => { + router.push({ + name: 'Site Update', + params: { id: row.name }, + }); + + return '站点更新已启动'; + }, + error: '更新站点失败', + }); + }, + }, + { + label: '查看应用更改', + onClick() { + createListResource({ + pagetype: 'Deploy Candidate Difference App', + fields: [ + 'difference.github_diff_url as diff_url', + 
'difference.source_hash as source_hash', + 'difference.destination_hash as destination_hash', + 'app.title as app', + ], + filters: { + parenttype: 'Deploy Candidate Difference', + parent: row.difference, + }, + auto: true, + pageLength: 99, + onSuccess(data) { + if (data?.length) { + renderDialog( + h( + GenericDialog, + { + options: { + title: '应用更改', + size: '2xl', + }, + }, + { + default: () => + h(ObjectList, { + options: { + data: () => data, + columns: [ + { + label: '应用', + fieldname: 'app', + }, + { + label: '从', + fieldname: 'source_hash', + type: 'Button', + Button({ row }) { + return { + label: + row.source_tag || + row.source_hash.slice(0, 7), + variant: 'ghost', + class: 'font-mono', + link: `${ + row.diff_url.split('/compare')[0] + }/commit/${row.source_hash}`, + }; + }, + }, + { + label: '到', + fieldname: 'destination_hash', + type: 'Button', + Button({ row }) { + return { + label: + row.destination_tag || + row.destination_hash.slice(0, 7), + variant: 'ghost', + class: 'font-mono', + link: `${ + row.diff_url.split('/compare')[0] + }/commit/${row.destination_hash}`, + }; + }, + }, + { + label: '应用变更', + fieldname: 'diff_url', + align: 'right', + type: 'Button', + Button({ row }) { + return { + label: '查看', + variant: 'ghost', + slots: { + prefix: icon('external-link'), + }, + link: row.diff_url, + }; + }, + }, + ], + }, + }), + }, + ), + ); + } else toast.error('未找到应用变更'); + }, + }); + }, + }, + ]; + }, + actions({ documentResource: site }) { + if (site.pg.group_public) return []; + + return [ + { + label: '配置', + slots: { + prefix: icon('settings'), + }, + onClick() { + let ConfigureAutoUpdateDialog = defineAsyncComponent( + () => + import( + '../components/site/ConfigureAutoUpdateDialog.vue' + ), + ); + + renderDialog( + h(ConfigureAutoUpdateDialog, { + site: site.pg?.name, + }), + ); + }, + }, + ]; + }, + }, + }, + + ], + actions(context) { + let { documentResource: site } = context; + let $team = getTeam(); + let runningJobs = getRunningJobs({ site: site.pg?.name }); + + return [ + { + label: '进行中的任务', + slots: { + prefix: () => h(LoadingIndicator, { class: 'w-4 h-4' }), + }, + condition() { + return ( + runningJobs.filter((job) => + ['Pending', 'Running'].includes(job.status), + ).length > 0 + ); + }, + onClick() { + router.push({ + name: 'Site Jobs', + params: { name: site.name }, + }); + }, + }, + { + label: '有可用更新', + variant: site.pg?.setup_wizard_complete ? 
'solid' : 'subtle', + slots: { + prefix: icon('alert-circle'), + }, + condition() { + return ( + !site.pg?.has_scheduled_updates && + site.pg.update_information?.update_available && + ['Active', 'Inactive', 'Suspended', 'Broken'].includes( + site.pg.status, + ) + ); + }, + + onClick() { + let SiteUpdateDialog = defineAsyncComponent( + () => import('../components/SiteUpdateDialog.vue'), + ); + renderDialog(h(SiteUpdateDialog, { site: site.pg?.name })); + }, + }, + { + label: '已安排更新', + slots: { + prefix: icon('calendar'), + }, + condition: () => site.pg?.has_scheduled_updates, + onClick() { + router.push({ + name: 'Site Detail Updates', + params: { name: site.name }, + }); + }, + }, + { + label: '模拟站点所有者', + title: '模拟站点所有者', // for label to pop-up on hover + slots: { + icon: defineAsyncComponent( + () => import('~icons/lucide/venetian-mask'), + ), + }, + condition: () => + $team.pg?.is_desk_user && site.pg.team !== $team.name, + onClick() { + switchToTeam(site.pg.team); + }, + }, + { + label: '访问站点', + slots: { + prefix: icon('external-link'), + }, + condition: () => + site.pg.status !== 'Archived' && site.pg?.setup_wizard_complete, + onClick() { + window.open(`https://${site.name}`, '_blank'); + }, + }, + { + label: '设置站点', + slots: { + prefix: icon('external-link'), + }, + variant: 'solid', + condition: () => + site.pg.status === 'Active' && !site.pg?.setup_wizard_complete, + onClick() { + if (site.pg.additional_system_user_created) { + site.loginAsTeam + .submit({ reason: '' }) + .then((url) => window.open(url, '_blank')); + } else { + site.loginAsAdmin + .submit({ reason: '' }) + .then((url) => window.open(url, '_blank')); + } + }, + }, + { + label: '选项', + context, + options: [ + { + label: '在 Desk 中查看', + icon: 'external-link', + condition: () => $team.pg?.is_desk_user, + onClick: () => { + window.open( + `${window.location.protocol}//${window.location.host}/app/site/${site.name}`, + '_blank', + ); + }, + }, + { + label: '以管理员身份登录', + icon: 'external-link', + condition: () => ['Active', 'Broken'].includes(site.pg.status), + onClick: () => { + confirmDialog({ + title: '以管理员身份登录', + message: `您确定要以管理员身份登录站点 ${site.pg?.name} 吗?`, + fields: + $team.name !== site.pg.team + ? [ + { + label: '原因', + type: 'textarea', + fieldname: 'reason', + }, + ] + : [], + onSuccess: ({ hide, values }) => { + if (!values.reason && $team.name !== site.pg.team) { + throw new Error('原因必填'); + } + return site.loginAsAdmin + .submit({ reason: values.reason }) + .then((result) => { + let url = result; + window.open(url, '_blank'); + hide(); + }); + }, + }); + }, + }, + ], + }, + ]; + }, + }, + + routes: [ + { + name: '站点更新', + path: 'updates/:id', + component: () => import('../pages/SiteUpdate.vue'), + }, + ], +}; \ No newline at end of file diff --git a/dashboard/src2/objects/tabs/site/logs.ts b/dashboard/src2/objects/tabs/site/logs.ts new file mode 100644 index 0000000..83ab4ab --- /dev/null +++ b/dashboard/src2/objects/tabs/site/logs.ts @@ -0,0 +1,59 @@ +import { icon } from '../../../utils/components'; +import { date } from '../../../utils/format'; +import { Tab } from '../../common/types'; + +export function getLogsTab(forSite: boolean) { + const childRoute = forSite ? '站点日志' : '工作台日志'; + const url = forSite ? 
'jcloud.api.site.logs' : 'jcloud.api.bench.logs'; + + return { + label: '日志', + icon: icon('file-text'), + route: 'logs', + childrenRoutes: [childRoute], + type: 'list', + list: { + resource({ documentResource: res }) { + return { + makeParams: () => { + if (res.pagetype === 'Site') { + return { name: res.pg.name }; + } else { + return { name: res.pg.group, bench: res.name }; + } + }, + url, + auto: true, + cache: ['ObjectList', url, res.name] + }; + }, + route(row) { + return { + name: childRoute, + params: { logName: row.name } + }; + }, + columns: [ + { + label: '名称', + fieldname: 'name' + }, + { + label: '大小', + fieldname: 'size', + class: 'text-gray-600', + format(value) { + return `${value} kB`; + } + }, + { + label: '修改时间', + fieldname: 'modified', + format(value) { + return value ? date(value, 'lll') : ''; + } + } + ] + } + } satisfies Tab as Tab; +} \ No newline at end of file diff --git a/dashboard/src2/pages/Billing.vue b/dashboard/src2/pages/Billing.vue new file mode 100644 index 0000000..00dcddc --- /dev/null +++ b/dashboard/src2/pages/Billing.vue @@ -0,0 +1,68 @@ + + \ No newline at end of file diff --git a/dashboard/src2/pages/BillingBalances.vue b/dashboard/src2/pages/BillingBalances.vue new file mode 100644 index 0000000..b679dfc --- /dev/null +++ b/dashboard/src2/pages/BillingBalances.vue @@ -0,0 +1,205 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/pages/BillingInvoices.vue b/dashboard/src2/pages/BillingInvoices.vue new file mode 100644 index 0000000..e095dec --- /dev/null +++ b/dashboard/src2/pages/BillingInvoices.vue @@ -0,0 +1,249 @@ + + \ No newline at end of file diff --git a/dashboard/src2/pages/BillingMarketplacePayouts.vue b/dashboard/src2/pages/BillingMarketplacePayouts.vue new file mode 100644 index 0000000..43b6e82 --- /dev/null +++ b/dashboard/src2/pages/BillingMarketplacePayouts.vue @@ -0,0 +1,105 @@ + + \ No newline at end of file diff --git a/dashboard/src2/pages/BillingMpesaInvoices.vue b/dashboard/src2/pages/BillingMpesaInvoices.vue new file mode 100644 index 0000000..78c8f90 --- /dev/null +++ b/dashboard/src2/pages/BillingMpesaInvoices.vue @@ -0,0 +1,119 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/pages/BillingOrders.vue b/dashboard/src2/pages/BillingOrders.vue new file mode 100644 index 0000000..c73b45d --- /dev/null +++ b/dashboard/src2/pages/BillingOrders.vue @@ -0,0 +1,348 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/pages/BillingOverview.vue b/dashboard/src2/pages/BillingOverview.vue new file mode 100644 index 0000000..8d0f431 --- /dev/null +++ b/dashboard/src2/pages/BillingOverview.vue @@ -0,0 +1,40 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/pages/BillingPaymentMethods.vue b/dashboard/src2/pages/BillingPaymentMethods.vue new file mode 100644 index 0000000..bc23e24 --- /dev/null +++ b/dashboard/src2/pages/BillingPaymentMethods.vue @@ -0,0 +1,209 @@ + + \ No newline at end of file diff --git a/dashboard/src2/pages/CreateSiteForMarketplaceApp.vue b/dashboard/src2/pages/CreateSiteForMarketplaceApp.vue new file mode 100644 index 0000000..a066cfa --- /dev/null +++ b/dashboard/src2/pages/CreateSiteForMarketplaceApp.vue @@ -0,0 +1,242 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/pages/DeployCandidate.vue b/dashboard/src2/pages/DeployCandidate.vue new file mode 100644 index 0000000..9173508 --- /dev/null +++ b/dashboard/src2/pages/DeployCandidate.vue @@ -0,0 +1,263 @@ + + \ No newline at end of file diff --git a/dashboard/src2/pages/DetailPage.vue 
b/dashboard/src2/pages/DetailPage.vue new file mode 100644 index 0000000..e846cbe --- /dev/null +++ b/dashboard/src2/pages/DetailPage.vue @@ -0,0 +1,198 @@ + + + + \ No newline at end of file diff --git a/dashboard/src2/pages/DetailTab.vue b/dashboard/src2/pages/DetailTab.vue new file mode 100644 index 0000000..fb5fe00 --- /dev/null +++ b/dashboard/src2/pages/DetailTab.vue @@ -0,0 +1,42 @@ + + \ No newline at end of file diff --git a/dashboard/src2/pages/Enable2FA.vue b/dashboard/src2/pages/Enable2FA.vue new file mode 100644 index 0000000..9e26f60 --- /dev/null +++ b/dashboard/src2/pages/Enable2FA.vue @@ -0,0 +1,34 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/pages/Home.vue b/dashboard/src2/pages/Home.vue new file mode 100644 index 0000000..8789158 --- /dev/null +++ b/dashboard/src2/pages/Home.vue @@ -0,0 +1,44 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/pages/Impersonate.vue b/dashboard/src2/pages/Impersonate.vue new file mode 100644 index 0000000..b53effa --- /dev/null +++ b/dashboard/src2/pages/Impersonate.vue @@ -0,0 +1,21 @@ + + \ No newline at end of file diff --git a/dashboard/src2/pages/InstallApp.vue b/dashboard/src2/pages/InstallApp.vue new file mode 100644 index 0000000..3758d70 --- /dev/null +++ b/dashboard/src2/pages/InstallApp.vue @@ -0,0 +1,435 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/pages/JobPage.vue b/dashboard/src2/pages/JobPage.vue new file mode 100644 index 0000000..597fe4f --- /dev/null +++ b/dashboard/src2/pages/JobPage.vue @@ -0,0 +1,216 @@ + + \ No newline at end of file diff --git a/dashboard/src2/pages/ListPage.vue b/dashboard/src2/pages/ListPage.vue new file mode 100644 index 0000000..7183c33 --- /dev/null +++ b/dashboard/src2/pages/ListPage.vue @@ -0,0 +1,108 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/pages/LogPage.vue b/dashboard/src2/pages/LogPage.vue new file mode 100644 index 0000000..a3407c3 --- /dev/null +++ b/dashboard/src2/pages/LogPage.vue @@ -0,0 +1,103 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/pages/LoginSignup.vue b/dashboard/src2/pages/LoginSignup.vue new file mode 100644 index 0000000..896aecf --- /dev/null +++ b/dashboard/src2/pages/LoginSignup.vue @@ -0,0 +1,933 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/pages/NewReleaseGroup.vue b/dashboard/src2/pages/NewReleaseGroup.vue new file mode 100644 index 0000000..84a0ad0 --- /dev/null +++ b/dashboard/src2/pages/NewReleaseGroup.vue @@ -0,0 +1,298 @@ + + \ No newline at end of file diff --git a/dashboard/src2/pages/NewServer.vue b/dashboard/src2/pages/NewServer.vue new file mode 100644 index 0000000..55e08c8 --- /dev/null +++ b/dashboard/src2/pages/NewServer.vue @@ -0,0 +1,586 @@ + + \ No newline at end of file diff --git a/dashboard/src2/pages/NewSite.vue b/dashboard/src2/pages/NewSite.vue new file mode 100644 index 0000000..f7333ec --- /dev/null +++ b/dashboard/src2/pages/NewSite.vue @@ -0,0 +1,886 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/pages/Partners.vue b/dashboard/src2/pages/Partners.vue new file mode 100644 index 0000000..a8279d5 --- /dev/null +++ b/dashboard/src2/pages/Partners.vue @@ -0,0 +1,64 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/pages/PlayPage.vue b/dashboard/src2/pages/PlayPage.vue new file mode 100644 index 0000000..1f72ada --- /dev/null +++ b/dashboard/src2/pages/PlayPage.vue @@ -0,0 +1,159 @@ + + \ No newline at end of file diff --git a/dashboard/src2/pages/RechargeCredits.vue 
b/dashboard/src2/pages/RechargeCredits.vue new file mode 100644 index 0000000..0baf4fc --- /dev/null +++ b/dashboard/src2/pages/RechargeCredits.vue @@ -0,0 +1,205 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/pages/ReleaseGroupBenchSites.vue b/dashboard/src2/pages/ReleaseGroupBenchSites.vue new file mode 100644 index 0000000..9257daf --- /dev/null +++ b/dashboard/src2/pages/ReleaseGroupBenchSites.vue @@ -0,0 +1,444 @@ + + \ No newline at end of file diff --git a/dashboard/src2/pages/ResetPassword.vue b/dashboard/src2/pages/ResetPassword.vue new file mode 100644 index 0000000..a84da6e --- /dev/null +++ b/dashboard/src2/pages/ResetPassword.vue @@ -0,0 +1,145 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/pages/Settings.vue b/dashboard/src2/pages/Settings.vue new file mode 100644 index 0000000..58d5cb0 --- /dev/null +++ b/dashboard/src2/pages/Settings.vue @@ -0,0 +1,61 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/pages/SetupAccount.vue b/dashboard/src2/pages/SetupAccount.vue new file mode 100644 index 0000000..ed1f788 --- /dev/null +++ b/dashboard/src2/pages/SetupAccount.vue @@ -0,0 +1,279 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/pages/SiteLogin.vue b/dashboard/src2/pages/SiteLogin.vue new file mode 100644 index 0000000..a6f6771 --- /dev/null +++ b/dashboard/src2/pages/SiteLogin.vue @@ -0,0 +1,409 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/pages/SiteUpdate.vue b/dashboard/src2/pages/SiteUpdate.vue new file mode 100644 index 0000000..66f8627 --- /dev/null +++ b/dashboard/src2/pages/SiteUpdate.vue @@ -0,0 +1,165 @@ + + \ No newline at end of file diff --git a/dashboard/src2/pages/Welcome.vue b/dashboard/src2/pages/Welcome.vue new file mode 100644 index 0000000..0732ab7 --- /dev/null +++ b/dashboard/src2/pages/Welcome.vue @@ -0,0 +1,49 @@ + + \ No newline at end of file diff --git a/dashboard/src2/pages/devtools/database/DatabaseAnalyzer.vue b/dashboard/src2/pages/devtools/database/DatabaseAnalyzer.vue new file mode 100644 index 0000000..fe0154a --- /dev/null +++ b/dashboard/src2/pages/devtools/database/DatabaseAnalyzer.vue @@ -0,0 +1,779 @@ + + \ No newline at end of file diff --git a/dashboard/src2/pages/devtools/database/DatabaseSQLPlayground.vue b/dashboard/src2/pages/devtools/database/DatabaseSQLPlayground.vue new file mode 100644 index 0000000..7326582 --- /dev/null +++ b/dashboard/src2/pages/devtools/database/DatabaseSQLPlayground.vue @@ -0,0 +1,405 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/pages/devtools/log-browser/LogBrowser.vue b/dashboard/src2/pages/devtools/log-browser/LogBrowser.vue new file mode 100644 index 0000000..02a0ac2 --- /dev/null +++ b/dashboard/src2/pages/devtools/log-browser/LogBrowser.vue @@ -0,0 +1,192 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/pages/devtools/log-browser/LogList.vue b/dashboard/src2/pages/devtools/log-browser/LogList.vue new file mode 100644 index 0000000..7112ea4 --- /dev/null +++ b/dashboard/src2/pages/devtools/log-browser/LogList.vue @@ -0,0 +1,133 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/pages/devtools/log-browser/LogViewer.vue b/dashboard/src2/pages/devtools/log-browser/LogViewer.vue new file mode 100644 index 0000000..facacd8 --- /dev/null +++ b/dashboard/src2/pages/devtools/log-browser/LogViewer.vue @@ -0,0 +1,322 @@ + + + \ No newline at end of file diff --git a/dashboard/src2/pages/saas/AppSelector.vue b/dashboard/src2/pages/saas/AppSelector.vue new file mode 100644 index 
0000000..493b3e9 --- /dev/null +++ b/dashboard/src2/pages/saas/AppSelector.vue @@ -0,0 +1,36 @@ + + \ No newline at end of file diff --git a/dashboard/src2/pages/saas/Login.vue b/dashboard/src2/pages/saas/Login.vue new file mode 100644 index 0000000..a2affe8 --- /dev/null +++ b/dashboard/src2/pages/saas/Login.vue @@ -0,0 +1,229 @@ + + \ No newline at end of file diff --git a/dashboard/src2/pages/saas/LoginToSite.vue b/dashboard/src2/pages/saas/LoginToSite.vue new file mode 100644 index 0000000..22239c6 --- /dev/null +++ b/dashboard/src2/pages/saas/LoginToSite.vue @@ -0,0 +1,238 @@ + + \ No newline at end of file diff --git a/dashboard/src2/pages/saas/OAuthSetupAccount.vue b/dashboard/src2/pages/saas/OAuthSetupAccount.vue new file mode 100644 index 0000000..20a5d52 --- /dev/null +++ b/dashboard/src2/pages/saas/OAuthSetupAccount.vue @@ -0,0 +1,153 @@ + + \ No newline at end of file diff --git a/dashboard/src2/pages/saas/SetupSite.vue b/dashboard/src2/pages/saas/SetupSite.vue new file mode 100644 index 0000000..2144883 --- /dev/null +++ b/dashboard/src2/pages/saas/SetupSite.vue @@ -0,0 +1,285 @@ + + \ No newline at end of file diff --git a/dashboard/src2/pages/saas/Signup.vue b/dashboard/src2/pages/saas/Signup.vue new file mode 100644 index 0000000..36693ed --- /dev/null +++ b/dashboard/src2/pages/saas/Signup.vue @@ -0,0 +1,232 @@ + + \ No newline at end of file diff --git a/dashboard/src2/pages/saas/VerifyEmail.vue b/dashboard/src2/pages/saas/VerifyEmail.vue new file mode 100644 index 0000000..4ca8937 --- /dev/null +++ b/dashboard/src2/pages/saas/VerifyEmail.vue @@ -0,0 +1,139 @@ + + \ No newline at end of file diff --git a/dashboard/src2/router.js b/dashboard/src2/router.js new file mode 100644 index 0000000..f1f0053 --- /dev/null +++ b/dashboard/src2/router.js @@ -0,0 +1,403 @@ +import { createRouter, createWebHistory } from 'vue-router'; +import { getTeam } from './data/team'; +import generateRoutes from './objects/generateRoutes'; + +let router = createRouter({ + history: createWebHistory('/dashboard/'), + routes: [ + { + path: '/', + name: 'Home', + component: () => import('./pages/Home.vue'), + beforeEnter: (to, from, next) => { + next({ + name: 'Welcome', + query: { + is_redirect: true, + }, + }); + }, + }, + { + path: '/welcome', + name: 'Welcome', + component: () => import('./pages/Welcome.vue'), + meta: { hideSidebar: true }, + }, + { + path: '/login', + name: 'Login', + component: () => import('./pages/LoginSignup.vue'), + meta: { isLoginPage: true }, + }, + { + path: '/signup', + name: 'Signup', + component: () => import('./pages/LoginSignup.vue'), + meta: { isLoginPage: true }, + }, + { + path: '/site-login', + name: 'Site Login', + component: () => import('./pages/SiteLogin.vue'), + meta: { hideSidebar: true }, + }, + { + path: '/setup-account/:requestKey/:joinRequest?', + name: 'Setup Account', + component: () => import('./pages/SetupAccount.vue'), + props: true, + meta: { isLoginPage: true }, + }, + { + path: '/reset-password/:requestKey', + name: 'Reset Password', + component: () => import('./pages/ResetPassword.vue'), + props: true, + meta: { isLoginPage: true }, + }, + { + path: '/checkout/:secretKey', + name: 'Checkout', + component: () => import('../src/views/checkout/Checkout.vue'), + props: true, + meta: { + isLoginPage: true, + }, + }, + { + path: '/subscription/:site?', + name: 'Subscription', + component: () => import('../src/views/checkout/Subscription.vue'), + props: true, + meta: { + hideSidebar: true, + }, + }, + { + name: 'Enable2FA', + path: '/enable-2fa', + 
component: () => import('./pages/Enable2FA.vue'), + props: true, + meta: { + hideSidebar: true, + }, + }, + { + name: 'New Site', + path: '/sites/new', + component: () => import('./pages/NewSite.vue'), + }, + { + name: 'Release Group New Site', + path: '/groups/:bench/sites/new', + component: () => import('./pages/NewSite.vue'), + props: true, + }, + { + name: 'New Release Group', + path: '/groups/new', + component: () => import('./pages/NewReleaseGroup.vue'), + }, + { + name: 'Server New Release Group', + path: '/servers/:server/groups/new', + component: () => import('./pages/NewReleaseGroup.vue'), + props: true, + }, + { + name: 'New Server', + path: '/servers/new', + component: () => import('./pages/NewServer.vue'), + }, + { + name: 'Billing', + path: '/billing', + component: () => import('./pages/Billing.vue'), + children: [ + { + name: 'BillingOverview', + path: '', + component: () => import('./pages/BillingOverview.vue'), + }, + { + name: 'BillingInvoices', + path: 'invoices', + component: () => import('./pages/BillingInvoices.vue'), + }, + { + name: 'BillingOrders', + path: 'orders', + component: () => import('./pages/BillingOrders.vue'), + }, + { + name: 'BillingBalances', + path: 'balances', + component: () => import('./pages/BillingBalances.vue'), + }, + { + name: 'BillingPaymentMethods', + path: 'payment-methods', + component: () => import('./pages/BillingPaymentMethods.vue'), + }, + { + name: 'BillingMarketplacePayouts', + path: 'payouts', + component: () => import('./pages/BillingMarketplacePayouts.vue'), + }, + { + name: 'BillingMpesaInvoices', + path: 'mpesa-invoices', + component: () => import('./pages/BillingMpesaInvoices.vue'), + }, + ], + }, + { + path: '/settings', + name: 'Settings', + redirect: { name: 'SettingsProfile' }, + component: () => import('./pages/Settings.vue'), + children: [ + { + name: 'SettingsProfile', + path: 'profile', + component: () => + import('./components/settings/profile/ProfileSettings.vue'), + }, + { + name: 'SettingsTeam', + path: 'team', + component: () => import('./components/settings/TeamSettings.vue'), + }, + { + name: 'SettingsDeveloper', + path: 'developer', + component: () => + import('./components/settings/DeveloperSettings.vue'), + }, + { + name: 'SettingsPermission', + path: 'permissions', + component: () => + import('./components/settings/SettingsPermissions.vue'), + redirect: { name: 'SettingsPermissionRoles' }, + children: [ + { + path: 'roles', + name: 'SettingsPermissionRoles', + component: () => import('./components/settings/RoleList.vue'), + }, + { + name: 'SettingsPermissionRolePermissions', + path: 'roles/:roleId', + component: () => + import('./components/settings/RolePermissions.vue'), + props: true, + }, + ], + }, + ], + }, + { + name: 'Partner Portal', + path: '/partners', + redirect: { name: 'PartnerOverview' }, + component: () => import('./pages/Partners.vue'), + children: [ + { + name: 'PartnerOverview', + path: 'overview', + component: () => import('./components/partners/PartnerOverview.vue'), + }, + { + name: 'PartnerCustomers', + path: 'customers', + component: () => import('./components/partners/PartnerCustomers.vue'), + }, + { + name: 'PartnerApprovalRequests', + path: 'approval-requests', + component: () => + import('./components/partners/PartnerApprovalRequests.vue'), + }, + { + name: 'LocalPaymentSetup', + path: 'local-payment-setup', + component: () => + import('./components/partners/PartnerLocalPaymentSetup.vue'), + }, + ], + }, + { + name: 'Signup Create Site', + path: '/create-site', + redirect: { name: 
'Home' }, + children: [ + { + name: 'SignupAppSelector', + path: 'app-selector', + component: () => import('./pages/saas/AppSelector.vue'), + meta: { hideSidebar: true }, + }, + { + name: 'SignupSetup', + path: ':productId/setup', + component: () => import('./pages/saas/SetupSite.vue'), + props: true, + meta: { hideSidebar: true }, + }, + { + name: 'SignupLoginToSite', + path: ':productId/login-to-site', + component: () => import('./pages/saas/LoginToSite.vue'), + props: true, + meta: { hideSidebar: true }, + }, + ], + }, + { + name: 'Impersonate', + path: '/impersonate/:teamId', + component: () => import('./pages/Impersonate.vue'), + props: true, + }, + { + name: 'InstallApp', + path: '/install-app/:app', + component: () => import('./pages/InstallApp.vue'), + props: true, + }, + { + name: 'CreateSiteForMarketplaceApp', + path: '/create-site/:app', + component: () => import('./pages/CreateSiteForMarketplaceApp.vue'), + props: true, + }, + { + path: '/developer-reply/:marketplaceApp/:reviewId', + name: 'ReplyMarketplaceApp', + component: () => + import('./components/marketplace/ReplyMarketplaceApp.vue'), + props: true, + }, + { + path: '/sql-playground', + name: 'SQL Playground', + component: () => + import('./pages/devtools/database/DatabaseSQLPlayground.vue'), + }, + { + path: '/database-analyzer', + name: 'DB Analyzer', + component: () => import('./pages/devtools/database/DatabaseAnalyzer.vue'), + }, + { + path: '/log-browser/:mode?/:docName?/:logId?', + name: 'Log Browser', + component: () => import('./pages/devtools/log-browser/LogBrowser.vue'), + props: true, + }, + { + path: '/recharge', + name: 'RechargeCredits', + component: () => import('./pages/RechargeCredits.vue'), + meta: { + requiresAuth: true + }, + }, + ...generateRoutes(), + { + path: '/:pathMatch(.*)*', + name: '404', + component: () => import('../src/views/general/404.vue'), + }, + ], +}); + +router.beforeEach(async (to, from, next) => { + let isLoggedIn = + document.cookie.includes('user_id') && + !document.cookie.includes('user_id=Guest'); + let goingToLoginPage = to.matched.some((record) => record.meta.isLoginPage); + + if (isLoggedIn) { + await waitUntilTeamLoaded(); + let $team = getTeam(); + let onboardingComplete = $team.pg.onboarding.complete; + let defaultRoute = 'Site List'; + let onboardingRoute = 'Welcome'; + + // identify user in posthog + if (window.posthog?.__loaded) { + try { + window.posthog.identify($team.pg.user, { + app: 'jingrow_cloud', + }); + } catch (e) { + console.error(e); + } + } + + // if team owner/admin enforce 2fa and user has not enabled 2fa, redirect to enable 2fa + const Enable2FARoute = 'Enable2FA'; + if ( + to.name !== Enable2FARoute && + !$team.pg.is_desk_user && + $team.pg.enforce_2fa && + !$team.pg.user_info.is_2fa_enabled + ) { + next({ name: Enable2FARoute }); + return; + } + + // if team owner/admin doesn't enforce 2fa don't allow user to visit Enable2FA route + if (to.name === Enable2FARoute && !$team.pg.enforce_2fa) { + next({ name: defaultRoute }); + return; + } + + if ( + !onboardingComplete && + (to.name.startsWith('Release Group') || to.name.startsWith('Server')) + ) { + next({ name: onboardingRoute }); + return; + } + + if (goingToLoginPage) { + if (to.name == 'Signup' && to.query?.product) { + next({ + name: 'SignupSetup', + params: { productId: to.query.product }, + }); + } + next({ name: defaultRoute }); + } else { + next(); + } + } else { + if (goingToLoginPage) { + next(); + } else { + if (to.name == 'Site Login') { + next(); + } else { + next({ name: 'Login', query: 
{ redirect: to.href } }); + } + } + } +}); + +function waitUntilTeamLoaded() { + return new Promise((resolve) => { + let interval = setInterval(() => { + let team = getTeam(); + if (team?.pg) { + clearInterval(interval); + resolve(); + } + }, 100); + }); +} + +export default router; diff --git a/dashboard/src2/socket.js b/dashboard/src2/socket.js new file mode 100644 index 0000000..27fb4f7 --- /dev/null +++ b/dashboard/src2/socket.js @@ -0,0 +1,28 @@ +import { io } from 'socket.io-client'; +import { socketio_port } from '../../../../sites/common_site_config.json'; +import { getCachedResource, getCachedListResource } from 'jingrow-ui'; + +export function initSocket() { + let host = window.location.hostname; + let siteName = window.site_name; + let port = window.location.port ? `:${socketio_port}` : ''; + let protocol = port ? 'http' : 'https'; + let url = `${protocol}://${host}${port}/${siteName}`; + + let socket = io(url, { + withCredentials: true, + reconnectionAttempts: 5 + }); + + socket.on('refetch_resource', data => { + if (data.cache_key) { + let resource = + getCachedResource(data.cache_key) || + getCachedListResource(data.cache_key); + if (resource) { + resource.reload(); + } + } + }); + return socket; +} diff --git a/dashboard/src2/types.ts b/dashboard/src2/types.ts new file mode 100644 index 0000000..3860914 --- /dev/null +++ b/dashboard/src2/types.ts @@ -0,0 +1 @@ +export type Platform = 'win' | 'mac' | 'linux' | 'unknown'; diff --git a/dashboard/src2/utils/agentJob.js b/dashboard/src2/utils/agentJob.js new file mode 100644 index 0000000..2ad935a --- /dev/null +++ b/dashboard/src2/utils/agentJob.js @@ -0,0 +1,62 @@ +import { jingrowRequest } from 'jingrow-ui'; +import { reactive } from 'vue'; + +let states = {}; +export function pollJobStatus(jobId, stopFunction) { + if (!states[jobId]) { + states[jobId] = reactive({ status: null, loading: false }); + } + let state = states[jobId]; + state.loading = true; + fetchJobStatus(jobId).then(status => { + state.status = status; + }); + if (stopFunction(state.status)) { + state.loading = false; + return; + } + setTimeout(() => { + pollJobStatus(jobId, stopFunction); + }, 1000); + return state; +} + +function fetchJobStatus(jobId) { + return jingrowRequest({ + url: 'jcloud.api.site.get_job_status', + params: { job_name: jobId } + }).then(result => result.status); +} + +let runningJobs = reactive({}); +export function subscribeToJobUpdates(socket) { + // listening to site's pg_update event + // check agent_job.py for more details + socket.on('pg_update', data => { + let job = runningJobs[data.id]; + if (!job) { + job = data; + runningJobs[data.id] = job; + } + Object.assign(job, data); + }); +} + +export function getRunningJobs({ id, name, site, bench, server }) { + if (id) { + return runningJobs[id]; + } + if (name) { + return Object.values(runningJobs).filter(job => job.name === name); + } + if (site) { + return Object.values(runningJobs).filter(job => job.site === site); + } + if (bench) { + return Object.values(runningJobs).filter(job => job.bench === bench); + } + if (server) { + return Object.values(runningJobs).filter(job => job.server === server); + } + return runningJobs; +} diff --git a/dashboard/src2/utils/components.jsx b/dashboard/src2/utils/components.jsx new file mode 100644 index 0000000..08cca47 --- /dev/null +++ b/dashboard/src2/utils/components.jsx @@ -0,0 +1,87 @@ +import { FeatherIcon } from 'jingrow-ui'; +import { h, isVNode, ref, defineAsyncComponent } from 'vue'; +import AddressableErrorDialog from 
'../components/AddressableErrorDialog.vue'; +import DialogWrapper from '../components/DialogWrapper.vue'; +import ConfirmDialog from '../dialogs/ConfirmDialog.vue'; + +export function icon(name, _class = '') { + let iconComponent; + if (typeof name !== 'string' && name?.render) { + iconComponent = name; + name = undefined; + } else { + iconComponent = FeatherIcon; + } + return () => h(iconComponent, { name, class: _class || 'w-4 h-4' }); +} + +/** + * + * @param {import('../objects/common/types').DialogConfig} param0 + * @returns + */ +export function confirmDialog({ + title = 'Untitled', + fields = [], + message, + primaryAction, + onSuccess +}) { + let dialog = h(ConfirmDialog, { + title, + message, + fields, + primaryAction, + onSuccess + }); + renderDialog(dialog); + return dialog; +} + +export function addressableErrorDialog(name, onDone) { + renderDialog( + h(AddressableErrorDialog, { + name, + onDone + }) + ); +} + +export const dialogs = ref([]); + +export function renderDialog(component) { + if (!isVNode(component)) { + component = h(component); + } + component.id = dialogs.length; + dialogs.value.push(component); +} + +export function renderInDialog(component, options = {}) { + renderDialog({component}); +} + +export function cardBrandIcon(brand) { + const component = { + 'master-card': defineAsyncComponent(() => + import('@/components/icons/cards/MasterCard.vue') + ), + visa: defineAsyncComponent(() => + import('@/components/icons/cards/Visa.vue') + ), + amex: defineAsyncComponent(() => + import('@/components/icons/cards/Amex.vue') + ), + jcb: defineAsyncComponent(() => + import('@/components/icons/cards/JCB.vue') + ), + generic: defineAsyncComponent(() => + import('@/components/icons/cards/Generic.vue') + ), + 'union-pay': defineAsyncComponent(() => + import('@/components/icons/cards/UnionPay.vue') + ) + }[brand || 'generic']; + + return h(component, { class: 'h-4 w-6' }); +} \ No newline at end of file diff --git a/dashboard/src2/utils/country.ts b/dashboard/src2/utils/country.ts new file mode 100644 index 0000000..a7aac5d --- /dev/null +++ b/dashboard/src2/utils/country.ts @@ -0,0 +1,2867 @@ +const countries = { + AD: '安道尔', + AE: '阿拉伯联合酋长国', + AF: '阿富汗', + AG: '安提瓜和巴布达', + AI: '安圭拉', + AL: '阿尔巴尼亚', + AM: '亚美尼亚', + AO: '安哥拉', + AQ: '南极洲', + AR: '阿根廷', + AS: '美属萨摩亚', + AT: '奥地利', + AU: '澳大利亚', + AW: '阿鲁巴', + AX: '奥兰群岛', + AZ: '阿塞拜疆', + BA: '波斯尼亚和黑塞哥维那', + BB: '巴巴多斯', + BD: '孟加拉国', + BE: '比利时', + BF: '布基纳法索', + BG: '保加利亚', + BH: '巴林', + BI: '布隆迪', + BJ: '贝宁', + BL: '圣巴泰勒米', + BM: '百慕大', + BN: '文莱', + BO: '玻利维亚', + BQ: '荷兰加勒比区', + BR: '巴西', + BS: '巴哈马', + BT: '不丹', + BV: '布韦岛', + BW: '博茨瓦纳', + BY: '白俄罗斯', + BZ: '伯利兹', + CA: '加拿大', + CC: '科科斯群岛', + CD: '刚果民主共和国', + CF: '中非共和国', + CG: '刚果共和国', + CH: '瑞士', + CI: '科特迪瓦', + CK: '库克群岛', + CL: '智利', + CM: '喀麦隆', + CN: '中国', + CO: '哥伦比亚', + CR: '哥斯达黎加', + CU: '古巴', + CV: '佛得角', + CW: '库拉索', + CX: '圣诞岛', + CY: '塞浦路斯', + CZ: '捷克', + DE: '德国', + DJ: '吉布提', + DK: '丹麦', + DM: '多米尼克', + DO: '多米尼加共和国', + DZ: '阿尔及利亚', + EC: '厄瓜多尔', + EE: '爱沙尼亚', + EG: '埃及', + EH: '西撒哈拉', + ER: '厄立特里亚', + ES: '西班牙', + ET: '埃塞俄比亚', + FI: '芬兰', + FJ: '斐济', + FK: '福克兰群岛', + FM: '密克罗尼西亚', + FO: '法罗群岛', + FR: '法国', + GA: '加蓬', + GB: '英国', + GD: '格林纳达', + GE: '格鲁吉亚', + GF: '法属圭亚那', + GG: '根西岛', + GH: '加纳', + GI: '直布罗陀', + GL: '格陵兰', + GM: '冈比亚', + GN: '几内亚', + GP: '瓜德罗普', + GQ: '赤道几内亚', + GR: '希腊', + GS: '南乔治亚和南桑威奇群岛', + GT: '危地马拉', + GU: '关岛', + GW: '几内亚比绍', + GY: '圭亚那', + HK: '香港', + HM: '赫德岛和麦克唐纳群岛', + HN: '洪都拉斯', + HR: '克罗地亚', + HT: '海地', + HU: '匈牙利', + ID: 
'印度尼西亚', + IE: '爱尔兰', + IL: '以色列', + IM: '马恩岛', + IN: '印度', + IO: '英属印度洋领地', + IQ: '伊拉克', + IR: '伊朗', + IS: '冰岛', + IT: '意大利', + JE: '泽西岛', + JM: '牙买加', + JO: '约旦', + JP: '日本', + KE: '肯尼亚', + KG: '吉尔吉斯斯坦', + KH: '柬埔寨', + KI: '基里巴斯', + KM: '科摩罗', + KN: '圣基茨和尼维斯', + KP: '朝鲜', + KR: '韩国', + KW: '科威特', + KY: '开曼群岛', + KZ: '哈萨克斯坦', + LA: '老挝', + LB: '黎巴嫩', + LC: '圣卢西亚', + LI: '列支敦士登', + LK: '斯里兰卡', + LR: '利比里亚', + LS: '莱索托', + LT: '立陶宛', + LU: '卢森堡', + LV: '拉脱维亚', + LY: '利比亚', + MA: '摩洛哥', + MC: '摩纳哥', + MD: '摩尔多瓦', + ME: '黑山', + MF: '法属圣马丁', + MG: '马达加斯加', + MH: '马绍尔群岛', + MK: '北马其顿', + ML: '马里', + MM: '缅甸', + MN: '蒙古', + MO: '澳门', + MP: '北马里亚纳群岛', + MQ: '马提尼克', + MR: '毛里塔尼亚', + MS: '蒙特塞拉特', + MT: '马耳他', + MU: '毛里求斯', + MV: '马尔代夫', + MW: '马拉维', + MX: '墨西哥', + MY: '马来西亚', + MZ: '莫桑比克', + NA: '纳米比亚', + NC: '新喀里多尼亚', + NE: '尼日尔', + NF: '诺福克岛', + NG: '尼日利亚', + NI: '尼加拉瓜', + NL: '荷兰', + NO: '挪威', + NP: '尼泊尔', + NR: '瑙鲁', + NU: '纽埃', + NZ: '新西兰', + OM: '阿曼', + PA: '巴拿马', + PE: '秘鲁', + PF: '法属波利尼西亚', + PG: '巴布亚新几内亚', + PH: '菲律宾', + PK: '巴基斯坦', + PL: '波兰', + PM: '圣皮埃尔和密克隆', + PN: '皮特凯恩群岛', + PR: '波多黎各', + PS: '巴勒斯坦', + PT: '葡萄牙', + PW: '帕劳', + PY: '巴拉圭', + QA: '卡塔尔', + RE: '留尼汪', + RO: '罗马尼亚', + RS: '塞尔维亚', + RU: '俄罗斯', + RW: '卢旺达', + SA: '沙特阿拉伯', + SB: '所罗门群岛', + SC: '塞舌尔', + SD: '苏丹', + SE: '瑞典', + SG: '新加坡', + SH: '圣赫勒拿、阿森松和特里斯坦-达库尼亚', + SI: '斯洛文尼亚', + SJ: '斯瓦尔巴和扬马延', + SK: '斯洛伐克', + SL: '塞拉利昂', + SM: '圣马力诺', + SN: '塞内加尔', + SO: '索马里', + SR: '苏里南', + SS: '南苏丹', + ST: '圣多美和普林西比', + SV: '萨尔瓦多', + SX: '荷属圣马丁', + SY: '叙利亚', + SZ: '斯威士兰', + TC: '特克斯和凯科斯群岛', + TD: '乍得', + TF: '法属南部领地', + TG: '多哥', + TH: '泰国', + TJ: '塔吉克斯坦', + TK: '托克劳', + TL: '东帝汶', + TM: '土库曼斯坦', + TN: '突尼斯', + TO: '汤加', + TR: '土耳其', + TT: '特立尼达和多巴哥', + TV: '图瓦卢', + TW: '台湾', + TZ: '坦桑尼亚', + UA: '乌克兰', + UG: '乌干达', + UM: '美国本土外小岛屿', + US: '美国', + UY: '乌拉圭', + UZ: '乌兹别克斯坦', + VA: '梵蒂冈', + VC: '圣文森特和格林纳丁斯', + VE: '委内瑞拉', + VG: '英属维尔京群岛', + VI: '美属维尔京群岛', + VN: '越南', + VU: '瓦努阿图', + WF: '瓦利斯和富图纳', + WS: '萨摩亚', + YE: '也门', + YT: '马约特', + ZA: '南非', + ZM: '赞比亚', + ZW: '津巴布韦' +}; +const timezones = { + 'Africa/Abidjan': { + u: 0, + c: ['CI', 'BF', 'GH', 'GM', 'GN', 'ML', 'MR', 'SH', 'SL', 'SN', 'TG'] + }, + 'Africa/Accra': { + a: 'Africa/Abidjan', + c: ['GH'], + r: 1 + }, + 'Africa/Addis_Ababa': { + a: 'Africa/Nairobi', + c: ['ET'], + r: 1 + }, + 'Africa/Algiers': { + u: 60, + c: ['DZ'] + }, + 'Africa/Asmara': { + a: 'Africa/Nairobi', + c: ['ER'], + r: 1 + }, + 'Africa/Asmera': { + a: 'Africa/Nairobi', + c: ['ER'], + r: 1 + }, + 'Africa/Bamako': { + a: 'Africa/Abidjan', + c: ['ML'], + r: 1 + }, + 'Africa/Bangui': { + a: 'Africa/Lagos', + c: ['CF'], + r: 1 + }, + 'Africa/Banjul': { + a: 'Africa/Abidjan', + c: ['GM'], + r: 1 + }, + 'Africa/Bissau': { + u: 0, + c: ['GW'] + }, + 'Africa/Blantyre': { + a: 'Africa/Maputo', + c: ['MW'], + r: 1 + }, + 'Africa/Brazzaville': { + a: 'Africa/Lagos', + c: ['CG'], + r: 1 + }, + 'Africa/Bujumbura': { + a: 'Africa/Maputo', + c: ['BI'], + r: 1 + }, + 'Africa/Cairo': { + u: 120, + c: ['EG'] + }, + 'Africa/Casablanca': { + u: 60, + d: 0, + c: ['MA'] + }, + 'Africa/Ceuta': { + u: 60, + d: 120, + c: ['ES'] + }, + 'Africa/Conakry': { + a: 'Africa/Abidjan', + c: ['GN'], + r: 1 + }, + 'Africa/Dakar': { + a: 'Africa/Abidjan', + c: ['SN'], + r: 1 + }, + 'Africa/Dar_es_Salaam': { + a: 'Africa/Nairobi', + c: ['TZ'], + r: 1 + }, + 'Africa/Djibouti': { + a: 'Africa/Nairobi', + c: ['DJ'], + r: 1 + }, + 'Africa/Douala': { + a: 'Africa/Lagos', + c: ['CM'], + r: 1 + }, + 'Africa/El_Aaiun': { + u: 60, + d: 0, + c: ['EH'] + }, + 
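The timezone table above stores each IANA zone as a compact record. From the entries, `u` appears to be the standard UTC offset in minutes, `d` the offset while DST is in effect, `c` the ISO 3166 country codes the zone serves, and alias entries carry `a` (the canonical zone they point to) together with `r: 1`. A minimal TypeScript sketch of how such a map could be consumed, assuming those field meanings; the `Zone` type and both helpers below are illustrative and not part of country.ts:

// Field meanings inferred from the entries above; treat them as assumptions.
type Zone = {
  u?: number; // standard UTC offset in minutes (assumed)
  d?: number; // UTC offset in minutes while DST is active (assumed)
  c?: string[]; // ISO 3166-1 alpha-2 country codes (assumed)
  a?: string; // canonical zone this entry aliases (assumed)
  r?: 1; // marks the entry as an alias/link (assumed)
};

const zones: Record<string, Zone> = {
  'Africa/Abidjan': { u: 0, c: ['CI', 'BF', 'GH'] },
  'Africa/Accra': { a: 'Africa/Abidjan', c: ['GH'], r: 1 },
};

// Follow the alias chain, if any, back to the canonical zone name.
function resolveZone(name: string, map: Record<string, Zone> = zones): string {
  let current = name;
  const seen = new Set<string>();
  while (map[current]?.a && !seen.has(current)) {
    seen.add(current);
    current = map[current]!.a!;
  }
  return current;
}

// List zones that serve a given country code, e.g. zonesForCountry('GH').
function zonesForCountry(code: string, map: Record<string, Zone> = zones): string[] {
  return Object.keys(map).filter((name) => map[name]?.c?.includes(code));
}

Under those assumptions, resolveZone('Africa/Accra') yields 'Africa/Abidjan' and zonesForCountry('GH') returns both names, which is the kind of lookup a country-aware timezone picker needs.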
'Africa/Freetown': { + a: 'Africa/Abidjan', + c: ['SL'], + r: 1 + }, + 'Africa/Gaborone': { + a: 'Africa/Maputo', + c: ['BW'], + r: 1 + }, + 'Africa/Harare': { + a: 'Africa/Maputo', + c: ['ZW'], + r: 1 + }, + 'Africa/Johannesburg': { + u: 120, + c: ['ZA', 'LS', 'SZ'] + }, + 'Africa/Juba': { + u: 120, + c: ['SS'] + }, + 'Africa/Kampala': { + a: 'Africa/Nairobi', + c: ['UG'], + r: 1 + }, + 'Africa/Khartoum': { + u: 120, + c: ['SD'] + }, + 'Africa/Kigali': { + a: 'Africa/Maputo', + c: ['RW'], + r: 1 + }, + 'Africa/Kinshasa': { + a: 'Africa/Lagos', + c: ['CD'], + r: 1 + }, + 'Africa/Lagos': { + u: 60, + c: ['NG', 'AO', 'BJ', 'CD', 'CF', 'CG', 'CM', 'GA', 'GQ', 'NE'] + }, + 'Africa/Libreville': { + a: 'Africa/Lagos', + c: ['GA'], + r: 1 + }, + 'Africa/Lome': { + a: 'Africa/Abidjan', + c: ['TG'], + r: 1 + }, + 'Africa/Luanda': { + a: 'Africa/Lagos', + c: ['AO'], + r: 1 + }, + 'Africa/Lubumbashi': { + a: 'Africa/Maputo', + c: ['CD'], + r: 1 + }, + 'Africa/Lusaka': { + a: 'Africa/Maputo', + c: ['ZM'], + r: 1 + }, + 'Africa/Malabo': { + a: 'Africa/Lagos', + c: ['GQ'], + r: 1 + }, + 'Africa/Maputo': { + u: 120, + c: ['MZ', 'BI', 'BW', 'CD', 'MW', 'RW', 'ZM', 'ZW'] + }, + 'Africa/Maseru': { + a: 'Africa/Johannesburg', + c: ['LS'], + r: 1 + }, + 'Africa/Mbabane': { + a: 'Africa/Johannesburg', + c: ['SZ'], + r: 1 + }, + 'Africa/Mogadishu': { + a: 'Africa/Nairobi', + c: ['SO'], + r: 1 + }, + 'Africa/Monrovia': { + u: 0, + c: ['LR'] + }, + 'Africa/Nairobi': { + u: 180, + c: ['KE', 'DJ', 'ER', 'ET', 'KM', 'MG', 'SO', 'TZ', 'UG', 'YT'] + }, + 'Africa/Ndjamena': { + u: 60, + c: ['TD'] + }, + 'Africa/Niamey': { + a: 'Africa/Lagos', + c: ['NE'], + r: 1 + }, + 'Africa/Nouakchott': { + a: 'Africa/Abidjan', + c: ['MR'], + r: 1 + }, + 'Africa/Ouagadougou': { + a: 'Africa/Abidjan', + c: ['BF'], + r: 1 + }, + 'Africa/Porto-Novo': { + a: 'Africa/Lagos', + c: ['BJ'], + r: 1 + }, + 'Africa/Sao_Tome': { + u: 0, + c: ['ST'] + }, + 'Africa/Timbuktu': { + a: 'Africa/Abidjan', + c: ['ML'], + r: 1 + }, + 'Africa/Tripoli': { + u: 120, + c: ['LY'] + }, + 'Africa/Tunis': { + u: 60, + c: ['TN'] + }, + 'Africa/Windhoek': { + u: 120, + c: ['NA'] + }, + 'America/Adak': { + u: -600, + d: -540, + c: ['US'] + }, + 'America/Anchorage': { + u: -540, + d: -480, + c: ['US'] + }, + 'America/Anguilla': { + a: 'America/Puerto_Rico', + c: ['AI'], + r: 1 + }, + 'America/Antigua': { + a: 'America/Puerto_Rico', + c: ['AG'], + r: 1 + }, + 'America/Araguaina': { + u: -180, + c: ['BR'] + }, + 'America/Argentina/Buenos_Aires': { + u: -180, + c: ['AR'] + }, + 'America/Argentina/Catamarca': { + u: -180, + c: ['AR'] + }, + 'America/Argentina/ComodRivadavia': { + a: 'America/Argentina/Catamarca', + r: 1 + }, + 'America/Argentina/Cordoba': { + u: -180, + c: ['AR'] + }, + 'America/Argentina/Jujuy': { + u: -180, + c: ['AR'] + }, + 'America/Argentina/La_Rioja': { + u: -180, + c: ['AR'] + }, + 'America/Argentina/Mendoza': { + u: -180, + c: ['AR'] + }, + 'America/Argentina/Rio_Gallegos': { + u: -180, + c: ['AR'] + }, + 'America/Argentina/Salta': { + u: -180, + c: ['AR'] + }, + 'America/Argentina/San_Juan': { + u: -180, + c: ['AR'] + }, + 'America/Argentina/San_Luis': { + u: -180, + c: ['AR'] + }, + 'America/Argentina/Tucuman': { + u: -180, + c: ['AR'] + }, + 'America/Argentina/Ushuaia': { + u: -180, + c: ['AR'] + }, + 'America/Aruba': { + a: 'America/Puerto_Rico', + c: ['AW'], + r: 1 + }, + 'America/Asuncion': { + u: -240, + d: -180, + c: ['PY'] + }, + 'America/Atikokan': { + a: 'America/Panama', + c: ['CA'], + r: 1 + }, + 'America/Atka': { + a: 
'America/Adak', + r: 1 + }, + 'America/Bahia': { + u: -180, + c: ['BR'] + }, + 'America/Bahia_Banderas': { + u: -360, + d: -300, + c: ['MX'] + }, + 'America/Barbados': { + u: -240, + c: ['BB'] + }, + 'America/Belem': { + u: -180, + c: ['BR'] + }, + 'America/Belize': { + u: -360, + c: ['BZ'] + }, + 'America/Blanc-Sablon': { + a: 'America/Puerto_Rico', + c: ['CA'], + r: 1 + }, + 'America/Boa_Vista': { + u: -240, + c: ['BR'] + }, + 'America/Bogota': { + u: -300, + c: ['CO'] + }, + 'America/Boise': { + u: -420, + d: -360, + c: ['US'] + }, + 'America/Buenos_Aires': { + a: 'America/Argentina/Buenos_Aires', + r: 1 + }, + 'America/Cambridge_Bay': { + u: -420, + d: -360, + c: ['CA'] + }, + 'America/Campo_Grande': { + u: -240, + c: ['BR'] + }, + 'America/Cancun': { + u: -300, + c: ['MX'] + }, + 'America/Caracas': { + u: -240, + c: ['VE'] + }, + 'America/Catamarca': { + a: 'America/Argentina/Catamarca', + r: 1 + }, + 'America/Cayenne': { + u: -180, + c: ['GF'] + }, + 'America/Cayman': { + a: 'America/Panama', + c: ['KY'], + r: 1 + }, + 'America/Chicago': { + u: -360, + d: -300, + c: ['US'] + }, + 'America/Chihuahua': { + u: -420, + d: -360, + c: ['MX'] + }, + 'America/Coral_Harbour': { + a: 'America/Panama', + c: ['CA'], + r: 1 + }, + 'America/Cordoba': { + a: 'America/Argentina/Cordoba', + r: 1 + }, + 'America/Costa_Rica': { + u: -360, + c: ['CR'] + }, + 'America/Creston': { + a: 'America/Phoenix', + c: ['CA'], + r: 1 + }, + 'America/Cuiaba': { + u: -240, + c: ['BR'] + }, + 'America/Curacao': { + a: 'America/Puerto_Rico', + c: ['CW'], + r: 1 + }, + 'America/Danmarkshavn': { + u: 0, + c: ['GL'] + }, + 'America/Dawson': { + u: -420, + c: ['CA'] + }, + 'America/Dawson_Creek': { + u: -420, + c: ['CA'] + }, + 'America/Denver': { + u: -420, + d: -360, + c: ['US'] + }, + 'America/Detroit': { + u: -300, + d: -240, + c: ['US'] + }, + 'America/Dominica': { + a: 'America/Puerto_Rico', + c: ['DM'], + r: 1 + }, + 'America/Edmonton': { + u: -420, + d: -360, + c: ['CA'] + }, + 'America/Eirunepe': { + u: -300, + c: ['BR'] + }, + 'America/El_Salvador': { + u: -360, + c: ['SV'] + }, + 'America/Ensenada': { + a: 'America/Tijuana', + r: 1 + }, + 'America/Fort_Nelson': { + u: -420, + c: ['CA'] + }, + 'America/Fort_Wayne': { + a: 'America/Indiana/Indianapolis', + r: 1 + }, + 'America/Fortaleza': { + u: -180, + c: ['BR'] + }, + 'America/Glace_Bay': { + u: -240, + d: -180, + c: ['CA'] + }, + 'America/Godthab': { + a: 'America/Nuuk', + r: 1 + }, + 'America/Goose_Bay': { + u: -240, + d: -180, + c: ['CA'] + }, + 'America/Grand_Turk': { + u: -300, + d: -240, + c: ['TC'] + }, + 'America/Grenada': { + a: 'America/Puerto_Rico', + c: ['GD'], + r: 1 + }, + 'America/Guadeloupe': { + a: 'America/Puerto_Rico', + c: ['GP'], + r: 1 + }, + 'America/Guatemala': { + u: -360, + c: ['GT'] + }, + 'America/Guayaquil': { + u: -300, + c: ['EC'] + }, + 'America/Guyana': { + u: -240, + c: ['GY'] + }, + 'America/Halifax': { + u: -240, + d: -180, + c: ['CA'] + }, + 'America/Havana': { + u: -300, + d: -240, + c: ['CU'] + }, + 'America/Hermosillo': { + u: -420, + c: ['MX'] + }, + 'America/Indiana/Indianapolis': { + u: -300, + d: -240, + c: ['US'] + }, + 'America/Indiana/Knox': { + u: -360, + d: -300, + c: ['US'] + }, + 'America/Indiana/Marengo': { + u: -300, + d: -240, + c: ['US'] + }, + 'America/Indiana/Petersburg': { + u: -300, + d: -240, + c: ['US'] + }, + 'America/Indiana/Tell_City': { + u: -360, + d: -300, + c: ['US'] + }, + 'America/Indiana/Vevay': { + u: -300, + d: -240, + c: ['US'] + }, + 'America/Indiana/Vincennes': { + u: -300, + d: -240, + 
c: ['US'] + }, + 'America/Indiana/Winamac': { + u: -300, + d: -240, + c: ['US'] + }, + 'America/Indianapolis': { + a: 'America/Indiana/Indianapolis', + r: 1 + }, + 'America/Inuvik': { + u: -420, + d: -360, + c: ['CA'] + }, + 'America/Iqaluit': { + u: -300, + d: -240, + c: ['CA'] + }, + 'America/Jamaica': { + u: -300, + c: ['JM'] + }, + 'America/Jujuy': { + a: 'America/Argentina/Jujuy', + r: 1 + }, + 'America/Juneau': { + u: -540, + d: -480, + c: ['US'] + }, + 'America/Kentucky/Louisville': { + u: -300, + d: -240, + c: ['US'] + }, + 'America/Kentucky/Monticello': { + u: -300, + d: -240, + c: ['US'] + }, + 'America/Knox_IN': { + a: 'America/Indiana/Knox', + r: 1 + }, + 'America/Kralendijk': { + a: 'America/Puerto_Rico', + c: ['BQ'], + r: 1 + }, + 'America/La_Paz': { + u: -240, + c: ['BO'] + }, + 'America/Lima': { + u: -300, + c: ['PE'] + }, + 'America/Los_Angeles': { + u: -480, + d: -420, + c: ['US'] + }, + 'America/Louisville': { + a: 'America/Kentucky/Louisville', + r: 1 + }, + 'America/Lower_Princes': { + a: 'America/Puerto_Rico', + c: ['SX'], + r: 1 + }, + 'America/Maceio': { + u: -180, + c: ['BR'] + }, + 'America/Managua': { + u: -360, + c: ['NI'] + }, + 'America/Manaus': { + u: -240, + c: ['BR'] + }, + 'America/Marigot': { + a: 'America/Puerto_Rico', + c: ['MF'], + r: 1 + }, + 'America/Martinique': { + u: -240, + c: ['MQ'] + }, + 'America/Matamoros': { + u: -360, + d: -300, + c: ['MX'] + }, + 'America/Mazatlan': { + u: -420, + d: -360, + c: ['MX'] + }, + 'America/Mendoza': { + a: 'America/Argentina/Mendoza', + r: 1 + }, + 'America/Menominee': { + u: -360, + d: -300, + c: ['US'] + }, + 'America/Merida': { + u: -360, + d: -300, + c: ['MX'] + }, + 'America/Metlakatla': { + u: -540, + d: -480, + c: ['US'] + }, + 'America/Mexico_City': { + u: -360, + d: -300, + c: ['MX'] + }, + 'America/Miquelon': { + u: -180, + d: -120, + c: ['PM'] + }, + 'America/Moncton': { + u: -240, + d: -180, + c: ['CA'] + }, + 'America/Monterrey': { + u: -360, + d: -300, + c: ['MX'] + }, + 'America/Montevideo': { + u: -180, + c: ['UY'] + }, + 'America/Montreal': { + a: 'America/Toronto', + c: ['CA'], + r: 1 + }, + 'America/Montserrat': { + a: 'America/Puerto_Rico', + c: ['MS'], + r: 1 + }, + 'America/Nassau': { + a: 'America/Toronto', + c: ['BS'], + r: 1 + }, + 'America/New_York': { + u: -300, + d: -240, + c: ['US'] + }, + 'America/Nipigon': { + u: -300, + d: -240, + c: ['CA'] + }, + 'America/Nome': { + u: -540, + d: -480, + c: ['US'] + }, + 'America/Noronha': { + u: -120, + c: ['BR'] + }, + 'America/North_Dakota/Beulah': { + u: -360, + d: -300, + c: ['US'] + }, + 'America/North_Dakota/Center': { + u: -360, + d: -300, + c: ['US'] + }, + 'America/North_Dakota/New_Salem': { + u: -360, + d: -300, + c: ['US'] + }, + 'America/Nuuk': { + u: -180, + d: -120, + c: ['GL'] + }, + 'America/Ojinaga': { + u: -420, + d: -360, + c: ['MX'] + }, + 'America/Panama': { + u: -300, + c: ['PA', 'CA', 'KY'] + }, + 'America/Pangnirtung': { + u: -300, + d: -240, + c: ['CA'] + }, + 'America/Paramaribo': { + u: -180, + c: ['SR'] + }, + 'America/Phoenix': { + u: -420, + c: ['US', 'CA'] + }, + 'America/Port-au-Prince': { + u: -300, + d: -240, + c: ['HT'] + }, + 'America/Port_of_Spain': { + a: 'America/Puerto_Rico', + c: ['TT'], + r: 1 + }, + 'America/Porto_Acre': { + a: 'America/Rio_Branco', + r: 1 + }, + 'America/Porto_Velho': { + u: -240, + c: ['BR'] + }, + 'America/Puerto_Rico': { + u: -240, + c: [ + 'PR', + 'AG', + 'CA', + 'AI', + 'AW', + 'BL', + 'BQ', + 'CW', + 'DM', + 'GD', + 'GP', + 'KN', + 'LC', + 'MF', + 'MS', + 'SX', + 'TT', + 'VC', + 
'VG', + 'VI' + ] + }, + 'America/Punta_Arenas': { + u: -180, + c: ['CL'] + }, + 'America/Rainy_River': { + u: -360, + d: -300, + c: ['CA'] + }, + 'America/Rankin_Inlet': { + u: -360, + d: -300, + c: ['CA'] + }, + 'America/Recife': { + u: -180, + c: ['BR'] + }, + 'America/Regina': { + u: -360, + c: ['CA'] + }, + 'America/Resolute': { + u: -360, + d: -300, + c: ['CA'] + }, + 'America/Rio_Branco': { + u: -300, + c: ['BR'] + }, + 'America/Rosario': { + a: 'America/Argentina/Cordoba', + r: 1 + }, + 'America/Santa_Isabel': { + a: 'America/Tijuana', + r: 1 + }, + 'America/Santarem': { + u: -180, + c: ['BR'] + }, + 'America/Santiago': { + u: -240, + d: -180, + c: ['CL'] + }, + 'America/Santo_Domingo': { + u: -240, + c: ['DO'] + }, + 'America/Sao_Paulo': { + u: -180, + c: ['BR'] + }, + 'America/Scoresbysund': { + u: -60, + d: 0, + c: ['GL'] + }, + 'America/Shiprock': { + a: 'America/Denver', + r: 1 + }, + 'America/Sitka': { + u: -540, + d: -480, + c: ['US'] + }, + 'America/St_Barthelemy': { + a: 'America/Puerto_Rico', + c: ['BL'], + r: 1 + }, + 'America/St_Johns': { + u: -150, + d: -90, + c: ['CA'] + }, + 'America/St_Kitts': { + a: 'America/Puerto_Rico', + c: ['KN'], + r: 1 + }, + 'America/St_Lucia': { + a: 'America/Puerto_Rico', + c: ['LC'], + r: 1 + }, + 'America/St_Thomas': { + a: 'America/Puerto_Rico', + c: ['VI'], + r: 1 + }, + 'America/St_Vincent': { + a: 'America/Puerto_Rico', + c: ['VC'], + r: 1 + }, + 'America/Swift_Current': { + u: -360, + c: ['CA'] + }, + 'America/Tegucigalpa': { + u: -360, + c: ['HN'] + }, + 'America/Thule': { + u: -240, + d: -180, + c: ['GL'] + }, + 'America/Thunder_Bay': { + u: -300, + d: -240, + c: ['CA'] + }, + 'America/Tijuana': { + u: -480, + d: -420, + c: ['MX'] + }, + 'America/Toronto': { + u: -300, + d: -240, + c: ['CA', 'BS'] + }, + 'America/Tortola': { + a: 'America/Puerto_Rico', + c: ['VG'], + r: 1 + }, + 'America/Vancouver': { + u: -480, + d: -420, + c: ['CA'] + }, + 'America/Virgin': { + a: 'America/Puerto_Rico', + c: ['VI'], + r: 1 + }, + 'America/Whitehorse': { + u: -420, + c: ['CA'] + }, + 'America/Winnipeg': { + u: -360, + d: -300, + c: ['CA'] + }, + 'America/Yakutat': { + u: -540, + d: -480, + c: ['US'] + }, + 'America/Yellowknife': { + u: -420, + d: -360, + c: ['CA'] + }, + 'Antarctica/Casey': { + u: 660, + c: ['AQ'] + }, + 'Antarctica/Davis': { + u: 420, + c: ['AQ'] + }, + 'Antarctica/DumontDUrville': { + a: 'Pacific/Port_Moresby', + c: ['AQ'], + r: 1 + }, + 'Antarctica/Macquarie': { + u: 600, + d: 660, + c: ['AU'] + }, + 'Antarctica/Mawson': { + u: 300, + c: ['AQ'] + }, + 'Antarctica/McMurdo': { + a: 'Pacific/Auckland', + c: ['AQ'], + r: 1 + }, + 'Antarctica/Palmer': { + u: -180, + c: ['AQ'] + }, + 'Antarctica/Rothera': { + u: -180, + c: ['AQ'] + }, + 'Antarctica/South_Pole': { + a: 'Pacific/Auckland', + c: ['AQ'], + r: 1 + }, + 'Antarctica/Syowa': { + a: 'Asia/Riyadh', + c: ['AQ'], + r: 1 + }, + 'Antarctica/Troll': { + u: 0, + d: 120, + c: ['AQ'] + }, + 'Antarctica/Vostok': { + u: 360, + c: ['AQ'] + }, + 'Arctic/Longyearbyen': { + a: 'Europe/Oslo', + c: ['SJ'], + r: 1 + }, + 'Asia/Aden': { + a: 'Asia/Riyadh', + c: ['YE'], + r: 1 + }, + 'Asia/Almaty': { + u: 360, + c: ['KZ'] + }, + 'Asia/Amman': { + u: 120, + d: 180, + c: ['JO'] + }, + 'Asia/Anadyr': { + u: 720, + c: ['RU'] + }, + 'Asia/Aqtau': { + u: 300, + c: ['KZ'] + }, + 'Asia/Aqtobe': { + u: 300, + c: ['KZ'] + }, + 'Asia/Ashgabat': { + u: 300, + c: ['TM'] + }, + 'Asia/Ashkhabad': { + a: 'Asia/Ashgabat', + r: 1 + }, + 'Asia/Atyrau': { + u: 300, + c: ['KZ'] + }, + 'Asia/Baghdad': { + u: 180, + 
c: ['IQ'] + }, + 'Asia/Bahrain': { + a: 'Asia/Qatar', + c: ['BH'], + r: 1 + }, + 'Asia/Baku': { + u: 240, + c: ['AZ'] + }, + 'Asia/Bangkok': { + u: 420, + c: ['TH', 'KH', 'LA', 'VN'] + }, + 'Asia/Barnaul': { + u: 420, + c: ['RU'] + }, + 'Asia/Beirut': { + u: 120, + d: 180, + c: ['LB'] + }, + 'Asia/Bishkek': { + u: 360, + c: ['KG'] + }, + 'Asia/Brunei': { + u: 480, + c: ['BN'] + }, + 'Asia/Calcutta': { + a: 'Asia/Kolkata', + r: 1 + }, + 'Asia/Chita': { + u: 540, + c: ['RU'] + }, + 'Asia/Choibalsan': { + u: 480, + c: ['MN'] + }, + 'Asia/Chongqing': { + a: 'Asia/Shanghai', + r: 1 + }, + 'Asia/Chungking': { + a: 'Asia/Shanghai', + r: 1 + }, + 'Asia/Colombo': { + u: 330, + c: ['LK'] + }, + 'Asia/Dacca': { + a: 'Asia/Dhaka', + r: 1 + }, + 'Asia/Damascus': { + u: 120, + d: 180, + c: ['SY'] + }, + 'Asia/Dhaka': { + u: 360, + c: ['BD'] + }, + 'Asia/Dili': { + u: 540, + c: ['TL'] + }, + 'Asia/Dubai': { + u: 240, + c: ['AE', 'OM'] + }, + 'Asia/Dushanbe': { + u: 300, + c: ['TJ'] + }, + 'Asia/Famagusta': { + u: 120, + d: 180, + c: ['CY'] + }, + 'Asia/Gaza': { + u: 120, + d: 180, + c: ['PS'] + }, + 'Asia/Harbin': { + a: 'Asia/Shanghai', + r: 1 + }, + 'Asia/Hebron': { + u: 120, + d: 180, + c: ['PS'] + }, + 'Asia/Ho_Chi_Minh': { + u: 420, + c: ['VN'] + }, + 'Asia/Hong_Kong': { + u: 480, + c: ['HK'] + }, + 'Asia/Hovd': { + u: 420, + c: ['MN'] + }, + 'Asia/Irkutsk': { + u: 480, + c: ['RU'] + }, + 'Asia/Istanbul': { + a: 'Europe/Istanbul', + r: 1 + }, + 'Asia/Jakarta': { + u: 420, + c: ['ID'] + }, + 'Asia/Jayapura': { + u: 540, + c: ['ID'] + }, + 'Asia/Jerusalem': { + u: 120, + d: 180, + c: ['IL'] + }, + 'Asia/Kabul': { + u: 270, + c: ['AF'] + }, + 'Asia/Kamchatka': { + u: 720, + c: ['RU'] + }, + 'Asia/Karachi': { + u: 300, + c: ['PK'] + }, + 'Asia/Kashgar': { + a: 'Asia/Urumqi', + r: 1 + }, + 'Asia/Kathmandu': { + u: 345, + c: ['NP'] + }, + 'Asia/Katmandu': { + a: 'Asia/Kathmandu', + r: 1 + }, + 'Asia/Khandyga': { + u: 540, + c: ['RU'] + }, + 'Asia/Kolkata': { + u: 330, + c: ['IN'] + }, + 'Asia/Krasnoyarsk': { + u: 420, + c: ['RU'] + }, + 'Asia/Kuala_Lumpur': { + u: 480, + c: ['MY'] + }, + 'Asia/Kuching': { + u: 480, + c: ['MY'] + }, + 'Asia/Kuwait': { + a: 'Asia/Riyadh', + c: ['KW'], + r: 1 + }, + 'Asia/Macao': { + a: 'Asia/Macau', + r: 1 + }, + 'Asia/Macau': { + u: 480, + c: ['MO'] + }, + 'Asia/Magadan': { + u: 660, + c: ['RU'] + }, + 'Asia/Makassar': { + u: 480, + c: ['ID'] + }, + 'Asia/Manila': { + u: 480, + c: ['PH'] + }, + 'Asia/Muscat': { + a: 'Asia/Dubai', + c: ['OM'], + r: 1 + }, + 'Asia/Nicosia': { + u: 120, + d: 180, + c: ['CY'] + }, + 'Asia/Novokuznetsk': { + u: 420, + c: ['RU'] + }, + 'Asia/Novosibirsk': { + u: 420, + c: ['RU'] + }, + 'Asia/Omsk': { + u: 360, + c: ['RU'] + }, + 'Asia/Oral': { + u: 300, + c: ['KZ'] + }, + 'Asia/Phnom_Penh': { + a: 'Asia/Bangkok', + c: ['KH'], + r: 1 + }, + 'Asia/Pontianak': { + u: 420, + c: ['ID'] + }, + 'Asia/Pyongyang': { + u: 540, + c: ['KP'] + }, + 'Asia/Qatar': { + u: 180, + c: ['QA', 'BH'] + }, + 'Asia/Qostanay': { + u: 360, + c: ['KZ'] + }, + 'Asia/Qyzylorda': { + u: 300, + c: ['KZ'] + }, + 'Asia/Rangoon': { + a: 'Asia/Yangon', + r: 1 + }, + 'Asia/Riyadh': { + u: 180, + c: ['SA', 'AQ', 'KW', 'YE'] + }, + 'Asia/Saigon': { + a: 'Asia/Ho_Chi_Minh', + r: 1 + }, + 'Asia/Sakhalin': { + u: 660, + c: ['RU'] + }, + 'Asia/Samarkand': { + u: 300, + c: ['UZ'] + }, + 'Asia/Seoul': { + u: 540, + c: ['KR'] + }, + 'Asia/Shanghai': { + u: 480, + c: ['CN'] + }, + 'Asia/Singapore': { + u: 480, + c: ['SG', 'MY'] + }, + 'Asia/Srednekolymsk': { + u: 660, + c: ['RU'] + }, + 
'Asia/Taipei': { + u: 480, + c: ['TW'] + }, + 'Asia/Tashkent': { + u: 300, + c: ['UZ'] + }, + 'Asia/Tbilisi': { + u: 240, + c: ['GE'] + }, + 'Asia/Tehran': { + u: 210, + d: 270, + c: ['IR'] + }, + 'Asia/Tel_Aviv': { + a: 'Asia/Jerusalem', + r: 1 + }, + 'Asia/Thimbu': { + a: 'Asia/Thimphu', + r: 1 + }, + 'Asia/Thimphu': { + u: 360, + c: ['BT'] + }, + 'Asia/Tokyo': { + u: 540, + c: ['JP'] + }, + 'Asia/Tomsk': { + u: 420, + c: ['RU'] + }, + 'Asia/Ujung_Pandang': { + a: 'Asia/Makassar', + r: 1 + }, + 'Asia/Ulaanbaatar': { + u: 480, + c: ['MN'] + }, + 'Asia/Ulan_Bator': { + a: 'Asia/Ulaanbaatar', + r: 1 + }, + 'Asia/Urumqi': { + u: 360, + c: ['CN'] + }, + 'Asia/Ust-Nera': { + u: 600, + c: ['RU'] + }, + 'Asia/Vientiane': { + a: 'Asia/Bangkok', + c: ['LA'], + r: 1 + }, + 'Asia/Vladivostok': { + u: 600, + c: ['RU'] + }, + 'Asia/Yakutsk': { + u: 540, + c: ['RU'] + }, + 'Asia/Yangon': { + u: 390, + c: ['MM'] + }, + 'Asia/Yekaterinburg': { + u: 300, + c: ['RU'] + }, + 'Asia/Yerevan': { + u: 240, + c: ['AM'] + }, + 'Atlantic/Azores': { + u: -60, + d: 0, + c: ['PT'] + }, + 'Atlantic/Bermuda': { + u: -240, + d: -180, + c: ['BM'] + }, + 'Atlantic/Canary': { + u: 0, + d: 60, + c: ['ES'] + }, + 'Atlantic/Cape_Verde': { + u: -60, + c: ['CV'] + }, + 'Atlantic/Faeroe': { + a: 'Atlantic/Faroe', + r: 1 + }, + 'Atlantic/Faroe': { + u: 0, + d: 60, + c: ['FO'] + }, + 'Atlantic/Jan_Mayen': { + a: 'Europe/Oslo', + c: ['SJ'], + r: 1 + }, + 'Atlantic/Madeira': { + u: 0, + d: 60, + c: ['PT'] + }, + 'Atlantic/Reykjavik': { + u: 0, + c: ['IS'] + }, + 'Atlantic/South_Georgia': { + u: -120, + c: ['GS'] + }, + 'Atlantic/St_Helena': { + a: 'Africa/Abidjan', + c: ['SH'], + r: 1 + }, + 'Atlantic/Stanley': { + u: -180, + c: ['FK'] + }, + 'Australia/ACT': { + a: 'Australia/Sydney', + r: 1 + }, + 'Australia/Adelaide': { + u: 570, + d: 630, + c: ['AU'] + }, + 'Australia/Brisbane': { + u: 600, + c: ['AU'] + }, + 'Australia/Broken_Hill': { + u: 570, + d: 630, + c: ['AU'] + }, + 'Australia/Canberra': { + a: 'Australia/Sydney', + r: 1 + }, + 'Australia/Currie': { + a: 'Australia/Hobart', + r: 1 + }, + 'Australia/Darwin': { + u: 570, + c: ['AU'] + }, + 'Australia/Eucla': { + u: 525, + c: ['AU'] + }, + 'Australia/Hobart': { + u: 600, + d: 660, + c: ['AU'] + }, + 'Australia/LHI': { + a: 'Australia/Lord_Howe', + r: 1 + }, + 'Australia/Lindeman': { + u: 600, + c: ['AU'] + }, + 'Australia/Lord_Howe': { + u: 630, + d: 660, + c: ['AU'] + }, + 'Australia/Melbourne': { + u: 600, + d: 660, + c: ['AU'] + }, + 'Australia/NSW': { + a: 'Australia/Sydney', + r: 1 + }, + 'Australia/North': { + a: 'Australia/Darwin', + r: 1 + }, + 'Australia/Perth': { + u: 480, + c: ['AU'] + }, + 'Australia/Queensland': { + a: 'Australia/Brisbane', + r: 1 + }, + 'Australia/South': { + a: 'Australia/Adelaide', + r: 1 + }, + 'Australia/Sydney': { + u: 600, + d: 660, + c: ['AU'] + }, + 'Australia/Tasmania': { + a: 'Australia/Hobart', + r: 1 + }, + 'Australia/Victoria': { + a: 'Australia/Melbourne', + r: 1 + }, + 'Australia/West': { + a: 'Australia/Perth', + r: 1 + }, + 'Australia/Yancowinna': { + a: 'Australia/Broken_Hill', + r: 1 + }, + 'Brazil/Acre': { + a: 'America/Rio_Branco', + r: 1 + }, + 'Brazil/DeNoronha': { + a: 'America/Noronha', + r: 1 + }, + 'Brazil/East': { + a: 'America/Sao_Paulo', + r: 1 + }, + 'Brazil/West': { + a: 'America/Manaus', + r: 1 + }, + CET: { + u: 60, + d: 120 + }, + CST6CDT: { + u: -360, + d: -300 + }, + 'Canada/Atlantic': { + a: 'America/Halifax', + r: 1 + }, + 'Canada/Central': { + a: 'America/Winnipeg', + r: 1 + }, + 'Canada/Eastern': { + a: 
'America/Toronto', + c: ['CA'], + r: 1 + }, + 'Canada/Mountain': { + a: 'America/Edmonton', + r: 1 + }, + 'Canada/Newfoundland': { + a: 'America/St_Johns', + r: 1 + }, + 'Canada/Pacific': { + a: 'America/Vancouver', + r: 1 + }, + 'Canada/Saskatchewan': { + a: 'America/Regina', + r: 1 + }, + 'Canada/Yukon': { + a: 'America/Whitehorse', + r: 1 + }, + 'Chile/Continental': { + a: 'America/Santiago', + r: 1 + }, + 'Chile/EasterIsland': { + a: 'Pacific/Easter', + r: 1 + }, + Cuba: { + a: 'America/Havana', + r: 1 + }, + EET: { + u: 120, + d: 180 + }, + EST: { + u: -300 + }, + EST5EDT: { + u: -300, + d: -240 + }, + Egypt: { + a: 'Africa/Cairo', + r: 1 + }, + Eire: { + a: 'Europe/Dublin', + r: 1 + }, + 'Etc/GMT': { + u: 0 + }, + 'Etc/GMT+0': { + a: 'Etc/GMT', + r: 1 + }, + 'Etc/GMT+1': { + u: -60 + }, + 'Etc/GMT+10': { + u: -600 + }, + 'Etc/GMT+11': { + u: -660 + }, + 'Etc/GMT+12': { + u: -720 + }, + 'Etc/GMT+2': { + u: -120 + }, + 'Etc/GMT+3': { + u: -180 + }, + 'Etc/GMT+4': { + u: -240 + }, + 'Etc/GMT+5': { + u: -300 + }, + 'Etc/GMT+6': { + u: -360 + }, + 'Etc/GMT+7': { + u: -420 + }, + 'Etc/GMT+8': { + u: -480 + }, + 'Etc/GMT+9': { + u: -540 + }, + 'Etc/GMT-0': { + a: 'Etc/GMT', + r: 1 + }, + 'Etc/GMT-1': { + u: 60 + }, + 'Etc/GMT-10': { + u: 600 + }, + 'Etc/GMT-11': { + u: 660 + }, + 'Etc/GMT-12': { + u: 720 + }, + 'Etc/GMT-13': { + u: 780 + }, + 'Etc/GMT-14': { + u: 840 + }, + 'Etc/GMT-2': { + u: 120 + }, + 'Etc/GMT-3': { + u: 180 + }, + 'Etc/GMT-4': { + u: 240 + }, + 'Etc/GMT-5': { + u: 300 + }, + 'Etc/GMT-6': { + u: 360 + }, + 'Etc/GMT-7': { + u: 420 + }, + 'Etc/GMT-8': { + u: 480 + }, + 'Etc/GMT-9': { + u: 540 + }, + 'Etc/GMT0': { + a: 'Etc/GMT', + r: 1 + }, + 'Etc/Greenwich': { + a: 'Etc/GMT', + r: 1 + }, + 'Etc/UCT': { + a: 'Etc/UTC', + r: 1 + }, + 'Etc/UTC': { + u: 0 + }, + 'Etc/Universal': { + a: 'Etc/UTC', + r: 1 + }, + 'Etc/Zulu': { + a: 'Etc/UTC', + r: 1 + }, + 'Europe/Amsterdam': { + u: 60, + d: 120, + c: ['NL'] + }, + 'Europe/Andorra': { + u: 60, + d: 120, + c: ['AD'] + }, + 'Europe/Astrakhan': { + u: 240, + c: ['RU'] + }, + 'Europe/Athens': { + u: 120, + d: 180, + c: ['GR'] + }, + 'Europe/Belfast': { + a: 'Europe/London', + c: ['GB'], + r: 1 + }, + 'Europe/Belgrade': { + u: 60, + d: 120, + c: ['RS', 'BA', 'HR', 'ME', 'MK', 'SI'] + }, + 'Europe/Berlin': { + u: 60, + d: 120, + c: ['DE'] + }, + 'Europe/Bratislava': { + a: 'Europe/Prague', + c: ['SK'], + r: 1 + }, + 'Europe/Brussels': { + u: 60, + d: 120, + c: ['BE'] + }, + 'Europe/Bucharest': { + u: 120, + d: 180, + c: ['RO'] + }, + 'Europe/Budapest': { + u: 60, + d: 120, + c: ['HU'] + }, + 'Europe/Busingen': { + a: 'Europe/Zurich', + c: ['DE'], + r: 1 + }, + 'Europe/Chisinau': { + u: 120, + d: 180, + c: ['MD'] + }, + 'Europe/Copenhagen': { + u: 60, + d: 120, + c: ['DK'] + }, + 'Europe/Dublin': { + u: 60, + d: 0, + c: ['IE'] + }, + 'Europe/Gibraltar': { + u: 60, + d: 120, + c: ['GI'] + }, + 'Europe/Guernsey': { + a: 'Europe/London', + c: ['GG'], + r: 1 + }, + 'Europe/Helsinki': { + u: 120, + d: 180, + c: ['FI', 'AX'] + }, + 'Europe/Isle_of_Man': { + a: 'Europe/London', + c: ['IM'], + r: 1 + }, + 'Europe/Istanbul': { + u: 180, + c: ['TR'] + }, + 'Europe/Jersey': { + a: 'Europe/London', + c: ['JE'], + r: 1 + }, + 'Europe/Kaliningrad': { + u: 120, + c: ['RU'] + }, + 'Europe/Kiev': { + u: 120, + d: 180, + c: ['UA'] + }, + 'Europe/Kirov': { + u: 180, + c: ['RU'] + }, + 'Europe/Lisbon': { + u: 0, + d: 60, + c: ['PT'] + }, + 'Europe/Ljubljana': { + a: 'Europe/Belgrade', + c: ['SI'], + r: 1 + }, + 'Europe/London': { + u: 0, + d: 60, + c: 
['GB', 'GG', 'IM', 'JE'] + }, + 'Europe/Luxembourg': { + u: 60, + d: 120, + c: ['LU'] + }, + 'Europe/Madrid': { + u: 60, + d: 120, + c: ['ES'] + }, + 'Europe/Malta': { + u: 60, + d: 120, + c: ['MT'] + }, + 'Europe/Mariehamn': { + a: 'Europe/Helsinki', + c: ['AX'], + r: 1 + }, + 'Europe/Minsk': { + u: 180, + c: ['BY'] + }, + 'Europe/Monaco': { + u: 60, + d: 120, + c: ['MC'] + }, + 'Europe/Moscow': { + u: 180, + c: ['RU'] + }, + 'Europe/Nicosia': { + a: 'Asia/Nicosia', + r: 1 + }, + 'Europe/Oslo': { + u: 60, + d: 120, + c: ['NO', 'SJ', 'BV'] + }, + 'Europe/Paris': { + u: 60, + d: 120, + c: ['FR'] + }, + 'Europe/Podgorica': { + a: 'Europe/Belgrade', + c: ['ME'], + r: 1 + }, + 'Europe/Prague': { + u: 60, + d: 120, + c: ['CZ', 'SK'] + }, + 'Europe/Riga': { + u: 120, + d: 180, + c: ['LV'] + }, + 'Europe/Rome': { + u: 60, + d: 120, + c: ['IT', 'SM', 'VA'] + }, + 'Europe/Samara': { + u: 240, + c: ['RU'] + }, + 'Europe/San_Marino': { + a: 'Europe/Rome', + c: ['SM'], + r: 1 + }, + 'Europe/Sarajevo': { + a: 'Europe/Belgrade', + c: ['BA'], + r: 1 + }, + 'Europe/Saratov': { + u: 240, + c: ['RU'] + }, + 'Europe/Simferopol': { + u: 180, + c: ['RU', 'UA'] + }, + 'Europe/Skopje': { + a: 'Europe/Belgrade', + c: ['MK'], + r: 1 + }, + 'Europe/Sofia': { + u: 120, + d: 180, + c: ['BG'] + }, + 'Europe/Stockholm': { + u: 60, + d: 120, + c: ['SE'] + }, + 'Europe/Tallinn': { + u: 120, + d: 180, + c: ['EE'] + }, + 'Europe/Tirane': { + u: 60, + d: 120, + c: ['AL'] + }, + 'Europe/Tiraspol': { + a: 'Europe/Chisinau', + r: 1 + }, + 'Europe/Ulyanovsk': { + u: 240, + c: ['RU'] + }, + 'Europe/Uzhgorod': { + u: 120, + d: 180, + c: ['UA'] + }, + 'Europe/Vaduz': { + a: 'Europe/Zurich', + c: ['LI'], + r: 1 + }, + 'Europe/Vatican': { + a: 'Europe/Rome', + c: ['VA'], + r: 1 + }, + 'Europe/Vienna': { + u: 60, + d: 120, + c: ['AT'] + }, + 'Europe/Vilnius': { + u: 120, + d: 180, + c: ['LT'] + }, + 'Europe/Volgograd': { + u: 180, + c: ['RU'] + }, + 'Europe/Warsaw': { + u: 60, + d: 120, + c: ['PL'] + }, + 'Europe/Zagreb': { + a: 'Europe/Belgrade', + c: ['HR'], + r: 1 + }, + 'Europe/Zaporozhye': { + u: 120, + d: 180, + c: ['UA'] + }, + 'Europe/Zurich': { + u: 60, + d: 120, + c: ['CH', 'DE', 'LI'] + }, + Factory: { + u: 0 + }, + GB: { + a: 'Europe/London', + c: ['GB'], + r: 1 + }, + 'GB-Eire': { + a: 'Europe/London', + c: ['GB'], + r: 1 + }, + GMT: { + a: 'Etc/GMT', + r: 1 + }, + 'GMT+0': { + a: 'Etc/GMT', + r: 1 + }, + 'GMT-0': { + a: 'Etc/GMT', + r: 1 + }, + GMT0: { + a: 'Etc/GMT', + r: 1 + }, + Greenwich: { + a: 'Etc/GMT', + r: 1 + }, + HST: { + u: -600 + }, + Hongkong: { + a: 'Asia/Hong_Kong', + r: 1 + }, + Iceland: { + a: 'Atlantic/Reykjavik', + r: 1 + }, + 'Indian/Antananarivo': { + a: 'Africa/Nairobi', + c: ['MG'], + r: 1 + }, + 'Indian/Chagos': { + u: 360, + c: ['IO'] + }, + 'Indian/Christmas': { + u: 420, + c: ['CX'] + }, + 'Indian/Cocos': { + u: 390, + c: ['CC'] + }, + 'Indian/Comoro': { + a: 'Africa/Nairobi', + c: ['KM'], + r: 1 + }, + 'Indian/Kerguelen': { + u: 300, + c: ['TF', 'HM'] + }, + 'Indian/Mahe': { + u: 240, + c: ['SC'] + }, + 'Indian/Maldives': { + u: 300, + c: ['MV'] + }, + 'Indian/Mauritius': { + u: 240, + c: ['MU'] + }, + 'Indian/Mayotte': { + a: 'Africa/Nairobi', + c: ['YT'], + r: 1 + }, + 'Indian/Reunion': { + u: 240, + c: ['RE', 'TF'] + }, + Iran: { + a: 'Asia/Tehran', + r: 1 + }, + Israel: { + a: 'Asia/Jerusalem', + r: 1 + }, + Jamaica: { + a: 'America/Jamaica', + r: 1 + }, + Japan: { + a: 'Asia/Tokyo', + r: 1 + }, + Kwajalein: { + a: 'Pacific/Kwajalein', + r: 1 + }, + Libya: { + a: 'Africa/Tripoli', + r: 1 
+ }, + MET: { + u: 60, + d: 120 + }, + MST: { + u: -420 + }, + MST7MDT: { + u: -420, + d: -360 + }, + 'Mexico/BajaNorte': { + a: 'America/Tijuana', + r: 1 + }, + 'Mexico/BajaSur': { + a: 'America/Mazatlan', + r: 1 + }, + 'Mexico/General': { + a: 'America/Mexico_City', + r: 1 + }, + NZ: { + a: 'Pacific/Auckland', + c: ['NZ'], + r: 1 + }, + 'NZ-CHAT': { + a: 'Pacific/Chatham', + r: 1 + }, + Navajo: { + a: 'America/Denver', + r: 1 + }, + PRC: { + a: 'Asia/Shanghai', + r: 1 + }, + PST8PDT: { + u: -480, + d: -420 + }, + 'Pacific/Apia': { + u: 780, + c: ['WS'] + }, + 'Pacific/Auckland': { + u: 720, + d: 780, + c: ['NZ', 'AQ'] + }, + 'Pacific/Bougainville': { + u: 660, + c: ['PG'] + }, + 'Pacific/Chatham': { + u: 765, + d: 825, + c: ['NZ'] + }, + 'Pacific/Chuuk': { + u: 600, + c: ['FM'] + }, + 'Pacific/Easter': { + u: -360, + d: -300, + c: ['CL'] + }, + 'Pacific/Efate': { + u: 660, + c: ['VU'] + }, + 'Pacific/Enderbury': { + a: 'Pacific/Kanton', + r: 1 + }, + 'Pacific/Fakaofo': { + u: 780, + c: ['TK'] + }, + 'Pacific/Fiji': { + u: 720, + d: 780, + c: ['FJ'] + }, + 'Pacific/Funafuti': { + u: 720, + c: ['TV'] + }, + 'Pacific/Galapagos': { + u: -360, + c: ['EC'] + }, + 'Pacific/Gambier': { + u: -540, + c: ['PF'] + }, + 'Pacific/Guadalcanal': { + u: 660, + c: ['SB'] + }, + 'Pacific/Guam': { + u: 600, + c: ['GU', 'MP'] + }, + 'Pacific/Honolulu': { + u: -600, + c: ['US', 'UM'] + }, + 'Pacific/Johnston': { + a: 'Pacific/Honolulu', + c: ['UM'], + r: 1 + }, + 'Pacific/Kanton': { + u: 780, + c: ['KI'] + }, + 'Pacific/Kiritimati': { + u: 840, + c: ['KI'] + }, + 'Pacific/Kosrae': { + u: 660, + c: ['FM'] + }, + 'Pacific/Kwajalein': { + u: 720, + c: ['MH'] + }, + 'Pacific/Majuro': { + u: 720, + c: ['MH'] + }, + 'Pacific/Marquesas': { + u: -510, + c: ['PF'] + }, + 'Pacific/Midway': { + a: 'Pacific/Pago_Pago', + c: ['UM'], + r: 1 + }, + 'Pacific/Nauru': { + u: 720, + c: ['NR'] + }, + 'Pacific/Niue': { + u: -660, + c: ['NU'] + }, + 'Pacific/Norfolk': { + u: 660, + d: 720, + c: ['NF'] + }, + 'Pacific/Noumea': { + u: 660, + c: ['NC'] + }, + 'Pacific/Pago_Pago': { + u: -660, + c: ['AS', 'UM'] + }, + 'Pacific/Palau': { + u: 540, + c: ['PW'] + }, + 'Pacific/Pitcairn': { + u: -480, + c: ['PN'] + }, + 'Pacific/Pohnpei': { + u: 660, + c: ['FM'] + }, + 'Pacific/Ponape': { + a: 'Pacific/Pohnpei', + r: 1 + }, + 'Pacific/Port_Moresby': { + u: 600, + c: ['PG', 'AQ'] + }, + 'Pacific/Rarotonga': { + u: -600, + c: ['CK'] + }, + 'Pacific/Saipan': { + a: 'Pacific/Guam', + c: ['MP'], + r: 1 + }, + 'Pacific/Samoa': { + a: 'Pacific/Pago_Pago', + c: ['WS'], + r: 1 + }, + 'Pacific/Tahiti': { + u: -600, + c: ['PF'] + }, + 'Pacific/Tarawa': { + u: 720, + c: ['KI'] + }, + 'Pacific/Tongatapu': { + u: 780, + c: ['TO'] + }, + 'Pacific/Truk': { + a: 'Pacific/Chuuk', + r: 1 + }, + 'Pacific/Wake': { + u: 720, + c: ['UM'] + }, + 'Pacific/Wallis': { + u: 720, + c: ['WF'] + }, + 'Pacific/Yap': { + a: 'Pacific/Chuuk', + r: 1 + }, + Poland: { + a: 'Europe/Warsaw', + r: 1 + }, + Portugal: { + a: 'Europe/Lisbon', + r: 1 + }, + ROC: { + a: 'Asia/Taipei', + r: 1 + }, + ROK: { + a: 'Asia/Seoul', + r: 1 + }, + Singapore: { + a: 'Asia/Singapore', + c: ['SG'], + r: 1 + }, + Turkey: { + a: 'Europe/Istanbul', + r: 1 + }, + UCT: { + a: 'Etc/UTC', + r: 1 + }, + 'US/Alaska': { + a: 'America/Anchorage', + r: 1 + }, + 'US/Aleutian': { + a: 'America/Adak', + r: 1 + }, + 'US/Arizona': { + a: 'America/Phoenix', + c: ['US'], + r: 1 + }, + 'US/Central': { + a: 'America/Chicago', + r: 1 + }, + 'US/East-Indiana': { + a: 'America/Indiana/Indianapolis', + r: 1 + }, + 
'US/Eastern': { + a: 'America/New_York', + r: 1 + }, + 'US/Hawaii': { + a: 'Pacific/Honolulu', + c: ['US'], + r: 1 + }, + 'US/Indiana-Starke': { + a: 'America/Indiana/Knox', + r: 1 + }, + 'US/Michigan': { + a: 'America/Detroit', + r: 1 + }, + 'US/Mountain': { + a: 'America/Denver', + r: 1 + }, + 'US/Pacific': { + a: 'America/Los_Angeles', + r: 1 + }, + 'US/Samoa': { + a: 'Pacific/Pago_Pago', + c: ['WS'], + r: 1 + }, + UTC: { + a: 'Etc/UTC', + r: 1 + }, + Universal: { + a: 'Etc/UTC', + r: 1 + }, + 'W-SU': { + a: 'Europe/Moscow', + r: 1 + }, + WET: { + u: 0, + d: 60 + }, + Zulu: { + a: 'Etc/UTC', + r: 1 + } +}; + +export function getCountry(): string | null { + const timezone = Intl.DateTimeFormat().resolvedOptions().timeZone; + + if (timezone === '' || !timezone) { + return null; + } + + const _country = timezones[timezone]?.c?.[0] ?? null; + const country = countries[_country] ?? null; + return country; +} diff --git a/dashboard/src2/utils/dayjs.js b/dashboard/src2/utils/dayjs.js new file mode 100644 index 0000000..77ac9fa --- /dev/null +++ b/dashboard/src2/utils/dayjs.js @@ -0,0 +1,30 @@ +import dayjs from 'dayjs/esm'; +import relativeTime from 'dayjs/esm/plugin/relativeTime'; +import localizedFormat from 'dayjs/esm/plugin/localizedFormat'; +import updateLocale from 'dayjs/esm/plugin/updateLocale'; +import isToday from 'dayjs/esm/plugin/isToday'; +import duration from 'dayjs/esm/plugin/duration'; +import utc from 'dayjs/esm/plugin/utc'; +import timezone from 'dayjs/esm/plugin/timezone'; +import advancedFormat from 'dayjs/plugin/advancedFormat'; + +dayjs.extend(updateLocale); +dayjs.extend(relativeTime); +dayjs.extend(localizedFormat); +dayjs.extend(isToday); +dayjs.extend(duration); +dayjs.extend(utc); +dayjs.extend(timezone); +dayjs.extend(advancedFormat); + +export function dayjsLocal(dateTimeString) { + let localTimezone = dayjs.tz.guess(); + // dates are stored in Asia/Calcutta timezone on the server + return dayjs.tz(dateTimeString, 'Asia/Calcutta').tz(localTimezone); +} + +export function dayjsIST(dateTimeString) { + return dayjs(dateTimeString).tz('Asia/Calcutta'); +} + +export default dayjs; diff --git a/dashboard/src2/utils/device.ts b/dashboard/src2/utils/device.ts new file mode 100644 index 0000000..4e4c9af --- /dev/null +++ b/dashboard/src2/utils/device.ts @@ -0,0 +1,19 @@ +import type { Platform } from '../types'; + +export function getPlatform(): Platform { + const ua = navigator.userAgent.toLowerCase(); + + if (ua.indexOf('win') > -1) { + return 'win'; + } else if (ua.indexOf('mac') > -1) { + return 'mac'; + } else if (ua.indexOf('x11') > -1 || ua.indexOf('linux') > -1) { + return 'linux'; + } + + return 'unknown'; +} + +export function isMobile(): boolean { + return window.innerWidth < 640; +} diff --git a/dashboard/src2/utils/error.ts b/dashboard/src2/utils/error.ts new file mode 100644 index 0000000..82b1722 --- /dev/null +++ b/dashboard/src2/utils/error.ts @@ -0,0 +1,6 @@ +export class DashboardError extends Error { + constructor(message: string) { + super(message); + this.name = 'DashboardError'; + } +} diff --git a/dashboard/src2/utils/format.js b/dashboard/src2/utils/format.js new file mode 100644 index 0000000..491fcda --- /dev/null +++ b/dashboard/src2/utils/format.js @@ -0,0 +1,225 @@ +import dayjs, { dayjsLocal } from '../utils/dayjs'; +import { getTeam } from '../data/team'; +import { format } from 'sql-formatter'; + +export function bytes(bytes, decimals = 2, current = 0) { + if (bytes === 0) return '0 Bytes'; + + const k = 1024; + const dm = decimals < 0 ? 
0 : decimals; + const sizes = ['Bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB']; + let i = Math.floor(Math.log(Math.abs(bytes)) / Math.log(k)); + if (i < 0) i++; + + return ( + parseFloat((bytes / Math.pow(k, i)).toFixed(dm)) + ' ' + sizes[i + current] + ); +} + +export function duration(value) { + if (!value) return; + + let [hours, minutes, seconds] = value.split(':'); + [hours, minutes, seconds] = [ + parseInt(hours), + parseInt(minutes), + parseInt(seconds), + ]; + + let format = ''; + if (hours > 0) { + format = 'H[h] m[m] s[s]'; + } else if (minutes > 0) { + format = 'm[m] s[s]'; + } else { + format = 's[s]'; + } + return dayjs.duration({ hours, minutes, seconds }).format(format); +} + +export function plural(number, singular, plural) { + if (typeof number === 'string') { + number = parseInt(number); + } + if (number === 1) { + return singular; + } + return plural; +} + +export function planTitle(plan) { + if (plan === undefined) return; + const $team = getTeam(); + const china = $team.pg?.currency === 'CNY'; + const priceField = china ? 'price_cny' : 'price_usd'; + const price = + plan?.block_monthly == 1 ? plan[priceField] * 12 : plan[priceField]; + return price > 0 ? `${userCurrency(price, 0)}` : plan.plan_title; +} + +export function userCurrency(value, fractions = 2) { + const $team = getTeam(); + if (!$team.pg?.currency) return value; + return currency(value, $team.pg?.currency, fractions); +} + +export function currency(value, currency, fractions = 2) { + return new Intl.NumberFormat('zh-CN', { + style: 'currency', + currency, + maximumFractionDigits: fractions, + }).format(value); +} + +export function numberK(number) { + if (number < 1000) { + return number; + } else { + // Round to one decimal place (Math.round takes no precision argument) + let value = Math.round((number / 1000) * 10) / 10; + + // To handle cases like 8.0, 9.0 etc. + if (value == number / 1000) { + value = parseInt(value); + } + // To handle cases like 8999 -> 9k and not 9.0k + else if (value - 1 == number / 1000) { + value = parseInt(value); + } + + return `${value}k`; + } +} + +export function pricePerDay(price, interval = 'Monthly') { + if (!price) return 0; + // Calculate the number of days in the billing period + const daysInPeriod = interval === 'Annually' ? 
365 : 30; + return price / daysInPeriod; +} + +export function date(value) { + if (!value) return ''; + + // Convert to YYYY-MM-DD format, matching the convention familiar to Chinese users + const date = new Date(value); + const year = date.getFullYear(); + const month = String(date.getMonth() + 1).padStart(2, '0'); + const day = String(date.getDate()).padStart(2, '0'); + + return `${year}-${month}-${day}`; +} + +export function commaSeparator(arr, separator) { + let joinedString = arr.slice(0, -1).join(', '); + + if (arr.length > 1) { + joinedString += ` ${separator} ${arr[arr.length - 1]}`; + } else { + joinedString += arr[0]; + } + + return joinedString; +} + +export function commaAnd(arr) { + return commaSeparator(arr, 'and'); +} + +export function formatSQL(query) { + try { + return format(query, { + language: 'mariadb', + tabWidth: 2, + keywordCase: 'upper', + }); + } catch (_) { + return query; + } +} + +export function formatSeconds(seconds) { + if (seconds === 0) return '0s'; + if (seconds <= 60) return `${seconds}s`; + + const hours = Math.floor(seconds / 3600); + const minutes = Math.floor((seconds % 3600) / 60); + const remainingSeconds = seconds % 60; + + let result = []; + + if (hours > 0) { + result.push(`${hours}h`); + } + + if (minutes > 0) { + result.push(`${minutes}m`); + } + + if (remainingSeconds > 0) { + result.push(`${remainingSeconds}s`); + } + + return result.join(' '); +} + +export function formatCommaSeperatedNumber(number) { + let numStr = number.toString(); + + let lastThree = numStr.slice(-3); + let remaining = numStr.slice(0, -3); + + let parts = []; + while (remaining.length > 2) { + parts.push(remaining.slice(-2)); + remaining = remaining.slice(0, -2); + } + + if (remaining) { + parts.push(remaining); + } + + let result = parts.reverse().join(',') + ',' + lastThree; + // truncate , at start or end + result = result.replace(/^,/, ''); + result = result.replace(/,$/, ''); + return result; +} + +export function formatMilliseconds(ms) { + if (ms < 100) { + return `${ms.toFixed(3).replace(/\.?0+$/, '')}ms`; // Keep milliseconds if less than 100 and remove unnecessary zeros + } else if (ms < 60000) { + // Less than 1 minute, convert to seconds + let seconds = ms / 1000; + return `${seconds.toFixed(1).replace(/\.?0+$/, '')}s`; + } else { + // Convert to minutes + let minutes = ms / 60000; + return `${minutes.toFixed(1).replace(/\.?0+$/, '')}m`; + } +} + +export function formatValue(value, type) { + switch (type) { + case 'bytes': + return bytes(value); + case 'date': + return date(value); + case 'duration': + return duration(value); + case 'durationSeconds': + return formatSeconds(value); + case 'durationMilliseconds': + return formatMilliseconds(value); + case 'commaSeperatedNumber': + return formatCommaSeperatedNumber(value); + case 'numberK': + return numberK(value); + case 'pricePerDay': + return pricePerDay(value); + case 'sql': + return formatSQL(value); + default: + return value; + } +} diff --git a/dashboard/src2/utils/resource.js b/dashboard/src2/utils/resource.js new file mode 100644 index 0000000..1c34c1b --- /dev/null +++ b/dashboard/src2/utils/resource.js @@ -0,0 +1,7 @@ +import { createDocumentResource, getCachedDocumentResource } from 'jingrow-ui'; + +export function getDocResource(object) { + let pg = getCachedDocumentResource(object.pagetype, object.name); + if (!pg) pg = createDocumentResource(object); + return pg; +} diff --git a/dashboard/src2/utils/site.js b/dashboard/src2/utils/site.js new file mode 100644 index 0000000..122e681 --- /dev/null +++ b/dashboard/src2/utils/site.js @@ -0,0 +1,45 @@ 
+import { dayjsLocal } from './dayjs'; +import { plural } from './format'; + +export function trialDays(_trialEndDate) { + let trialEndDate = dayjsLocal(_trialEndDate); + let today = dayjsLocal(); + let diffHours = trialEndDate.diff(today, 'hours'); + let endsIn = ''; + if (diffHours < 0) { + let daysAgo = Math.floor(Math.abs(diffHours) / 24); + endsIn = `${daysAgo} ${plural(daysAgo, 'day', 'days')} ago`; + } else if (diffHours < 24) { + endsIn = `today`; + } else { + let days = Math.round(diffHours / 24) + 1; + endsIn = `in ${days} ${plural(days, 'day', 'days')}`; + } + if (trialEndDate.isAfter(today) || trialEndDate.isSame(today, 'day')) { + return `Trial ends ${endsIn}`; + } else { + return `Trial ended ${endsIn}`; + } +} + +export function isTrialEnded(_trialEndDate) { + let trialEndDate = dayjsLocal(_trialEndDate); + let today = dayjsLocal(); + return trialEndDate.isBefore(today, 'day'); +} + +export function validateSubdomain(subdomain) { + if (!subdomain) { + return 'Subdomain cannot be empty'; + } + if (subdomain.length < 5) { + return 'Subdomain too short. Use 5 or more characters'; + } + if (subdomain.length > 32) { + return 'Subdomain too long. Use 32 or fewer characters'; + } + if (!subdomain.match(/^[a-z0-9][a-z0-9-]*[a-z0-9]$/)) { + return 'Subdomain contains invalid characters. Use lowercase characters, numbers and hyphens'; + } + return null; +} diff --git a/dashboard/src2/utils/throttle.ts b/dashboard/src2/utils/throttle.ts new file mode 100644 index 0000000..4b6c3ba --- /dev/null +++ b/dashboard/src2/utils/throttle.ts @@ -0,0 +1,20 @@ +export function throttle(func: Function, wait: number) { + let timeout = 0; + let pending = false; + + return (...args: any) => { + if (timeout) { + pending = true; + return; + } + + func(...args); + timeout = setTimeout(() => { + timeout = 0; + if (pending) { + func(...args); + pending = false; + } + }, wait); + }; +} diff --git a/dashboard/src2/utils/toast.js b/dashboard/src2/utils/toast.js new file mode 100644 index 0000000..12707da --- /dev/null +++ b/dashboard/src2/utils/toast.js @@ -0,0 +1,16 @@ +import { toast } from 'vue-sonner'; +import { h } from 'vue'; + +export function showErrorToast(e) { + let errorMessage = e.messages?.length ? e.messages.join('\n') : e.message; + toast.error(errorMessage); +} + +export function getToastErrorMessage(e, fallbackMessage = 'An error occurred') { + const errorMessage = e.messages?.length + ? e.messages.join('
') + : e.message || fallbackMessage; + return h('div', { + innerHTML: errorMessage, + }); +} diff --git a/dashboard/src2/vendor/posthog.js b/dashboard/src2/vendor/posthog.js new file mode 100644 index 0000000..0b995cc --- /dev/null +++ b/dashboard/src2/vendor/posthog.js @@ -0,0 +1,44 @@ +!(function (t, e) { + var o, n, p, r; + e.__SV || + ((window.posthog = e), + (e._i = []), + (e.init = function (i, s, a) { + function g(t, e) { + var o = e.split('.'); + 2 == o.length && ((t = t[o[0]]), (e = o[1])), + (t[e] = function () { + t.push([e].concat(Array.prototype.slice.call(arguments, 0))); + }); + } + ((p = t.createElement('script')).type = 'text/javascript'), + (p.crossOrigin = 'anonymous'), + (p.async = !0), + (p.src = + s.api_host.replace('.i.posthog.com', '-assets.i.posthog.com') + + '/static/array.js'), + (r = t.getElementsByTagName('script')[0]).parentNode.insertBefore(p, r); + var u = e; + for ( + void 0 !== a ? (u = e[a] = []) : (a = 'posthog'), + u.people = u.people || [], + u.toString = function (t) { + var e = 'posthog'; + return 'posthog' !== a && (e += '.' + a), t || (e += ' (stub)'), e; + }, + u.people.toString = function () { + return u.toString(1) + '.people (stub)'; + }, + o = + 'init capture register register_once register_for_session unregister unregister_for_session getFeatureFlag getFeatureFlagPayload isFeatureEnabled reloadFeatureFlags updateEarlyAccessFeatureEnrollment getEarlyAccessFeatures on onFeatureFlags onSessionId getSurveys getActiveMatchingSurveys renderSurvey canRenderSurvey getNextSurveyStep identify setPersonProperties group resetGroups setPersonPropertiesForFlags resetPersonPropertiesForFlags setGroupPropertiesForFlags resetGroupPropertiesForFlags reset get_distinct_id getGroups get_session_id get_session_replay_url alias set_config startSessionRecording stopSessionRecording sessionRecordingStarted captureException loadToolbar get_property getSessionProperty createPersonProfile opt_in_capturing opt_out_capturing has_opted_in_capturing has_opted_out_capturing clear_opt_in_out_capturing debug'.split( + ' ' + ), + n = 0; + n < o.length; + n++ + ) + g(u, o[n]); + e._i.push([i, s, a]); + }), + (e.__SV = 1)); +})(document, window.posthog || []); diff --git a/dashboard/tailwind.config.cjs b/dashboard/tailwind.config.cjs new file mode 100644 index 0000000..b05c606 --- /dev/null +++ b/dashboard/tailwind.config.cjs @@ -0,0 +1,59 @@ +const plugin = require('tailwindcss/plugin'); + +module.exports = { + presets: [require('jingrow-ui/src/utils/tailwind.config')], + content: [ + './public/index.html', + './src/**/*.html', + './src/**/*.vue', + './src2/**/*.vue', + './src/assets/*.css', + './node_modules/jingrow-ui/src/components/**/*.{vue,js,ts}' + ], + theme: { + extend: { + width: { + 112: '28rem', + wizard: '650px' + }, + minWidth: { + 40: '10rem' + }, + maxHeight: { + 52: '13rem' + } + }, + container: { + padding: { + xl: '5rem' + }, + margin: { + 3.5: '14px' + } + }, + screens: { + sm: '640px', + md: '768px', + lg: '1024px', + xl: '1280px' + } + }, + plugins: [ + require('@tailwindcss/container-queries'), + plugin(function ({ addUtilities, theme }) { + // Add your custom styles here + addUtilities({ + '.bg-gradient-blue': { + 'background-image': `linear-gradient(180deg,#2c9af1 0%, ${theme( + 'colors.blue.500' + )} 100%)` + } + }); + addUtilities({ + '.bg-gradient-none': { + 'background-image': 'none' + } + }); + }) + ] +}; diff --git a/dashboard/tsconfig.json b/dashboard/tsconfig.json new file mode 100644 index 0000000..e9ed4a5 --- /dev/null +++ 
b/dashboard/tsconfig.json @@ -0,0 +1,19 @@ +{ + "compilerOptions": { + "allowJs": true, + "target": "ESNext", + "useDefineForClassFields": true, + "module": "ESNext", + "moduleResolution": "Node", + "strict": true, + "jsx": "preserve", + "sourceMap": true, + "resolveJsonModule": true, + "isolatedModules": true, + "esModuleInterop": true, + "lib": ["ESNext", "DOM"], + "skipLibCheck": true, + "types": ["unplugin-icons/types/vue"] + }, + "include": ["**/*.ts", "**/*.d.ts"] +} diff --git a/dashboard/vite.config.ts b/dashboard/vite.config.ts new file mode 100644 index 0000000..679b840 --- /dev/null +++ b/dashboard/vite.config.ts @@ -0,0 +1,64 @@ +import path from 'path'; +import { defineConfig } from 'vite'; +import vue from '@vitejs/plugin-vue'; +import vueJsx from '@vitejs/plugin-vue-jsx'; +import jingrowui from 'jingrow-ui/vite'; +import pluginRewriteAll from 'vite-plugin-rewrite-all'; +import Components from 'unplugin-vue-components/vite'; +import Icons from 'unplugin-icons/vite'; +import IconsResolver from 'unplugin-icons/resolver'; +import { sentryVitePlugin } from '@sentry/vite-plugin'; + +export default defineConfig({ + plugins: [ + vue(), + vueJsx(), + pluginRewriteAll(), + jingrowui(), + Components({ + dirs: [ + 'src/components', + // 'src2/components', + 'node_modules/jingrow-ui/src/components' + ], + resolvers: [IconsResolver()] + }), + Icons(), + sentryVitePlugin({ + url: process.env.SENTRY_URL, + org: process.env.SENTRY_ORG, + project: process.env.SENTRY_PROJECT, + applicationKey: 'jcloud-dashboard', + authToken: process.env.SENTRY_AUTH_TOKEN + }) + ], + resolve: { + alias: { + '@': path.resolve(__dirname, 'src') + } + }, + optimizeDeps: { + include: ['feather-icons', 'showdown'] + }, + build: { + outDir: '../jcloud/public/dashboard', + emptyOutDir: true, + sourcemap: true, + target: 'es2015', + rollupOptions: { + input: { + main: path.resolve(__dirname, 'index.html') + } + } + }, + // @ts-ignore + test: { + globals: true, + environment: 'jsdom', + setupFiles: 'src/tests/setup/msw.js', + coverage: { + extension: ['.vue', '.js'], + all: true + } + } +}); diff --git a/dashboard/vite.config.ts.timestamp-1743195879566-0d6d78d20076a.mjs b/dashboard/vite.config.ts.timestamp-1743195879566-0d6d78d20076a.mjs new file mode 100644 index 0000000..ce9661e --- /dev/null +++ b/dashboard/vite.config.ts.timestamp-1743195879566-0d6d78d20076a.mjs @@ -0,0 +1,69 @@ +// vite.config.ts +import path from "path"; +import { defineConfig } from "file:///home/jingrow/jingrow-bench/apps/jcloud/dashboard/node_modules/vite/dist/node/index.js"; +import vue from "file:///home/jingrow/jingrow-bench/apps/jcloud/dashboard/node_modules/@vitejs/plugin-vue/dist/index.mjs"; +import vueJsx from "file:///home/jingrow/jingrow-bench/apps/jcloud/dashboard/node_modules/@vitejs/plugin-vue-jsx/dist/index.mjs"; +import jingrowui from "file:///home/jingrow/jingrow-bench/apps/jcloud/dashboard/node_modules/jingrow-ui/vite.js"; +import pluginRewriteAll from "file:///home/jingrow/jingrow-bench/apps/jcloud/dashboard/node_modules/vite-plugin-rewrite-all/dist/index.mjs"; +import Components from "file:///home/jingrow/jingrow-bench/apps/jcloud/dashboard/node_modules/unplugin-vue-components/dist/vite.mjs"; +import Icons from "file:///home/jingrow/jingrow-bench/apps/jcloud/dashboard/node_modules/unplugin-icons/dist/vite.mjs"; +import IconsResolver from "file:///home/jingrow/jingrow-bench/apps/jcloud/dashboard/node_modules/unplugin-icons/dist/resolver.mjs"; +import { sentryVitePlugin } from 
"file:///home/jingrow/jingrow-bench/apps/jcloud/dashboard/node_modules/@sentry/vite-plugin/dist/esm/index.mjs"; +var __vite_injected_original_dirname = "/home/jingrow/jingrow-bench/apps/jcloud/dashboard"; +var vite_config_default = defineConfig({ + plugins: [ + vue(), + vueJsx(), + pluginRewriteAll(), + jingrowui(), + Components({ + dirs: [ + "src/components", + // 'src2/components', + "node_modules/jingrow-ui/src/components" + ], + resolvers: [IconsResolver()] + }), + Icons(), + sentryVitePlugin({ + url: process.env.SENTRY_URL, + org: process.env.SENTRY_ORG, + project: process.env.SENTRY_PROJECT, + applicationKey: "jcloud-dashboard", + authToken: process.env.SENTRY_AUTH_TOKEN + }) + ], + resolve: { + alias: { + "@": path.resolve(__vite_injected_original_dirname, "src") + } + }, + optimizeDeps: { + include: ["feather-icons", "showdown"] + }, + build: { + outDir: "../jcloud/public/dashboard", + emptyOutDir: true, + sourcemap: true, + target: "es2015", + rollupOptions: { + input: { + main: path.resolve(__vite_injected_original_dirname, "index.html") + } + } + }, + // @ts-ignore + test: { + globals: true, + environment: "jsdom", + setupFiles: "src/tests/setup/msw.js", + coverage: { + extension: [".vue", ".js"], + all: true + } + } +}); +export { + vite_config_default as default +}; +//# sourceMappingURL=data:application/json;base64,ewogICJ2ZXJzaW9uIjogMywKICAic291cmNlcyI6IFsidml0ZS5jb25maWcudHMiXSwKICAic291cmNlc0NvbnRlbnQiOiBbImNvbnN0IF9fdml0ZV9pbmplY3RlZF9vcmlnaW5hbF9kaXJuYW1lID0gXCIvaG9tZS9mcmFwcGUvZnJhcHBlLWJlbmNoL2FwcHMvcHJlc3MvZGFzaGJvYXJkXCI7Y29uc3QgX192aXRlX2luamVjdGVkX29yaWdpbmFsX2ZpbGVuYW1lID0gXCIvaG9tZS9mcmFwcGUvZnJhcHBlLWJlbmNoL2FwcHMvcHJlc3MvZGFzaGJvYXJkL3ZpdGUuY29uZmlnLnRzXCI7Y29uc3QgX192aXRlX2luamVjdGVkX29yaWdpbmFsX2ltcG9ydF9tZXRhX3VybCA9IFwiZmlsZTovLy9ob21lL2ZyYXBwZS9mcmFwcGUtYmVuY2gvYXBwcy9wcmVzcy9kYXNoYm9hcmQvdml0ZS5jb25maWcudHNcIjtpbXBvcnQgcGF0aCBmcm9tICdwYXRoJztcclxuaW1wb3J0IHsgZGVmaW5lQ29uZmlnIH0gZnJvbSAndml0ZSc7XHJcbmltcG9ydCB2dWUgZnJvbSAnQHZpdGVqcy9wbHVnaW4tdnVlJztcclxuaW1wb3J0IHZ1ZUpzeCBmcm9tICdAdml0ZWpzL3BsdWdpbi12dWUtanN4JztcclxuaW1wb3J0IGZyYXBwZXVpIGZyb20gJ2ZyYXBwZS11aS92aXRlJztcclxuaW1wb3J0IHBsdWdpblJld3JpdGVBbGwgZnJvbSAndml0ZS1wbHVnaW4tcmV3cml0ZS1hbGwnO1xyXG5pbXBvcnQgQ29tcG9uZW50cyBmcm9tICd1bnBsdWdpbi12dWUtY29tcG9uZW50cy92aXRlJztcclxuaW1wb3J0IEljb25zIGZyb20gJ3VucGx1Z2luLWljb25zL3ZpdGUnO1xyXG5pbXBvcnQgSWNvbnNSZXNvbHZlciBmcm9tICd1bnBsdWdpbi1pY29ucy9yZXNvbHZlcic7XHJcbmltcG9ydCB7IHNlbnRyeVZpdGVQbHVnaW4gfSBmcm9tICdAc2VudHJ5L3ZpdGUtcGx1Z2luJztcclxuXHJcbmV4cG9ydCBkZWZhdWx0IGRlZmluZUNvbmZpZyh7XHJcblx0cGx1Z2luczogW1xyXG5cdFx0dnVlKCksXHJcblx0XHR2dWVKc3goKSxcclxuXHRcdHBsdWdpblJld3JpdGVBbGwoKSxcclxuXHRcdGZyYXBwZXVpKCksXHJcblx0XHRDb21wb25lbnRzKHtcclxuXHRcdFx0ZGlyczogW1xyXG5cdFx0XHRcdCdzcmMvY29tcG9uZW50cycsXHJcblx0XHRcdFx0Ly8gJ3NyYzIvY29tcG9uZW50cycsXHJcblx0XHRcdFx0J25vZGVfbW9kdWxlcy9mcmFwcGUtdWkvc3JjL2NvbXBvbmVudHMnXHJcblx0XHRcdF0sXHJcblx0XHRcdHJlc29sdmVyczogW0ljb25zUmVzb2x2ZXIoKV1cclxuXHRcdH0pLFxyXG5cdFx0SWNvbnMoKSxcclxuXHRcdHNlbnRyeVZpdGVQbHVnaW4oe1xyXG5cdFx0XHR1cmw6IHByb2Nlc3MuZW52LlNFTlRSWV9VUkwsXHJcblx0XHRcdG9yZzogcHJvY2Vzcy5lbnYuU0VOVFJZX09SRyxcclxuXHRcdFx0cHJvamVjdDogcHJvY2Vzcy5lbnYuU0VOVFJZX1BST0pFQ1QsXHJcblx0XHRcdGFwcGxpY2F0aW9uS2V5OiAncHJlc3MtZGFzaGJvYXJkJyxcclxuXHRcdFx0YXV0aFRva2VuOiBwcm9jZXNzLmVudi5TRU5UUllfQVVUSF9UT0tFTlxyXG5cdFx0fSlcclxuXHRdLFxyXG5cdHJlc29sdmU6IHtcclxuXHRcdGFsaWFzOiB7XHJcblx0XHRcdCdAJzogcGF0aC5yZXNvbHZlKF9fZGlybmFtZSwgJ3NyYycpXHJcblx0XHR9XHJcblx0fSxcclxuXHRvcHRpbWl6ZURlcHM6IHtcclxuXHRcdGluY2x1ZGU6IFsnZmVhdGhlci1pY29ucycsICdzaG93ZG93biddXHJcblx0fSxc
clxuXHRidWlsZDoge1xyXG5cdFx0b3V0RGlyOiAnLi4vcHJlc3MvcHVibGljL2Rhc2hib2FyZCcsXHJcblx0XHRlbXB0eU91dERpcjogdHJ1ZSxcclxuXHRcdHNvdXJjZW1hcDogdHJ1ZSxcclxuXHRcdHRhcmdldDogJ2VzMjAxNScsXHJcblx0XHRyb2xsdXBPcHRpb25zOiB7XHJcblx0XHRcdGlucHV0OiB7XHJcblx0XHRcdFx0bWFpbjogcGF0aC5yZXNvbHZlKF9fZGlybmFtZSwgJ2luZGV4Lmh0bWwnKVxyXG5cdFx0XHR9XHJcblx0XHR9XHJcblx0fSxcclxuXHQvLyBAdHMtaWdub3JlXHJcblx0dGVzdDoge1xyXG5cdFx0Z2xvYmFsczogdHJ1ZSxcclxuXHRcdGVudmlyb25tZW50OiAnanNkb20nLFxyXG5cdFx0c2V0dXBGaWxlczogJ3NyYy90ZXN0cy9zZXR1cC9tc3cuanMnLFxyXG5cdFx0Y292ZXJhZ2U6IHtcclxuXHRcdFx0ZXh0ZW5zaW9uOiBbJy52dWUnLCAnLmpzJ10sXHJcblx0XHRcdGFsbDogdHJ1ZVxyXG5cdFx0fVxyXG5cdH1cclxufSk7XHJcbiJdLAogICJtYXBwaW5ncyI6ICI7QUFBNFQsT0FBTyxVQUFVO0FBQzdVLFNBQVMsb0JBQW9CO0FBQzdCLE9BQU8sU0FBUztBQUNoQixPQUFPLFlBQVk7QUFDbkIsT0FBTyxjQUFjO0FBQ3JCLE9BQU8sc0JBQXNCO0FBQzdCLE9BQU8sZ0JBQWdCO0FBQ3ZCLE9BQU8sV0FBVztBQUNsQixPQUFPLG1CQUFtQjtBQUMxQixTQUFTLHdCQUF3QjtBQVRqQyxJQUFNLG1DQUFtQztBQVd6QyxJQUFPLHNCQUFRLGFBQWE7QUFBQSxFQUMzQixTQUFTO0FBQUEsSUFDUixJQUFJO0FBQUEsSUFDSixPQUFPO0FBQUEsSUFDUCxpQkFBaUI7QUFBQSxJQUNqQixTQUFTO0FBQUEsSUFDVCxXQUFXO0FBQUEsTUFDVixNQUFNO0FBQUEsUUFDTDtBQUFBO0FBQUEsUUFFQTtBQUFBLE1BQ0Q7QUFBQSxNQUNBLFdBQVcsQ0FBQyxjQUFjLENBQUM7QUFBQSxJQUM1QixDQUFDO0FBQUEsSUFDRCxNQUFNO0FBQUEsSUFDTixpQkFBaUI7QUFBQSxNQUNoQixLQUFLLFFBQVEsSUFBSTtBQUFBLE1BQ2pCLEtBQUssUUFBUSxJQUFJO0FBQUEsTUFDakIsU0FBUyxRQUFRLElBQUk7QUFBQSxNQUNyQixnQkFBZ0I7QUFBQSxNQUNoQixXQUFXLFFBQVEsSUFBSTtBQUFBLElBQ3hCLENBQUM7QUFBQSxFQUNGO0FBQUEsRUFDQSxTQUFTO0FBQUEsSUFDUixPQUFPO0FBQUEsTUFDTixLQUFLLEtBQUssUUFBUSxrQ0FBVyxLQUFLO0FBQUEsSUFDbkM7QUFBQSxFQUNEO0FBQUEsRUFDQSxjQUFjO0FBQUEsSUFDYixTQUFTLENBQUMsaUJBQWlCLFVBQVU7QUFBQSxFQUN0QztBQUFBLEVBQ0EsT0FBTztBQUFBLElBQ04sUUFBUTtBQUFBLElBQ1IsYUFBYTtBQUFBLElBQ2IsV0FBVztBQUFBLElBQ1gsUUFBUTtBQUFBLElBQ1IsZUFBZTtBQUFBLE1BQ2QsT0FBTztBQUFBLFFBQ04sTUFBTSxLQUFLLFFBQVEsa0NBQVcsWUFBWTtBQUFBLE1BQzNDO0FBQUEsSUFDRDtBQUFBLEVBQ0Q7QUFBQTtBQUFBLEVBRUEsTUFBTTtBQUFBLElBQ0wsU0FBUztBQUFBLElBQ1QsYUFBYTtBQUFBLElBQ2IsWUFBWTtBQUFBLElBQ1osVUFBVTtBQUFBLE1BQ1QsV0FBVyxDQUFDLFFBQVEsS0FBSztBQUFBLE1BQ3pCLEtBQUs7QUFBQSxJQUNOO0FBQUEsRUFDRDtBQUNELENBQUM7IiwKICAibmFtZXMiOiBbXQp9Cg== diff --git a/dashboard/yarn.lock b/dashboard/yarn.lock new file mode 100644 index 0000000..bd6d610 --- /dev/null +++ b/dashboard/yarn.lock @@ -0,0 +1,7322 @@ +# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY. 
+# yarn lockfile v1 + + +"@alloc/quick-lru@^5.2.0": + version "5.2.0" + resolved "https://registry.npmmirror.com/@alloc/quick-lru/-/quick-lru-5.2.0.tgz#7bf68b20c0a350f936915fcae06f58e32007ce30" + integrity sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw== + +"@ampproject/remapping@^2.2.0": + version "2.3.0" + resolved "https://registry.npmmirror.com/@ampproject/remapping/-/remapping-2.3.0.tgz#ed441b6fa600072520ce18b43d2c8cc8caecc7f4" + integrity sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw== + dependencies: + "@jridgewell/gen-mapping" "^0.3.5" + "@jridgewell/trace-mapping" "^0.3.24" + +"@antfu/install-pkg@^0.1.1": + version "0.1.1" + resolved "https://registry.npmmirror.com/@antfu/install-pkg/-/install-pkg-0.1.1.tgz#157bb04f0de8100b9e4c01734db1a6c77e98bbb5" + integrity sha512-LyB/8+bSfa0DFGC06zpCEfs89/XoWZwws5ygEa5D+Xsm3OfI+aXQ86VgVG7Acyef+rSZ5HE7J8rrxzrQeM3PjQ== + dependencies: + execa "^5.1.1" + find-up "^5.0.0" + +"@antfu/install-pkg@^1.0.0": + version "1.0.0" + resolved "https://registry.npmmirror.com/@antfu/install-pkg/-/install-pkg-1.0.0.tgz#2912a150fc8b35ec912f583f90074ee98f64d66a" + integrity sha512-xvX6P/lo1B3ej0OsaErAjqgFYzYVcJpamjLAFLYh9vRJngBrMoUG7aVnrGTeqM7yxbyTD5p3F2+0/QUEh8Vzhw== + dependencies: + package-manager-detector "^0.2.8" + tinyexec "^0.3.2" + +"@antfu/utils@^0.7.5", "@antfu/utils@^0.7.6": + version "0.7.10" + resolved "https://registry.npmmirror.com/@antfu/utils/-/utils-0.7.10.tgz#ae829f170158e297a9b6a28f161a8e487d00814d" + integrity sha512-+562v9k4aI80m1+VuMHehNJWLOFjBnXn3tdOitzD0il5b7smkSBal4+a3oKiQTbrwMmN/TBUMDvbdoWDehgOww== + +"@antfu/utils@^8.1.0": + version "8.1.1" + resolved "https://registry.npmmirror.com/@antfu/utils/-/utils-8.1.1.tgz#95b1947d292a9a2efffba2081796dcaa05ecedfb" + integrity sha512-Mex9nXf9vR6AhcXmMrlz/HVgYYZpVGJ6YlPgwl7UnaFpnshXs6EK/oa5Gpf3CzENMjkvEx2tQtntGnb7UtSTOQ== + +"@babel/code-frame@^7.0.0", "@babel/code-frame@^7.26.2": + version "7.26.2" + resolved "https://registry.npmmirror.com/@babel/code-frame/-/code-frame-7.26.2.tgz#4b5fab97d33338eff916235055f0ebc21e573a85" + integrity sha512-RJlIHRueQgwWitWgF8OdFYGZX328Ax5BCemNGlqHfplnRT9ESi8JkFlvaVYbS+UubVY6dpv87Fs2u5M29iNFVQ== + dependencies: + "@babel/helper-validator-identifier" "^7.25.9" + js-tokens "^4.0.0" + picocolors "^1.0.0" + +"@babel/compat-data@^7.22.6", "@babel/compat-data@^7.26.8": + version "7.26.8" + resolved "https://registry.npmmirror.com/@babel/compat-data/-/compat-data-7.26.8.tgz#821c1d35641c355284d4a870b8a4a7b0c141e367" + integrity sha512-oH5UPLMWR3L2wEFLnFJ1TZXqHufiTKAiLfqw5zkhS4dKXLJ10yVztfil/twG8EDTA4F/tvVNw9nOl4ZMslB8rQ== + +"@babel/core@^7.18.5", "@babel/core@^7.22.9", "@babel/core@^7.23.3": + version "7.26.10" + resolved "https://registry.npmmirror.com/@babel/core/-/core-7.26.10.tgz#5c876f83c8c4dcb233ee4b670c0606f2ac3000f9" + integrity sha512-vMqyb7XCDMPvJFFOaT9kxtiRh42GwlZEg1/uIgtZshS5a/8OaduUfCi7kynKgc3Tw/6Uo2D+db9qBttghhmxwQ== + dependencies: + "@ampproject/remapping" "^2.2.0" + "@babel/code-frame" "^7.26.2" + "@babel/generator" "^7.26.10" + "@babel/helper-compilation-targets" "^7.26.5" + "@babel/helper-module-transforms" "^7.26.0" + "@babel/helpers" "^7.26.10" + "@babel/parser" "^7.26.10" + "@babel/template" "^7.26.9" + "@babel/traverse" "^7.26.10" + "@babel/types" "^7.26.10" + convert-source-map "^2.0.0" + debug "^4.1.0" + gensync "^1.0.0-beta.2" + json5 "^2.2.3" + semver "^6.3.1" + +"@babel/generator@^7.26.10", "@babel/generator@^7.27.0": + version "7.27.0" + resolved 
"https://registry.npmmirror.com/@babel/generator/-/generator-7.27.0.tgz#764382b5392e5b9aff93cadb190d0745866cbc2c" + integrity sha512-VybsKvpiN1gU1sdMZIp7FcqphVVKEwcuj02x73uvcHE0PTihx1nlBcowYWhDwjpoAXRv43+gDzyggGnn1XZhVw== + dependencies: + "@babel/parser" "^7.27.0" + "@babel/types" "^7.27.0" + "@jridgewell/gen-mapping" "^0.3.5" + "@jridgewell/trace-mapping" "^0.3.25" + jsesc "^3.0.2" + +"@babel/helper-annotate-as-pure@^7.25.9": + version "7.25.9" + resolved "https://registry.npmmirror.com/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.25.9.tgz#d8eac4d2dc0d7b6e11fa6e535332e0d3184f06b4" + integrity sha512-gv7320KBUFJz1RnylIg5WWYPRXKZ884AGkYpgpWW02TH66Dl+HaC1t1CKd0z3R4b6hdYEcmrNZHUmfCP+1u3/g== + dependencies: + "@babel/types" "^7.25.9" + +"@babel/helper-compilation-targets@^7.22.6", "@babel/helper-compilation-targets@^7.25.9", "@babel/helper-compilation-targets@^7.26.5": + version "7.27.0" + resolved "https://registry.npmmirror.com/@babel/helper-compilation-targets/-/helper-compilation-targets-7.27.0.tgz#de0c753b1cd1d9ab55d473c5a5cf7170f0a81880" + integrity sha512-LVk7fbXml0H2xH34dFzKQ7TDZ2G4/rVTOrq9V+icbbadjbVxxeFeDsNHv2SrZeWoA+6ZiTyWYWtScEIW07EAcA== + dependencies: + "@babel/compat-data" "^7.26.8" + "@babel/helper-validator-option" "^7.25.9" + browserslist "^4.24.0" + lru-cache "^5.1.1" + semver "^6.3.1" + +"@babel/helper-create-class-features-plugin@^7.25.9", "@babel/helper-create-class-features-plugin@^7.27.0": + version "7.27.0" + resolved "https://registry.npmmirror.com/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.27.0.tgz#518fad6a307c6a96f44af14912b2c20abe9bfc30" + integrity sha512-vSGCvMecvFCd/BdpGlhpXYNhhC4ccxyvQWpbGL4CWbvfEoLFWUZuSuf7s9Aw70flgQF+6vptvgK2IfOnKlRmBg== + dependencies: + "@babel/helper-annotate-as-pure" "^7.25.9" + "@babel/helper-member-expression-to-functions" "^7.25.9" + "@babel/helper-optimise-call-expression" "^7.25.9" + "@babel/helper-replace-supers" "^7.26.5" + "@babel/helper-skip-transparent-expression-wrappers" "^7.25.9" + "@babel/traverse" "^7.27.0" + semver "^6.3.1" + +"@babel/helper-create-regexp-features-plugin@^7.18.6", "@babel/helper-create-regexp-features-plugin@^7.25.9": + version "7.27.0" + resolved "https://registry.npmmirror.com/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.27.0.tgz#0e41f7d38c2ebe06ebd9cf0e02fb26019c77cd95" + integrity sha512-fO8l08T76v48BhpNRW/nQ0MxfnSdoSKUJBMjubOAYffsVuGG5qOfMq7N6Es7UJvi7Y8goXXo07EfcHZXDPuELQ== + dependencies: + "@babel/helper-annotate-as-pure" "^7.25.9" + regexpu-core "^6.2.0" + semver "^6.3.1" + +"@babel/helper-define-polyfill-provider@^0.6.3", "@babel/helper-define-polyfill-provider@^0.6.4": + version "0.6.4" + resolved "https://registry.npmmirror.com/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.6.4.tgz#15e8746368bfa671785f5926ff74b3064c291fab" + integrity sha512-jljfR1rGnXXNWnmQg2K3+bvhkxB51Rl32QRaOTuwwjviGrHzIbSc8+x9CpraDtbT7mfyjXObULP4w/adunNwAw== + dependencies: + "@babel/helper-compilation-targets" "^7.22.6" + "@babel/helper-plugin-utils" "^7.22.5" + debug "^4.1.1" + lodash.debounce "^4.0.8" + resolve "^1.14.2" + +"@babel/helper-member-expression-to-functions@^7.25.9": + version "7.25.9" + resolved "https://registry.npmmirror.com/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.25.9.tgz#9dfffe46f727005a5ea29051ac835fb735e4c1a3" + integrity sha512-wbfdZ9w5vk0C0oyHqAJbc62+vet5prjj01jjJ8sKn3j9h3MQQlflEdXYvuqRWjHnM12coDEqiC1IRCi0U/EKwQ== + dependencies: + 
"@babel/traverse" "^7.25.9" + "@babel/types" "^7.25.9" + +"@babel/helper-module-imports@^7.25.9": + version "7.25.9" + resolved "https://registry.npmmirror.com/@babel/helper-module-imports/-/helper-module-imports-7.25.9.tgz#e7f8d20602ebdbf9ebbea0a0751fb0f2a4141715" + integrity sha512-tnUA4RsrmflIM6W6RFTLFSXITtl0wKjgpnLgXyowocVPrbYrLUXSBXDgTs8BlbmIzIdlBySRQjINYs2BAkiLtw== + dependencies: + "@babel/traverse" "^7.25.9" + "@babel/types" "^7.25.9" + +"@babel/helper-module-transforms@^7.25.9", "@babel/helper-module-transforms@^7.26.0": + version "7.26.0" + resolved "https://registry.npmmirror.com/@babel/helper-module-transforms/-/helper-module-transforms-7.26.0.tgz#8ce54ec9d592695e58d84cd884b7b5c6a2fdeeae" + integrity sha512-xO+xu6B5K2czEnQye6BHA7DolFFmS3LB7stHZFaOLb1pAwO1HWLS8fXA+eh0A2yIvltPVmx3eNNDBJA2SLHXFw== + dependencies: + "@babel/helper-module-imports" "^7.25.9" + "@babel/helper-validator-identifier" "^7.25.9" + "@babel/traverse" "^7.25.9" + +"@babel/helper-optimise-call-expression@^7.25.9": + version "7.25.9" + resolved "https://registry.npmmirror.com/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.25.9.tgz#3324ae50bae7e2ab3c33f60c9a877b6a0146b54e" + integrity sha512-FIpuNaz5ow8VyrYcnXQTDRGvV6tTjkNtCK/RYNDXGSLlUD6cBuQTSw43CShGxjvfBTfcUA/r6UhUCbtYqkhcuQ== + dependencies: + "@babel/types" "^7.25.9" + +"@babel/helper-plugin-utils@^7.0.0", "@babel/helper-plugin-utils@^7.18.6", "@babel/helper-plugin-utils@^7.22.5", "@babel/helper-plugin-utils@^7.25.9", "@babel/helper-plugin-utils@^7.26.5": + version "7.26.5" + resolved "https://registry.npmmirror.com/@babel/helper-plugin-utils/-/helper-plugin-utils-7.26.5.tgz#18580d00c9934117ad719392c4f6585c9333cc35" + integrity sha512-RS+jZcRdZdRFzMyr+wcsaqOmld1/EqTghfaBGQQd/WnRdzdlvSZ//kF7U8VQTxf1ynZ4cjUcYgjVGx13ewNPMg== + +"@babel/helper-remap-async-to-generator@^7.25.9": + version "7.25.9" + resolved "https://registry.npmmirror.com/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.25.9.tgz#e53956ab3d5b9fb88be04b3e2f31b523afd34b92" + integrity sha512-IZtukuUeBbhgOcaW2s06OXTzVNJR0ybm4W5xC1opWFFJMZbwRj5LCk+ByYH7WdZPZTt8KnFwA8pvjN2yqcPlgw== + dependencies: + "@babel/helper-annotate-as-pure" "^7.25.9" + "@babel/helper-wrap-function" "^7.25.9" + "@babel/traverse" "^7.25.9" + +"@babel/helper-replace-supers@^7.25.9", "@babel/helper-replace-supers@^7.26.5": + version "7.26.5" + resolved "https://registry.npmmirror.com/@babel/helper-replace-supers/-/helper-replace-supers-7.26.5.tgz#6cb04e82ae291dae8e72335dfe438b0725f14c8d" + integrity sha512-bJ6iIVdYX1YooY2X7w1q6VITt+LnUILtNk7zT78ykuwStx8BauCzxvFqFaHjOpW1bVnSUM1PN1f0p5P21wHxvg== + dependencies: + "@babel/helper-member-expression-to-functions" "^7.25.9" + "@babel/helper-optimise-call-expression" "^7.25.9" + "@babel/traverse" "^7.26.5" + +"@babel/helper-skip-transparent-expression-wrappers@^7.25.9": + version "7.25.9" + resolved "https://registry.npmmirror.com/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.25.9.tgz#0b2e1b62d560d6b1954893fd2b705dc17c91f0c9" + integrity sha512-K4Du3BFa3gvyhzgPcntrkDgZzQaq6uozzcpGbOO1OEJaI+EJdqWIMTLgFgQf6lrfiDFo5FU+BxKepI9RmZqahA== + dependencies: + "@babel/traverse" "^7.25.9" + "@babel/types" "^7.25.9" + +"@babel/helper-string-parser@^7.25.9": + version "7.25.9" + resolved "https://registry.npmmirror.com/@babel/helper-string-parser/-/helper-string-parser-7.25.9.tgz#1aabb72ee72ed35789b4bbcad3ca2862ce614e8c" + integrity 
sha512-4A/SCr/2KLd5jrtOMFzaKjVtAei3+2r/NChoBNoZ3EyP/+GlhoaEGoWOZUmFmoITP7zOJyHIMm+DYRd8o3PvHA== + +"@babel/helper-validator-identifier@^7.25.9": + version "7.25.9" + resolved "https://registry.npmmirror.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.25.9.tgz#24b64e2c3ec7cd3b3c547729b8d16871f22cbdc7" + integrity sha512-Ed61U6XJc3CVRfkERJWDz4dJwKe7iLmmJsbOGu9wSloNSFttHV0I8g6UAgb7qnK5ly5bGLPd4oXZlxCdANBOWQ== + +"@babel/helper-validator-option@^7.25.9": + version "7.25.9" + resolved "https://registry.npmmirror.com/@babel/helper-validator-option/-/helper-validator-option-7.25.9.tgz#86e45bd8a49ab7e03f276577f96179653d41da72" + integrity sha512-e/zv1co8pp55dNdEcCynfj9X7nyUKUXoUEwfXqaZt0omVOmDe9oOTdKStH4GmAw6zxMFs50ZayuMfHDKlO7Tfw== + +"@babel/helper-wrap-function@^7.25.9": + version "7.25.9" + resolved "https://registry.npmmirror.com/@babel/helper-wrap-function/-/helper-wrap-function-7.25.9.tgz#d99dfd595312e6c894bd7d237470025c85eea9d0" + integrity sha512-ETzz9UTjQSTmw39GboatdymDq4XIQbR8ySgVrylRhPOFpsd+JrKHIuF0de7GCWmem+T4uC5z7EZguod7Wj4A4g== + dependencies: + "@babel/template" "^7.25.9" + "@babel/traverse" "^7.25.9" + "@babel/types" "^7.25.9" + +"@babel/helpers@^7.26.10": + version "7.27.0" + resolved "https://registry.npmmirror.com/@babel/helpers/-/helpers-7.27.0.tgz#53d156098defa8243eab0f32fa17589075a1b808" + integrity sha512-U5eyP/CTFPuNE3qk+WZMxFkp/4zUzdceQlfzf7DdGdhp+Fezd7HD+i8Y24ZuTMKX3wQBld449jijbGq6OdGNQg== + dependencies: + "@babel/template" "^7.27.0" + "@babel/types" "^7.27.0" + +"@babel/parser@^7.25.3", "@babel/parser@^7.26.10", "@babel/parser@^7.26.9", "@babel/parser@^7.27.0", "@babel/parser@^7.7.0": + version "7.27.0" + resolved "https://registry.npmmirror.com/@babel/parser/-/parser-7.27.0.tgz#3d7d6ee268e41d2600091cbd4e145ffee85a44ec" + integrity sha512-iaepho73/2Pz7w2eMS0Q5f83+0RKI7i4xmiYeBmDzfRVbQtTOG7Ts0S4HzJVsTMGI9keU8rNfuZr8DKfSt7Yyg== + dependencies: + "@babel/types" "^7.27.0" + +"@babel/plugin-bugfix-firefox-class-in-computed-class-key@^7.25.9": + version "7.25.9" + resolved "https://registry.npmmirror.com/@babel/plugin-bugfix-firefox-class-in-computed-class-key/-/plugin-bugfix-firefox-class-in-computed-class-key-7.25.9.tgz#cc2e53ebf0a0340777fff5ed521943e253b4d8fe" + integrity sha512-ZkRyVkThtxQ/J6nv3JFYv1RYY+JT5BvU0y3k5bWrmuG4woXypRa4PXmm9RhOwodRkYFWqC0C0cqcJ4OqR7kW+g== + dependencies: + "@babel/helper-plugin-utils" "^7.25.9" + "@babel/traverse" "^7.25.9" + +"@babel/plugin-bugfix-safari-class-field-initializer-scope@^7.25.9": + version "7.25.9" + resolved "https://registry.npmmirror.com/@babel/plugin-bugfix-safari-class-field-initializer-scope/-/plugin-bugfix-safari-class-field-initializer-scope-7.25.9.tgz#af9e4fb63ccb8abcb92375b2fcfe36b60c774d30" + integrity sha512-MrGRLZxLD/Zjj0gdU15dfs+HH/OXvnw/U4jJD8vpcP2CJQapPEv1IWwjc/qMg7ItBlPwSv1hRBbb7LeuANdcnw== + dependencies: + "@babel/helper-plugin-utils" "^7.25.9" + +"@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression@^7.25.9": + version "7.25.9" + resolved "https://registry.npmmirror.com/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression/-/plugin-bugfix-safari-id-destructuring-collision-in-function-expression-7.25.9.tgz#e8dc26fcd616e6c5bf2bd0d5a2c151d4f92a9137" + integrity sha512-2qUwwfAFpJLZqxd02YW9btUCZHl+RFvdDkNfZwaIJrvB8Tesjsk8pEQkTvGwZXLqXUx/2oyY3ySRhm6HOXuCug== + dependencies: + "@babel/helper-plugin-utils" "^7.25.9" + +"@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining@^7.25.9": + version "7.25.9" + resolved 
"https://registry.npmmirror.com/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining/-/plugin-bugfix-v8-spread-parameters-in-optional-chaining-7.25.9.tgz#807a667f9158acac6f6164b4beb85ad9ebc9e1d1" + integrity sha512-6xWgLZTJXwilVjlnV7ospI3xi+sl8lN8rXXbBD6vYn3UYDlGsag8wrZkKcSI8G6KgqKP7vNFaDgeDnfAABq61g== + dependencies: + "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-skip-transparent-expression-wrappers" "^7.25.9" + "@babel/plugin-transform-optional-chaining" "^7.25.9" + +"@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly@^7.25.9": + version "7.25.9" + resolved "https://registry.npmmirror.com/@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly/-/plugin-bugfix-v8-static-class-fields-redefine-readonly-7.25.9.tgz#de7093f1e7deaf68eadd7cc6b07f2ab82543269e" + integrity sha512-aLnMXYPnzwwqhYSCyXfKkIkYgJ8zv9RK+roo9DkTXz38ynIhd9XCbN08s3MGvqL2MYGVUGdRQLL/JqBIeJhJBg== + dependencies: + "@babel/helper-plugin-utils" "^7.25.9" + "@babel/traverse" "^7.25.9" + +"@babel/plugin-proposal-private-property-in-object@7.21.0-placeholder-for-preset-env.2": + version "7.21.0-placeholder-for-preset-env.2" + resolved "https://registry.npmmirror.com/@babel/plugin-proposal-private-property-in-object/-/plugin-proposal-private-property-in-object-7.21.0-placeholder-for-preset-env.2.tgz#7844f9289546efa9febac2de4cfe358a050bd703" + integrity sha512-SOSkfJDddaM7mak6cPEpswyTRnuRltl429hMraQEglW+OkovnCzsiszTmsrlY//qLFjCpQDFRvjdm2wA5pPm9w== + +"@babel/plugin-syntax-import-assertions@^7.26.0": + version "7.26.0" + resolved "https://registry.npmmirror.com/@babel/plugin-syntax-import-assertions/-/plugin-syntax-import-assertions-7.26.0.tgz#620412405058efa56e4a564903b79355020f445f" + integrity sha512-QCWT5Hh830hK5EQa7XzuqIkQU9tT/whqbDz7kuaZMHFl1inRRg7JnuAEOQ0Ur0QUl0NufCk1msK2BeY79Aj/eg== + dependencies: + "@babel/helper-plugin-utils" "^7.25.9" + +"@babel/plugin-syntax-import-attributes@^7.26.0": + version "7.26.0" + resolved "https://registry.npmmirror.com/@babel/plugin-syntax-import-attributes/-/plugin-syntax-import-attributes-7.26.0.tgz#3b1412847699eea739b4f2602c74ce36f6b0b0f7" + integrity sha512-e2dttdsJ1ZTpi3B9UYGLw41hifAubg19AtCu/2I/F1QNVclOBr1dYpTdmdyZ84Xiz43BS/tCUkMAZNLv12Pi+A== + dependencies: + "@babel/helper-plugin-utils" "^7.25.9" + +"@babel/plugin-syntax-jsx@^7.25.9": + version "7.25.9" + resolved "https://registry.npmmirror.com/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.25.9.tgz#a34313a178ea56f1951599b929c1ceacee719290" + integrity sha512-ld6oezHQMZsZfp6pWtbjaNDF2tiiCYYDqQszHt5VV437lewP9aSi2Of99CK0D0XB21k7FLgnLcmQKyKzynfeAA== + dependencies: + "@babel/helper-plugin-utils" "^7.25.9" + +"@babel/plugin-syntax-typescript@^7.25.9": + version "7.25.9" + resolved "https://registry.npmmirror.com/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.25.9.tgz#67dda2b74da43727cf21d46cf9afef23f4365399" + integrity sha512-hjMgRy5hb8uJJjUcdWunWVcoi9bGpJp8p5Ol1229PoN6aytsLwNMgmdftO23wnCLMfVmTwZDWMPNq/D1SY60JQ== + dependencies: + "@babel/helper-plugin-utils" "^7.25.9" + +"@babel/plugin-syntax-unicode-sets-regex@^7.18.6": + version "7.18.6" + resolved "https://registry.npmmirror.com/@babel/plugin-syntax-unicode-sets-regex/-/plugin-syntax-unicode-sets-regex-7.18.6.tgz#d49a3b3e6b52e5be6740022317580234a6a47357" + integrity sha512-727YkEAPwSIQTv5im8QHz3upqp92JTWhidIC81Tdx4VJYIte/VndKf1qKrfnnhPLiPghStWfvC/iFaMCQu7Nqg== + dependencies: + "@babel/helper-create-regexp-features-plugin" "^7.18.6" + "@babel/helper-plugin-utils" "^7.18.6" + +"@babel/plugin-transform-arrow-functions@^7.25.9": + 
version "7.25.9" + resolved "https://registry.npmmirror.com/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.25.9.tgz#7821d4410bee5daaadbb4cdd9a6649704e176845" + integrity sha512-6jmooXYIwn9ca5/RylZADJ+EnSxVUS5sjeJ9UPk6RWRzXCmOJCy6dqItPJFpw2cuCangPK4OYr5uhGKcmrm5Qg== + dependencies: + "@babel/helper-plugin-utils" "^7.25.9" + +"@babel/plugin-transform-async-generator-functions@^7.26.8": + version "7.26.8" + resolved "https://registry.npmmirror.com/@babel/plugin-transform-async-generator-functions/-/plugin-transform-async-generator-functions-7.26.8.tgz#5e3991135e3b9c6eaaf5eff56d1ae5a11df45ff8" + integrity sha512-He9Ej2X7tNf2zdKMAGOsmg2MrFc+hfoAhd3po4cWfo/NWjzEAKa0oQruj1ROVUdl0e6fb6/kE/G3SSxE0lRJOg== + dependencies: + "@babel/helper-plugin-utils" "^7.26.5" + "@babel/helper-remap-async-to-generator" "^7.25.9" + "@babel/traverse" "^7.26.8" + +"@babel/plugin-transform-async-to-generator@^7.25.9": + version "7.25.9" + resolved "https://registry.npmmirror.com/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.25.9.tgz#c80008dacae51482793e5a9c08b39a5be7e12d71" + integrity sha512-NT7Ejn7Z/LjUH0Gv5KsBCxh7BH3fbLTV0ptHvpeMvrt3cPThHfJfst9Wrb7S8EvJ7vRTFI7z+VAvFVEQn/m5zQ== + dependencies: + "@babel/helper-module-imports" "^7.25.9" + "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-remap-async-to-generator" "^7.25.9" + +"@babel/plugin-transform-block-scoped-functions@^7.26.5": + version "7.26.5" + resolved "https://registry.npmmirror.com/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.26.5.tgz#3dc4405d31ad1cbe45293aa57205a6e3b009d53e" + integrity sha512-chuTSY+hq09+/f5lMj8ZSYgCFpppV2CbYrhNFJ1BFoXpiWPnnAb7R0MqrafCpN8E1+YRrtM1MXZHJdIx8B6rMQ== + dependencies: + "@babel/helper-plugin-utils" "^7.26.5" + +"@babel/plugin-transform-block-scoping@^7.25.9": + version "7.27.0" + resolved "https://registry.npmmirror.com/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.27.0.tgz#acc2c0d98a7439bbde4244588ddbd4904701d47f" + integrity sha512-u1jGphZ8uDI2Pj/HJj6YQ6XQLZCNjOlprjxB5SVz6rq2T6SwAR+CdrWK0CP7F+9rDVMXdB0+r6Am5G5aobOjAQ== + dependencies: + "@babel/helper-plugin-utils" "^7.26.5" + +"@babel/plugin-transform-class-properties@^7.25.9": + version "7.25.9" + resolved "https://registry.npmmirror.com/@babel/plugin-transform-class-properties/-/plugin-transform-class-properties-7.25.9.tgz#a8ce84fedb9ad512549984101fa84080a9f5f51f" + integrity sha512-bbMAII8GRSkcd0h0b4X+36GksxuheLFjP65ul9w6C3KgAamI3JqErNgSrosX6ZPj+Mpim5VvEbawXxJCyEUV3Q== + dependencies: + "@babel/helper-create-class-features-plugin" "^7.25.9" + "@babel/helper-plugin-utils" "^7.25.9" + +"@babel/plugin-transform-class-static-block@^7.26.0": + version "7.26.0" + resolved "https://registry.npmmirror.com/@babel/plugin-transform-class-static-block/-/plugin-transform-class-static-block-7.26.0.tgz#6c8da219f4eb15cae9834ec4348ff8e9e09664a0" + integrity sha512-6J2APTs7BDDm+UMqP1useWqhcRAXo0WIoVj26N7kPFB6S73Lgvyka4KTZYIxtgYXiN5HTyRObA72N2iu628iTQ== + dependencies: + "@babel/helper-create-class-features-plugin" "^7.25.9" + "@babel/helper-plugin-utils" "^7.25.9" + +"@babel/plugin-transform-classes@^7.25.9": + version "7.25.9" + resolved "https://registry.npmmirror.com/@babel/plugin-transform-classes/-/plugin-transform-classes-7.25.9.tgz#7152457f7880b593a63ade8a861e6e26a4469f52" + integrity sha512-mD8APIXmseE7oZvZgGABDyM34GUmK45Um2TXiBUt7PnuAxrgoSVf123qUzPxEr/+/BHrRn5NMZCdE2m/1F8DGg== + dependencies: + "@babel/helper-annotate-as-pure" "^7.25.9" 
+ "@babel/helper-compilation-targets" "^7.25.9" + "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-replace-supers" "^7.25.9" + "@babel/traverse" "^7.25.9" + globals "^11.1.0" + +"@babel/plugin-transform-computed-properties@^7.25.9": + version "7.25.9" + resolved "https://registry.npmmirror.com/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.25.9.tgz#db36492c78460e534b8852b1d5befe3c923ef10b" + integrity sha512-HnBegGqXZR12xbcTHlJ9HGxw1OniltT26J5YpfruGqtUHlz/xKf/G2ak9e+t0rVqrjXa9WOhvYPz1ERfMj23AA== + dependencies: + "@babel/helper-plugin-utils" "^7.25.9" + "@babel/template" "^7.25.9" + +"@babel/plugin-transform-destructuring@^7.25.9": + version "7.25.9" + resolved "https://registry.npmmirror.com/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.25.9.tgz#966ea2595c498224340883602d3cfd7a0c79cea1" + integrity sha512-WkCGb/3ZxXepmMiX101nnGiU+1CAdut8oHyEOHxkKuS1qKpU2SMXE2uSvfz8PBuLd49V6LEsbtyPhWC7fnkgvQ== + dependencies: + "@babel/helper-plugin-utils" "^7.25.9" + +"@babel/plugin-transform-dotall-regex@^7.25.9": + version "7.25.9" + resolved "https://registry.npmmirror.com/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.25.9.tgz#bad7945dd07734ca52fe3ad4e872b40ed09bb09a" + integrity sha512-t7ZQ7g5trIgSRYhI9pIJtRl64KHotutUJsh4Eze5l7olJv+mRSg4/MmbZ0tv1eeqRbdvo/+trvJD/Oc5DmW2cA== + dependencies: + "@babel/helper-create-regexp-features-plugin" "^7.25.9" + "@babel/helper-plugin-utils" "^7.25.9" + +"@babel/plugin-transform-duplicate-keys@^7.25.9": + version "7.25.9" + resolved "https://registry.npmmirror.com/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.25.9.tgz#8850ddf57dce2aebb4394bb434a7598031059e6d" + integrity sha512-LZxhJ6dvBb/f3x8xwWIuyiAHy56nrRG3PeYTpBkkzkYRRQ6tJLu68lEF5VIqMUZiAV7a8+Tb78nEoMCMcqjXBw== + dependencies: + "@babel/helper-plugin-utils" "^7.25.9" + +"@babel/plugin-transform-duplicate-named-capturing-groups-regex@^7.25.9": + version "7.25.9" + resolved "https://registry.npmmirror.com/@babel/plugin-transform-duplicate-named-capturing-groups-regex/-/plugin-transform-duplicate-named-capturing-groups-regex-7.25.9.tgz#6f7259b4de127721a08f1e5165b852fcaa696d31" + integrity sha512-0UfuJS0EsXbRvKnwcLjFtJy/Sxc5J5jhLHnFhy7u4zih97Hz6tJkLU+O+FMMrNZrosUPxDi6sYxJ/EA8jDiAog== + dependencies: + "@babel/helper-create-regexp-features-plugin" "^7.25.9" + "@babel/helper-plugin-utils" "^7.25.9" + +"@babel/plugin-transform-dynamic-import@^7.25.9": + version "7.25.9" + resolved "https://registry.npmmirror.com/@babel/plugin-transform-dynamic-import/-/plugin-transform-dynamic-import-7.25.9.tgz#23e917de63ed23c6600c5dd06d94669dce79f7b8" + integrity sha512-GCggjexbmSLaFhqsojeugBpeaRIgWNTcgKVq/0qIteFEqY2A+b9QidYadrWlnbWQUrW5fn+mCvf3tr7OeBFTyg== + dependencies: + "@babel/helper-plugin-utils" "^7.25.9" + +"@babel/plugin-transform-exponentiation-operator@^7.26.3": + version "7.26.3" + resolved "https://registry.npmmirror.com/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.26.3.tgz#e29f01b6de302c7c2c794277a48f04a9ca7f03bc" + integrity sha512-7CAHcQ58z2chuXPWblnn1K6rLDnDWieghSOEmqQsrBenH0P9InCUtOJYD89pvngljmZlJcz3fcmgYsXFNGa1ZQ== + dependencies: + "@babel/helper-plugin-utils" "^7.25.9" + +"@babel/plugin-transform-export-namespace-from@^7.25.9": + version "7.25.9" + resolved "https://registry.npmmirror.com/@babel/plugin-transform-export-namespace-from/-/plugin-transform-export-namespace-from-7.25.9.tgz#90745fe55053394f554e40584cda81f2c8a402a2" + integrity 
sha512-2NsEz+CxzJIVOPx2o9UsW1rXLqtChtLoVnwYHHiB04wS5sgn7mrV45fWMBX0Kk+ub9uXytVYfNP2HjbVbCB3Ww== + dependencies: + "@babel/helper-plugin-utils" "^7.25.9" + +"@babel/plugin-transform-for-of@^7.26.9": + version "7.26.9" + resolved "https://registry.npmmirror.com/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.26.9.tgz#27231f79d5170ef33b5111f07fe5cafeb2c96a56" + integrity sha512-Hry8AusVm8LW5BVFgiyUReuoGzPUpdHQQqJY5bZnbbf+ngOHWuCuYFKw/BqaaWlvEUrF91HMhDtEaI1hZzNbLg== + dependencies: + "@babel/helper-plugin-utils" "^7.26.5" + "@babel/helper-skip-transparent-expression-wrappers" "^7.25.9" + +"@babel/plugin-transform-function-name@^7.25.9": + version "7.25.9" + resolved "https://registry.npmmirror.com/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.25.9.tgz#939d956e68a606661005bfd550c4fc2ef95f7b97" + integrity sha512-8lP+Yxjv14Vc5MuWBpJsoUCd3hD6V9DgBon2FVYL4jJgbnVQ9fTgYmonchzZJOVNgzEgbxp4OwAf6xz6M/14XA== + dependencies: + "@babel/helper-compilation-targets" "^7.25.9" + "@babel/helper-plugin-utils" "^7.25.9" + "@babel/traverse" "^7.25.9" + +"@babel/plugin-transform-json-strings@^7.25.9": + version "7.25.9" + resolved "https://registry.npmmirror.com/@babel/plugin-transform-json-strings/-/plugin-transform-json-strings-7.25.9.tgz#c86db407cb827cded902a90c707d2781aaa89660" + integrity sha512-xoTMk0WXceiiIvsaquQQUaLLXSW1KJ159KP87VilruQm0LNNGxWzahxSS6T6i4Zg3ezp4vA4zuwiNUR53qmQAw== + dependencies: + "@babel/helper-plugin-utils" "^7.25.9" + +"@babel/plugin-transform-literals@^7.25.9": + version "7.25.9" + resolved "https://registry.npmmirror.com/@babel/plugin-transform-literals/-/plugin-transform-literals-7.25.9.tgz#1a1c6b4d4aa59bc4cad5b6b3a223a0abd685c9de" + integrity sha512-9N7+2lFziW8W9pBl2TzaNht3+pgMIRP74zizeCSrtnSKVdUl8mAjjOP2OOVQAfZ881P2cNjDj1uAMEdeD50nuQ== + dependencies: + "@babel/helper-plugin-utils" "^7.25.9" + +"@babel/plugin-transform-logical-assignment-operators@^7.25.9": + version "7.25.9" + resolved "https://registry.npmmirror.com/@babel/plugin-transform-logical-assignment-operators/-/plugin-transform-logical-assignment-operators-7.25.9.tgz#b19441a8c39a2fda0902900b306ea05ae1055db7" + integrity sha512-wI4wRAzGko551Y8eVf6iOY9EouIDTtPb0ByZx+ktDGHwv6bHFimrgJM/2T021txPZ2s4c7bqvHbd+vXG6K948Q== + dependencies: + "@babel/helper-plugin-utils" "^7.25.9" + +"@babel/plugin-transform-member-expression-literals@^7.25.9": + version "7.25.9" + resolved "https://registry.npmmirror.com/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.25.9.tgz#63dff19763ea64a31f5e6c20957e6a25e41ed5de" + integrity sha512-PYazBVfofCQkkMzh2P6IdIUaCEWni3iYEerAsRWuVd8+jlM1S9S9cz1dF9hIzyoZ8IA3+OwVYIp9v9e+GbgZhA== + dependencies: + "@babel/helper-plugin-utils" "^7.25.9" + +"@babel/plugin-transform-modules-amd@^7.25.9": + version "7.25.9" + resolved "https://registry.npmmirror.com/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.25.9.tgz#49ba478f2295101544abd794486cd3088dddb6c5" + integrity sha512-g5T11tnI36jVClQlMlt4qKDLlWnG5pP9CSM4GhdRciTNMRgkfpo5cR6b4rGIOYPgRRuFAvwjPQ/Yk+ql4dyhbw== + dependencies: + "@babel/helper-module-transforms" "^7.25.9" + "@babel/helper-plugin-utils" "^7.25.9" + +"@babel/plugin-transform-modules-commonjs@^7.26.3": + version "7.26.3" + resolved "https://registry.npmmirror.com/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.26.3.tgz#8f011d44b20d02c3de44d8850d971d8497f981fb" + integrity 
sha512-MgR55l4q9KddUDITEzEFYn5ZsGDXMSsU9E+kh7fjRXTIC3RHqfCo8RPRbyReYJh44HQ/yomFkqbOFohXvDCiIQ== + dependencies: + "@babel/helper-module-transforms" "^7.26.0" + "@babel/helper-plugin-utils" "^7.25.9" + +"@babel/plugin-transform-modules-systemjs@^7.25.9": + version "7.25.9" + resolved "https://registry.npmmirror.com/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.25.9.tgz#8bd1b43836269e3d33307151a114bcf3ba6793f8" + integrity sha512-hyss7iIlH/zLHaehT+xwiymtPOpsiwIIRlCAOwBB04ta5Tt+lNItADdlXw3jAWZ96VJ2jlhl/c+PNIQPKNfvcA== + dependencies: + "@babel/helper-module-transforms" "^7.25.9" + "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-validator-identifier" "^7.25.9" + "@babel/traverse" "^7.25.9" + +"@babel/plugin-transform-modules-umd@^7.25.9": + version "7.25.9" + resolved "https://registry.npmmirror.com/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.25.9.tgz#6710079cdd7c694db36529a1e8411e49fcbf14c9" + integrity sha512-bS9MVObUgE7ww36HEfwe6g9WakQ0KF07mQF74uuXdkoziUPfKyu/nIm663kz//e5O1nPInPFx36z7WJmJ4yNEw== + dependencies: + "@babel/helper-module-transforms" "^7.25.9" + "@babel/helper-plugin-utils" "^7.25.9" + +"@babel/plugin-transform-named-capturing-groups-regex@^7.25.9": + version "7.25.9" + resolved "https://registry.npmmirror.com/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.25.9.tgz#454990ae6cc22fd2a0fa60b3a2c6f63a38064e6a" + integrity sha512-oqB6WHdKTGl3q/ItQhpLSnWWOpjUJLsOCLVyeFgeTktkBSCiurvPOsyt93gibI9CmuKvTUEtWmG5VhZD+5T/KA== + dependencies: + "@babel/helper-create-regexp-features-plugin" "^7.25.9" + "@babel/helper-plugin-utils" "^7.25.9" + +"@babel/plugin-transform-new-target@^7.25.9": + version "7.25.9" + resolved "https://registry.npmmirror.com/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.25.9.tgz#42e61711294b105c248336dcb04b77054ea8becd" + integrity sha512-U/3p8X1yCSoKyUj2eOBIx3FOn6pElFOKvAAGf8HTtItuPyB+ZeOqfn+mvTtg9ZlOAjsPdK3ayQEjqHjU/yLeVQ== + dependencies: + "@babel/helper-plugin-utils" "^7.25.9" + +"@babel/plugin-transform-nullish-coalescing-operator@^7.26.6": + version "7.26.6" + resolved "https://registry.npmmirror.com/@babel/plugin-transform-nullish-coalescing-operator/-/plugin-transform-nullish-coalescing-operator-7.26.6.tgz#fbf6b3c92cb509e7b319ee46e3da89c5bedd31fe" + integrity sha512-CKW8Vu+uUZneQCPtXmSBUC6NCAUdya26hWCElAWh5mVSlSRsmiCPUUDKb3Z0szng1hiAJa098Hkhg9o4SE35Qw== + dependencies: + "@babel/helper-plugin-utils" "^7.26.5" + +"@babel/plugin-transform-numeric-separator@^7.25.9": + version "7.25.9" + resolved "https://registry.npmmirror.com/@babel/plugin-transform-numeric-separator/-/plugin-transform-numeric-separator-7.25.9.tgz#bfed75866261a8b643468b0ccfd275f2033214a1" + integrity sha512-TlprrJ1GBZ3r6s96Yq8gEQv82s8/5HnCVHtEJScUj90thHQbwe+E5MLhi2bbNHBEJuzrvltXSru+BUxHDoog7Q== + dependencies: + "@babel/helper-plugin-utils" "^7.25.9" + +"@babel/plugin-transform-object-rest-spread@^7.25.9": + version "7.25.9" + resolved "https://registry.npmmirror.com/@babel/plugin-transform-object-rest-spread/-/plugin-transform-object-rest-spread-7.25.9.tgz#0203725025074164808bcf1a2cfa90c652c99f18" + integrity sha512-fSaXafEE9CVHPweLYw4J0emp1t8zYTXyzN3UuG+lylqkvYd7RMrsOQ8TYx5RF231be0vqtFC6jnx3UmpJmKBYg== + dependencies: + "@babel/helper-compilation-targets" "^7.25.9" + "@babel/helper-plugin-utils" "^7.25.9" + "@babel/plugin-transform-parameters" "^7.25.9" + +"@babel/plugin-transform-object-super@^7.25.9": + version "7.25.9" + resolved 
"https://registry.npmmirror.com/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.25.9.tgz#385d5de135162933beb4a3d227a2b7e52bb4cf03" + integrity sha512-Kj/Gh+Rw2RNLbCK1VAWj2U48yxxqL2x0k10nPtSdRa0O2xnHXalD0s+o1A6a0W43gJ00ANo38jxkQreckOzv5A== + dependencies: + "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-replace-supers" "^7.25.9" + +"@babel/plugin-transform-optional-catch-binding@^7.25.9": + version "7.25.9" + resolved "https://registry.npmmirror.com/@babel/plugin-transform-optional-catch-binding/-/plugin-transform-optional-catch-binding-7.25.9.tgz#10e70d96d52bb1f10c5caaac59ac545ea2ba7ff3" + integrity sha512-qM/6m6hQZzDcZF3onzIhZeDHDO43bkNNlOX0i8n3lR6zLbu0GN2d8qfM/IERJZYauhAHSLHy39NF0Ctdvcid7g== + dependencies: + "@babel/helper-plugin-utils" "^7.25.9" + +"@babel/plugin-transform-optional-chaining@^7.25.9": + version "7.25.9" + resolved "https://registry.npmmirror.com/@babel/plugin-transform-optional-chaining/-/plugin-transform-optional-chaining-7.25.9.tgz#e142eb899d26ef715435f201ab6e139541eee7dd" + integrity sha512-6AvV0FsLULbpnXeBjrY4dmWF8F7gf8QnvTEoO/wX/5xm/xE1Xo8oPuD3MPS+KS9f9XBEAWN7X1aWr4z9HdOr7A== + dependencies: + "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-skip-transparent-expression-wrappers" "^7.25.9" + +"@babel/plugin-transform-parameters@^7.25.9": + version "7.25.9" + resolved "https://registry.npmmirror.com/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.25.9.tgz#b856842205b3e77e18b7a7a1b94958069c7ba257" + integrity sha512-wzz6MKwpnshBAiRmn4jR8LYz/g8Ksg0o80XmwZDlordjwEk9SxBzTWC7F5ef1jhbrbOW2DJ5J6ayRukrJmnr0g== + dependencies: + "@babel/helper-plugin-utils" "^7.25.9" + +"@babel/plugin-transform-private-methods@^7.25.9": + version "7.25.9" + resolved "https://registry.npmmirror.com/@babel/plugin-transform-private-methods/-/plugin-transform-private-methods-7.25.9.tgz#847f4139263577526455d7d3223cd8bda51e3b57" + integrity sha512-D/JUozNpQLAPUVusvqMxyvjzllRaF8/nSrP1s2YGQT/W4LHK4xxsMcHjhOGTS01mp9Hda8nswb+FblLdJornQw== + dependencies: + "@babel/helper-create-class-features-plugin" "^7.25.9" + "@babel/helper-plugin-utils" "^7.25.9" + +"@babel/plugin-transform-private-property-in-object@^7.25.9": + version "7.25.9" + resolved "https://registry.npmmirror.com/@babel/plugin-transform-private-property-in-object/-/plugin-transform-private-property-in-object-7.25.9.tgz#9c8b73e64e6cc3cbb2743633885a7dd2c385fe33" + integrity sha512-Evf3kcMqzXA3xfYJmZ9Pg1OvKdtqsDMSWBDzZOPLvHiTt36E75jLDQo5w1gtRU95Q4E5PDttrTf25Fw8d/uWLw== + dependencies: + "@babel/helper-annotate-as-pure" "^7.25.9" + "@babel/helper-create-class-features-plugin" "^7.25.9" + "@babel/helper-plugin-utils" "^7.25.9" + +"@babel/plugin-transform-property-literals@^7.25.9": + version "7.25.9" + resolved "https://registry.npmmirror.com/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.25.9.tgz#d72d588bd88b0dec8b62e36f6fda91cedfe28e3f" + integrity sha512-IvIUeV5KrS/VPavfSM/Iu+RE6llrHrYIKY1yfCzyO/lMXHQ+p7uGhonmGVisv6tSBSVgWzMBohTcvkC9vQcQFA== + dependencies: + "@babel/helper-plugin-utils" "^7.25.9" + +"@babel/plugin-transform-regenerator@^7.25.9": + version "7.27.0" + resolved "https://registry.npmmirror.com/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.27.0.tgz#822feebef43d6a59a81f696b2512df5b1682db31" + integrity sha512-LX/vCajUJQDqE7Aum/ELUMZAY19+cDpghxrnyt5I1tV6X5PyC86AOoWXWFYFeIvauyeSA6/ktn4tQVn/3ZifsA== + dependencies: + "@babel/helper-plugin-utils" "^7.26.5" + regenerator-transform "^0.15.2" + 
+"@babel/plugin-transform-regexp-modifiers@^7.26.0": + version "7.26.0" + resolved "https://registry.npmmirror.com/@babel/plugin-transform-regexp-modifiers/-/plugin-transform-regexp-modifiers-7.26.0.tgz#2f5837a5b5cd3842a919d8147e9903cc7455b850" + integrity sha512-vN6saax7lrA2yA/Pak3sCxuD6F5InBjn9IcrIKQPjpsLvuHYLVroTxjdlVRHjjBWxKOqIwpTXDkOssYT4BFdRw== + dependencies: + "@babel/helper-create-regexp-features-plugin" "^7.25.9" + "@babel/helper-plugin-utils" "^7.25.9" + +"@babel/plugin-transform-reserved-words@^7.25.9": + version "7.25.9" + resolved "https://registry.npmmirror.com/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.25.9.tgz#0398aed2f1f10ba3f78a93db219b27ef417fb9ce" + integrity sha512-7DL7DKYjn5Su++4RXu8puKZm2XBPHyjWLUidaPEkCUBbE7IPcsrkRHggAOOKydH1dASWdcUBxrkOGNxUv5P3Jg== + dependencies: + "@babel/helper-plugin-utils" "^7.25.9" + +"@babel/plugin-transform-shorthand-properties@^7.25.9": + version "7.25.9" + resolved "https://registry.npmmirror.com/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.25.9.tgz#bb785e6091f99f826a95f9894fc16fde61c163f2" + integrity sha512-MUv6t0FhO5qHnS/W8XCbHmiRWOphNufpE1IVxhK5kuN3Td9FT1x4rx4K42s3RYdMXCXpfWkGSbCSd0Z64xA7Ng== + dependencies: + "@babel/helper-plugin-utils" "^7.25.9" + +"@babel/plugin-transform-spread@^7.25.9": + version "7.25.9" + resolved "https://registry.npmmirror.com/@babel/plugin-transform-spread/-/plugin-transform-spread-7.25.9.tgz#24a35153931b4ba3d13cec4a7748c21ab5514ef9" + integrity sha512-oNknIB0TbURU5pqJFVbOOFspVlrpVwo2H1+HUIsVDvp5VauGGDP1ZEvO8Nn5xyMEs3dakajOxlmkNW7kNgSm6A== + dependencies: + "@babel/helper-plugin-utils" "^7.25.9" + "@babel/helper-skip-transparent-expression-wrappers" "^7.25.9" + +"@babel/plugin-transform-sticky-regex@^7.25.9": + version "7.25.9" + resolved "https://registry.npmmirror.com/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.25.9.tgz#c7f02b944e986a417817b20ba2c504dfc1453d32" + integrity sha512-WqBUSgeVwucYDP9U/xNRQam7xV8W5Zf+6Eo7T2SRVUFlhRiMNFdFz58u0KZmCVVqs2i7SHgpRnAhzRNmKfi2uA== + dependencies: + "@babel/helper-plugin-utils" "^7.25.9" + +"@babel/plugin-transform-template-literals@^7.26.8": + version "7.26.8" + resolved "https://registry.npmmirror.com/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.26.8.tgz#966b15d153a991172a540a69ad5e1845ced990b5" + integrity sha512-OmGDL5/J0CJPJZTHZbi2XpO0tyT2Ia7fzpW5GURwdtp2X3fMmN8au/ej6peC/T33/+CRiIpA8Krse8hFGVmT5Q== + dependencies: + "@babel/helper-plugin-utils" "^7.26.5" + +"@babel/plugin-transform-typeof-symbol@^7.26.7": + version "7.27.0" + resolved "https://registry.npmmirror.com/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.27.0.tgz#044a0890f3ca694207c7826d0c7a65e5ac008aae" + integrity sha512-+LLkxA9rKJpNoGsbLnAgOCdESl73vwYn+V6b+5wHbrE7OGKVDPHIQvbFSzqE6rwqaCw2RE+zdJrlLkcf8YOA0w== + dependencies: + "@babel/helper-plugin-utils" "^7.26.5" + +"@babel/plugin-transform-typescript@^7.23.3": + version "7.27.0" + resolved "https://registry.npmmirror.com/@babel/plugin-transform-typescript/-/plugin-transform-typescript-7.27.0.tgz#a29fd3481da85601c7e34091296e9746d2cccba8" + integrity sha512-fRGGjO2UEGPjvEcyAZXRXAS8AfdaQoq7HnxAbJoAoW10B9xOKesmmndJv+Sym2a+9FHWZ9KbyyLCe9s0Sn5jtg== + dependencies: + "@babel/helper-annotate-as-pure" "^7.25.9" + "@babel/helper-create-class-features-plugin" "^7.27.0" + "@babel/helper-plugin-utils" "^7.26.5" + "@babel/helper-skip-transparent-expression-wrappers" "^7.25.9" + 
"@babel/plugin-syntax-typescript" "^7.25.9" + +"@babel/plugin-transform-unicode-escapes@^7.25.9": + version "7.25.9" + resolved "https://registry.npmmirror.com/@babel/plugin-transform-unicode-escapes/-/plugin-transform-unicode-escapes-7.25.9.tgz#a75ef3947ce15363fccaa38e2dd9bc70b2788b82" + integrity sha512-s5EDrE6bW97LtxOcGj1Khcx5AaXwiMmi4toFWRDP9/y0Woo6pXC+iyPu/KuhKtfSrNFd7jJB+/fkOtZy6aIC6Q== + dependencies: + "@babel/helper-plugin-utils" "^7.25.9" + +"@babel/plugin-transform-unicode-property-regex@^7.25.9": + version "7.25.9" + resolved "https://registry.npmmirror.com/@babel/plugin-transform-unicode-property-regex/-/plugin-transform-unicode-property-regex-7.25.9.tgz#a901e96f2c1d071b0d1bb5dc0d3c880ce8f53dd3" + integrity sha512-Jt2d8Ga+QwRluxRQ307Vlxa6dMrYEMZCgGxoPR8V52rxPyldHu3hdlHspxaqYmE7oID5+kB+UKUB/eWS+DkkWg== + dependencies: + "@babel/helper-create-regexp-features-plugin" "^7.25.9" + "@babel/helper-plugin-utils" "^7.25.9" + +"@babel/plugin-transform-unicode-regex@^7.25.9": + version "7.25.9" + resolved "https://registry.npmmirror.com/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.25.9.tgz#5eae747fe39eacf13a8bd006a4fb0b5d1fa5e9b1" + integrity sha512-yoxstj7Rg9dlNn9UQxzk4fcNivwv4nUYz7fYXBaKxvw/lnmPuOm/ikoELygbYq68Bls3D/D+NBPHiLwZdZZ4HA== + dependencies: + "@babel/helper-create-regexp-features-plugin" "^7.25.9" + "@babel/helper-plugin-utils" "^7.25.9" + +"@babel/plugin-transform-unicode-sets-regex@^7.25.9": + version "7.25.9" + resolved "https://registry.npmmirror.com/@babel/plugin-transform-unicode-sets-regex/-/plugin-transform-unicode-sets-regex-7.25.9.tgz#65114c17b4ffc20fa5b163c63c70c0d25621fabe" + integrity sha512-8BYqO3GeVNHtx69fdPshN3fnzUNLrWdHhk/icSwigksJGczKSizZ+Z6SBCxTs723Fr5VSNorTIK7a+R2tISvwQ== + dependencies: + "@babel/helper-create-regexp-features-plugin" "^7.25.9" + "@babel/helper-plugin-utils" "^7.25.9" + +"@babel/preset-env@^7.22.9": + version "7.26.9" + resolved "https://registry.npmmirror.com/@babel/preset-env/-/preset-env-7.26.9.tgz#2ec64e903d0efe743699f77a10bdf7955c2123c3" + integrity sha512-vX3qPGE8sEKEAZCWk05k3cpTAE3/nOYca++JA+Rd0z2NCNzabmYvEiSShKzm10zdquOIAVXsy2Ei/DTW34KlKQ== + dependencies: + "@babel/compat-data" "^7.26.8" + "@babel/helper-compilation-targets" "^7.26.5" + "@babel/helper-plugin-utils" "^7.26.5" + "@babel/helper-validator-option" "^7.25.9" + "@babel/plugin-bugfix-firefox-class-in-computed-class-key" "^7.25.9" + "@babel/plugin-bugfix-safari-class-field-initializer-scope" "^7.25.9" + "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression" "^7.25.9" + "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining" "^7.25.9" + "@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly" "^7.25.9" + "@babel/plugin-proposal-private-property-in-object" "7.21.0-placeholder-for-preset-env.2" + "@babel/plugin-syntax-import-assertions" "^7.26.0" + "@babel/plugin-syntax-import-attributes" "^7.26.0" + "@babel/plugin-syntax-unicode-sets-regex" "^7.18.6" + "@babel/plugin-transform-arrow-functions" "^7.25.9" + "@babel/plugin-transform-async-generator-functions" "^7.26.8" + "@babel/plugin-transform-async-to-generator" "^7.25.9" + "@babel/plugin-transform-block-scoped-functions" "^7.26.5" + "@babel/plugin-transform-block-scoping" "^7.25.9" + "@babel/plugin-transform-class-properties" "^7.25.9" + "@babel/plugin-transform-class-static-block" "^7.26.0" + "@babel/plugin-transform-classes" "^7.25.9" + "@babel/plugin-transform-computed-properties" "^7.25.9" + "@babel/plugin-transform-destructuring" "^7.25.9" + 
"@babel/plugin-transform-dotall-regex" "^7.25.9" + "@babel/plugin-transform-duplicate-keys" "^7.25.9" + "@babel/plugin-transform-duplicate-named-capturing-groups-regex" "^7.25.9" + "@babel/plugin-transform-dynamic-import" "^7.25.9" + "@babel/plugin-transform-exponentiation-operator" "^7.26.3" + "@babel/plugin-transform-export-namespace-from" "^7.25.9" + "@babel/plugin-transform-for-of" "^7.26.9" + "@babel/plugin-transform-function-name" "^7.25.9" + "@babel/plugin-transform-json-strings" "^7.25.9" + "@babel/plugin-transform-literals" "^7.25.9" + "@babel/plugin-transform-logical-assignment-operators" "^7.25.9" + "@babel/plugin-transform-member-expression-literals" "^7.25.9" + "@babel/plugin-transform-modules-amd" "^7.25.9" + "@babel/plugin-transform-modules-commonjs" "^7.26.3" + "@babel/plugin-transform-modules-systemjs" "^7.25.9" + "@babel/plugin-transform-modules-umd" "^7.25.9" + "@babel/plugin-transform-named-capturing-groups-regex" "^7.25.9" + "@babel/plugin-transform-new-target" "^7.25.9" + "@babel/plugin-transform-nullish-coalescing-operator" "^7.26.6" + "@babel/plugin-transform-numeric-separator" "^7.25.9" + "@babel/plugin-transform-object-rest-spread" "^7.25.9" + "@babel/plugin-transform-object-super" "^7.25.9" + "@babel/plugin-transform-optional-catch-binding" "^7.25.9" + "@babel/plugin-transform-optional-chaining" "^7.25.9" + "@babel/plugin-transform-parameters" "^7.25.9" + "@babel/plugin-transform-private-methods" "^7.25.9" + "@babel/plugin-transform-private-property-in-object" "^7.25.9" + "@babel/plugin-transform-property-literals" "^7.25.9" + "@babel/plugin-transform-regenerator" "^7.25.9" + "@babel/plugin-transform-regexp-modifiers" "^7.26.0" + "@babel/plugin-transform-reserved-words" "^7.25.9" + "@babel/plugin-transform-shorthand-properties" "^7.25.9" + "@babel/plugin-transform-spread" "^7.25.9" + "@babel/plugin-transform-sticky-regex" "^7.25.9" + "@babel/plugin-transform-template-literals" "^7.26.8" + "@babel/plugin-transform-typeof-symbol" "^7.26.7" + "@babel/plugin-transform-unicode-escapes" "^7.25.9" + "@babel/plugin-transform-unicode-property-regex" "^7.25.9" + "@babel/plugin-transform-unicode-regex" "^7.25.9" + "@babel/plugin-transform-unicode-sets-regex" "^7.25.9" + "@babel/preset-modules" "0.1.6-no-external-plugins" + babel-plugin-polyfill-corejs2 "^0.4.10" + babel-plugin-polyfill-corejs3 "^0.11.0" + babel-plugin-polyfill-regenerator "^0.6.1" + core-js-compat "^3.40.0" + semver "^6.3.1" + +"@babel/preset-modules@0.1.6-no-external-plugins": + version "0.1.6-no-external-plugins" + resolved "https://registry.npmmirror.com/@babel/preset-modules/-/preset-modules-0.1.6-no-external-plugins.tgz#ccb88a2c49c817236861fee7826080573b8a923a" + integrity sha512-HrcgcIESLm9aIR842yhJ5RWan/gebQUJ6E/E5+rf0y9o6oj7w0Br+sWuL6kEQ/o/AdfvR1Je9jG18/gnpwjEyA== + dependencies: + "@babel/helper-plugin-utils" "^7.0.0" + "@babel/types" "^7.4.4" + esutils "^2.0.2" + +"@babel/runtime@^7.8.4": + version "7.27.0" + resolved "https://registry.npmmirror.com/@babel/runtime/-/runtime-7.27.0.tgz#fbee7cf97c709518ecc1f590984481d5460d4762" + integrity sha512-VtPOkrdPHZsKc/clNqyi9WUA8TINkZ4cGk63UUE3u4pmB2k+ZMQRDuIOagv8UVd6j7k0T3+RRIb7beKTebNbcw== + dependencies: + regenerator-runtime "^0.14.0" + +"@babel/template@^7.25.9", "@babel/template@^7.26.9", "@babel/template@^7.27.0": + version "7.27.0" + resolved "https://registry.npmmirror.com/@babel/template/-/template-7.27.0.tgz#b253e5406cc1df1c57dcd18f11760c2dbf40c0b4" + integrity 
sha512-2ncevenBqXI6qRMukPlXwHKHchC7RyMuu4xv5JBXRfOGVcTy1mXCD12qrp7Jsoxll1EV3+9sE4GugBVRjT2jFA== + dependencies: + "@babel/code-frame" "^7.26.2" + "@babel/parser" "^7.27.0" + "@babel/types" "^7.27.0" + +"@babel/traverse@^7.25.9", "@babel/traverse@^7.26.10", "@babel/traverse@^7.26.5", "@babel/traverse@^7.26.8", "@babel/traverse@^7.26.9", "@babel/traverse@^7.27.0", "@babel/traverse@^7.7.0": + version "7.27.0" + resolved "https://registry.npmmirror.com/@babel/traverse/-/traverse-7.27.0.tgz#11d7e644779e166c0442f9a07274d02cd91d4a70" + integrity sha512-19lYZFzYVQkkHkl4Cy4WrAVcqBkgvV2YM2TU3xG6DIwO7O3ecbDPfW3yM3bjAGcqcQHi+CCtjMR3dIEHxsd6bA== + dependencies: + "@babel/code-frame" "^7.26.2" + "@babel/generator" "^7.27.0" + "@babel/parser" "^7.27.0" + "@babel/template" "^7.27.0" + "@babel/types" "^7.27.0" + debug "^4.3.1" + globals "^11.1.0" + +"@babel/types@^7.25.9", "@babel/types@^7.26.10", "@babel/types@^7.26.9", "@babel/types@^7.27.0", "@babel/types@^7.4.4", "@babel/types@^7.7.0": + version "7.27.0" + resolved "https://registry.npmmirror.com/@babel/types/-/types-7.27.0.tgz#ef9acb6b06c3173f6632d993ecb6d4ae470b4559" + integrity sha512-H45s8fVLYjbhFH62dIJ3WtmJ6RSPt/3DRO0ZcT2SUiYiQyz3BLVb9ADEnLl91m74aQPS3AzzeajZHYOalWe3bg== + dependencies: + "@babel/helper-string-parser" "^7.25.9" + "@babel/helper-validator-identifier" "^7.25.9" + +"@bcoe/v8-coverage@^0.2.3": + version "0.2.3" + resolved "https://registry.npmmirror.com/@bcoe/v8-coverage/-/v8-coverage-0.2.3.tgz#75a2e8b51cb758a7553d6804a5932d7aace75c39" + integrity sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw== + +"@codemirror/autocomplete@^6.0.0", "@codemirror/autocomplete@^6.18.1": + version "6.18.6" + resolved "https://registry.npmmirror.com/@codemirror/autocomplete/-/autocomplete-6.18.6.tgz#de26e864a1ec8192a1b241eb86addbb612964ddb" + integrity sha512-PHHBXFomUs5DF+9tCOM/UoW6XQ4R44lLNNhRaW9PKPTU0D7lIjRg3ElxaJnTwsl/oHiR93WSXDBrekhoUGCPtg== + dependencies: + "@codemirror/language" "^6.0.0" + "@codemirror/state" "^6.0.0" + "@codemirror/view" "^6.17.0" + "@lezer/common" "^1.0.0" + +"@codemirror/commands@6.x", "@codemirror/commands@^6.0.0": + version "6.8.1" + resolved "https://registry.npmmirror.com/@codemirror/commands/-/commands-6.8.1.tgz#639f5559d2f33f2582a2429c58cb0c1b925c7a30" + integrity sha512-KlGVYufHMQzxbdQONiLyGQDUW0itrLZwq3CcY7xpv9ZLRHqzkBSoteocBHtMCoY7/Ci4xhzSrToIeLg7FxHuaw== + dependencies: + "@codemirror/language" "^6.0.0" + "@codemirror/state" "^6.4.0" + "@codemirror/view" "^6.27.0" + "@lezer/common" "^1.1.0" + +"@codemirror/lang-sql@^6.8.0": + version "6.8.0" + resolved "https://registry.npmmirror.com/@codemirror/lang-sql/-/lang-sql-6.8.0.tgz#1ae68ad49f378605ff88a4cc428ba667ce056068" + integrity sha512-aGLmY4OwGqN3TdSx3h6QeA1NrvaYtF7kkoWR/+W7/JzB0gQtJ+VJxewlnE3+VImhA4WVlhmkJr109PefOOhjLg== + dependencies: + "@codemirror/autocomplete" "^6.0.0" + "@codemirror/language" "^6.0.0" + "@codemirror/state" "^6.0.0" + "@lezer/common" "^1.2.0" + "@lezer/highlight" "^1.0.0" + "@lezer/lr" "^1.0.0" + +"@codemirror/language@6.x", "@codemirror/language@^6.0.0": + version "6.11.0" + resolved "https://registry.npmmirror.com/@codemirror/language/-/language-6.11.0.tgz#5ae90972601497f4575f30811519d720bf7232c9" + integrity sha512-A7+f++LodNNc1wGgoRDTt78cOwWm9KVezApgjOMp1W4hM0898nsqBXwF+sbePE7ZRcjN7Sa1Z5m2oN27XkmEjQ== + dependencies: + "@codemirror/state" "^6.0.0" + "@codemirror/view" "^6.23.0" + "@lezer/common" "^1.1.0" + "@lezer/highlight" "^1.0.0" + "@lezer/lr" "^1.0.0" + style-mod "^4.0.0" + 
+"@codemirror/lint@^6.0.0": + version "6.8.5" + resolved "https://registry.npmmirror.com/@codemirror/lint/-/lint-6.8.5.tgz#9edaa808e764e28e07665b015951934c8ec3a418" + integrity sha512-s3n3KisH7dx3vsoeGMxsbRAgKe4O1vbrnKBClm99PU0fWxmxsx5rR2PfqQgIt+2MMJBHbiJ5rfIdLYfB9NNvsA== + dependencies: + "@codemirror/state" "^6.0.0" + "@codemirror/view" "^6.35.0" + crelt "^1.0.5" + +"@codemirror/search@^6.0.0": + version "6.5.10" + resolved "https://registry.npmmirror.com/@codemirror/search/-/search-6.5.10.tgz#7367bfc88094d078b91c752bc74140fb565b55ee" + integrity sha512-RMdPdmsrUf53pb2VwflKGHEe1XVM07hI7vV2ntgw1dmqhimpatSJKva4VA9h4TLUDOD4EIF02201oZurpnEFsg== + dependencies: + "@codemirror/state" "^6.0.0" + "@codemirror/view" "^6.0.0" + crelt "^1.0.5" + +"@codemirror/state@6.x", "@codemirror/state@^6.0.0", "@codemirror/state@^6.4.0", "@codemirror/state@^6.5.0": + version "6.5.2" + resolved "https://registry.npmmirror.com/@codemirror/state/-/state-6.5.2.tgz#8eca3a64212a83367dc85475b7d78d5c9b7076c6" + integrity sha512-FVqsPqtPWKVVL3dPSxy8wEF/ymIEuVzF1PK3VbUgrxXpJUSHQWWZz4JMToquRxnkw+36LTamCZG2iua2Ptq0fA== + dependencies: + "@marijn/find-cluster-break" "^1.0.0" + +"@codemirror/view@6.x", "@codemirror/view@^6.0.0", "@codemirror/view@^6.17.0", "@codemirror/view@^6.23.0", "@codemirror/view@^6.27.0", "@codemirror/view@^6.35.0": + version "6.36.5" + resolved "https://registry.npmmirror.com/@codemirror/view/-/view-6.36.5.tgz#bb99b971322b9a3f8c7013f0ef6c4a511c0d750a" + integrity sha512-cd+FZEUlu3GQCYnguYm3EkhJ8KJVisqqUsCOKedBoAt/d9c76JUUap6U0UrpElln5k6VyrEOYliMuDAKIeDQLg== + dependencies: + "@codemirror/state" "^6.5.0" + style-mod "^4.1.0" + w3c-keyname "^2.2.4" + +"@esbuild/aix-ppc64@0.19.12": + version "0.19.12" + resolved "https://registry.npmmirror.com/@esbuild/aix-ppc64/-/aix-ppc64-0.19.12.tgz#d1bc06aedb6936b3b6d313bf809a5a40387d2b7f" + integrity sha512-bmoCYyWdEL3wDQIVbcyzRyeKLgk2WtWLTWz1ZIAZF/EGbNOwSA6ew3PftJ1PqMiOOGu0OyFMzG53L0zqIpPeNA== + +"@esbuild/android-arm64@0.19.12": + version "0.19.12" + resolved "https://registry.npmmirror.com/@esbuild/android-arm64/-/android-arm64-0.19.12.tgz#7ad65a36cfdb7e0d429c353e00f680d737c2aed4" + integrity sha512-P0UVNGIienjZv3f5zq0DP3Nt2IE/3plFzuaS96vihvD0Hd6H/q4WXUGpCxD/E8YrSXfNyRPbpTq+T8ZQioSuPA== + +"@esbuild/android-arm@0.19.12": + version "0.19.12" + resolved "https://registry.npmmirror.com/@esbuild/android-arm/-/android-arm-0.19.12.tgz#b0c26536f37776162ca8bde25e42040c203f2824" + integrity sha512-qg/Lj1mu3CdQlDEEiWrlC4eaPZ1KztwGJ9B6J+/6G+/4ewxJg7gqj8eVYWvao1bXrqGiW2rsBZFSX3q2lcW05w== + +"@esbuild/android-x64@0.19.12": + version "0.19.12" + resolved "https://registry.npmmirror.com/@esbuild/android-x64/-/android-x64-0.19.12.tgz#cb13e2211282012194d89bf3bfe7721273473b3d" + integrity sha512-3k7ZoUW6Q6YqhdhIaq/WZ7HwBpnFBlW905Fa4s4qWJyiNOgT1dOqDiVAQFwBH7gBRZr17gLrlFCRzF6jFh7Kew== + +"@esbuild/darwin-arm64@0.19.12": + version "0.19.12" + resolved "https://registry.npmmirror.com/@esbuild/darwin-arm64/-/darwin-arm64-0.19.12.tgz#cbee41e988020d4b516e9d9e44dd29200996275e" + integrity sha512-B6IeSgZgtEzGC42jsI+YYu9Z3HKRxp8ZT3cqhvliEHovq8HSX2YX8lNocDn79gCKJXOSaEot9MVYky7AKjCs8g== + +"@esbuild/darwin-x64@0.19.12": + version "0.19.12" + resolved "https://registry.npmmirror.com/@esbuild/darwin-x64/-/darwin-x64-0.19.12.tgz#e37d9633246d52aecf491ee916ece709f9d5f4cd" + integrity sha512-hKoVkKzFiToTgn+41qGhsUJXFlIjxI/jSYeZf3ugemDYZldIXIxhvwN6erJGlX4t5h417iFuheZ7l+YVn05N3A== + +"@esbuild/freebsd-arm64@0.19.12": + version "0.19.12" + resolved 
"https://registry.npmmirror.com/@esbuild/freebsd-arm64/-/freebsd-arm64-0.19.12.tgz#1ee4d8b682ed363b08af74d1ea2b2b4dbba76487" + integrity sha512-4aRvFIXmwAcDBw9AueDQ2YnGmz5L6obe5kmPT8Vd+/+x/JMVKCgdcRwH6APrbpNXsPz+K653Qg8HB/oXvXVukA== + +"@esbuild/freebsd-x64@0.19.12": + version "0.19.12" + resolved "https://registry.npmmirror.com/@esbuild/freebsd-x64/-/freebsd-x64-0.19.12.tgz#37a693553d42ff77cd7126764b535fb6cc28a11c" + integrity sha512-EYoXZ4d8xtBoVN7CEwWY2IN4ho76xjYXqSXMNccFSx2lgqOG/1TBPW0yPx1bJZk94qu3tX0fycJeeQsKovA8gg== + +"@esbuild/linux-arm64@0.19.12": + version "0.19.12" + resolved "https://registry.npmmirror.com/@esbuild/linux-arm64/-/linux-arm64-0.19.12.tgz#be9b145985ec6c57470e0e051d887b09dddb2d4b" + integrity sha512-EoTjyYyLuVPfdPLsGVVVC8a0p1BFFvtpQDB/YLEhaXyf/5bczaGeN15QkR+O4S5LeJ92Tqotve7i1jn35qwvdA== + +"@esbuild/linux-arm@0.19.12": + version "0.19.12" + resolved "https://registry.npmmirror.com/@esbuild/linux-arm/-/linux-arm-0.19.12.tgz#207ecd982a8db95f7b5279207d0ff2331acf5eef" + integrity sha512-J5jPms//KhSNv+LO1S1TX1UWp1ucM6N6XuL6ITdKWElCu8wXP72l9MM0zDTzzeikVyqFE6U8YAV9/tFyj0ti+w== + +"@esbuild/linux-ia32@0.19.12": + version "0.19.12" + resolved "https://registry.npmmirror.com/@esbuild/linux-ia32/-/linux-ia32-0.19.12.tgz#d0d86b5ca1562523dc284a6723293a52d5860601" + integrity sha512-Thsa42rrP1+UIGaWz47uydHSBOgTUnwBwNq59khgIwktK6x60Hivfbux9iNR0eHCHzOLjLMLfUMLCypBkZXMHA== + +"@esbuild/linux-loong64@0.14.54": + version "0.14.54" + resolved "https://registry.npmmirror.com/@esbuild/linux-loong64/-/linux-loong64-0.14.54.tgz#de2a4be678bd4d0d1ffbb86e6de779cde5999028" + integrity sha512-bZBrLAIX1kpWelV0XemxBZllyRmM6vgFQQG2GdNb+r3Fkp0FOh1NJSvekXDs7jq70k4euu1cryLMfU+mTXlEpw== + +"@esbuild/linux-loong64@0.19.12": + version "0.19.12" + resolved "https://registry.npmmirror.com/@esbuild/linux-loong64/-/linux-loong64-0.19.12.tgz#9a37f87fec4b8408e682b528391fa22afd952299" + integrity sha512-LiXdXA0s3IqRRjm6rV6XaWATScKAXjI4R4LoDlvO7+yQqFdlr1Bax62sRwkVvRIrwXxvtYEHHI4dm50jAXkuAA== + +"@esbuild/linux-mips64el@0.19.12": + version "0.19.12" + resolved "https://registry.npmmirror.com/@esbuild/linux-mips64el/-/linux-mips64el-0.19.12.tgz#4ddebd4e6eeba20b509d8e74c8e30d8ace0b89ec" + integrity sha512-fEnAuj5VGTanfJ07ff0gOA6IPsvrVHLVb6Lyd1g2/ed67oU1eFzL0r9WL7ZzscD+/N6i3dWumGE1Un4f7Amf+w== + +"@esbuild/linux-ppc64@0.19.12": + version "0.19.12" + resolved "https://registry.npmmirror.com/@esbuild/linux-ppc64/-/linux-ppc64-0.19.12.tgz#adb67dadb73656849f63cd522f5ecb351dd8dee8" + integrity sha512-nYJA2/QPimDQOh1rKWedNOe3Gfc8PabU7HT3iXWtNUbRzXS9+vgB0Fjaqr//XNbd82mCxHzik2qotuI89cfixg== + +"@esbuild/linux-riscv64@0.19.12": + version "0.19.12" + resolved "https://registry.npmmirror.com/@esbuild/linux-riscv64/-/linux-riscv64-0.19.12.tgz#11bc0698bf0a2abf8727f1c7ace2112612c15adf" + integrity sha512-2MueBrlPQCw5dVJJpQdUYgeqIzDQgw3QtiAHUC4RBz9FXPrskyyU3VI1hw7C0BSKB9OduwSJ79FTCqtGMWqJHg== + +"@esbuild/linux-s390x@0.19.12": + version "0.19.12" + resolved "https://registry.npmmirror.com/@esbuild/linux-s390x/-/linux-s390x-0.19.12.tgz#e86fb8ffba7c5c92ba91fc3b27ed5a70196c3cc8" + integrity sha512-+Pil1Nv3Umes4m3AZKqA2anfhJiVmNCYkPchwFJNEJN5QxmTs1uzyy4TvmDrCRNT2ApwSari7ZIgrPeUx4UZDg== + +"@esbuild/linux-x64@0.19.12": + version "0.19.12" + resolved "https://registry.npmmirror.com/@esbuild/linux-x64/-/linux-x64-0.19.12.tgz#5f37cfdc705aea687dfe5dfbec086a05acfe9c78" + integrity sha512-B71g1QpxfwBvNrfyJdVDexenDIt1CiDN1TIXLbhOw0KhJzE78KIFGX6OJ9MrtC0oOqMWf+0xop4qEU8JrJTwCg== + +"@esbuild/netbsd-x64@0.19.12": + version "0.19.12" + resolved 
"https://registry.npmmirror.com/@esbuild/netbsd-x64/-/netbsd-x64-0.19.12.tgz#29da566a75324e0d0dd7e47519ba2f7ef168657b" + integrity sha512-3ltjQ7n1owJgFbuC61Oj++XhtzmymoCihNFgT84UAmJnxJfm4sYCiSLTXZtE00VWYpPMYc+ZQmB6xbSdVh0JWA== + +"@esbuild/openbsd-x64@0.19.12": + version "0.19.12" + resolved "https://registry.npmmirror.com/@esbuild/openbsd-x64/-/openbsd-x64-0.19.12.tgz#306c0acbdb5a99c95be98bdd1d47c916e7dc3ff0" + integrity sha512-RbrfTB9SWsr0kWmb9srfF+L933uMDdu9BIzdA7os2t0TXhCRjrQyCeOt6wVxr79CKD4c+p+YhCj31HBkYcXebw== + +"@esbuild/sunos-x64@0.19.12": + version "0.19.12" + resolved "https://registry.npmmirror.com/@esbuild/sunos-x64/-/sunos-x64-0.19.12.tgz#0933eaab9af8b9b2c930236f62aae3fc593faf30" + integrity sha512-HKjJwRrW8uWtCQnQOz9qcU3mUZhTUQvi56Q8DPTLLB+DawoiQdjsYq+j+D3s9I8VFtDr+F9CjgXKKC4ss89IeA== + +"@esbuild/win32-arm64@0.19.12": + version "0.19.12" + resolved "https://registry.npmmirror.com/@esbuild/win32-arm64/-/win32-arm64-0.19.12.tgz#773bdbaa1971b36db2f6560088639ccd1e6773ae" + integrity sha512-URgtR1dJnmGvX864pn1B2YUYNzjmXkuJOIqG2HdU62MVS4EHpU2946OZoTMnRUHklGtJdJZ33QfzdjGACXhn1A== + +"@esbuild/win32-ia32@0.19.12": + version "0.19.12" + resolved "https://registry.npmmirror.com/@esbuild/win32-ia32/-/win32-ia32-0.19.12.tgz#000516cad06354cc84a73f0943a4aa690ef6fd67" + integrity sha512-+ZOE6pUkMOJfmxmBZElNOx72NKpIa/HFOMGzu8fqzQJ5kgf6aTGrcJaFsNiVMH4JKpMipyK+7k0n2UXN7a8YKQ== + +"@esbuild/win32-x64@0.19.12": + version "0.19.12" + resolved "https://registry.npmmirror.com/@esbuild/win32-x64/-/win32-x64-0.19.12.tgz#c57c8afbb4054a3ab8317591a0b7320360b444ae" + integrity sha512-T1QyPSDCyMXaO3pzBkF96E8xMkiRYbUEZADd29SyPGabqxMViNoii+NcK7eWJAEoU6RZyEm5lVSIjTmcdoB9HA== + +"@floating-ui/core@^1.6.0": + version "1.6.9" + resolved "https://registry.npmmirror.com/@floating-ui/core/-/core-1.6.9.tgz#64d1da251433019dafa091de9b2886ff35ec14e6" + integrity sha512-uMXCuQ3BItDUbAMhIXw7UPXRfAlOAvZzdK9BWpE60MCn+Svt3aLn9jsPTi/WNGlRUu2uI0v5S7JiIUsbsvh3fw== + dependencies: + "@floating-ui/utils" "^0.2.9" + +"@floating-ui/dom@^1.0.0", "@floating-ui/dom@^1.6.7": + version "1.6.13" + resolved "https://registry.npmmirror.com/@floating-ui/dom/-/dom-1.6.13.tgz#a8a938532aea27a95121ec16e667a7cbe8c59e34" + integrity sha512-umqzocjDgNRGTuO7Q8CU32dkHkECqI8ZdMZ5Swb6QAM0t5rnlrN3lGo1hdpscRd3WS8T6DKYK4ephgIH9iRh3w== + dependencies: + "@floating-ui/core" "^1.6.0" + "@floating-ui/utils" "^0.2.9" + +"@floating-ui/utils@^0.2.9": + version "0.2.9" + resolved "https://registry.npmmirror.com/@floating-ui/utils/-/utils-0.2.9.tgz#50dea3616bc8191fb8e112283b49eaff03e78429" + integrity sha512-MDWhGtE+eHw5JW7lq4qhc5yRLS11ERl1c7Z6Xd0a58DozHES6EnNNwUWbMiG4J9Cgj053Bhk8zvlhFYKVhULwg== + +"@floating-ui/vue@^1.1.0": + version "1.1.6" + resolved "https://registry.npmmirror.com/@floating-ui/vue/-/vue-1.1.6.tgz#1c7e8f257fae5b71a72d10c1746e6b0ba338399c" + integrity sha512-XFlUzGHGv12zbgHNk5FN2mUB7ROul3oG2ENdTpWdE+qMFxyNxWSRmsoyhiEnpmabNm6WnUvR1OvJfUfN4ojC1A== + dependencies: + "@floating-ui/dom" "^1.0.0" + "@floating-ui/utils" "^0.2.9" + vue-demi ">=0.13.0" + +"@headlessui/vue@^1.7.14": + version "1.7.23" + resolved "https://registry.npmmirror.com/@headlessui/vue/-/vue-1.7.23.tgz#7fe19dbeca35de9e6270c82c78c4864e6a6f7391" + integrity sha512-JzdCNqurrtuu0YW6QaDtR2PIYCKPUWq28csDyMvN4zmGccmE7lz40Is6hc3LA4HFeCI7sekZ/PQMTNmn9I/4Wg== + dependencies: + "@tanstack/vue-virtual" "^3.0.0-beta.60" + +"@iconify/json@^2.2.123": + version "2.2.324" + resolved "https://registry.npmmirror.com/@iconify/json/-/json-2.2.324.tgz#e30ebd92558f48e2cfefe3aea667b811f8ca8b7d" + 
integrity sha512-7rx2pY2NH4zn/7q04zFiiD3o7eQ8ZV0F0nf7Rkn2DyI272OWzDMw5goSULOyDdiW9sdfBLeZod/TRxEilaNNsA== + dependencies: + "@iconify/types" "*" + pathe "^1.1.2" + +"@iconify/types@*", "@iconify/types@^2.0.0": + version "2.0.0" + resolved "https://registry.npmmirror.com/@iconify/types/-/types-2.0.0.tgz#ab0e9ea681d6c8a1214f30cd741fe3a20cc57f57" + integrity sha512-+wluvCrRhXrhyOmRDJ3q8mux9JkKy5SJ/v8ol2tu4FVjyYvtEzkc/3pK15ET6RKg4b4w4BmTk1+gsCUhf21Ykg== + +"@iconify/utils@^2.1.11": + version "2.3.0" + resolved "https://registry.npmmirror.com/@iconify/utils/-/utils-2.3.0.tgz#1bbbf8c477ebe9a7cacaea78b1b7e8937f9cbfba" + integrity sha512-GmQ78prtwYW6EtzXRU1rY+KwOKfz32PD7iJh6Iyqw68GiKuoZ2A6pRtzWONz5VQJbp50mEjXh/7NkumtrAgRKA== + dependencies: + "@antfu/install-pkg" "^1.0.0" + "@antfu/utils" "^8.1.0" + "@iconify/types" "^2.0.0" + debug "^4.4.0" + globals "^15.14.0" + kolorist "^1.8.0" + local-pkg "^1.0.0" + mlly "^1.7.4" + +"@internationalized/date@^3.5.4": + version "3.7.0" + resolved "https://registry.npmmirror.com/@internationalized/date/-/date-3.7.0.tgz#23a4956308ee108e308517a7137c69ab8f5f2ad9" + integrity sha512-VJ5WS3fcVx0bejE/YHfbDKR/yawZgKqn/if+oEeLqNwBtPzVB06olkfcnojTmEMX+gTpH+FlQ69SHNitJ8/erQ== + dependencies: + "@swc/helpers" "^0.5.0" + +"@internationalized/number@^3.5.3": + version "3.6.0" + resolved "https://registry.npmmirror.com/@internationalized/number/-/number-3.6.0.tgz#dc6ba20c41b25eb605f1d5cac7d8668e9022c224" + integrity sha512-PtrRcJVy7nw++wn4W2OuePQQfTqDzfusSuY1QTtui4wa7r+rGVtR75pO8CyKvHvzyQYi3Q1uO5sY0AsB4e65Bw== + dependencies: + "@swc/helpers" "^0.5.0" + +"@isaacs/cliui@^8.0.2": + version "8.0.2" + resolved "https://registry.npmmirror.com/@isaacs/cliui/-/cliui-8.0.2.tgz#b37667b7bc181c168782259bab42474fbf52b550" + integrity sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA== + dependencies: + string-width "^5.1.2" + string-width-cjs "npm:string-width@^4.2.0" + strip-ansi "^7.0.1" + strip-ansi-cjs "npm:strip-ansi@^6.0.1" + wrap-ansi "^8.1.0" + wrap-ansi-cjs "npm:wrap-ansi@^7.0.0" + +"@istanbuljs/schema@^0.1.2", "@istanbuljs/schema@^0.1.3": + version "0.1.3" + resolved "https://registry.npmmirror.com/@istanbuljs/schema/-/schema-0.1.3.tgz#e45e384e4b8ec16bce2fd903af78450f6bf7ec98" + integrity sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA== + +"@jridgewell/gen-mapping@^0.3.2", "@jridgewell/gen-mapping@^0.3.5": + version "0.3.8" + resolved "https://registry.npmmirror.com/@jridgewell/gen-mapping/-/gen-mapping-0.3.8.tgz#4f0e06362e01362f823d348f1872b08f666d8142" + integrity sha512-imAbBGkb+ebQyxKgzv5Hu2nmROxoDOXHh80evxdoXNOrvAnVx7zimzc1Oo5h9RlfV4vPXaE2iM5pOFbvOCClWA== + dependencies: + "@jridgewell/set-array" "^1.2.1" + "@jridgewell/sourcemap-codec" "^1.4.10" + "@jridgewell/trace-mapping" "^0.3.24" + +"@jridgewell/resolve-uri@^3.1.0": + version "3.1.2" + resolved "https://registry.npmmirror.com/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz#7a0ee601f60f99a20c7c7c5ff0c80388c1189bd6" + integrity sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw== + +"@jridgewell/set-array@^1.2.1": + version "1.2.1" + resolved "https://registry.npmmirror.com/@jridgewell/set-array/-/set-array-1.2.1.tgz#558fb6472ed16a4c850b889530e6b36438c49280" + integrity sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A== + +"@jridgewell/sourcemap-codec@^1.4.10", "@jridgewell/sourcemap-codec@^1.4.14", "@jridgewell/sourcemap-codec@^1.4.15", 
"@jridgewell/sourcemap-codec@^1.5.0": + version "1.5.0" + resolved "https://registry.npmmirror.com/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.0.tgz#3188bcb273a414b0d215fd22a58540b989b9409a" + integrity sha512-gv3ZRaISU3fjPAgNsriBRqGWQL6quFx04YMPW/zD8XMLsU32mhCCbfbO6KZFLjvYpCZ8zyDEgqsgf+PwPaM7GQ== + +"@jridgewell/trace-mapping@^0.3.12", "@jridgewell/trace-mapping@^0.3.24", "@jridgewell/trace-mapping@^0.3.25": + version "0.3.25" + resolved "https://registry.npmmirror.com/@jridgewell/trace-mapping/-/trace-mapping-0.3.25.tgz#15f190e98895f3fc23276ee14bc76b675c2e50f0" + integrity sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ== + dependencies: + "@jridgewell/resolve-uri" "^3.1.0" + "@jridgewell/sourcemap-codec" "^1.4.14" + +"@lezer/common@^1.0.0", "@lezer/common@^1.1.0", "@lezer/common@^1.2.0": + version "1.2.3" + resolved "https://registry.npmmirror.com/@lezer/common/-/common-1.2.3.tgz#138fcddab157d83da557554851017c6c1e5667fd" + integrity sha512-w7ojc8ejBqr2REPsWxJjrMFsA/ysDCFICn8zEOR9mrqzOu2amhITYuLD8ag6XZf0CFXDrhKqw7+tW8cX66NaDA== + +"@lezer/highlight@^1.0.0": + version "1.2.1" + resolved "https://registry.npmmirror.com/@lezer/highlight/-/highlight-1.2.1.tgz#596fa8f9aeb58a608be0a563e960c373cbf23f8b" + integrity sha512-Z5duk4RN/3zuVO7Jq0pGLJ3qynpxUVsh7IbUbGj88+uV2ApSAn6kWg2au3iJb+0Zi7kKtqffIESgNcRXWZWmSA== + dependencies: + "@lezer/common" "^1.0.0" + +"@lezer/lr@^1.0.0": + version "1.4.2" + resolved "https://registry.npmmirror.com/@lezer/lr/-/lr-1.4.2.tgz#931ea3dea8e9de84e90781001dae30dea9ff1727" + integrity sha512-pu0K1jCIdnQ12aWNaAVU5bzi7Bd1w54J3ECgANPmYLtQKP0HBj2cE/5coBD66MT10xbtIuUr7tg0Shbsvk0mDA== + dependencies: + "@lezer/common" "^1.0.0" + +"@marijn/find-cluster-break@^1.0.0": + version "1.0.2" + resolved "https://registry.npmmirror.com/@marijn/find-cluster-break/-/find-cluster-break-1.0.2.tgz#775374306116d51c0c500b8c4face0f9a04752d8" + integrity sha512-l0h88YhZFyKdXIFNfSWpyjStDjGHwZ/U7iobcK1cQQD8sejsONdQtTVU+1wVN1PBw40PiiHB1vA5S7VTfQiP9g== + +"@mswjs/cookies@^0.1.7": + version "0.1.7" + resolved "https://registry.npmmirror.com/@mswjs/cookies/-/cookies-0.1.7.tgz#d334081b2c51057a61c1dd7b76ca3cac02251651" + integrity sha512-bDg1ReMBx+PYDB4Pk7y1Q07Zz1iKIEUWQpkEXiA2lEWg9gvOZ8UBmGXilCEUvyYoRFlmr/9iXTRR69TrgSwX/Q== + dependencies: + "@types/set-cookie-parser" "^2.4.0" + set-cookie-parser "^2.4.6" + +"@mswjs/interceptors@^0.12.7": + version "0.12.7" + resolved "https://registry.npmmirror.com/@mswjs/interceptors/-/interceptors-0.12.7.tgz#0d1cd4cd31a0f663e0455993951201faa09d0909" + integrity sha512-eGjZ3JRAt0Fzi5FgXiV/P3bJGj0NqsN7vBS0J0FO2AQRQ0jCKQS4lEFm4wvlSgKQNfeuc/Vz6d81VtU3Gkx/zg== + dependencies: + "@open-draft/until" "^1.0.3" + "@xmldom/xmldom" "^0.7.2" + debug "^4.3.2" + headers-utils "^3.0.2" + outvariant "^1.2.0" + strict-event-emitter "^0.2.0" + +"@nodelib/fs.scandir@2.1.5": + version "2.1.5" + resolved "https://registry.npmmirror.com/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz#7619c2eb21b25483f6d167548b4cfd5a7488c3d5" + integrity sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g== + dependencies: + "@nodelib/fs.stat" "2.0.5" + run-parallel "^1.1.9" + +"@nodelib/fs.stat@2.0.5", "@nodelib/fs.stat@^2.0.2": + version "2.0.5" + resolved "https://registry.npmmirror.com/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz#5bd262af94e9d25bd1e71b05deed44876a222e8b" + integrity sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A== + +"@nodelib/fs.walk@^1.2.3": + version 
"1.2.8" + resolved "https://registry.npmmirror.com/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz#e95737e8bb6746ddedf69c556953494f196fe69a" + integrity sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg== + dependencies: + "@nodelib/fs.scandir" "2.1.5" + fastq "^1.6.0" + +"@one-ini/wasm@0.1.1": + version "0.1.1" + resolved "https://registry.npmmirror.com/@one-ini/wasm/-/wasm-0.1.1.tgz#6013659736c9dbfccc96e8a9c2b3de317df39323" + integrity sha512-XuySG1E38YScSJoMlqovLru4KTUNSjgVTIjyh7qMX6aNN5HY5Ct5LhRJdxO79JtTzKfzV/bnWpz+zquYrISsvw== + +"@open-draft/until@^1.0.3": + version "1.0.3" + resolved "https://registry.npmmirror.com/@open-draft/until/-/until-1.0.3.tgz#db9cc719191a62e7d9200f6e7bab21c5b848adca" + integrity sha512-Aq58f5HiWdyDlFffbbSjAlv596h/cOnt2DO1w3DOC7OJ5EHs0hd/nycJfiu9RJbT6Yk6F1knnRRXNSpxoIVZ9Q== + +"@pkgjs/parseargs@^0.11.0": + version "0.11.0" + resolved "https://registry.npmmirror.com/@pkgjs/parseargs/-/parseargs-0.11.0.tgz#a77ea742fab25775145434eb1d2328cf5013ac33" + integrity sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg== + +"@popperjs/core@^2.11.2", "@popperjs/core@^2.9.0": + version "2.11.8" + resolved "https://registry.npmmirror.com/@popperjs/core/-/core-2.11.8.tgz#6b79032e760a0899cd4204710beede972a3a185f" + integrity sha512-P1st0aksCrn9sGZhp8GMYwBnQsbvAWsZAX44oXNNvLHGqAOcoVxmjZiohstwQ7SqKnbR47akdNi+uleWD8+g6A== + +"@remirror/core-constants@3.0.0": + version "3.0.0" + resolved "https://registry.npmmirror.com/@remirror/core-constants/-/core-constants-3.0.0.tgz#96fdb89d25c62e7b6a5d08caf0ce5114370e3b8f" + integrity sha512-42aWfPrimMfDKDi4YegyS7x+/0tlzaqwPQCULLanv3DMIlu96KTJR0fM5isWX2UViOqlGnX6YFgqWepcX+XMNg== + +"@rollup/pluginutils@^5.0.2": + version "5.1.4" + resolved "https://registry.npmmirror.com/@rollup/pluginutils/-/pluginutils-5.1.4.tgz#bb94f1f9eaaac944da237767cdfee6c5b2262d4a" + integrity sha512-USm05zrsFxYLPdWWq+K3STlWiT/3ELn3RcV5hJMghpeAIhxfsUIg6mt12CBJBInWMV4VneoV7SfGv8xIwo2qNQ== + dependencies: + "@types/estree" "^1.0.0" + estree-walker "^2.0.2" + picomatch "^4.0.2" + +"@rollup/rollup-android-arm-eabi@4.39.0": + version "4.39.0" + resolved "https://registry.npmmirror.com/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.39.0.tgz#1d8cc5dd3d8ffe569d8f7f67a45c7909828a0f66" + integrity sha512-lGVys55Qb00Wvh8DMAocp5kIcaNzEFTmGhfFd88LfaogYTRKrdxgtlO5H6S49v2Nd8R2C6wLOal0qv6/kCkOwA== + +"@rollup/rollup-android-arm64@4.39.0": + version "4.39.0" + resolved "https://registry.npmmirror.com/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.39.0.tgz#9c136034d3d9ed29d0b138c74dd63c5744507fca" + integrity sha512-It9+M1zE31KWfqh/0cJLrrsCPiF72PoJjIChLX+rEcujVRCb4NLQ5QzFkzIZW8Kn8FTbvGQBY5TkKBau3S8cCQ== + +"@rollup/rollup-darwin-arm64@4.39.0": + version "4.39.0" + resolved "https://registry.npmmirror.com/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.39.0.tgz#830d07794d6a407c12b484b8cf71affd4d3800a6" + integrity sha512-lXQnhpFDOKDXiGxsU9/l8UEGGM65comrQuZ+lDcGUx+9YQ9dKpF3rSEGepyeR5AHZ0b5RgiligsBhWZfSSQh8Q== + +"@rollup/rollup-darwin-x64@4.39.0": + version "4.39.0" + resolved "https://registry.npmmirror.com/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.39.0.tgz#b26f0f47005c1fa5419a880f323ed509dc8d885c" + integrity sha512-mKXpNZLvtEbgu6WCkNij7CGycdw9cJi2k9v0noMb++Vab12GZjFgUXD69ilAbBh034Zwn95c2PNSz9xM7KYEAQ== + +"@rollup/rollup-freebsd-arm64@4.39.0": + version "4.39.0" + resolved 
"https://registry.npmmirror.com/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.39.0.tgz#2b60c81ac01ff7d1bc8df66aee7808b6690c6d19" + integrity sha512-jivRRlh2Lod/KvDZx2zUR+I4iBfHcu2V/BA2vasUtdtTN2Uk3jfcZczLa81ESHZHPHy4ih3T/W5rPFZ/hX7RtQ== + +"@rollup/rollup-freebsd-x64@4.39.0": + version "4.39.0" + resolved "https://registry.npmmirror.com/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.39.0.tgz#4826af30f4d933d82221289068846c9629cc628c" + integrity sha512-8RXIWvYIRK9nO+bhVz8DwLBepcptw633gv/QT4015CpJ0Ht8punmoHU/DuEd3iw9Hr8UwUV+t+VNNuZIWYeY7Q== + +"@rollup/rollup-linux-arm-gnueabihf@4.39.0": + version "4.39.0" + resolved "https://registry.npmmirror.com/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.39.0.tgz#a1f4f963d5dcc9e5575c7acf9911824806436bf7" + integrity sha512-mz5POx5Zu58f2xAG5RaRRhp3IZDK7zXGk5sdEDj4o96HeaXhlUwmLFzNlc4hCQi5sGdR12VDgEUqVSHer0lI9g== + +"@rollup/rollup-linux-arm-musleabihf@4.39.0": + version "4.39.0" + resolved "https://registry.npmmirror.com/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.39.0.tgz#e924b0a8b7c400089146f6278446e6b398b75a06" + integrity sha512-+YDwhM6gUAyakl0CD+bMFpdmwIoRDzZYaTWV3SDRBGkMU/VpIBYXXEvkEcTagw/7VVkL2vA29zU4UVy1mP0/Yw== + +"@rollup/rollup-linux-arm64-gnu@4.39.0": + version "4.39.0" + resolved "https://registry.npmmirror.com/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.39.0.tgz#cb43303274ec9a716f4440b01ab4e20c23aebe20" + integrity sha512-EKf7iF7aK36eEChvlgxGnk7pdJfzfQbNvGV/+l98iiMwU23MwvmV0Ty3pJ0p5WQfm3JRHOytSIqD9LB7Bq7xdQ== + +"@rollup/rollup-linux-arm64-musl@4.39.0": + version "4.39.0" + resolved "https://registry.npmmirror.com/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.39.0.tgz#531c92533ce3d167f2111bfcd2aa1a2041266987" + integrity sha512-vYanR6MtqC7Z2SNr8gzVnzUul09Wi1kZqJaek3KcIlI/wq5Xtq4ZPIZ0Mr/st/sv/NnaPwy/D4yXg5x0B3aUUA== + +"@rollup/rollup-linux-loongarch64-gnu@4.39.0": + version "4.39.0" + resolved "https://registry.npmmirror.com/@rollup/rollup-linux-loongarch64-gnu/-/rollup-linux-loongarch64-gnu-4.39.0.tgz#53403889755d0c37c92650aad016d5b06c1b061a" + integrity sha512-NMRUT40+h0FBa5fb+cpxtZoGAggRem16ocVKIv5gDB5uLDgBIwrIsXlGqYbLwW8YyO3WVTk1FkFDjMETYlDqiw== + +"@rollup/rollup-linux-powerpc64le-gnu@4.39.0": + version "4.39.0" + resolved "https://registry.npmmirror.com/@rollup/rollup-linux-powerpc64le-gnu/-/rollup-linux-powerpc64le-gnu-4.39.0.tgz#f669f162e29094c819c509e99dbeced58fc708f9" + integrity sha512-0pCNnmxgduJ3YRt+D+kJ6Ai/r+TaePu9ZLENl+ZDV/CdVczXl95CbIiwwswu4L+K7uOIGf6tMo2vm8uadRaICQ== + +"@rollup/rollup-linux-riscv64-gnu@4.39.0": + version "4.39.0" + resolved "https://registry.npmmirror.com/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.39.0.tgz#4bab37353b11bcda5a74ca11b99dea929657fd5f" + integrity sha512-t7j5Zhr7S4bBtksT73bO6c3Qa2AV/HqiGlj9+KB3gNF5upcVkx+HLgxTm8DK4OkzsOYqbdqbLKwvGMhylJCPhQ== + +"@rollup/rollup-linux-riscv64-musl@4.39.0": + version "4.39.0" + resolved "https://registry.npmmirror.com/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.39.0.tgz#4d66be1ce3cfd40a7910eb34dddc7cbd4c2dd2a5" + integrity sha512-m6cwI86IvQ7M93MQ2RF5SP8tUjD39Y7rjb1qjHgYh28uAPVU8+k/xYWvxRO3/tBN2pZkSMa5RjnPuUIbrwVxeA== + +"@rollup/rollup-linux-s390x-gnu@4.39.0": + version "4.39.0" + resolved "https://registry.npmmirror.com/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.39.0.tgz#7181c329395ed53340a0c59678ad304a99627f6d" + integrity sha512-iRDJd2ebMunnk2rsSBYlsptCyuINvxUfGwOUldjv5M4tpa93K8tFMeYGpNk2+Nxl+OBJnBzy2/JCscGeO507kA== + 
+"@rollup/rollup-linux-x64-gnu@4.39.0": + version "4.39.0" + resolved "https://registry.npmmirror.com/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.39.0.tgz#00825b3458094d5c27cb4ed66e88bfe9f1e65f90" + integrity sha512-t9jqYw27R6Lx0XKfEFe5vUeEJ5pF3SGIM6gTfONSMb7DuG6z6wfj2yjcoZxHg129veTqU7+wOhY6GX8wmf90dA== + +"@rollup/rollup-linux-x64-musl@4.39.0": + version "4.39.0" + resolved "https://registry.npmmirror.com/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.39.0.tgz#81caac2a31b8754186f3acc142953a178fcd6fba" + integrity sha512-ThFdkrFDP55AIsIZDKSBWEt/JcWlCzydbZHinZ0F/r1h83qbGeenCt/G/wG2O0reuENDD2tawfAj2s8VK7Bugg== + +"@rollup/rollup-win32-arm64-msvc@4.39.0": + version "4.39.0" + resolved "https://registry.npmmirror.com/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.39.0.tgz#3a3f421f5ce9bd99ed20ce1660cce7cee3e9f199" + integrity sha512-jDrLm6yUtbOg2TYB3sBF3acUnAwsIksEYjLeHL+TJv9jg+TmTwdyjnDex27jqEMakNKf3RwwPahDIt7QXCSqRQ== + +"@rollup/rollup-win32-ia32-msvc@4.39.0": + version "4.39.0" + resolved "https://registry.npmmirror.com/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.39.0.tgz#a44972d5cdd484dfd9cf3705a884bf0c2b7785a7" + integrity sha512-6w9uMuza+LbLCVoNKL5FSLE7yvYkq9laSd09bwS0tMjkwXrmib/4KmoJcrKhLWHvw19mwU+33ndC69T7weNNjQ== + +"@rollup/rollup-win32-x64-msvc@4.39.0": + version "4.39.0" + resolved "https://registry.npmmirror.com/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.39.0.tgz#bfe0214e163f70c4fec1c8f7bb8ce266f4c05b7e" + integrity sha512-yAkUOkIKZlK5dl7u6dg897doBgLXmUHhIINM2c+sND3DZwnrdQkkSiDh7N75Ll4mM4dxSkYfXqU9fW3lLkMFug== + +"@samverschueren/stream-to-observable@^0.3.0": + version "0.3.1" + resolved "https://registry.npmmirror.com/@samverschueren/stream-to-observable/-/stream-to-observable-0.3.1.tgz#a21117b19ee9be70c379ec1877537ef2e1c63301" + integrity sha512-c/qwwcHyafOQuVQJj0IlBjf5yYgBI7YPJ77k4fOJYesb41jio65eaJODRUmfYKhTOFBrIZ66kgvGPlNbjuoRdQ== + dependencies: + any-observable "^0.3.0" + +"@sentry-internal/browser-utils@8.55.0": + version "8.55.0" + resolved "https://registry.npmmirror.com/@sentry-internal/browser-utils/-/browser-utils-8.55.0.tgz#d89bae423edd29c39f01285c8e2d59ce9289d9a6" + integrity sha512-ROgqtQfpH/82AQIpESPqPQe0UyWywKJsmVIqi3c5Fh+zkds5LUxnssTj3yNd1x+kxaPDVB023jAP+3ibNgeNDw== + dependencies: + "@sentry/core" "8.55.0" + +"@sentry-internal/feedback@8.55.0": + version "8.55.0" + resolved "https://registry.npmmirror.com/@sentry-internal/feedback/-/feedback-8.55.0.tgz#170b8e96a36ce6f71f53daad680f1a0c98381314" + integrity sha512-cP3BD/Q6pquVQ+YL+rwCnorKuTXiS9KXW8HNKu4nmmBAyf7urjs+F6Hr1k9MXP5yQ8W3yK7jRWd09Yu6DHWOiw== + dependencies: + "@sentry/core" "8.55.0" + +"@sentry-internal/replay-canvas@8.55.0": + version "8.55.0" + resolved "https://registry.npmmirror.com/@sentry-internal/replay-canvas/-/replay-canvas-8.55.0.tgz#e65430207a2f18e4a07c25c669ec758d11282aaf" + integrity sha512-nIkfgRWk1091zHdu4NbocQsxZF1rv1f7bbp3tTIlZYbrH62XVZosx5iHAuZG0Zc48AETLE7K4AX9VGjvQj8i9w== + dependencies: + "@sentry-internal/replay" "8.55.0" + "@sentry/core" "8.55.0" + +"@sentry-internal/replay@8.55.0": + version "8.55.0" + resolved "https://registry.npmmirror.com/@sentry-internal/replay/-/replay-8.55.0.tgz#4c00b22cdf58cac5b3e537f8d4f675f2b021f475" + integrity sha512-roCDEGkORwolxBn8xAKedybY+Jlefq3xYmgN2fr3BTnsXjSYOPC7D1/mYqINBat99nDtvgFvNfRcZPiwwZ1hSw== + dependencies: + "@sentry-internal/browser-utils" "8.55.0" + "@sentry/core" "8.55.0" + +"@sentry/babel-plugin-component-annotate@2.23.0": + version "2.23.0" + resolved 
"https://registry.npmmirror.com/@sentry/babel-plugin-component-annotate/-/babel-plugin-component-annotate-2.23.0.tgz#f7312e733d9e3b23ccc55d05d811de709e11e9b1" + integrity sha512-+uLqaCKeYmH/W2YUV1XHkFEtpHdx/aFjCQahPVsvXyqg13dfkR6jaygPL4DB5DJtUSmPFCUE3MEk9ZO5JlhJYg== + +"@sentry/browser@8.55.0": + version "8.55.0" + resolved "https://registry.npmmirror.com/@sentry/browser/-/browser-8.55.0.tgz#9a489e2a54d29c65e6271b4ee594b43679cab7bd" + integrity sha512-1A31mCEWCjaMxJt6qGUK+aDnLDcK6AwLAZnqpSchNysGni1pSn1RWSmk9TBF8qyTds5FH8B31H480uxMPUJ7Cw== + dependencies: + "@sentry-internal/browser-utils" "8.55.0" + "@sentry-internal/feedback" "8.55.0" + "@sentry-internal/replay" "8.55.0" + "@sentry-internal/replay-canvas" "8.55.0" + "@sentry/core" "8.55.0" + +"@sentry/bundler-plugin-core@2.23.0": + version "2.23.0" + resolved "https://registry.npmmirror.com/@sentry/bundler-plugin-core/-/bundler-plugin-core-2.23.0.tgz#5b3e919766917cda01bd640a2671e7ff2f8a613d" + integrity sha512-Qbw+jZFK63w+V193l0eCFKLzGba2Iu93Fx8kCRzZ3uqjky002H8U3pu4mKgcc11J+u8QTjfNZGUyXsxz0jv2mg== + dependencies: + "@babel/core" "^7.18.5" + "@sentry/babel-plugin-component-annotate" "2.23.0" + "@sentry/cli" "2.39.1" + dotenv "^16.3.1" + find-up "^5.0.0" + glob "^9.3.2" + magic-string "0.30.8" + unplugin "1.0.1" + +"@sentry/cli-darwin@2.39.1": + version "2.39.1" + resolved "https://registry.npmmirror.com/@sentry/cli-darwin/-/cli-darwin-2.39.1.tgz#75c338a53834b4cf72f57599f4c72ffb36cf0781" + integrity sha512-kiNGNSAkg46LNGatfNH5tfsmI/kCAaPA62KQuFZloZiemTNzhy9/6NJP8HZ/GxGs8GDMxic6wNrV9CkVEgFLJQ== + +"@sentry/cli-linux-arm64@2.39.1": + version "2.39.1" + resolved "https://registry.npmmirror.com/@sentry/cli-linux-arm64/-/cli-linux-arm64-2.39.1.tgz#27db44700c33fcb1e8966257020b43f8494373e6" + integrity sha512-5VbVJDatolDrWOgaffsEM7znjs0cR8bHt9Bq0mStM3tBolgAeSDHE89NgHggfZR+DJ2VWOy4vgCwkObrUD6NQw== + +"@sentry/cli-linux-arm@2.39.1": + version "2.39.1" + resolved "https://registry.npmmirror.com/@sentry/cli-linux-arm/-/cli-linux-arm-2.39.1.tgz#451683fa9a5a60b1359d104ec71334ed16f4b63c" + integrity sha512-DkENbxyRxUrfLnJLXTA4s5UL/GoctU5Cm4ER1eB7XN7p9WsamFJd/yf2KpltkjEyiTuplv0yAbdjl1KX3vKmEQ== + +"@sentry/cli-linux-i686@2.39.1": + version "2.39.1" + resolved "https://registry.npmmirror.com/@sentry/cli-linux-i686/-/cli-linux-i686-2.39.1.tgz#9965a81f97a94e8b6d1d15589e43fee158e35201" + integrity sha512-pXWVoKXCRrY7N8vc9H7mETiV9ZCz+zSnX65JQCzZxgYrayQPJTc+NPRnZTdYdk5RlAupXaFicBI2GwOCRqVRkg== + +"@sentry/cli-linux-x64@2.39.1": + version "2.39.1" + resolved "https://registry.npmmirror.com/@sentry/cli-linux-x64/-/cli-linux-x64-2.39.1.tgz#31fe008b02f92769543dc9919e2a5cbc4cda7889" + integrity sha512-IwayNZy+it7FWG4M9LayyUmG1a/8kT9+/IEm67sT5+7dkMIMcpmHDqL8rWcPojOXuTKaOBBjkVdNMBTXy0mXlA== + +"@sentry/cli-win32-i686@2.39.1": + version "2.39.1" + resolved "https://registry.npmmirror.com/@sentry/cli-win32-i686/-/cli-win32-i686-2.39.1.tgz#609e8790c49414011445e397130560c777850b35" + integrity sha512-NglnNoqHSmE+Dz/wHeIVRnV2bLMx7tIn3IQ8vXGO5HWA2f8zYJGktbkLq1Lg23PaQmeZLPGlja3gBQfZYSG10Q== + +"@sentry/cli-win32-x64@2.39.1": + version "2.39.1" + resolved "https://registry.npmmirror.com/@sentry/cli-win32-x64/-/cli-win32-x64-2.39.1.tgz#1a874a5570c6d162b35d9d001c96e5389d07d2cb" + integrity sha512-xv0R2CMf/X1Fte3cMWie1NXuHmUyQPDBfCyIt6k6RPFPxAYUgcqgMPznYwVMwWEA1W43PaOkSn3d8ZylsDaETw== + +"@sentry/cli@2.39.1": + version "2.39.1" + resolved "https://registry.npmmirror.com/@sentry/cli/-/cli-2.39.1.tgz#916bb5b7567ccf7fdf94ef6cf8a2b9ab78370d29" + integrity 
sha512-JIb3e9vh0+OmQ0KxmexMXg9oZsR/G7HMwxt5BUIKAXZ9m17Xll4ETXTRnRUBT3sf7EpNGAmlQk1xEmVN9pYZYQ== + dependencies: + https-proxy-agent "^5.0.0" + node-fetch "^2.6.7" + progress "^2.0.3" + proxy-from-env "^1.1.0" + which "^2.0.2" + optionalDependencies: + "@sentry/cli-darwin" "2.39.1" + "@sentry/cli-linux-arm" "2.39.1" + "@sentry/cli-linux-arm64" "2.39.1" + "@sentry/cli-linux-i686" "2.39.1" + "@sentry/cli-linux-x64" "2.39.1" + "@sentry/cli-win32-i686" "2.39.1" + "@sentry/cli-win32-x64" "2.39.1" + +"@sentry/core@8.55.0": + version "8.55.0" + resolved "https://registry.npmmirror.com/@sentry/core/-/core-8.55.0.tgz#4964920229fcf649237ef13b1533dfc4b9f6b22e" + integrity sha512-6g7jpbefjHYs821Z+EBJ8r4Z7LT5h80YSWRJaylGS4nW5W5Z2KXzpdnyFarv37O7QjauzVC2E+PABmpkw5/JGA== + +"@sentry/vite-plugin@^2.19.0": + version "2.23.0" + resolved "https://registry.npmmirror.com/@sentry/vite-plugin/-/vite-plugin-2.23.0.tgz#d6cc5ba63174f1413b3420939bd940c44f702d5e" + integrity sha512-iLbqxan3DUkFJqbx7DOtJ2fTd6g+TmNS1PIdaDFfpvVG4Lg9AYp4Xege6BBCrGQYl+wUE3poWfNhASfch/s51Q== + dependencies: + "@sentry/bundler-plugin-core" "2.23.0" + unplugin "1.0.1" + +"@sentry/vue@^8.10.0": + version "8.55.0" + resolved "https://registry.npmmirror.com/@sentry/vue/-/vue-8.55.0.tgz#b2d8011908f8d5928d0d024c1ccd9d5ca862350f" + integrity sha512-J6lcpzL39snV/spoGpwyk5Rp1wSFxOV4qV1NhQ9OlLHORVBp/Xpw7cEA0oKqG2w1wVtCV+gC5Jjf9HTmYiHQOQ== + dependencies: + "@sentry/browser" "8.55.0" + "@sentry/core" "8.55.0" + +"@socket.io/component-emitter@~3.1.0": + version "3.1.2" + resolved "https://registry.npmmirror.com/@socket.io/component-emitter/-/component-emitter-3.1.2.tgz#821f8442f4175d8f0467b9daf26e3a18e2d02af2" + integrity sha512-9BCxFwvbGg/RsZK9tjXd8s4UcwR0MWeFQ1XEKIQVVvAGJyINdrqKMcTRyLoK8Rse1GjzLV9cwjWV1olXRWEXVA== + +"@stripe/stripe-js@^1.3.0": + version "1.54.2" + resolved "https://registry.npmmirror.com/@stripe/stripe-js/-/stripe-js-1.54.2.tgz#0665848e22cbda936cfd05256facdfbba121438d" + integrity sha512-R1PwtDvUfs99cAjfuQ/WpwJ3c92+DAMy9xGApjqlWQMj0FKQabUAys2swfTRNzuYAYJh7NqK2dzcYVNkKLEKUg== + +"@swc/helpers@^0.5.0": + version "0.5.15" + resolved "https://registry.npmmirror.com/@swc/helpers/-/helpers-0.5.15.tgz#79efab344c5819ecf83a43f3f9f811fc84b516d7" + integrity sha512-JQ5TuMi45Owi4/BIMAJBoSQoOJu12oOk/gADqlcUL9JEdHB8vyjUSsxqeNXnmXHjYKMi2WcYtezGEEhqUI/E2g== + dependencies: + tslib "^2.8.0" + +"@tailwindcss/container-queries@^0.1.1": + version "0.1.1" + resolved "https://registry.npmmirror.com/@tailwindcss/container-queries/-/container-queries-0.1.1.tgz#9a759ce2cb8736a4c6a0cb93aeb740573a731974" + integrity sha512-p18dswChx6WnTSaJCSGx6lTmrGzNNvm2FtXmiO6AuA1V4U5REyoqwmT6kgAsIMdjo07QdAfYXHJ4hnMtfHzWgA== + +"@tailwindcss/forms@^0.4.0": + version "0.4.1" + resolved "https://registry.npmmirror.com/@tailwindcss/forms/-/forms-0.4.1.tgz#5a47ccd60490cbba84e662f2b9cf3d71a5126d17" + integrity sha512-gS9xjCmJjUBz/eP12QlENPLnf0tCx68oYE3mri0GMP5jdtVwLbGUNSRpjsp6NzLAZzZy3ueOwrcqB78Ax6Z84A== + dependencies: + mini-svg-data-uri "^1.2.3" + +"@tailwindcss/forms@^0.5.3": + version "0.5.10" + resolved "https://registry.npmmirror.com/@tailwindcss/forms/-/forms-0.5.10.tgz#0a1cd67b6933402f1985a04595bd24f9785aa302" + integrity sha512-utI1ONF6uf/pPNO68kmN1b8rEwNXv3czukalo8VtJH8ksIkZXr3Q3VYudZLkCsDd4Wku120uF02hYK25XGPorw== + dependencies: + mini-svg-data-uri "^1.2.3" + +"@tailwindcss/postcss7-compat@^2.0.2": + version "2.2.17" + resolved "https://registry.npmmirror.com/@tailwindcss/postcss7-compat/-/postcss7-compat-2.2.17.tgz#dc78f3880a2af84163150ff426a39e42b9ae8922" + integrity 
sha512-3h2svqQAqYHxRZ1KjsJjZOVTQ04m29LjfrLjXyZZEJuvUuJN+BCIF9GI8vhE1s0plS0mogd6E6YLg6mu4Wv/Vw== + dependencies: + arg "^5.0.1" + autoprefixer "^9" + bytes "^3.0.0" + chalk "^4.1.2" + chokidar "^3.5.2" + color "^4.0.1" + cosmiconfig "^7.0.1" + detective "^5.2.0" + didyoumean "^1.2.2" + dlv "^1.1.3" + fast-glob "^3.2.7" + fs-extra "^10.0.0" + glob-parent "^6.0.1" + html-tags "^3.1.0" + is-color-stop "^1.1.0" + is-glob "^4.0.1" + lodash "^4.17.21" + lodash.topath "^4.5.2" + modern-normalize "^1.1.0" + node-emoji "^1.11.0" + normalize-path "^3.0.0" + object-hash "^2.2.0" + postcss "^7" + postcss-functions "^3" + postcss-js "^2" + postcss-load-config "^3.1.0" + postcss-nested "^4" + postcss-selector-parser "^6.0.6" + postcss-value-parser "^4.1.0" + pretty-hrtime "^1.0.3" + purgecss "^4.0.3" + quick-lru "^5.1.1" + reduce-css-calc "^2.1.8" + resolve "^1.20.0" + tmp "^0.2.1" + +"@tailwindcss/typography@^0.5.0", "@tailwindcss/typography@^0.5.1": + version "0.5.16" + resolved "https://registry.npmmirror.com/@tailwindcss/typography/-/typography-0.5.16.tgz#a926c8f44d5c439b2915e231cad80058850047c6" + integrity sha512-0wDLwCVF5V3x3b1SGXPCDcdsbDHMBe+lkFzBRaHeLvNi+nrrnZ1lA18u+OTWO8iSWU2GxUOCvlXtDuqftc1oiA== + dependencies: + lodash.castarray "^4.4.0" + lodash.isplainobject "^4.0.6" + lodash.merge "^4.6.2" + postcss-selector-parser "6.0.10" + +"@tanstack/table-core@8.21.2": + version "8.21.2" + resolved "https://registry.npmmirror.com/@tanstack/table-core/-/table-core-8.21.2.tgz#dd57595a1773652bb6fb437e90a5f5386a49fd7e" + integrity sha512-uvXk/U4cBiFMxt+p9/G7yUWI/UbHYbyghLCjlpWZ3mLeIZiUBSKcUnw9UnKkdRz7Z/N4UBuFLWQdJCjUe7HjvA== + +"@tanstack/virtual-core@3.13.6": + version "3.13.6" + resolved "https://registry.npmmirror.com/@tanstack/virtual-core/-/virtual-core-3.13.6.tgz#329f962f1596b3280736c266a982897ed2112157" + integrity sha512-cnQUeWnhNP8tJ4WsGcYiX24Gjkc9ALstLbHcBj1t3E7EimN6n6kHH+DPV4PpDnuw00NApQp+ViojMj1GRdwYQg== + +"@tanstack/vue-table@^8.20.5": + version "8.21.2" + resolved "https://registry.npmmirror.com/@tanstack/vue-table/-/vue-table-8.21.2.tgz#1c9f0dcf8767455f63f29aa2332109af71af3c1d" + integrity sha512-KBgOWxha/x4m1EdhVWxOpqHb661UjqAxzPcmXR3QiA7aShZ547x19Gw0UJX9we+m+tVcPuLRZ61JsYW47QZFfQ== + dependencies: + "@tanstack/table-core" "8.21.2" + +"@tanstack/vue-virtual@^3.0.0-beta.60", "@tanstack/vue-virtual@^3.8.1": + version "3.13.6" + resolved "https://registry.npmmirror.com/@tanstack/vue-virtual/-/vue-virtual-3.13.6.tgz#4857c3539bf838d977b3a04bf157d60f569338a8" + integrity sha512-GYdZ3SJBQPzgxhuCE2fvpiH46qzHiVx5XzBSdtESgiqh4poj8UgckjGWYEhxaBbcVt1oLzh1m3Ql4TyH32TOzQ== + dependencies: + "@tanstack/virtual-core" "3.13.6" + +"@tiptap/core@^2.11.7": + version "2.11.7" + resolved "https://registry.npmmirror.com/@tiptap/core/-/core-2.11.7.tgz#38600a7dabc42ea84e8dfb7a74c19df10db95d14" + integrity sha512-zN+NFFxLsxNEL8Qioc+DL6b8+Tt2bmRbXH22Gk6F6nD30x83eaUSFlSv3wqvgyCq3I1i1NO394So+Agmayx6rQ== + +"@tiptap/extension-blockquote@^2.11.7": + version "2.11.7" + resolved "https://registry.npmmirror.com/@tiptap/extension-blockquote/-/extension-blockquote-2.11.7.tgz#ccbabedd2e19581424730613f00e971b527198ee" + integrity sha512-liD8kWowl3CcYCG9JQlVx1eSNc/aHlt6JpVsuWvzq6J8APWX693i3+zFqyK2eCDn0k+vW62muhSBe3u09hA3Zw== + +"@tiptap/extension-bold@^2.11.7": + version "2.11.7" + resolved "https://registry.npmmirror.com/@tiptap/extension-bold/-/extension-bold-2.11.7.tgz#de45d9ab47b7342d83bb38823e57d1d4de3f3ae0" + integrity sha512-VTR3JlldBixXbjpLTFme/Bxf1xeUgZZY3LTlt5JDlCW3CxO7k05CIa+kEZ8LXpog5annytZDUVtWqxrNjmsuHQ== + 
+"@tiptap/extension-bubble-menu@^2.11.7": + version "2.11.7" + resolved "https://registry.npmmirror.com/@tiptap/extension-bubble-menu/-/extension-bubble-menu-2.11.7.tgz#12f4d10e340cbd32f3319bda18d2518aa92fd02e" + integrity sha512-0vYqSUSSap3kk3/VT4tFE1/6StX70I3/NKQ4J68ZSFgkgyB3ZVlYv7/dY3AkEukjsEp3yN7m8Gw8ei2eEwyzwg== + dependencies: + tippy.js "^6.3.7" + +"@tiptap/extension-bullet-list@^2.11.7": + version "2.11.7" + resolved "https://registry.npmmirror.com/@tiptap/extension-bullet-list/-/extension-bullet-list-2.11.7.tgz#33d965074108ea8dd1ba558b525bb25e12e54876" + integrity sha512-WbPogE2/Q3e3/QYgbT1Sj4KQUfGAJNc5pvb7GrUbvRQsAh7HhtuO8hqdDwH8dEdD/cNUehgt17TO7u8qV6qeBw== + +"@tiptap/extension-code-block@^2.11.7": + version "2.11.7" + resolved "https://registry.npmmirror.com/@tiptap/extension-code-block/-/extension-code-block-2.11.7.tgz#1aaaadd231317a0e1277aa987854a077cb83f0f7" + integrity sha512-To/y/2H04VWqiANy53aXjV7S6fA86c2759RsH1hTIe57jA1KyE7I5tlAofljOLZK/covkGmPeBddSPHGJbz++Q== + +"@tiptap/extension-code@^2.11.7": + version "2.11.7" + resolved "https://registry.npmmirror.com/@tiptap/extension-code/-/extension-code-2.11.7.tgz#d9cb080992080d3a576482d8beee280d29d8a9df" + integrity sha512-VpPO1Uy/eF4hYOpohS/yMOcE1C07xmMj0/D989D9aS1x95jWwUVrSkwC+PlWMUBx9PbY2NRsg1ZDwVvlNKZ6yQ== + +"@tiptap/extension-color@^2.0.3": + version "2.11.7" + resolved "https://registry.npmmirror.com/@tiptap/extension-color/-/extension-color-2.11.7.tgz#c5222456d2f017391707fe86d4f7f811faa60968" + integrity sha512-2CWb0Qnh8Crf9OwnnWB+M1QJtWrbn6IMSwuOzk+tSzdWSazjN8h6XAZVemr0qMdAA/SyUigzorStiPxN6o3/vQ== + +"@tiptap/extension-document@^2.11.7": + version "2.11.7" + resolved "https://registry.npmmirror.com/@tiptap/extension-document/-/extension-document-2.11.7.tgz#07e3fcf42069fda02b63758b2f236822a7a14acc" + integrity sha512-95ouJXPjdAm9+VBRgFo4lhDoMcHovyl/awORDI8gyEn0Rdglt+ZRZYoySFzbVzer9h0cre+QdIwr9AIzFFbfdA== + +"@tiptap/extension-dropcursor@^2.11.7": + version "2.11.7" + resolved "https://registry.npmmirror.com/@tiptap/extension-dropcursor/-/extension-dropcursor-2.11.7.tgz#389229b91ccce4d8ab2139acd9dafadbfedac7c0" + integrity sha512-63mL+nxQILizsr5NbmgDeOjFEWi34BLt7evwL6UUZEVM15K8V1G8pD9Y0kCXrZYpHWz0tqFRXdrhDz0Ppu8oVw== + +"@tiptap/extension-floating-menu@^2.11.7": + version "2.11.7" + resolved "https://registry.npmmirror.com/@tiptap/extension-floating-menu/-/extension-floating-menu-2.11.7.tgz#cf674103730391dabd947c05c38f4e4b73d59842" + integrity sha512-DG54WoUu2vxHRVzKZiR5I5RMOYj45IlxQMkBAx1wjS0ch41W8DUYEeipvMMjCeKtEI+emz03xYUcOAP9LRmg+w== + dependencies: + tippy.js "^6.3.7" + +"@tiptap/extension-gapcursor@^2.11.7": + version "2.11.7" + resolved "https://registry.npmmirror.com/@tiptap/extension-gapcursor/-/extension-gapcursor-2.11.7.tgz#82110b499ba7ae9a76cd57f42d535af3989abd98" + integrity sha512-EceesmPG7FyjXZ8EgeJPUov9G1mAf2AwdypxBNH275g6xd5dmU/KvjoFZjmQ0X1ve7mS+wNupVlGxAEUYoveew== + +"@tiptap/extension-hard-break@^2.11.7": + version "2.11.7" + resolved "https://registry.npmmirror.com/@tiptap/extension-hard-break/-/extension-hard-break-2.11.7.tgz#6344893e9ac938684cecabad4ea037cc729f6cd7" + integrity sha512-zTkZSA6q+F5sLOdCkiC2+RqJQN0zdsJqvFIOVFL/IDVOnq6PZO5THzwRRLvOSnJJl3edRQCl/hUgS0L5sTInGQ== + +"@tiptap/extension-heading@^2.11.7": + version "2.11.7" + resolved "https://registry.npmmirror.com/@tiptap/extension-heading/-/extension-heading-2.11.7.tgz#0e29db2a5dcd3456c7ab1a40817b56b926a073ec" + integrity sha512-8kWh7y4Rd2fwxfWOhFFWncHdkDkMC1Z60yzIZWjIu72+6yQxvo8w3yeb7LI7jER4kffbMmadgcfhCHC/fkObBA== + 
+"@tiptap/extension-highlight@^2.0.3": + version "2.11.7" + resolved "https://registry.npmmirror.com/@tiptap/extension-highlight/-/extension-highlight-2.11.7.tgz#2c92a6b8de96edfbdb2d3cdbd9eb8cf9c8011da8" + integrity sha512-c/NH4kIpNOWCUQv8RkFNDyOcgt+2pYFpDf0QBJmzhAuv4BIeS2bDmDtuNS7VgoWRZH+xxCNXfvm2BG+kjtipEg== + +"@tiptap/extension-history@^2.11.7": + version "2.11.7" + resolved "https://registry.npmmirror.com/@tiptap/extension-history/-/extension-history-2.11.7.tgz#428d3a4e11f1c261ec34b68c2d3d84f1377ed81e" + integrity sha512-Cu5x3aS13I040QSRoLdd+w09G4OCVfU+azpUqxufZxeNs9BIJC+0jowPLeOxKDh6D5GGT2A8sQtxc6a/ssbs8g== + +"@tiptap/extension-horizontal-rule@^2.11.7": + version "2.11.7" + resolved "https://registry.npmmirror.com/@tiptap/extension-horizontal-rule/-/extension-horizontal-rule-2.11.7.tgz#36e33184064f844aeb3950011c346643926e682b" + integrity sha512-uVmQwD2dzZ5xwmvUlciy0ItxOdOfQjH6VLmu80zyJf8Yu7mvwP8JyxoXUX0vd1xHpwAhgQ9/ozjIWYGIw79DPQ== + +"@tiptap/extension-image@^2.0.3": + version "2.11.7" + resolved "https://registry.npmmirror.com/@tiptap/extension-image/-/extension-image-2.11.7.tgz#999e309cf769c5730727551c8563793b690c3af6" + integrity sha512-YvCmTDB7Oo+A56tR4S/gcNaYpqU4DDlSQcRp5IQvmQV5EekSe0lnEazGDoqOCwsit9qQhj4MPQJhKrnaWrJUrg== + +"@tiptap/extension-italic@^2.11.7": + version "2.11.7" + resolved "https://registry.npmmirror.com/@tiptap/extension-italic/-/extension-italic-2.11.7.tgz#f3830c6ec8c7a12e20c64f9b7f100eff1a5382d9" + integrity sha512-r985bkQfG0HMpmCU0X0p/Xe7U1qgRm2mxvcp6iPCuts2FqxaCoyfNZ8YnMsgVK1mRhM7+CQ5SEg2NOmQNtHvPw== + +"@tiptap/extension-link@^2.0.3": + version "2.11.7" + resolved "https://registry.npmmirror.com/@tiptap/extension-link/-/extension-link-2.11.7.tgz#315588c536a7effe0fa2470c458a9734021a0b88" + integrity sha512-qKIowE73aAUrnQCIifYP34xXOHOsZw46cT/LBDlb0T60knVfQoKVE4ku08fJzAV+s6zqgsaaZ4HVOXkQYLoW7g== + dependencies: + linkifyjs "^4.2.0" + +"@tiptap/extension-list-item@^2.11.7": + version "2.11.7" + resolved "https://registry.npmmirror.com/@tiptap/extension-list-item/-/extension-list-item-2.11.7.tgz#06d9ea69dadaa09d260cfde81d5c77b56d8b0937" + integrity sha512-6ikh7Y+qAbkSuIHXPIINqfzmWs5uIGrylihdZ9adaIyvrN1KSnWIqrZIk/NcZTg5YFIJlXrnGSRSjb/QM3WUhw== + +"@tiptap/extension-mention@^2.0.3": + version "2.11.7" + resolved "https://registry.npmmirror.com/@tiptap/extension-mention/-/extension-mention-2.11.7.tgz#fbd6d73902caf49c9021dd36ab9066ee6b846625" + integrity sha512-Q/fkceDOug4VjiqrCRLzBnOL9Oj+XugWwDgwfucJJMBOJxZ3++3eZGZ54dri/xK39A4ZD+xuMBF7PrJIy+Z5dw== + +"@tiptap/extension-ordered-list@^2.11.7": + version "2.11.7" + resolved "https://registry.npmmirror.com/@tiptap/extension-ordered-list/-/extension-ordered-list-2.11.7.tgz#3310176a22ed6010b646ccf9c4fe6c25d05553dc" + integrity sha512-bLGCHDMB0vbJk7uu8bRg8vES3GsvxkX7Cgjgm/6xysHFbK98y0asDtNxkW1VvuRreNGz4tyB6vkcVCfrxl4jKw== + +"@tiptap/extension-paragraph@^2.11.7": + version "2.11.7" + resolved "https://registry.npmmirror.com/@tiptap/extension-paragraph/-/extension-paragraph-2.11.7.tgz#de7fba01ebd35bddf4d6225fff708eb223276bc5" + integrity sha512-Pl3B4q6DJqTvvAdraqZaNP9Hh0UWEHL5nNdxhaRNuhKaUo7lq8wbDSIxIW3lvV0lyCs0NfyunkUvSm1CXb6d4Q== + +"@tiptap/extension-placeholder@^2.0.3": + version "2.11.7" + resolved "https://registry.npmmirror.com/@tiptap/extension-placeholder/-/extension-placeholder-2.11.7.tgz#285de89b0dd49e3f624ac7818c0d7fb4cb0760ac" + integrity sha512-/06zXV4HIjYoiaUq1fVJo/RcU8pHbzx21evOpeG/foCfNpMI4xLU/vnxdUi6/SQqpZMY0eFutDqod1InkSOqsg== + +"@tiptap/extension-strike@^2.11.7": + version "2.11.7" + resolved 
"https://registry.npmmirror.com/@tiptap/extension-strike/-/extension-strike-2.11.7.tgz#311b55f06bb3d1f93c02b809844cc8d27d110422" + integrity sha512-D6GYiW9F24bvAY7XMOARNZbC8YGPzdzWdXd8VOOJABhf4ynMi/oW4NNiko+kZ67jn3EGaKoz32VMJzNQgYi1HA== + +"@tiptap/extension-table-cell@^2.0.3": + version "2.11.7" + resolved "https://registry.npmmirror.com/@tiptap/extension-table-cell/-/extension-table-cell-2.11.7.tgz#8a9b5d224ef1af1c3ee27b4a343719d9f2be4173" + integrity sha512-JMOkSYRckc5SJP86yGGiHzCxCR8ecrRENvTWAKib6qer2tutxs5u42W+Z8uTcHC2dRz7Fv54snOkDoqPwkf6cw== + +"@tiptap/extension-table-header@^2.0.3": + version "2.11.7" + resolved "https://registry.npmmirror.com/@tiptap/extension-table-header/-/extension-table-header-2.11.7.tgz#2ebfba675e1bd6e545a504fb9d830c0a08d87823" + integrity sha512-wPRKpliS5QQXgsp//ZjXrHMdLICMkjg2fUrQinOiBa7wDL5C7Y+SehtuK4s2tjeAkyAdj+nepfftyBRIlUSMXg== + +"@tiptap/extension-table-row@^2.0.3": + version "2.11.7" + resolved "https://registry.npmmirror.com/@tiptap/extension-table-row/-/extension-table-row-2.11.7.tgz#01ca80eca98043858e422f9e50a481d07ab2f75c" + integrity sha512-K254RiXWGXGjz5Cm835hqfQiwnYXm8aw6oOa3isDh4A1B+1Ev4DB2vEDKMrgaOor3nbTsSYmAx2iEMrZSbpaRg== + +"@tiptap/extension-table@^2.0.3": + version "2.11.7" + resolved "https://registry.npmmirror.com/@tiptap/extension-table/-/extension-table-2.11.7.tgz#4a8e477be809c06b43092de7db96fac6c3739ae8" + integrity sha512-rfwWkNXz/EZuhc8lylsCWPbx0Xr5FlIhreWFyeoXYrDEO3x4ytYcVOpNmbabJYP2semfM0PvPR5o84zfFkLZyg== + +"@tiptap/extension-text-align@^2.0.3": + version "2.11.7" + resolved "https://registry.npmmirror.com/@tiptap/extension-text-align/-/extension-text-align-2.11.7.tgz#a8e7058eae4f31b1741c615c383946fad377e7cd" + integrity sha512-3M8zd9ROADXazVNpgR6Ejs1evSvBveN36qN4GgV71GqrNlTcjqYgQcXFLQrsd2hnE+aXir8/8bLJ+aaJXDninA== + +"@tiptap/extension-text-style@^2.0.3", "@tiptap/extension-text-style@^2.11.7": + version "2.11.7" + resolved "https://registry.npmmirror.com/@tiptap/extension-text-style/-/extension-text-style-2.11.7.tgz#1af9c23ed6154680eac1c0621d1112764c3fafb9" + integrity sha512-LHO6DBg/9SkCQFdWlVfw9nolUmw+Cid94WkTY+7IwrpyG2+ZGQxnKpCJCKyeaFNbDoYAtvu0vuTsSXeCkgShcA== + +"@tiptap/extension-text@^2.11.7": + version "2.11.7" + resolved "https://registry.npmmirror.com/@tiptap/extension-text/-/extension-text-2.11.7.tgz#bbd32763a5db1e6c0ad4239233a9f86e27d15179" + integrity sha512-wObCn8qZkIFnXTLvBP+X8KgaEvTap/FJ/i4hBMfHBCKPGDx99KiJU6VIbDXG8d5ZcFZE0tOetK1pP5oI7qgMlQ== + +"@tiptap/extension-typography@^2.0.3": + version "2.11.7" + resolved "https://registry.npmmirror.com/@tiptap/extension-typography/-/extension-typography-2.11.7.tgz#fd28ece04539b044d14763797432f0324b50da79" + integrity sha512-qyYROxuXuMAMw30RXFYjr9mfZv+7avD3BW+fVEIa3lwnUMFNExHj6j2HMgYvrPVByGXlQU/4uHWcB0uiG0Bf1w== + +"@tiptap/pm@^2.0.3", "@tiptap/pm@^2.11.7": + version "2.11.7" + resolved "https://registry.npmmirror.com/@tiptap/pm/-/pm-2.11.7.tgz#34e1dbe1f27ea978bc740c9144ae8195948609e3" + integrity sha512-7gEEfz2Q6bYKXM07vzLUD0vqXFhC5geWRA6LCozTiLdVFDdHWiBrvb2rtkL5T7mfLq03zc1QhH7rI3F6VntOEA== + dependencies: + prosemirror-changeset "^2.2.1" + prosemirror-collab "^1.3.1" + prosemirror-commands "^1.6.2" + prosemirror-dropcursor "^1.8.1" + prosemirror-gapcursor "^1.3.2" + prosemirror-history "^1.4.1" + prosemirror-inputrules "^1.4.0" + prosemirror-keymap "^1.2.2" + prosemirror-markdown "^1.13.1" + prosemirror-menu "^1.2.4" + prosemirror-model "^1.23.0" + prosemirror-schema-basic "^1.2.3" + prosemirror-schema-list "^1.4.1" + prosemirror-state "^1.4.3" + prosemirror-tables 
"^1.6.4" + prosemirror-trailing-node "^3.0.0" + prosemirror-transform "^1.10.2" + prosemirror-view "^1.37.0" + +"@tiptap/starter-kit@^2.0.3": + version "2.11.7" + resolved "https://registry.npmmirror.com/@tiptap/starter-kit/-/starter-kit-2.11.7.tgz#fd7f2c0021d5cd913afe289eb211b196cf509caf" + integrity sha512-K+q51KwNU/l0kqRuV5e1824yOLVftj6kGplGQLvJG56P7Rb2dPbM/JeaDbxQhnHT/KDGamG0s0Po0M3pPY163A== + dependencies: + "@tiptap/core" "^2.11.7" + "@tiptap/extension-blockquote" "^2.11.7" + "@tiptap/extension-bold" "^2.11.7" + "@tiptap/extension-bullet-list" "^2.11.7" + "@tiptap/extension-code" "^2.11.7" + "@tiptap/extension-code-block" "^2.11.7" + "@tiptap/extension-document" "^2.11.7" + "@tiptap/extension-dropcursor" "^2.11.7" + "@tiptap/extension-gapcursor" "^2.11.7" + "@tiptap/extension-hard-break" "^2.11.7" + "@tiptap/extension-heading" "^2.11.7" + "@tiptap/extension-history" "^2.11.7" + "@tiptap/extension-horizontal-rule" "^2.11.7" + "@tiptap/extension-italic" "^2.11.7" + "@tiptap/extension-list-item" "^2.11.7" + "@tiptap/extension-ordered-list" "^2.11.7" + "@tiptap/extension-paragraph" "^2.11.7" + "@tiptap/extension-strike" "^2.11.7" + "@tiptap/extension-text" "^2.11.7" + "@tiptap/extension-text-style" "^2.11.7" + "@tiptap/pm" "^2.11.7" + +"@tiptap/suggestion@^2.0.3": + version "2.11.7" + resolved "https://registry.npmmirror.com/@tiptap/suggestion/-/suggestion-2.11.7.tgz#c94201d7d41b7bbef29e50eb1bf6a810c519cf85" + integrity sha512-I1ckVAEErpErPn/H9ZdDmTb5zuPNPiKj3krxCtJDUU4+3we0cgJY9NQFXl9//mrug3UIngH0ZQO+arbZfIk75A== + +"@tiptap/vue-3@^2.0.3": + version "2.11.7" + resolved "https://registry.npmmirror.com/@tiptap/vue-3/-/vue-3-2.11.7.tgz#b5d3293ce5621876a38e1e22b7403f265a4e508a" + integrity sha512-P4Dyi7Uvi+l2ubsVTibZU3XVLT15eWP0W3mPiQwT0IVI0+FjGyBa83TXgMh5Kb53nxABgIK7FiIMBtQPSkjqfg== + dependencies: + "@tiptap/extension-bubble-menu" "^2.11.7" + "@tiptap/extension-floating-menu" "^2.11.7" + +"@tootallnate/once@2": + version "2.0.0" + resolved "https://registry.npmmirror.com/@tootallnate/once/-/once-2.0.0.tgz#f544a148d3ab35801c1f633a7441fd87c2e484bf" + integrity sha512-XCuKFP5PS55gnMVu3dty8KPatLqUoy/ZYzDzAGCQ8JNFCkLXzmI7vNHCR+XpbZaMWQK/vQubr7PkYq8g470J/A== + +"@types/chai-subset@^1.3.3": + version "1.3.6" + resolved "https://registry.npmmirror.com/@types/chai-subset/-/chai-subset-1.3.6.tgz#fc50f637ebd038ed58700f826d7bab2caa8a8d7e" + integrity sha512-m8lERkkQj+uek18hXOZuec3W/fCRTrU4hrnXjH3qhHy96ytuPaPiWGgu7sJb7tZxZonO75vYAjCvpe/e4VUwRw== + +"@types/chai@^4.3.1": + version "4.3.20" + resolved "https://registry.npmmirror.com/@types/chai/-/chai-4.3.20.tgz#cb291577ed342ca92600430841a00329ba05cecc" + integrity sha512-/pC9HAB5I/xMlc5FP77qjCnI16ChlJfW0tGa0IUcFn38VJrTV6DeZ60NU5KZBtaOZqjdpwTWohz5HU1RrhiYxQ== + +"@types/cookie@^0.4.1": + version "0.4.1" + resolved "https://registry.npmmirror.com/@types/cookie/-/cookie-0.4.1.tgz#bfd02c1f2224567676c1545199f87c3a861d878d" + integrity sha512-XW/Aa8APYr6jSVVA1y/DEIZX0/GMKLEVekNG727R8cs56ahETkRAy/3DR7+fJyh7oUgGwNQaRfXCun0+KbWY7Q== + +"@types/estree@1.0.7", "@types/estree@^1.0.0": + version "1.0.7" + resolved "https://registry.npmmirror.com/@types/estree/-/estree-1.0.7.tgz#4158d3105276773d5b7695cd4834b1722e4f37a8" + integrity sha512-w28IoSUCJpidD/TGviZwwMJckNESJZXFu7NBZ5YJ4mEUnNraUn9Pm8HSZm/jDF1pDWYKspWE7oVphigUPRakIQ== + +"@types/glob@^7.1.1": + version "7.2.0" + resolved "https://registry.npmmirror.com/@types/glob/-/glob-7.2.0.tgz#bc1b5bf3aa92f25bd5dd39f35c57361bdce5b2eb" + integrity 
sha512-ZUxbzKl0IfJILTS6t7ip5fQQM/J3TJYubDm3nMbgubNNYS62eXeUpoLUC8/7fJNiFYHTrGPQn7hspDUzIHX3UA== + dependencies: + "@types/minimatch" "*" + "@types/node" "*" + +"@types/inquirer@^8.1.3": + version "8.2.10" + resolved "https://registry.npmmirror.com/@types/inquirer/-/inquirer-8.2.10.tgz#9444dce2d764c35bc5bb4d742598aaa4acb6561b" + integrity sha512-IdD5NmHyVjWM8SHWo/kPBgtzXatwPkfwzyP3fN1jF2g9BWt5WO+8hL2F4o2GKIYsU40PpqeevuUWvkS/roXJkA== + dependencies: + "@types/through" "*" + rxjs "^7.2.0" + +"@types/istanbul-lib-coverage@^2.0.1": + version "2.0.6" + resolved "https://registry.npmmirror.com/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.6.tgz#7739c232a1fee9b4d3ce8985f314c0c6d33549d7" + integrity sha512-2QF/t/auWm0lsy8XtKVPG19v3sSOQlJe/YHZgfjb/KBBHOGSV+J2q/S671rcq9uTBrLAXmZpqJiaQbMT+zNU1w== + +"@types/js-levenshtein@^1.1.0": + version "1.1.3" + resolved "https://registry.npmmirror.com/@types/js-levenshtein/-/js-levenshtein-1.1.3.tgz#a6fd0bdc8255b274e5438e0bfb25f154492d1106" + integrity sha512-jd+Q+sD20Qfu9e2aEXogiO3vpOC1PYJOUdyN9gvs4Qrvkg4wF43L5OhqrPeokdv8TL0/mXoYfpkcoGZMNN2pkQ== + +"@types/linkify-it@^5": + version "5.0.0" + resolved "https://registry.npmmirror.com/@types/linkify-it/-/linkify-it-5.0.0.tgz#21413001973106cda1c3a9b91eedd4ccd5469d76" + integrity sha512-sVDA58zAw4eWAffKOaQH5/5j3XeayukzDk+ewSsnv3p4yJEZHCCzMDiZM8e0OUrRvmpGZ85jf4yDHkHsgBNr9Q== + +"@types/markdown-it@^14.0.0": + version "14.1.2" + resolved "https://registry.npmmirror.com/@types/markdown-it/-/markdown-it-14.1.2.tgz#57f2532a0800067d9b934f3521429a2e8bfb4c61" + integrity sha512-promo4eFwuiW+TfGxhi+0x3czqTYJkG8qB17ZUJiVF10Xm7NLVRSLUsfRTU/6h1e24VvRnXCx+hG7li58lkzog== + dependencies: + "@types/linkify-it" "^5" + "@types/mdurl" "^2" + +"@types/mdurl@^2": + version "2.0.0" + resolved "https://registry.npmmirror.com/@types/mdurl/-/mdurl-2.0.0.tgz#d43878b5b20222682163ae6f897b20447233bdfd" + integrity sha512-RGdgjQUZba5p6QEFAVx2OGb8rQDL/cPRG7GiedRzMcJ1tYnUANBncjbSB1NRGwbvjcPeikRABz2nshyPk1bhWg== + +"@types/minimatch@*": + version "5.1.2" + resolved "https://registry.npmmirror.com/@types/minimatch/-/minimatch-5.1.2.tgz#07508b45797cb81ec3f273011b054cd0755eddca" + integrity sha512-K0VQKziLUWkVKiRVrx4a40iPaxTUefQmjtkQofBkYRcoaaL/8rhwDWww9qWbrgicNOgnpIsMxyNIUM4+n6dUIA== + +"@types/node@*": + version "22.14.0" + resolved "https://registry.npmmirror.com/@types/node/-/node-22.14.0.tgz#d3bfa3936fef0dbacd79ea3eb17d521c628bb47e" + integrity sha512-Kmpl+z84ILoG+3T/zQFyAJsU6EPTmOCj8/2+83fSN6djd6I4o7uOuGIH6vq3PrjY5BGitSbFuMN18j3iknubbA== + dependencies: + undici-types "~6.21.0" + +"@types/parse-json@^4.0.0": + version "4.0.2" + resolved "https://registry.npmmirror.com/@types/parse-json/-/parse-json-4.0.2.tgz#5950e50960793055845e956c427fc2b0d70c5239" + integrity sha512-dISoDXWWQwUquiKsyZ4Ng+HX2KsPL7LyHKHQwgGFEA3IaKac4Obd+h2a/a6waisAoepJlBcx9paWqjA8/HVjCw== + +"@types/set-cookie-parser@^2.4.0": + version "2.4.10" + resolved "https://registry.npmmirror.com/@types/set-cookie-parser/-/set-cookie-parser-2.4.10.tgz#ad3a807d6d921db9720621ea3374c5d92020bcbc" + integrity sha512-GGmQVGpQWUe5qglJozEjZV/5dyxbOOZ0LHe/lqyWssB88Y4svNfst0uqBVscdDeIKl5Jy5+aPSvy7mI9tYRguw== + dependencies: + "@types/node" "*" + +"@types/through@*": + version "0.0.33" + resolved "https://registry.npmmirror.com/@types/through/-/through-0.0.33.tgz#14ebf599320e1c7851e7d598149af183c6b9ea56" + integrity sha512-HsJ+z3QuETzP3cswwtzt2vEIiHBk/dCcHGhbmG5X3ecnwFD/lPrMpliGXxSCg03L9AhrdwA4Oz/qfspkDW+xGQ== + dependencies: + "@types/node" "*" + +"@types/web-bluetooth@^0.0.20": + 
version "0.0.20" + resolved "https://registry.npmmirror.com/@types/web-bluetooth/-/web-bluetooth-0.0.20.tgz#f066abfcd1cbe66267cdbbf0de010d8a41b41597" + integrity sha512-g9gZnnXVq7gM7v3tJCWV/qw7w+KeOlSHAhgF9RytFyifW6AF61hdT2ucrYhPq9hLs5JIryeupHV3qGk95dH9ow== + +"@vitejs/plugin-legacy@^4.1.1": + version "4.1.1" + resolved "https://registry.npmmirror.com/@vitejs/plugin-legacy/-/plugin-legacy-4.1.1.tgz#27aad7ab082d2d554cf48a8e4f1c0ae9f3eca522" + integrity sha512-um3gbVouD2Q/g19C0qpDfHwveXDCAHzs8OC3e9g6aXpKoD1H14himgs7wkMnhAynBJy7QqUoZNAXDuqN8zLR2g== + dependencies: + "@babel/core" "^7.22.9" + "@babel/preset-env" "^7.22.9" + browserslist "^4.21.9" + core-js "^3.31.1" + magic-string "^0.30.1" + regenerator-runtime "^0.13.11" + systemjs "^6.14.1" + +"@vitejs/plugin-vue-jsx@^3.1.0": + version "3.1.0" + resolved "https://registry.npmmirror.com/@vitejs/plugin-vue-jsx/-/plugin-vue-jsx-3.1.0.tgz#9953fd9456539e1f0f253bf0fcd1289e66c67cd1" + integrity sha512-w9M6F3LSEU5kszVb9An2/MmXNxocAnUb3WhRr8bHlimhDrXNt6n6D2nJQR3UXpGlZHh/EsgouOHCsM8V3Ln+WA== + dependencies: + "@babel/core" "^7.23.3" + "@babel/plugin-transform-typescript" "^7.23.3" + "@vue/babel-plugin-jsx" "^1.1.5" + +"@vitejs/plugin-vue@^5.0.3": + version "5.2.3" + resolved "https://registry.npmmirror.com/@vitejs/plugin-vue/-/plugin-vue-5.2.3.tgz#71a8fc82d4d2e425af304c35bf389506f674d89b" + integrity sha512-IYSLEQj4LgZZuoVpdSUCw3dIynTWQgPlaRP6iAvMle4My0HdYwr5g5wQAfwOeHQBmYwEkqF70nRpSilr6PoUDg== + +"@volar/language-core@2.4.12", "@volar/language-core@~2.4.11": + version "2.4.12" + resolved "https://registry.npmmirror.com/@volar/language-core/-/language-core-2.4.12.tgz#98c8424f8d81a9cad1760a587b1c6db27d05f0cc" + integrity sha512-RLrFdXEaQBWfSnYGVxvR2WrO6Bub0unkdHYIdC31HzIEqATIuuhRRzYu76iGPZ6OtA4Au1SnW0ZwIqPP217YhA== + dependencies: + "@volar/source-map" "2.4.12" + +"@volar/source-map@2.4.12": + version "2.4.12" + resolved "https://registry.npmmirror.com/@volar/source-map/-/source-map-2.4.12.tgz#7cc8c6b1b134a2215f06c91ad011d94eef81b0ed" + integrity sha512-bUFIKvn2U0AWojOaqf63ER0N/iHIBYZPpNGogfLPQ68F5Eet6FnLlyho7BS0y2HJ1jFhSif7AcuTx1TqsCzRzw== + +"@volar/typescript@~2.4.11": + version "2.4.12" + resolved "https://registry.npmmirror.com/@volar/typescript/-/typescript-2.4.12.tgz#8c638c23cab89ab131cdcd2d6f2a51768caaa015" + integrity sha512-HJB73OTJDgPc80K30wxi3if4fSsZZAOScbj2fcicMuOPoOkcf9NNAINb33o+DzhBdF9xTKC1gnPmIRDous5S0g== + dependencies: + "@volar/language-core" "2.4.12" + path-browserify "^1.0.1" + vscode-uri "^3.0.8" + +"@vue/babel-helper-vue-transform-on@1.4.0": + version "1.4.0" + resolved "https://registry.npmmirror.com/@vue/babel-helper-vue-transform-on/-/babel-helper-vue-transform-on-1.4.0.tgz#616020488692a9c42a613280d62ed1b727045d95" + integrity sha512-mCokbouEQ/ocRce/FpKCRItGo+013tHg7tixg3DUNS+6bmIchPt66012kBMm476vyEIJPafrvOf4E5OYj3shSw== + +"@vue/babel-plugin-jsx@^1.1.5": + version "1.4.0" + resolved "https://registry.npmmirror.com/@vue/babel-plugin-jsx/-/babel-plugin-jsx-1.4.0.tgz#c155c795ce980edf46aa6feceed93945a95ca658" + integrity sha512-9zAHmwgMWlaN6qRKdrg1uKsBKHvnUU+Py+MOCTuYZBoZsopa90Di10QRjB+YPnVss0BZbG/H5XFwJY1fTxJWhA== + dependencies: + "@babel/helper-module-imports" "^7.25.9" + "@babel/helper-plugin-utils" "^7.26.5" + "@babel/plugin-syntax-jsx" "^7.25.9" + "@babel/template" "^7.26.9" + "@babel/traverse" "^7.26.9" + "@babel/types" "^7.26.9" + "@vue/babel-helper-vue-transform-on" "1.4.0" + "@vue/babel-plugin-resolve-type" "1.4.0" + "@vue/shared" "^3.5.13" + +"@vue/babel-plugin-resolve-type@1.4.0": + version "1.4.0" + resolved 
"https://registry.npmmirror.com/@vue/babel-plugin-resolve-type/-/babel-plugin-resolve-type-1.4.0.tgz#4d357a81fb0cc9cad0e8c81b118115bda2c51543" + integrity sha512-4xqDRRbQQEWHQyjlYSgZsWj44KfiF6D+ktCuXyZ8EnVDYV3pztmXJDf1HveAjUAXxAnR8daCQT51RneWWxtTyQ== + dependencies: + "@babel/code-frame" "^7.26.2" + "@babel/helper-module-imports" "^7.25.9" + "@babel/helper-plugin-utils" "^7.26.5" + "@babel/parser" "^7.26.9" + "@vue/compiler-sfc" "^3.5.13" + +"@vue/compiler-core@3.5.13": + version "3.5.13" + resolved "https://registry.npmmirror.com/@vue/compiler-core/-/compiler-core-3.5.13.tgz#b0ae6c4347f60c03e849a05d34e5bf747c9bda05" + integrity sha512-oOdAkwqUfW1WqpwSYJce06wvt6HljgY3fGeM9NcVA1HaYOij3mZG9Rkysn0OHuyUAGMbEbARIpsG+LPVlBJ5/Q== + dependencies: + "@babel/parser" "^7.25.3" + "@vue/shared" "3.5.13" + entities "^4.5.0" + estree-walker "^2.0.2" + source-map-js "^1.2.0" + +"@vue/compiler-dom@3.5.13", "@vue/compiler-dom@^3.5.0": + version "3.5.13" + resolved "https://registry.npmmirror.com/@vue/compiler-dom/-/compiler-dom-3.5.13.tgz#bb1b8758dbc542b3658dda973b98a1c9311a8a58" + integrity sha512-ZOJ46sMOKUjO3e94wPdCzQ6P1Lx/vhp2RSvfaab88Ajexs0AHeV0uasYhi99WPaogmBlRHNRuly8xV75cNTMDA== + dependencies: + "@vue/compiler-core" "3.5.13" + "@vue/shared" "3.5.13" + +"@vue/compiler-sfc@3.5.13", "@vue/compiler-sfc@^3.1.0", "@vue/compiler-sfc@^3.5.13": + version "3.5.13" + resolved "https://registry.npmmirror.com/@vue/compiler-sfc/-/compiler-sfc-3.5.13.tgz#461f8bd343b5c06fac4189c4fef8af32dea82b46" + integrity sha512-6VdaljMpD82w6c2749Zhf5T9u5uLBWKnVue6XWxprDobftnletJ8+oel7sexFfM3qIxNmVE7LSFGTpv6obNyaQ== + dependencies: + "@babel/parser" "^7.25.3" + "@vue/compiler-core" "3.5.13" + "@vue/compiler-dom" "3.5.13" + "@vue/compiler-ssr" "3.5.13" + "@vue/shared" "3.5.13" + estree-walker "^2.0.2" + magic-string "^0.30.11" + postcss "^8.4.48" + source-map-js "^1.2.0" + +"@vue/compiler-ssr@3.5.13": + version "3.5.13" + resolved "https://registry.npmmirror.com/@vue/compiler-ssr/-/compiler-ssr-3.5.13.tgz#e771adcca6d3d000f91a4277c972a996d07f43ba" + integrity sha512-wMH6vrYHxQl/IybKJagqbquvxpWCuVYpoUJfCqFZwa/JY1GdATAQ+TgVtgrwwMZ0D07QhA99rs/EAAWfvG6KpA== + dependencies: + "@vue/compiler-dom" "3.5.13" + "@vue/shared" "3.5.13" + +"@vue/compiler-vue2@^2.7.16": + version "2.7.16" + resolved "https://registry.npmmirror.com/@vue/compiler-vue2/-/compiler-vue2-2.7.16.tgz#2ba837cbd3f1b33c2bc865fbe1a3b53fb611e249" + integrity sha512-qYC3Psj9S/mfu9uVi5WvNZIzq+xnXMhOwbTFKKDD7b1lhpnn71jXSFdTQ+WsIEk0ONCd7VV2IMm7ONl6tbQ86A== + dependencies: + de-indent "^1.0.2" + he "^1.2.0" + +"@vue/devtools-api@^6.6.4": + version "6.6.4" + resolved "https://registry.npmmirror.com/@vue/devtools-api/-/devtools-api-6.6.4.tgz#cbe97fe0162b365edc1dba80e173f90492535343" + integrity sha512-sGhTPMuXqZ1rVOk32RylztWkfXTRhuS7vgAKv0zjqk8gbsHkJ7xfFf+jbySxt7tWObEJwyKaHMikV/WGDiQm8g== + +"@vue/eslint-config-prettier@^6.0.0": + version "6.0.0" + resolved "https://registry.npmmirror.com/@vue/eslint-config-prettier/-/eslint-config-prettier-6.0.0.tgz#ad5912b308f4ae468458e02a2b05db0b9d246700" + integrity sha512-wFQmv45c3ige5EA+ngijq40YpVcIkAy0Lihupnsnd1Dao5CBbPyfCzqtejFLZX1EwH/kCJdpz3t6s+5wd3+KxQ== + dependencies: + eslint-config-prettier "^6.0.0" + +"@vue/language-core@2.2.8": + version "2.2.8" + resolved "https://registry.npmmirror.com/@vue/language-core/-/language-core-2.2.8.tgz#05befa390399fbd4409bc703ee0520b8ac1b7088" + integrity sha512-rrzB0wPGBvcwaSNRriVWdNAbHQWSf0NlGqgKHK5mEkXpefjUlVRP62u03KvwZpvKVjRnBIQ/Lwre+Mx9N6juUQ== + dependencies: + "@volar/language-core" "~2.4.11" + 
"@vue/compiler-dom" "^3.5.0" + "@vue/compiler-vue2" "^2.7.16" + "@vue/shared" "^3.5.0" + alien-signals "^1.0.3" + minimatch "^9.0.3" + muggle-string "^0.4.1" + path-browserify "^1.0.1" + +"@vue/reactivity@3.5.13": + version "3.5.13" + resolved "https://registry.npmmirror.com/@vue/reactivity/-/reactivity-3.5.13.tgz#b41ff2bb865e093899a22219f5b25f97b6fe155f" + integrity sha512-NaCwtw8o48B9I6L1zl2p41OHo/2Z4wqYGGIK1Khu5T7yxrn+ATOixn/Udn2m+6kZKB/J7cuT9DbWWhRxqixACg== + dependencies: + "@vue/shared" "3.5.13" + +"@vue/runtime-core@3.5.13": + version "3.5.13" + resolved "https://registry.npmmirror.com/@vue/runtime-core/-/runtime-core-3.5.13.tgz#1fafa4bf0b97af0ebdd9dbfe98cd630da363a455" + integrity sha512-Fj4YRQ3Az0WTZw1sFe+QDb0aXCerigEpw418pw1HBUKFtnQHWzwojaukAs2X/c9DQz4MQ4bsXTGlcpGxU/RCIw== + dependencies: + "@vue/reactivity" "3.5.13" + "@vue/shared" "3.5.13" + +"@vue/runtime-dom@3.5.13": + version "3.5.13" + resolved "https://registry.npmmirror.com/@vue/runtime-dom/-/runtime-dom-3.5.13.tgz#610fc795de9246300e8ae8865930d534e1246215" + integrity sha512-dLaj94s93NYLqjLiyFzVs9X6dWhTdAlEAciC3Moq7gzAc13VJUdCnjjRurNM6uTLFATRHexHCTu/Xp3eW6yoog== + dependencies: + "@vue/reactivity" "3.5.13" + "@vue/runtime-core" "3.5.13" + "@vue/shared" "3.5.13" + csstype "^3.1.3" + +"@vue/server-renderer@3.5.13": + version "3.5.13" + resolved "https://registry.npmmirror.com/@vue/server-renderer/-/server-renderer-3.5.13.tgz#429ead62ee51de789646c22efe908e489aad46f7" + integrity sha512-wAi4IRJV/2SAW3htkTlB+dHeRmpTiVIK1OGLWV1yeStVSebSQQOwGwIq0D3ZIoBj2C2qpgz5+vX9iEBkTdk5YA== + dependencies: + "@vue/compiler-ssr" "3.5.13" + "@vue/shared" "3.5.13" + +"@vue/shared@3.5.13", "@vue/shared@^3.5.0", "@vue/shared@^3.5.13": + version "3.5.13" + resolved "https://registry.npmmirror.com/@vue/shared/-/shared-3.5.13.tgz#87b309a6379c22b926e696893237826f64339b6f" + integrity sha512-/hnE/qP5ZoGpol0a5mDi45bOd7t3tjYJBjsgCsivow7D48cJeV5l05RD82lPqi7gRiphZM37rnhW1l6ZoCNNnQ== + +"@vue/test-utils@^2.0.0-rc.19": + version "2.4.6" + resolved "https://registry.npmmirror.com/@vue/test-utils/-/test-utils-2.4.6.tgz#7d534e70c4319d2a587d6a3b45a39e9695ade03c" + integrity sha512-FMxEjOpYNYiFe0GkaHsnJPXFHxQ6m4t8vI/ElPGpMWxZKpmRvQ33OIrvRXemy6yha03RxhOlQuy+gZMC3CQSow== + dependencies: + js-beautify "^1.14.9" + vue-component-type-helpers "^2.0.0" + +"@vueuse/components@^10.7.0": + version "10.11.1" + resolved "https://registry.npmmirror.com/@vueuse/components/-/components-10.11.1.tgz#f6ff60769221d94da8c300417765fb0fecf0602a" + integrity sha512-ThcreQCX/eq61sLkLKjigD4PQvs3Wy4zglICvQH9tP6xl87y5KsQEoizn6OI+R3hrOgwQHLJe7Y0wLLh3fBKcg== + dependencies: + "@vueuse/core" "10.11.1" + "@vueuse/shared" "10.11.1" + vue-demi ">=0.14.8" + +"@vueuse/core@10.11.1", "@vueuse/core@^10.11.0", "@vueuse/core@^10.3.0", "@vueuse/core@^10.4.1": + version "10.11.1" + resolved "https://registry.npmmirror.com/@vueuse/core/-/core-10.11.1.tgz#15d2c0b6448d2212235b23a7ba29c27173e0c2c6" + integrity sha512-guoy26JQktXPcz+0n3GukWIy/JDNKti9v6VEMu6kV2sYBsWuGiTU8OWdg+ADfUbHg3/3DlqySDe7JmdHrktiww== + dependencies: + "@types/web-bluetooth" "^0.0.20" + "@vueuse/metadata" "10.11.1" + "@vueuse/shared" "10.11.1" + vue-demi ">=0.14.8" + +"@vueuse/metadata@10.11.1": + version "10.11.1" + resolved "https://registry.npmmirror.com/@vueuse/metadata/-/metadata-10.11.1.tgz#209db7bb5915aa172a87510b6de2ca01cadbd2a7" + integrity sha512-IGa5FXd003Ug1qAZmyE8wF3sJ81xGLSqTqtQ6jaVfkeZ4i5kS2mwQF61yhVqojRnenVew5PldLyRgvdl4YYuSw== + +"@vueuse/shared@10.11.1", "@vueuse/shared@^10.11.0": + version "10.11.1" + resolved 
"https://registry.npmmirror.com/@vueuse/shared/-/shared-10.11.1.tgz#62b84e3118ae6e1f3ff38f4fbe71b0c5d0f10938" + integrity sha512-LHpC8711VFZlDaYUXEBbFBCQ7GS3dVU9mjOhhMhXP6txTV4EhYQg/KGnQuvt/sPAtoUKq7VVUnL6mVtFoL42sA== + dependencies: + vue-demi ">=0.14.8" + +"@xmldom/xmldom@^0.7.2": + version "0.7.13" + resolved "https://registry.npmmirror.com/@xmldom/xmldom/-/xmldom-0.7.13.tgz#ff34942667a4e19a9f4a0996a76814daac364cf3" + integrity sha512-lm2GW5PkosIzccsaZIz7tp8cPADSIlIHWDFTR1N0SzfinhhYgeIQjFMz4rYzanCScr3DqQLeomUDArp6MWKm+g== + +abab@^2.0.5, abab@^2.0.6: + version "2.0.6" + resolved "https://registry.npmmirror.com/abab/-/abab-2.0.6.tgz#41b80f2c871d19686216b82309231cfd3cb3d291" + integrity sha512-j2afSsaIENvHZN2B8GOpF566vZ5WVk5opAiMTvWgaQT8DkbOqsTfvNAvHoRGU2zzP8cPoqys+xHTRDWW8L+/BA== + +abbrev@^2.0.0: + version "2.0.0" + resolved "https://registry.npmmirror.com/abbrev/-/abbrev-2.0.0.tgz#cf59829b8b4f03f89dda2771cb7f3653828c89bf" + integrity sha512-6/mh1E2u2YgEsCHdY0Yx5oW+61gZU+1vXaoiHHrpKeuRNNgFvS+/jrwHiQhB5apAf5oB7UB7E19ol2R2LKH8hQ== + +acorn-globals@^6.0.0: + version "6.0.0" + resolved "https://registry.npmmirror.com/acorn-globals/-/acorn-globals-6.0.0.tgz#46cdd39f0f8ff08a876619b55f5ac8a6dc770b45" + integrity sha512-ZQl7LOWaF5ePqqcX4hLuv/bLXYQNfNWw2c0/yX/TsPRKamzHcTGQnlCjHT3TsmkOUVEPS3crCxiPfdzE/Trlhg== + dependencies: + acorn "^7.1.1" + acorn-walk "^7.1.1" + +acorn-jsx@^5.2.0: + version "5.3.2" + resolved "https://registry.npmmirror.com/acorn-jsx/-/acorn-jsx-5.3.2.tgz#7ed5bb55908b3b2f1bc55c6af1653bada7f07937" + integrity sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ== + +acorn-node@^1.8.2: + version "1.8.2" + resolved "https://registry.npmmirror.com/acorn-node/-/acorn-node-1.8.2.tgz#114c95d64539e53dede23de8b9d96df7c7ae2af8" + integrity sha512-8mt+fslDufLYntIoPAaIMUe/lrbrehIiwmR3t2k9LljIzoigEPF27eLk2hy8zSGzmR/ogr7zbRKINMo1u0yh5A== + dependencies: + acorn "^7.0.0" + acorn-walk "^7.0.0" + xtend "^4.0.2" + +acorn-walk@^7.0.0, acorn-walk@^7.1.1: + version "7.2.0" + resolved "https://registry.npmmirror.com/acorn-walk/-/acorn-walk-7.2.0.tgz#0de889a601203909b0fbe07b8938dc21d2e967bc" + integrity sha512-OPdCF6GsMIP+Az+aWfAAOEt2/+iVDKE7oy6lJ098aoe59oAmK76qV6Gw60SbZ8jHuG2wH058GF4pLFbYamYrVA== + +acorn@^7.0.0, acorn@^7.1.1: + version "7.4.1" + resolved "https://registry.npmmirror.com/acorn/-/acorn-7.4.1.tgz#feaed255973d2e77555b83dbc08851a6c63520fa" + integrity sha512-nQyp0o1/mNdbTO1PO6kHkwSrmgZ0MT/jCCpNiwbUjGoRN4dlBhqJtoQuCnEOKzgTVwg0ZWiCoQy6SxMebQVh8A== + +acorn@^8.14.0, acorn@^8.5.0, acorn@^8.8.1: + version "8.14.1" + resolved "https://registry.npmmirror.com/acorn/-/acorn-8.14.1.tgz#721d5dc10f7d5b5609a891773d47731796935dfb" + integrity sha512-OvQ/2pUDKmgfCg++xsTX1wGxfTaszcHVcTctW4UJB4hibJx2HXxxO5UmVgyjMa+ZDsiaf5wWLXYpRWMmBI0QHg== + +agent-base@6: + version "6.0.2" + resolved "https://registry.npmmirror.com/agent-base/-/agent-base-6.0.2.tgz#49fff58577cfee3f37176feab4c22e00f86d7f77" + integrity sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ== + dependencies: + debug "4" + +aggregate-error@^3.0.0: + version "3.1.0" + resolved "https://registry.npmmirror.com/aggregate-error/-/aggregate-error-3.1.0.tgz#92670ff50f5359bdb7a3e0d40d0ec30c5737687a" + integrity sha512-4I7Td01quW/RpocfNayFdFVk1qSuoh0E7JrbRJ16nH01HhKFQ88INq9Sd+nd72zqRySlr9BmDA8xlEJ6vJMrYA== + dependencies: + clean-stack "^2.0.0" + indent-string "^4.0.0" + +ajv@^6.10.0, ajv@^6.10.2: + version "6.12.6" + resolved 
"https://registry.npmmirror.com/ajv/-/ajv-6.12.6.tgz#baf5a62e802b07d977034586f8c3baf5adf26df4" + integrity sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g== + dependencies: + fast-deep-equal "^3.1.1" + fast-json-stable-stringify "^2.0.0" + json-schema-traverse "^0.4.1" + uri-js "^4.2.2" + +alien-signals@^1.0.3: + version "1.0.13" + resolved "https://registry.npmmirror.com/alien-signals/-/alien-signals-1.0.13.tgz#8d6db73462f742ee6b89671fbd8c37d0b1727a7e" + integrity sha512-OGj9yyTnJEttvzhTUWuscOvtqxq5vrhF7vL9oS0xJ2mK0ItPYP1/y+vCFebfxoEyAz0++1AIwJ5CMr+Fk3nDmg== + +ansi-escapes@^3.0.0: + version "3.2.0" + resolved "https://registry.npmmirror.com/ansi-escapes/-/ansi-escapes-3.2.0.tgz#8780b98ff9dbf5638152d1f1fe5c1d7b4442976b" + integrity sha512-cBhpre4ma+U0T1oM5fXg7Dy1Jw7zzwv7lt/GoCpr+hDQJoYnKVPLL4dCvSEFMmQurOQvSrwT7SL/DAlhBI97RQ== + +ansi-escapes@^4.2.1: + version "4.3.2" + resolved "https://registry.npmmirror.com/ansi-escapes/-/ansi-escapes-4.3.2.tgz#6b2291d1db7d98b6521d5f1efa42d0f3a9feb65e" + integrity sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ== + dependencies: + type-fest "^0.21.3" + +ansi-regex@^2.0.0: + version "2.1.1" + resolved "https://registry.npmmirror.com/ansi-regex/-/ansi-regex-2.1.1.tgz#c3b33ab5ee360d86e0e628f0468ae7ef27d654df" + integrity sha512-TIGnTpdo+E3+pCyAluZvtED5p5wCqLdezCyhPZzKPcxvFplEt4i+W7OONCKgeZFT3+y5NZZfOOS/Bdcanm1MYA== + +ansi-regex@^3.0.0: + version "3.0.1" + resolved "https://registry.npmmirror.com/ansi-regex/-/ansi-regex-3.0.1.tgz#123d6479e92ad45ad897d4054e3c7ca7db4944e1" + integrity sha512-+O9Jct8wf++lXxxFc4hc8LsjaSq0HFzzL7cVsw8pRDIPdjKD2mT4ytDZlLuSBZ4cLKZFXIrMGO7DbQCtMJJMKw== + +ansi-regex@^4.1.0: + version "4.1.1" + resolved "https://registry.npmmirror.com/ansi-regex/-/ansi-regex-4.1.1.tgz#164daac87ab2d6f6db3a29875e2d1766582dabed" + integrity sha512-ILlv4k/3f6vfQ4OoP2AGvirOktlQ98ZEL1k9FaQjxa3L1abBgbuTDAdPOpvbGncC0BTVQrl+OM8xZGK6tWXt7g== + +ansi-regex@^5.0.1: + version "5.0.1" + resolved "https://registry.npmmirror.com/ansi-regex/-/ansi-regex-5.0.1.tgz#082cb2c89c9fe8659a311a53bd6a4dc5301db304" + integrity sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ== + +ansi-regex@^6.0.1: + version "6.1.0" + resolved "https://registry.npmmirror.com/ansi-regex/-/ansi-regex-6.1.0.tgz#95ec409c69619d6cb1b8b34f14b660ef28ebd654" + integrity sha512-7HSX4QQb4CspciLpVFwyRe79O3xsIZDDLER21kERQ71oaPodF8jL725AgJMFAYbooIqolJoRLuM81SpeUkpkvA== + +ansi-styles@^2.2.1: + version "2.2.1" + resolved "https://registry.npmmirror.com/ansi-styles/-/ansi-styles-2.2.1.tgz#b432dd3358b634cf75e1e4664368240533c1ddbe" + integrity sha512-kmCevFghRiWM7HB5zTPULl4r9bVFSWjz62MhqizDGUrq2NWuNMQyuv4tHHoKJHs69M/MF64lEcHdYIocrdWQYA== + +ansi-styles@^3.2.0, ansi-styles@^3.2.1: + version "3.2.1" + resolved "https://registry.npmmirror.com/ansi-styles/-/ansi-styles-3.2.1.tgz#41fbb20243e50b12be0f04b8dedbf07520ce841d" + integrity sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA== + dependencies: + color-convert "^1.9.0" + +ansi-styles@^4.0.0, ansi-styles@^4.1.0: + version "4.3.0" + resolved "https://registry.npmmirror.com/ansi-styles/-/ansi-styles-4.3.0.tgz#edd803628ae71c04c85ae7a0906edad34b648937" + integrity sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg== + dependencies: + color-convert "^2.0.1" + +ansi-styles@^6.1.0: + version "6.2.1" + resolved 
"https://registry.npmmirror.com/ansi-styles/-/ansi-styles-6.2.1.tgz#0e62320cf99c21afff3b3012192546aacbfb05c5" + integrity sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug== + +any-observable@^0.3.0: + version "0.3.0" + resolved "https://registry.npmmirror.com/any-observable/-/any-observable-0.3.0.tgz#af933475e5806a67d0d7df090dd5e8bef65d119b" + integrity sha512-/FQM1EDkTsf63Ub2C6O7GuYFDsSXUwsaZDurV0np41ocwq0jthUAYCmhBX9f+KwlaCgIuWyr/4WlUQUBfKfZog== + +any-promise@^1.0.0: + version "1.3.0" + resolved "https://registry.npmmirror.com/any-promise/-/any-promise-1.3.0.tgz#abc6afeedcea52e809cdc0376aed3ce39635d17f" + integrity sha512-7UvmKalWRt1wgjL1RrGxoSJW/0QZFIegpeGvZG9kjp8vrRu55XTHbwnqq2GpXm9uLbcuhxm3IqX9OB4MZR1b2A== + +anymatch@~3.1.2: + version "3.1.3" + resolved "https://registry.npmmirror.com/anymatch/-/anymatch-3.1.3.tgz#790c58b19ba1720a84205b57c618d5ad8524973e" + integrity sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw== + dependencies: + normalize-path "^3.0.0" + picomatch "^2.0.4" + +arg@^5.0.1, arg@^5.0.2: + version "5.0.2" + resolved "https://registry.npmmirror.com/arg/-/arg-5.0.2.tgz#c81433cc427c92c4dcf4865142dbca6f15acd59c" + integrity sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg== + +argparse@^1.0.7: + version "1.0.10" + resolved "https://registry.npmmirror.com/argparse/-/argparse-1.0.10.tgz#bcd6791ea5ae09725e17e5ad988134cd40b3d911" + integrity sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg== + dependencies: + sprintf-js "~1.0.2" + +argparse@^2.0.1: + version "2.0.1" + resolved "https://registry.npmmirror.com/argparse/-/argparse-2.0.1.tgz#246f50f3ca78a3240f6c997e8a9bd1eac49e4b38" + integrity sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q== + +aria-hidden@^1.2.4: + version "1.2.4" + resolved "https://registry.npmmirror.com/aria-hidden/-/aria-hidden-1.2.4.tgz#b78e383fdbc04d05762c78b4a25a501e736c4522" + integrity sha512-y+CcFFwelSXpLZk/7fMB2mUbGtX9lKycf1MWJ7CaTIERyitVlyQx6C+sxcROU2BAJ24OiZyK+8wj2i8AlBoS3A== + dependencies: + tslib "^2.0.0" + +array-union@^1.0.1: + version "1.0.2" + resolved "https://registry.npmmirror.com/array-union/-/array-union-1.0.2.tgz#9a34410e4f4e3da23dea375be5be70f24778ec39" + integrity sha512-Dxr6QJj/RdU/hCaBjOfxW+q6lyuVE6JFWIrAUpuOOhoJJoQ99cUn3igRaHVB5P9WrgFVN0FfArM3x0cueOU8ng== + dependencies: + array-uniq "^1.0.1" + +array-union@^2.1.0: + version "2.1.0" + resolved "https://registry.npmmirror.com/array-union/-/array-union-2.1.0.tgz#b798420adbeb1de828d84acd8a2e23d3efe85e8d" + integrity sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw== + +array-uniq@^1.0.1: + version "1.0.3" + resolved "https://registry.npmmirror.com/array-uniq/-/array-uniq-1.0.3.tgz#af6ac877a25cc7f74e058894753858dfdb24fdb6" + integrity sha512-MNha4BWQ6JbwhFhj03YK552f7cb3AzoE8SzeljgChvL1dl3IcvggXVz1DilzySZkCja+CXuZbdW7yATchWn8/Q== + +assertion-error@^1.1.0: + version "1.1.0" + resolved "https://registry.npmmirror.com/assertion-error/-/assertion-error-1.1.0.tgz#e60b6b0e8f301bd97e5375215bda406c85118c0b" + integrity sha512-jgsaNduz+ndvGyFt3uSuWqvy4lCnIJiovtouQN5JZHOKCS2QuhEdbcQHFhVksz2N2U9hXJo8odG7ETyWlEeuDw== + +astral-regex@^1.0.0: + version "1.0.0" + resolved "https://registry.npmmirror.com/astral-regex/-/astral-regex-1.0.0.tgz#6c8c3fb827dd43ee3918f27b82782ab7658a6fd9" + integrity 
sha512-+Ryf6g3BKoRc7jfp7ad8tM4TtMiaWvbF/1/sQcZPkkS7ag3D5nMBCe2UfOTONtAkaG0tO0ij3C5Lwmf1EiyjHg== + +asynckit@^0.4.0: + version "0.4.0" + resolved "https://registry.npmmirror.com/asynckit/-/asynckit-0.4.0.tgz#c79ed97f7f34cb8f2ba1bc9790bcc366474b4b79" + integrity sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q== + +autoprefixer@^10.4.2: + version "10.4.21" + resolved "https://registry.npmmirror.com/autoprefixer/-/autoprefixer-10.4.21.tgz#77189468e7a8ad1d9a37fbc08efc9f480cf0a95d" + integrity sha512-O+A6LWV5LDHSJD3LjHYoNi4VLsj/Whi7k6zG12xTYaU4cQ8oxQGckXNX8cRHK5yOZ/ppVHe0ZBXGzSV9jXdVbQ== + dependencies: + browserslist "^4.24.4" + caniuse-lite "^1.0.30001702" + fraction.js "^4.3.7" + normalize-range "^0.1.2" + picocolors "^1.1.1" + postcss-value-parser "^4.2.0" + +autoprefixer@^9: + version "9.8.8" + resolved "https://registry.npmmirror.com/autoprefixer/-/autoprefixer-9.8.8.tgz#fd4bd4595385fa6f06599de749a4d5f7a474957a" + integrity sha512-eM9d/swFopRt5gdJ7jrpCwgvEMIayITpojhkkSMRsFHYuH5bkSQ4p/9qTEHtmNudUZh22Tehu7I6CxAW0IXTKA== + dependencies: + browserslist "^4.12.0" + caniuse-lite "^1.0.30001109" + normalize-range "^0.1.2" + num2fraction "^1.2.2" + picocolors "^0.2.1" + postcss "^7.0.32" + postcss-value-parser "^4.1.0" + +babel-eslint@^10.0.3: + version "10.1.0" + resolved "https://registry.npmmirror.com/babel-eslint/-/babel-eslint-10.1.0.tgz#6968e568a910b78fb3779cdd8b6ac2f479943232" + integrity sha512-ifWaTHQ0ce+448CYop8AdrQiBsGrnC+bMgfyKFdi6EsPLTAWG+QfyDeM6OH+FmWnKvEq5NnBMLvlBUPKQZoDSg== + dependencies: + "@babel/code-frame" "^7.0.0" + "@babel/parser" "^7.7.0" + "@babel/traverse" "^7.7.0" + "@babel/types" "^7.7.0" + eslint-visitor-keys "^1.0.0" + resolve "^1.12.0" + +babel-plugin-polyfill-corejs2@^0.4.10: + version "0.4.13" + resolved "https://registry.npmmirror.com/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.4.13.tgz#7d445f0e0607ebc8fb6b01d7e8fb02069b91dd8b" + integrity sha512-3sX/eOms8kd3q2KZ6DAhKPc0dgm525Gqq5NtWKZ7QYYZEv57OQ54KtblzJzH1lQF/eQxO8KjWGIK9IPUJNus5g== + dependencies: + "@babel/compat-data" "^7.22.6" + "@babel/helper-define-polyfill-provider" "^0.6.4" + semver "^6.3.1" + +babel-plugin-polyfill-corejs3@^0.11.0: + version "0.11.1" + resolved "https://registry.npmmirror.com/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.11.1.tgz#4e4e182f1bb37c7ba62e2af81d8dd09df31344f6" + integrity sha512-yGCqvBT4rwMczo28xkH/noxJ6MZ4nJfkVYdoDaC/utLtWrXxv27HVrzAeSbqR8SxDsp46n0YF47EbHoixy6rXQ== + dependencies: + "@babel/helper-define-polyfill-provider" "^0.6.3" + core-js-compat "^3.40.0" + +babel-plugin-polyfill-regenerator@^0.6.1: + version "0.6.4" + resolved "https://registry.npmmirror.com/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.6.4.tgz#428c615d3c177292a22b4f93ed99e358d7906a9b" + integrity sha512-7gD3pRadPrbjhjLyxebmx/WrFYcuSjZ0XbdUujQMZ/fcE9oeewk2U/7PCvez84UeuK3oSjmPZ0Ch0dlupQvGzw== + dependencies: + "@babel/helper-define-polyfill-provider" "^0.6.4" + +balanced-match@^1.0.0: + version "1.0.2" + resolved "https://registry.npmmirror.com/balanced-match/-/balanced-match-1.0.2.tgz#e83e3a7e3f300b34cb9d87f615fa0cbf357690ee" + integrity sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw== + +base64-js@^1.3.1: + version "1.5.1" + resolved "https://registry.npmmirror.com/base64-js/-/base64-js-1.5.1.tgz#1b1b440160a5bf7ad40b650f095963481903930a" + integrity sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA== + 
+binary-extensions@^2.0.0: + version "2.3.0" + resolved "https://registry.npmmirror.com/binary-extensions/-/binary-extensions-2.3.0.tgz#f6e14a97858d327252200242d4ccfe522c445522" + integrity sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw== + +bl@^4.1.0: + version "4.1.0" + resolved "https://registry.npmmirror.com/bl/-/bl-4.1.0.tgz#451535264182bec2fbbc83a62ab98cf11d9f7b3a" + integrity sha512-1W07cM9gS6DcLperZfFSj+bWLtaPGSOHWhPiGzXmvVJbRLdG82sH/Kn8EtW1VqWVA54AKf2h5k5BbnIbwF3h6w== + dependencies: + buffer "^5.5.0" + inherits "^2.0.4" + readable-stream "^3.4.0" + +brace-expansion@^1.1.7: + version "1.1.11" + resolved "https://registry.npmmirror.com/brace-expansion/-/brace-expansion-1.1.11.tgz#3c7fcbf529d87226f3d2f52b966ff5271eb441dd" + integrity sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA== + dependencies: + balanced-match "^1.0.0" + concat-map "0.0.1" + +brace-expansion@^2.0.1: + version "2.0.1" + resolved "https://registry.npmmirror.com/brace-expansion/-/brace-expansion-2.0.1.tgz#1edc459e0f0c548486ecf9fc99f2221364b9a0ae" + integrity sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA== + dependencies: + balanced-match "^1.0.0" + +braces@^3.0.3, braces@~3.0.2: + version "3.0.3" + resolved "https://registry.npmmirror.com/braces/-/braces-3.0.3.tgz#490332f40919452272d55a8480adc0c441358789" + integrity sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA== + dependencies: + fill-range "^7.1.1" + +browser-process-hrtime@^1.0.0: + version "1.0.0" + resolved "https://registry.npmmirror.com/browser-process-hrtime/-/browser-process-hrtime-1.0.0.tgz#3c9b4b7d782c8121e56f10106d84c0d0ffc94626" + integrity sha512-9o5UecI3GhkpM6DrXr69PblIuWxPKk9Y0jHBRhdocZ2y7YECBFCsHm79Pr3OyR2AvjhDkabFJaDJMYRazHgsow== + +browserslist@^4.12.0, browserslist@^4.21.9, browserslist@^4.24.0, browserslist@^4.24.4: + version "4.24.4" + resolved "https://registry.npmmirror.com/browserslist/-/browserslist-4.24.4.tgz#c6b2865a3f08bcb860a0e827389003b9fe686e4b" + integrity sha512-KDi1Ny1gSePi1vm0q4oxSF8b4DR44GF4BbmS2YdhPLOEqd8pDviZOGH/GsmRwoWJ2+5Lr085X7naowMwKHDG1A== + dependencies: + caniuse-lite "^1.0.30001688" + electron-to-chromium "^1.5.73" + node-releases "^2.0.19" + update-browserslist-db "^1.1.1" + +buffer@^5.5.0: + version "5.7.1" + resolved "https://registry.npmmirror.com/buffer/-/buffer-5.7.1.tgz#ba62e7c13133053582197160851a8f648e99eed0" + integrity sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ== + dependencies: + base64-js "^1.3.1" + ieee754 "^1.1.13" + +bytes@^3.0.0: + version "3.1.2" + resolved "https://registry.npmmirror.com/bytes/-/bytes-3.1.2.tgz#8b0beeb98605adf1b128fa4386403c009e0221a5" + integrity sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg== + +c8@^7.11.0: + version "7.14.0" + resolved "https://registry.npmmirror.com/c8/-/c8-7.14.0.tgz#f368184c73b125a80565e9ab2396ff0be4d732f3" + integrity sha512-i04rtkkcNcCf7zsQcSv/T9EbUn4RXQ6mropeMcjFOsQXQ0iGLAr/xT6TImQg4+U9hmNpN9XdvPkjUL1IzbgxJw== + dependencies: + "@bcoe/v8-coverage" "^0.2.3" + "@istanbuljs/schema" "^0.1.3" + find-up "^5.0.0" + foreground-child "^2.0.0" + istanbul-lib-coverage "^3.2.0" + istanbul-lib-report "^3.0.0" + istanbul-reports "^3.1.4" + rimraf "^3.0.2" + test-exclude "^6.0.0" + v8-to-istanbul "^9.0.0" + yargs "^16.2.0" + yargs-parser "^20.2.9" + +call-bind-apply-helpers@^1.0.1, 
call-bind-apply-helpers@^1.0.2: + version "1.0.2" + resolved "https://registry.npmmirror.com/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz#4b5428c222be985d79c3d82657479dbe0b59b2d6" + integrity sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ== + dependencies: + es-errors "^1.3.0" + function-bind "^1.1.2" + +caller-callsite@^2.0.0: + version "2.0.0" + resolved "https://registry.npmmirror.com/caller-callsite/-/caller-callsite-2.0.0.tgz#847e0fce0a223750a9a027c54b33731ad3154134" + integrity sha512-JuG3qI4QOftFsZyOn1qq87fq5grLIyk1JYd5lJmdA+fG7aQ9pA/i3JIJGcO3q0MrRcHlOt1U+ZeHW8Dq9axALQ== + dependencies: + callsites "^2.0.0" + +caller-path@^2.0.0: + version "2.0.0" + resolved "https://registry.npmmirror.com/caller-path/-/caller-path-2.0.0.tgz#468f83044e369ab2010fac5f06ceee15bb2cb1f4" + integrity sha512-MCL3sf6nCSXOwCTzvPKhN18TU7AHTvdtam8DAogxcrJ8Rjfbbg7Lgng64H9Iy+vUV6VGFClN/TyxBkAebLRR4A== + dependencies: + caller-callsite "^2.0.0" + +callsites@^2.0.0: + version "2.0.0" + resolved "https://registry.npmmirror.com/callsites/-/callsites-2.0.0.tgz#06eb84f00eea413da86affefacbffb36093b3c50" + integrity sha512-ksWePWBloaWPxJYQ8TL0JHvtci6G5QTKwQ95RcWAa/lzoAKuAOflGdAK92hpHXjkwb8zLxoLNUoNYZgVsaJzvQ== + +callsites@^3.0.0: + version "3.1.0" + resolved "https://registry.npmmirror.com/callsites/-/callsites-3.1.0.tgz#b3630abd8943432f54b3f0519238e33cd7df2f73" + integrity sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ== + +camelcase-css@^2.0.1: + version "2.0.1" + resolved "https://registry.npmmirror.com/camelcase-css/-/camelcase-css-2.0.1.tgz#ee978f6947914cc30c6b44741b6ed1df7f043fd5" + integrity sha512-QOSvevhslijgYwRx6Rv7zKdMF8lbRmx+uQGx2+vDc+KI/eBnsy9kit5aj23AgGu3pa4t9AgwbnXWqS+iOY+2aA== + +camelcase@^5.0.0: + version "5.3.1" + resolved "https://registry.npmmirror.com/camelcase/-/camelcase-5.3.1.tgz#e3c9b31569e106811df242f715725a1f4c494320" + integrity sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg== + +caniuse-lite@^1.0.30001109, caniuse-lite@^1.0.30001688, caniuse-lite@^1.0.30001702: + version "1.0.30001712" + resolved "https://registry.npmmirror.com/caniuse-lite/-/caniuse-lite-1.0.30001712.tgz#41ee150f12de11b5f57c5889d4f30deb451deedf" + integrity sha512-MBqPpGYYdQ7/hfKiet9SCI+nmN5/hp4ZzveOJubl5DTAMa5oggjAuoi0Z4onBpKPFI2ePGnQuQIzF3VxDjDJig== + +chai@^4.3.6: + version "4.5.0" + resolved "https://registry.npmmirror.com/chai/-/chai-4.5.0.tgz#707e49923afdd9b13a8b0b47d33d732d13812fd8" + integrity sha512-RITGBfijLkBddZvnn8jdqoTypxvqbOLYQkGGxXzeFjVHvudaPw0HNFD9x928/eUwYWd2dPCugVqspGALTZZQKw== + dependencies: + assertion-error "^1.1.0" + check-error "^1.0.3" + deep-eql "^4.1.3" + get-func-name "^2.0.2" + loupe "^2.3.6" + pathval "^1.1.1" + type-detect "^4.1.0" + +chalk@4.1.1: + version "4.1.1" + resolved "https://registry.npmmirror.com/chalk/-/chalk-4.1.1.tgz#c80b3fab28bf6371e6863325eee67e618b77e6ad" + integrity sha512-diHzdDKxcU+bAsUboHLPEDQiw0qEe0qd7SYUn3HgcFlWgbDcfLGswOHYeGrHKzG9z6UYf01d9VFMfZxPM1xZSg== + dependencies: + ansi-styles "^4.1.0" + supports-color "^7.1.0" + +chalk@^1.0.0, chalk@^1.1.3: + version "1.1.3" + resolved "https://registry.npmmirror.com/chalk/-/chalk-1.1.3.tgz#a8115c55e4a702fe4d150abd3872822a7e09fc98" + integrity sha512-U3lRVLMSlsCfjqYPbLyVv11M9CPW4I728d6TCKMAOJueEeB9/8o+eSsMnxPJD+Q+K909sdESg7C+tIkoH6on1A== + dependencies: + ansi-styles "^2.2.1" + escape-string-regexp "^1.0.2" + has-ansi "^2.0.0" + strip-ansi "^3.0.0" + supports-color "^2.0.0" 
+ +chalk@^2.1.0, chalk@^2.4.1, chalk@^2.4.2: + version "2.4.2" + resolved "https://registry.npmmirror.com/chalk/-/chalk-2.4.2.tgz#cd42541677a54333cf541a49108c1432b44c9424" + integrity sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ== + dependencies: + ansi-styles "^3.2.1" + escape-string-regexp "^1.0.5" + supports-color "^5.3.0" + +chalk@^4.1.0, chalk@^4.1.1, chalk@^4.1.2: + version "4.1.2" + resolved "https://registry.npmmirror.com/chalk/-/chalk-4.1.2.tgz#aac4e2b7734a740867aeb16bf02aad556a1e7a01" + integrity sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA== + dependencies: + ansi-styles "^4.1.0" + supports-color "^7.1.0" + +chardet@^0.7.0: + version "0.7.0" + resolved "https://registry.npmmirror.com/chardet/-/chardet-0.7.0.tgz#90094849f0937f2eedc2425d0d28a9e5f0cbad9e" + integrity sha512-mT8iDcrh03qDGRRmoA2hmBJnxpllMR+0/0qlzjqZES6NdiWDcZkCNAk4rPFZ9Q85r27unkiNNg8ZOiwZXBHwcA== + +check-error@^1.0.3: + version "1.0.3" + resolved "https://registry.npmmirror.com/check-error/-/check-error-1.0.3.tgz#a6502e4312a7ee969f646e83bb3ddd56281bd694" + integrity sha512-iKEoDYaRmd1mxM90a2OEfWhjsjPpYPuQ+lMYsoxB126+t8fw7ySEO48nmDg5COTjxDI65/Y2OWpeEHk3ZOe8zg== + dependencies: + get-func-name "^2.0.2" + +chokidar@^3.4.2, chokidar@^3.5.2, chokidar@^3.5.3, chokidar@^3.6.0: + version "3.6.0" + resolved "https://registry.npmmirror.com/chokidar/-/chokidar-3.6.0.tgz#197c6cc669ef2a8dc5e7b4d97ee4e092c3eb0d5b" + integrity sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw== + dependencies: + anymatch "~3.1.2" + braces "~3.0.2" + glob-parent "~5.1.2" + is-binary-path "~2.1.0" + is-glob "~4.0.1" + normalize-path "~3.0.0" + readdirp "~3.6.0" + optionalDependencies: + fsevents "~2.3.2" + +ci-info@^1.5.0: + version "1.6.0" + resolved "https://registry.npmmirror.com/ci-info/-/ci-info-1.6.0.tgz#2ca20dbb9ceb32d4524a683303313f0304b1e497" + integrity sha512-vsGdkwSCDpWmP80ncATX7iea5DWQemg1UgCW5J8tqjU3lYw4FBYuj89J0CTVomA7BEfvSZd84GmHko+MxFQU2A== + +classnames@^2.2.5: + version "2.5.1" + resolved "https://registry.npmmirror.com/classnames/-/classnames-2.5.1.tgz#ba774c614be0f016da105c858e7159eae8e7687b" + integrity sha512-saHYOzhIQs6wy2sVxTM6bUDsQO4F50V9RQ22qBpEdCW+I+/Wmke2HOl6lS6dTpdxVhb88/I6+Hs+438c3lfUow== + +clean-stack@^2.0.0: + version "2.2.0" + resolved "https://registry.npmmirror.com/clean-stack/-/clean-stack-2.2.0.tgz#ee8472dbb129e727b31e8a10a427dee9dfe4008b" + integrity sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A== + +cli-cursor@^2.0.0, cli-cursor@^2.1.0: + version "2.1.0" + resolved "https://registry.npmmirror.com/cli-cursor/-/cli-cursor-2.1.0.tgz#b35dac376479facc3e94747d41d0d0f5238ffcb5" + integrity sha512-8lgKz8LmCRYZZQDpRyT2m5rKJ08TnU4tR9FFFW2rxpxR1FzWi4PQ/NfyODchAatHaUgnSPVcx/R5w6NuTBzFiw== + dependencies: + restore-cursor "^2.0.0" + +cli-cursor@^3.1.0: + version "3.1.0" + resolved "https://registry.npmmirror.com/cli-cursor/-/cli-cursor-3.1.0.tgz#264305a7ae490d1d03bf0c9ba7c925d1753af307" + integrity sha512-I/zHAwsKf9FqGoXM4WWRACob9+SNukZTd94DWF57E4toouRulbCxcUh6RKUEOQlYTHJnzkPMySvPNaaSLNfLZw== + dependencies: + restore-cursor "^3.1.0" + +cli-spinners@^2.5.0: + version "2.9.2" + resolved "https://registry.npmmirror.com/cli-spinners/-/cli-spinners-2.9.2.tgz#1773a8f4b9c4d6ac31563df53b3fc1d79462fe41" + integrity sha512-ywqV+5MmyL4E7ybXgKys4DugZbX0FC6LnwrhjuykIjnK9k8OQacQ7axGKnjDXWNhns0xot3bZI5h55H8yo9cJg== + +cli-truncate@^0.2.1: + version 
"0.2.1" + resolved "https://registry.npmmirror.com/cli-truncate/-/cli-truncate-0.2.1.tgz#9f15cfbb0705005369216c626ac7d05ab90dd574" + integrity sha512-f4r4yJnbT++qUPI9NR4XLDLq41gQ+uqnPItWG0F5ZkehuNiTTa3EY0S4AqTSUOeJ7/zU41oWPQSNkW5BqPL9bg== + dependencies: + slice-ansi "0.0.4" + string-width "^1.0.1" + +cli-width@^3.0.0: + version "3.0.0" + resolved "https://registry.npmmirror.com/cli-width/-/cli-width-3.0.0.tgz#a2f48437a2caa9a22436e794bf071ec9e61cedf6" + integrity sha512-FxqpkPPwu1HjuN93Omfm4h8uIanXofW0RxVEW3k5RKx+mJJYSthzNhp32Kzxxy3YAEZ/Dc/EWN1vZRY0+kOhbw== + +cliui@^6.0.0: + version "6.0.0" + resolved "https://registry.npmmirror.com/cliui/-/cliui-6.0.0.tgz#511d702c0c4e41ca156d7d0e96021f23e13225b1" + integrity sha512-t6wbgtoCXvAzst7QgXxJYqPt0usEfbgQdftEPbLL/cvv6HPE5VgvqCuAIDR0NgU52ds6rFwqrgakNLrHEjCbrQ== + dependencies: + string-width "^4.2.0" + strip-ansi "^6.0.0" + wrap-ansi "^6.2.0" + +cliui@^7.0.2: + version "7.0.4" + resolved "https://registry.npmmirror.com/cliui/-/cliui-7.0.4.tgz#a0265ee655476fc807aea9df3df8df7783808b4f" + integrity sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ== + dependencies: + string-width "^4.2.0" + strip-ansi "^6.0.0" + wrap-ansi "^7.0.0" + +cliui@^8.0.1: + version "8.0.1" + resolved "https://registry.npmmirror.com/cliui/-/cliui-8.0.1.tgz#0c04b075db02cbfe60dc8e6cf2f5486b1a3608aa" + integrity sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ== + dependencies: + string-width "^4.2.0" + strip-ansi "^6.0.1" + wrap-ansi "^7.0.0" + +clone@^1.0.2: + version "1.0.4" + resolved "https://registry.npmmirror.com/clone/-/clone-1.0.4.tgz#da309cc263df15994c688ca902179ca3c7cd7c7e" + integrity sha512-JQHZ2QMW6l3aH/j6xCqQThY/9OH4D/9ls34cgkUBiEeocRTU04tHfKPBsUK1PqZCUQM7GiA0IIXJSuXHI64Kbg== + +code-point-at@^1.0.0: + version "1.1.0" + resolved "https://registry.npmmirror.com/code-point-at/-/code-point-at-1.1.0.tgz#0d070b4d043a5bea33a2f1a40e2edb3d9a4ccf77" + integrity sha512-RpAVKQA5T63xEj6/giIbUEtZwJ4UFIc3ZtvEkiaUERylqe8xb5IvqcgOurZLahv93CLKfxcw5YI+DZcUBRyLXA== + +codemirror@^6.0.1: + version "6.0.1" + resolved "https://registry.npmmirror.com/codemirror/-/codemirror-6.0.1.tgz#62b91142d45904547ee3e0e0e4c1a79158035a29" + integrity sha512-J8j+nZ+CdWmIeFIGXEFbFPtpiYacFMDR8GlHK3IyHQJMCaVRfGx9NT+Hxivv1ckLWPvNdZqndbr/7lVhrf/Svg== + dependencies: + "@codemirror/autocomplete" "^6.0.0" + "@codemirror/commands" "^6.0.0" + "@codemirror/language" "^6.0.0" + "@codemirror/lint" "^6.0.0" + "@codemirror/search" "^6.0.0" + "@codemirror/state" "^6.0.0" + "@codemirror/view" "^6.0.0" + +color-convert@^1.9.0: + version "1.9.3" + resolved "https://registry.npmmirror.com/color-convert/-/color-convert-1.9.3.tgz#bb71850690e1f136567de629d2d5471deda4c1e8" + integrity sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg== + dependencies: + color-name "1.1.3" + +color-convert@^2.0.1: + version "2.0.1" + resolved "https://registry.npmmirror.com/color-convert/-/color-convert-2.0.1.tgz#72d3a68d598c9bdb3af2ad1e84f21d896abd4de3" + integrity sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ== + dependencies: + color-name "~1.1.4" + +color-name@1.1.3: + version "1.1.3" + resolved "https://registry.npmmirror.com/color-name/-/color-name-1.1.3.tgz#a7d0558bd89c42f795dd42328f740831ca53bc25" + integrity sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw== + +color-name@^1.0.0, color-name@~1.1.4: + version 
"1.1.4" + resolved "https://registry.npmmirror.com/color-name/-/color-name-1.1.4.tgz#c2a09a87acbde69543de6f63fa3995c826c536a2" + integrity sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA== + +color-string@^1.9.0: + version "1.9.1" + resolved "https://registry.npmmirror.com/color-string/-/color-string-1.9.1.tgz#4467f9146f036f855b764dfb5bf8582bf342c7a4" + integrity sha512-shrVawQFojnZv6xM40anx4CkoDP+fZsw/ZerEMsW/pyzsRbElpsL/DBVW7q3ExxwusdNXI3lXpuhEZkzs8p5Eg== + dependencies: + color-name "^1.0.0" + simple-swizzle "^0.2.2" + +color@^4.0.1: + version "4.2.3" + resolved "https://registry.npmmirror.com/color/-/color-4.2.3.tgz#d781ecb5e57224ee43ea9627560107c0e0c6463a" + integrity sha512-1rXeuUUiGGrykh+CeBdu5Ie7OJwinCgQY0bc7GCRxy5xVHy+moaqkpL/jqQq0MtQOeYcrqEz4abc5f0KtU7W4A== + dependencies: + color-convert "^2.0.1" + color-string "^1.9.0" + +combined-stream@^1.0.8: + version "1.0.8" + resolved "https://registry.npmmirror.com/combined-stream/-/combined-stream-1.0.8.tgz#c3d45a8b34fd730631a110a8a2520682b31d5a7f" + integrity sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg== + dependencies: + delayed-stream "~1.0.0" + +commander@^10.0.0: + version "10.0.1" + resolved "https://registry.npmmirror.com/commander/-/commander-10.0.1.tgz#881ee46b4f77d1c1dccc5823433aa39b022cbe06" + integrity sha512-y4Mg2tXshplEbSGzx7amzPwKKOCGuoSRP/CjEdwwk0FOGlUbq6lKuoyDZTNZkmxHdJtp54hdfY/JUrdL7Xfdug== + +commander@^2.19.0, commander@^2.20.0: + version "2.20.3" + resolved "https://registry.npmmirror.com/commander/-/commander-2.20.3.tgz#fd485e84c03eb4881c20722ba48035e8531aeb33" + integrity sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ== + +commander@^4.0.0: + version "4.1.1" + resolved "https://registry.npmmirror.com/commander/-/commander-4.1.1.tgz#9fd602bd936294e9e9ef46a3f4d6964044b18068" + integrity sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA== + +commander@^8.0.0: + version "8.3.0" + resolved "https://registry.npmmirror.com/commander/-/commander-8.3.0.tgz#4837ea1b2da67b9c616a67afbb0fafee567bca66" + integrity sha512-OkTL9umf+He2DZkUq8f8J9of7yL6RJKI24dVITBmNfZBmri9zYZQrKkuXiKhyfPSu8tUhnVBB1iKXevvnlR4Ww== + +commander@^9.0.0: + version "9.5.0" + resolved "https://registry.npmmirror.com/commander/-/commander-9.5.0.tgz#bc08d1eb5cedf7ccb797a96199d41c7bc3e60d30" + integrity sha512-KRs7WVDKg86PWiuAqhDrAQnTXZKraVcCc6vFdL14qrZ/DcWwuRo7VoiYXalXO7S5GKpqYiVEwCbgFDfxNHKJBQ== + +concat-map@0.0.1: + version "0.0.1" + resolved "https://registry.npmmirror.com/concat-map/-/concat-map-0.0.1.tgz#d8a96bd77fd68df7793a73036a3ba0d5405d477b" + integrity sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg== + +confbox@^0.1.8: + version "0.1.8" + resolved "https://registry.npmmirror.com/confbox/-/confbox-0.1.8.tgz#820d73d3b3c82d9bd910652c5d4d599ef8ff8b06" + integrity sha512-RMtmw0iFkeR4YV+fUOSucriAQNb9g8zFR52MWCtl+cCZOFRNL6zeB395vPzFhEjjn4fMxXudmELnl/KF/WrK6w== + +confbox@^0.2.1: + version "0.2.2" + resolved "https://registry.npmmirror.com/confbox/-/confbox-0.2.2.tgz#8652f53961c74d9e081784beed78555974a9c110" + integrity sha512-1NB+BKqhtNipMsov4xI/NnhCKp9XG9NamYp5PVm9klAT0fsrNPjaFICsCFhNhwZJKNh7zB/3q8qXz0E9oaMNtQ== + +config-chain@^1.1.13: + version "1.1.13" + resolved "https://registry.npmmirror.com/config-chain/-/config-chain-1.1.13.tgz#fad0795aa6a6cdaff9ed1b68e9dff94372c232f4" + integrity 
sha512-qj+f8APARXHrM0hraqXYb2/bOVSV4PvJQlNZ/DVj0QrmNM2q2euizkeuVckQ57J+W0mRH6Hvi+k50M4Jul2VRQ== + dependencies: + ini "^1.3.4" + proto-list "~1.2.1" + +connect-history-api-fallback@^1.6.0: + version "1.6.0" + resolved "https://registry.npmmirror.com/connect-history-api-fallback/-/connect-history-api-fallback-1.6.0.tgz#8b32089359308d111115d81cad3fceab888f97bc" + integrity sha512-e54B99q/OUoH64zYYRf3HBP5z24G38h5D3qXu23JGRoigpX5Ss4r9ZnDk3g0Z8uQC2x2lPaJ+UlWBc1ZWBWdLg== + +convert-source-map@^2.0.0: + version "2.0.0" + resolved "https://registry.npmmirror.com/convert-source-map/-/convert-source-map-2.0.0.tgz#4b560f649fc4e918dd0ab75cf4961e8bc882d82a" + integrity sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg== + +cookie@^0.4.1: + version "0.4.2" + resolved "https://registry.npmmirror.com/cookie/-/cookie-0.4.2.tgz#0e41f24de5ecf317947c82fc789e06a884824432" + integrity sha512-aSWTXFzaKWkvHO1Ny/s+ePFpvKsPnjc551iI41v3ny/ow6tBG5Vd+FuqGNhh1LxOmVzOlGUriIlOaokOvhaStA== + +core-js-compat@^3.40.0: + version "3.41.0" + resolved "https://registry.npmmirror.com/core-js-compat/-/core-js-compat-3.41.0.tgz#4cdfce95f39a8f27759b667cf693d96e5dda3d17" + integrity sha512-RFsU9LySVue9RTwdDVX/T0e2Y6jRYWXERKElIjpuEOEnxaXffI0X7RUwVzfYLfzuLXSNJDYoRYUAmRUcyln20A== + dependencies: + browserslist "^4.24.4" + +core-js@^3.1.3, core-js@^3.31.1, core-js@^3.6.4: + version "3.41.0" + resolved "https://registry.npmmirror.com/core-js/-/core-js-3.41.0.tgz#57714dafb8c751a6095d028a7428f1fb5834a776" + integrity sha512-SJ4/EHwS36QMJd6h/Rg+GyR4A5xE0FSI3eZ+iBVpfqf1x0eTSg1smWLHrA+2jQThZSh97fmSgFSU8B61nxosxA== + +cosmiconfig@^5.2.1: + version "5.2.1" + resolved "https://registry.npmmirror.com/cosmiconfig/-/cosmiconfig-5.2.1.tgz#040f726809c591e77a17c0a3626ca45b4f168b1a" + integrity sha512-H65gsXo1SKjf8zmrJ67eJk8aIRKV5ff2D4uKZIBZShbhGSpEmsQOPW/SKMKYhSTrqR7ufy6RP69rPogdaPh/kA== + dependencies: + import-fresh "^2.0.0" + is-directory "^0.3.1" + js-yaml "^3.13.1" + parse-json "^4.0.0" + +cosmiconfig@^7.0.1: + version "7.1.0" + resolved "https://registry.npmmirror.com/cosmiconfig/-/cosmiconfig-7.1.0.tgz#1443b9afa596b670082ea46cbd8f6a62b84635f6" + integrity sha512-AdmX6xUzdNASswsFtmwSt7Vj8po9IuqXm0UXz7QKPuEUmPB4XyjGfaAr2PSuELMwkRMVH1EpIkX5bTZGRB3eCA== + dependencies: + "@types/parse-json" "^4.0.0" + import-fresh "^3.2.1" + parse-json "^5.0.0" + path-type "^4.0.0" + yaml "^1.10.0" + +crelt@^1.0.0, crelt@^1.0.5: + version "1.0.6" + resolved "https://registry.npmmirror.com/crelt/-/crelt-1.0.6.tgz#7cc898ea74e190fb6ef9dae57f8f81cf7302df72" + integrity sha512-VQ2MBenTq1fWZUH9DJNGti7kKv6EeAuYr3cLwxUWhIu1baTaXh4Ib5W2CqHVqib4/MqbYGJqiL3Zb8GJZr3l4g== + +cross-spawn@^5.0.1: + version "5.1.0" + resolved "https://registry.npmmirror.com/cross-spawn/-/cross-spawn-5.1.0.tgz#e8bd0efee58fcff6f8f94510a0a554bbfa235449" + integrity sha512-pTgQJ5KC0d2hcY8eyL1IzlBPYjTkyH72XRZPnLyKus2mBfNjQs3klqbJU2VILqZryAZUt9JOb3h/mWMy23/f5A== + dependencies: + lru-cache "^4.0.1" + shebang-command "^1.2.0" + which "^1.2.9" + +cross-spawn@^6.0.5: + version "6.0.6" + resolved "https://registry.npmmirror.com/cross-spawn/-/cross-spawn-6.0.6.tgz#30d0efa0712ddb7eb5a76e1e8721bffafa6b5d57" + integrity sha512-VqCUuhcd1iB+dsv8gxPttb5iZh/D0iubSP21g36KXdEuf6I5JiioesUVjpCdHV9MZRUfVFlvwtIUyPfxo5trtw== + dependencies: + nice-try "^1.0.4" + path-key "^2.0.1" + semver "^5.5.0" + shebang-command "^1.2.0" + which "^1.2.9" + +cross-spawn@^7.0.0, cross-spawn@^7.0.3, cross-spawn@^7.0.6: + version "7.0.6" + resolved 
"https://registry.npmmirror.com/cross-spawn/-/cross-spawn-7.0.6.tgz#8a58fe78f00dcd70c370451759dfbfaf03e8ee9f" + integrity sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA== + dependencies: + path-key "^3.1.0" + shebang-command "^2.0.0" + which "^2.0.1" + +css-color-names@^0.0.4: + version "0.0.4" + resolved "https://registry.npmmirror.com/css-color-names/-/css-color-names-0.0.4.tgz#808adc2e79cf84738069b646cb20ec27beb629e0" + integrity sha512-zj5D7X1U2h2zsXOAM8EyUREBnnts6H+Jm+d1M2DbiQQcUtnqgQsMrdo8JW9R80YFUmIdBZeMu5wvYM7hcgWP/Q== + +css-unit-converter@^1.1.1: + version "1.1.2" + resolved "https://registry.npmmirror.com/css-unit-converter/-/css-unit-converter-1.1.2.tgz#4c77f5a1954e6dbff60695ecb214e3270436ab21" + integrity sha512-IiJwMC8rdZE0+xiEZHeru6YoONC4rfPMqGm2W85jMIbkFvv5nFTwJVFHam2eFrN6txmoUYFAFXiv8ICVeTO0MA== + +cssesc@^3.0.0: + version "3.0.0" + resolved "https://registry.npmmirror.com/cssesc/-/cssesc-3.0.0.tgz#37741919903b868565e1c09ea747445cd18983ee" + integrity sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg== + +cssom@^0.5.0: + version "0.5.0" + resolved "https://registry.npmmirror.com/cssom/-/cssom-0.5.0.tgz#d254fa92cd8b6fbd83811b9fbaed34663cc17c36" + integrity sha512-iKuQcq+NdHqlAcwUY0o/HL69XQrUaQdMjmStJ8JFmUaiiQErlhrmuigkg/CU4E2J0IyUKUrMAgl36TvN67MqTw== + +cssom@~0.3.6: + version "0.3.8" + resolved "https://registry.npmmirror.com/cssom/-/cssom-0.3.8.tgz#9f1276f5b2b463f2114d3f2c75250af8c1a36f4a" + integrity sha512-b0tGHbfegbhPJpxpiBPU2sCkigAqtM9O121le6bbOlgyV+NyGyCmVfJ6QW9eRjz8CpNfWEOYBIMIGRYkLwsIYg== + +cssstyle@^2.3.0: + version "2.3.0" + resolved "https://registry.npmmirror.com/cssstyle/-/cssstyle-2.3.0.tgz#ff665a0ddbdc31864b09647f34163443d90b0852" + integrity sha512-AZL67abkUzIuvcHqk7c09cezpGNcxUxU4Ioi/05xHk4DQeTkWmGYftIE6ctU6AEt+Gn4n1lDStOtj7FKycP71A== + dependencies: + cssom "~0.3.6" + +csstype@^3.1.3: + version "3.1.3" + resolved "https://registry.npmmirror.com/csstype/-/csstype-3.1.3.tgz#d80ff294d114fb0e6ac500fbf85b60137d7eff81" + integrity sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw== + +data-uri-to-buffer@^4.0.0: + version "4.0.1" + resolved "https://registry.npmmirror.com/data-uri-to-buffer/-/data-uri-to-buffer-4.0.1.tgz#d8feb2b2881e6a4f58c2e08acfd0e2834e26222e" + integrity sha512-0R9ikRb668HB7QDxT1vkpuUBtqc53YyAwMwGeUFKRojY/NWKvdZ+9UYtRfGmhqNbRkTSVpMbmyhXipFFv2cb/A== + +data-urls@^3.0.1: + version "3.0.2" + resolved "https://registry.npmmirror.com/data-urls/-/data-urls-3.0.2.tgz#9cf24a477ae22bcef5cd5f6f0bfbc1d2d3be9143" + integrity sha512-Jy/tj3ldjZJo63sVAvg6LHt2mHvl4V6AgRAmNDtLdm7faqtsx+aJG42rsyCo9JCoRVKwPFzKlIPx3DIibwSIaQ== + dependencies: + abab "^2.0.6" + whatwg-mimetype "^3.0.0" + whatwg-url "^11.0.0" + +date-fns@^1.27.2: + version "1.30.1" + resolved "https://registry.npmmirror.com/date-fns/-/date-fns-1.30.1.tgz#2e71bf0b119153dbb4cc4e88d9ea5acfb50dc05c" + integrity sha512-hBSVCvSmWC+QypYObzwGOd9wqdDpOt+0wl0KbU+R+uuZBS1jN8VsD1ss3irQDknRj5NvxiTF6oj/nDRnN/UQNw== + +dayjs@^1.10.7, dayjs@^1.11.13: + version "1.11.13" + resolved "https://registry.npmmirror.com/dayjs/-/dayjs-1.11.13.tgz#92430b0139055c3ebb60150aa13e860a4b5a366c" + integrity sha512-oaMBel6gjolK862uaPQOVTA7q3TZhuSvuMQAAglQDOWYO9A91IrAOUJEyKVlqJlHE0vq5p5UXxzdPfMH/x6xNg== + +de-indent@^1.0.2: + version "1.0.2" + resolved "https://registry.npmmirror.com/de-indent/-/de-indent-1.0.2.tgz#b2038e846dc33baa5796128d0804b455b8c1e21d" + integrity 
sha512-e/1zu3xH5MQryN2zdVaF0OrdNLUbvWxzMbi+iNA6Bky7l1RoP8a2fIbRocyHclXt/arDrrR6lL3TqFD9pMQTsg== + +debug@4, debug@^4.0.1, debug@^4.1.0, debug@^4.1.1, debug@^4.3.1, debug@^4.3.2, debug@^4.3.4, debug@^4.4.0: + version "4.4.0" + resolved "https://registry.npmmirror.com/debug/-/debug-4.4.0.tgz#2b3f2aea2ffeb776477460267377dc8710faba8a" + integrity sha512-6WTZ/IxCY/T6BALoZHaE4ctp9xm+Z5kY/pzYaCHRFeyVhojxlrm+46y68HA6hr0TcwEssoxNiDEUJQjfPZ/RYA== + dependencies: + ms "^2.1.3" + +debug@~4.3.1, debug@~4.3.2: + version "4.3.7" + resolved "https://registry.npmmirror.com/debug/-/debug-4.3.7.tgz#87945b4151a011d76d95a198d7111c865c360a52" + integrity sha512-Er2nc/H7RrMXZBFCEim6TCmMk02Z8vLC2Rbi1KEBggpo0fS6l0S1nnapwmIi3yW/+GOJap1Krg4w0Hg80oCqgQ== + dependencies: + ms "^2.1.3" + +decamelize@^1.2.0: + version "1.2.0" + resolved "https://registry.npmmirror.com/decamelize/-/decamelize-1.2.0.tgz#f6534d15148269b20352e7bee26f501f9a191290" + integrity sha512-z2S+W9X73hAUUki+N+9Za2lBlun89zigOyGrsax+KUQ6wKW4ZoWpEYBkGhQjwAjjDCkWxhY0VKEhk8wzY7F5cA== + +decimal.js@^10.3.1: + version "10.5.0" + resolved "https://registry.npmmirror.com/decimal.js/-/decimal.js-10.5.0.tgz#0f371c7cf6c4898ce0afb09836db73cd82010f22" + integrity sha512-8vDa8Qxvr/+d94hSh5P3IJwI5t8/c0KsMp+g8bNw9cY2icONa5aPfvKeieW1WlG0WQYwwhJ7mjui2xtiePQSXw== + +dedent@^0.7.0: + version "0.7.0" + resolved "https://registry.npmmirror.com/dedent/-/dedent-0.7.0.tgz#2495ddbaf6eb874abb0e1be9df22d2e5a544326c" + integrity sha512-Q6fKUPqnAHAyhiUgFU7BUzLiv0kd8saH9al7tnu5Q/okj6dnupxyTgFIBjVzJATdfIAm9NAsvXNzjaKa+bxVyA== + +deep-eql@^4.1.3: + version "4.1.4" + resolved "https://registry.npmmirror.com/deep-eql/-/deep-eql-4.1.4.tgz#d0d3912865911bb8fac5afb4e3acfa6a28dc72b7" + integrity sha512-SUwdGfqdKOwxCPeVYjwSyRpJ7Z+fhpwIAtmCUdZIWZ/YP5R9WAsyuSgpLVDi9bjWoN2LXHNss/dk3urXtdQxGg== + dependencies: + type-detect "^4.0.0" + +deep-is@~0.1.3: + version "0.1.4" + resolved "https://registry.npmmirror.com/deep-is/-/deep-is-0.1.4.tgz#a6f2dce612fadd2ef1f519b73551f17e85199831" + integrity sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ== + +defaults@^1.0.3: + version "1.0.4" + resolved "https://registry.npmmirror.com/defaults/-/defaults-1.0.4.tgz#b0b02062c1e2aa62ff5d9528f0f98baa90978d7a" + integrity sha512-eFuaLoy/Rxalv2kr+lqMlUnrDWV+3j4pljOIJgLIhI058IQfWJ7vXhyEIHu+HtC738klGALYxOKDO0bQP3tg8A== + dependencies: + clone "^1.0.2" + +defined@^1.0.0: + version "1.0.1" + resolved "https://registry.npmmirror.com/defined/-/defined-1.0.1.tgz#c0b9db27bfaffd95d6f61399419b893df0f91ebf" + integrity sha512-hsBd2qSVCRE+5PmNdHt1uzyrFu5d3RwmFDKzyNZMFq/EwDNJF7Ee5+D5oEKF0hU6LhtoUF1macFvOe4AskQC1Q== + +defu@^6.1.4: + version "6.1.4" + resolved "https://registry.npmmirror.com/defu/-/defu-6.1.4.tgz#4e0c9cf9ff68fe5f3d7f2765cc1a012dfdcb0479" + integrity sha512-mEQCMmwJu317oSz8CwdIOdwf3xMif1ttiM8LTufzc3g6kR+9Pe236twL8j3IYT1F7GfRgGcW6MWxzZjLIkuHIg== + +del@^5.0.0: + version "5.1.0" + resolved "https://registry.npmmirror.com/del/-/del-5.1.0.tgz#d9487c94e367410e6eff2925ee58c0c84a75b3a7" + integrity sha512-wH9xOVHnczo9jN2IW68BabcecVPxacIA3g/7z6vhSU/4stOKQzeCRK0yD0A24WiAAUJmmVpWqrERcTxnLo3AnA== + dependencies: + globby "^10.0.1" + graceful-fs "^4.2.2" + is-glob "^4.0.1" + is-path-cwd "^2.2.0" + is-path-inside "^3.0.1" + p-map "^3.0.0" + rimraf "^3.0.0" + slash "^3.0.0" + +delayed-stream@~1.0.0: + version "1.0.0" + resolved "https://registry.npmmirror.com/delayed-stream/-/delayed-stream-1.0.0.tgz#df3ae199acadfb7d440aaae0b29e2272b24ec619" + integrity 
sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ== + +detective@^5.2.0: + version "5.2.1" + resolved "https://registry.npmmirror.com/detective/-/detective-5.2.1.tgz#6af01eeda11015acb0e73f933242b70f24f91034" + integrity sha512-v9XE1zRnz1wRtgurGu0Bs8uHKFSTdteYZNbIPFVhUZ39L/S79ppMpdmVOZAnoz1jfEFodc48n6MX483Xo3t1yw== + dependencies: + acorn-node "^1.8.2" + defined "^1.0.0" + minimist "^1.2.6" + +didyoumean@^1.2.2: + version "1.2.2" + resolved "https://registry.npmmirror.com/didyoumean/-/didyoumean-1.2.2.tgz#989346ffe9e839b4555ecf5666edea0d3e8ad037" + integrity sha512-gxtyfqMg7GKyhQmb056K7M3xszy/myH8w+B4RT+QXBQsvAOdc3XymqDDPHx1BgPgsdAA5SIifona89YtRATDzw== + +dijkstrajs@^1.0.1: + version "1.0.3" + resolved "https://registry.npmmirror.com/dijkstrajs/-/dijkstrajs-1.0.3.tgz#4c8dbdea1f0f6478bff94d9c49c784d623e4fc23" + integrity sha512-qiSlmBq9+BCdCA/L46dw8Uy93mloxsPSbwnm5yrKn2vMPiy8KyAskTF6zuV/j5BMsmOGZDPs7KjU+mjb670kfA== + +dir-glob@^3.0.1: + version "3.0.1" + resolved "https://registry.npmmirror.com/dir-glob/-/dir-glob-3.0.1.tgz#56dbf73d992a4a93ba1584f4534063fd2e41717f" + integrity sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA== + dependencies: + path-type "^4.0.0" + +discontinuous-range@1.0.0: + version "1.0.0" + resolved "https://registry.npmmirror.com/discontinuous-range/-/discontinuous-range-1.0.0.tgz#e38331f0844bba49b9a9cb71c771585aab1bc65a" + integrity sha512-c68LpLbO+7kP/b1Hr1qs8/BJ09F5khZGTxqxZuhzxpmwJKOgRFHJWIb9/KmqnqHhLdO55aOxFH/EGBvUQbL/RQ== + +dlv@^1.1.3: + version "1.1.3" + resolved "https://registry.npmmirror.com/dlv/-/dlv-1.1.3.tgz#5c198a8a11453596e751494d49874bc7732f2e79" + integrity sha512-+HlytyjlPKnIG8XuRG8WvmBP8xs8P71y+SKKS6ZXWoEgLuePxtDoUEiH7WkdePWrQ5JBpE6aoVqfZfJUQkjXwA== + +doctrine@^3.0.0: + version "3.0.0" + resolved "https://registry.npmmirror.com/doctrine/-/doctrine-3.0.0.tgz#addebead72a6574db783639dc87a121773973961" + integrity sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w== + dependencies: + esutils "^2.0.2" + +domexception@^4.0.0: + version "4.0.0" + resolved "https://registry.npmmirror.com/domexception/-/domexception-4.0.0.tgz#4ad1be56ccadc86fc76d033353999a8037d03673" + integrity sha512-A2is4PLG+eeSfoTMA95/s4pvAoSo2mKtiM5jlHkAVewmiO8ISFTFKZjH7UAM1Atli/OT/7JHOrJRJiMKUZKYBw== + dependencies: + webidl-conversions "^7.0.0" + +dotenv@^16.3.1: + version "16.4.7" + resolved "https://registry.npmmirror.com/dotenv/-/dotenv-16.4.7.tgz#0e20c5b82950140aa99be360a8a5f52335f53c26" + integrity sha512-47qPchRCykZC03FhkYAhrvwU4xDBFIj1QPqaarj6mdM/hgUzfPHcpkHJOn3mJAufFeeAxAzeGsr5X0M4k6fLZQ== + +dunder-proto@^1.0.1: + version "1.0.1" + resolved "https://registry.npmmirror.com/dunder-proto/-/dunder-proto-1.0.1.tgz#d7ae667e1dc83482f8b70fd0f6eefc50da30f58a" + integrity sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A== + dependencies: + call-bind-apply-helpers "^1.0.1" + es-errors "^1.3.0" + gopd "^1.2.0" + +eastasianwidth@^0.2.0: + version "0.2.0" + resolved "https://registry.npmmirror.com/eastasianwidth/-/eastasianwidth-0.2.0.tgz#696ce2ec0aa0e6ea93a397ffcf24aa7840c827cb" + integrity sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA== + +echarts@^5.4.3: + version "5.6.0" + resolved "https://registry.npmmirror.com/echarts/-/echarts-5.6.0.tgz#2377874dca9fb50f104051c3553544752da3c9d6" + integrity 
sha512-oTbVTsXfKuEhxftHqL5xprgLoc0k7uScAwtryCgWF6hPYFLRwOUHiFmHGCBKP5NPFNkDVopOieyUqYGH8Fa3kA== + dependencies: + tslib "2.3.0" + zrender "5.6.1" + +editorconfig@^1.0.4: + version "1.0.4" + resolved "https://registry.npmmirror.com/editorconfig/-/editorconfig-1.0.4.tgz#040c9a8e9a6c5288388b87c2db07028aa89f53a3" + integrity sha512-L9Qe08KWTlqYMVvMcTIvMAdl1cDUubzRNYL+WfA4bLDMHe4nemKkpmYzkznE1FwLKu0EEmy6obgQKzMJrg4x9Q== + dependencies: + "@one-ini/wasm" "0.1.1" + commander "^10.0.0" + minimatch "9.0.1" + semver "^7.5.3" + +electron-to-chromium@^1.5.73: + version "1.5.134" + resolved "https://registry.npmmirror.com/electron-to-chromium/-/electron-to-chromium-1.5.134.tgz#d90008c4f8a506c1a6d1b329f922d83e18904101" + integrity sha512-zSwzrLg3jNP3bwsLqWHmS5z2nIOQ5ngMnfMZOWWtXnqqQkPVyOipxK98w+1beLw1TB+EImPNcG8wVP/cLVs2Og== + +elegant-spinner@^1.0.1: + version "1.0.1" + resolved "https://registry.npmmirror.com/elegant-spinner/-/elegant-spinner-1.0.1.tgz#db043521c95d7e303fd8f345bedc3349cfb0729e" + integrity sha512-B+ZM+RXvRqQaAmkMlO/oSe5nMUOaUnyfGYCEHoR8wrXsZR2mA0XVibsxV1bvTwxdRWah1PkQqso2EzhILGHtEQ== + +emoji-regex@^7.0.1: + version "7.0.3" + resolved "https://registry.npmmirror.com/emoji-regex/-/emoji-regex-7.0.3.tgz#933a04052860c85e83c122479c4748a8e4c72156" + integrity sha512-CwBLREIQ7LvYFB0WyRvwhq5N5qPhc6PMjD6bYggFlI5YyDgl+0vxq5VHbMOFqLg7hfWzmu8T5Z1QofhmTIhItA== + +emoji-regex@^8.0.0: + version "8.0.0" + resolved "https://registry.npmmirror.com/emoji-regex/-/emoji-regex-8.0.0.tgz#e818fd69ce5ccfcb404594f842963bf53164cc37" + integrity sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A== + +emoji-regex@^9.2.2: + version "9.2.2" + resolved "https://registry.npmmirror.com/emoji-regex/-/emoji-regex-9.2.2.tgz#840c8803b0d8047f4ff0cf963176b32d4ef3ed72" + integrity sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg== + +end-of-stream@^1.1.0: + version "1.4.4" + resolved "https://registry.npmmirror.com/end-of-stream/-/end-of-stream-1.4.4.tgz#5ae64a5f45057baf3626ec14da0ca5e4b2431eb0" + integrity sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q== + dependencies: + once "^1.4.0" + +engine.io-client@~6.6.1: + version "6.6.3" + resolved "https://registry.npmmirror.com/engine.io-client/-/engine.io-client-6.6.3.tgz#815393fa24f30b8e6afa8f77ccca2f28146be6de" + integrity sha512-T0iLjnyNWahNyv/lcjS2y4oE358tVS/SYQNxYXGAJ9/GLgH4VCvOQ/mhTjqU88mLZCQgiG8RIegFHYCdVC+j5w== + dependencies: + "@socket.io/component-emitter" "~3.1.0" + debug "~4.3.1" + engine.io-parser "~5.2.1" + ws "~8.17.1" + xmlhttprequest-ssl "~2.1.1" + +engine.io-parser@~5.2.1: + version "5.2.3" + resolved "https://registry.npmmirror.com/engine.io-parser/-/engine.io-parser-5.2.3.tgz#00dc5b97b1f233a23c9398d0209504cf5f94d92f" + integrity sha512-HqD3yTBfnBxIrbnM1DoD6Pcq8NECnh8d4As1Qgh0z5Gg3jRRIqijury0CL3ghu/edArpUYiYqQiDUQBIs4np3Q== + +entities@^4.4.0, entities@^4.5.0: + version "4.5.0" + resolved "https://registry.npmmirror.com/entities/-/entities-4.5.0.tgz#5d268ea5e7113ec74c4d033b79ea5a35a488fb48" + integrity sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw== + +entities@~2.1.0: + version "2.1.0" + resolved "https://registry.npmmirror.com/entities/-/entities-2.1.0.tgz#992d3129cf7df6870b96c57858c249a120f8b8b5" + integrity sha512-hCx1oky9PFrJ611mf0ifBLBRW8lUUVRlFolb5gWRfIELabBlbp9xZvrqZLZAs+NxFnbfQoeGd8wDkygjg7U85w== + +error-ex@^1.3.1: + version "1.3.2" + resolved 
"https://registry.npmmirror.com/error-ex/-/error-ex-1.3.2.tgz#b4ac40648107fdcdcfae242f428bea8a14d4f1bf" + integrity sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g== + dependencies: + is-arrayish "^0.2.1" + +es-define-property@^1.0.1: + version "1.0.1" + resolved "https://registry.npmmirror.com/es-define-property/-/es-define-property-1.0.1.tgz#983eb2f9a6724e9303f61addf011c72e09e0b0fa" + integrity sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g== + +es-errors@^1.3.0: + version "1.3.0" + resolved "https://registry.npmmirror.com/es-errors/-/es-errors-1.3.0.tgz#05f75a25dab98e4fb1dcd5e1472c0546d5057c8f" + integrity sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw== + +es-object-atoms@^1.0.0, es-object-atoms@^1.1.1: + version "1.1.1" + resolved "https://registry.npmmirror.com/es-object-atoms/-/es-object-atoms-1.1.1.tgz#1c4f2c4837327597ce69d2ca190a7fdd172338c1" + integrity sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA== + dependencies: + es-errors "^1.3.0" + +es-set-tostringtag@^2.1.0: + version "2.1.0" + resolved "https://registry.npmmirror.com/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz#f31dbbe0c183b00a6d26eb6325c810c0fd18bd4d" + integrity sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA== + dependencies: + es-errors "^1.3.0" + get-intrinsic "^1.2.6" + has-tostringtag "^1.0.2" + hasown "^2.0.2" + +esbuild-android-64@0.14.54: + version "0.14.54" + resolved "https://registry.npmmirror.com/esbuild-android-64/-/esbuild-android-64-0.14.54.tgz#505f41832884313bbaffb27704b8bcaa2d8616be" + integrity sha512-Tz2++Aqqz0rJ7kYBfz+iqyE3QMycD4vk7LBRyWaAVFgFtQ/O8EJOnVmTOiDWYZ/uYzB4kvP+bqejYdVKzE5lAQ== + +esbuild-android-arm64@0.14.54: + version "0.14.54" + resolved "https://registry.npmmirror.com/esbuild-android-arm64/-/esbuild-android-arm64-0.14.54.tgz#8ce69d7caba49646e009968fe5754a21a9871771" + integrity sha512-F9E+/QDi9sSkLaClO8SOV6etqPd+5DgJje1F9lOWoNncDdOBL2YF59IhsWATSt0TLZbYCf3pNlTHvVV5VfHdvg== + +esbuild-darwin-64@0.14.54: + version "0.14.54" + resolved "https://registry.npmmirror.com/esbuild-darwin-64/-/esbuild-darwin-64-0.14.54.tgz#24ba67b9a8cb890a3c08d9018f887cc221cdda25" + integrity sha512-jtdKWV3nBviOd5v4hOpkVmpxsBy90CGzebpbO9beiqUYVMBtSc0AL9zGftFuBon7PNDcdvNCEuQqw2x0wP9yug== + +esbuild-darwin-arm64@0.14.54: + version "0.14.54" + resolved "https://registry.npmmirror.com/esbuild-darwin-arm64/-/esbuild-darwin-arm64-0.14.54.tgz#3f7cdb78888ee05e488d250a2bdaab1fa671bf73" + integrity sha512-OPafJHD2oUPyvJMrsCvDGkRrVCar5aVyHfWGQzY1dWnzErjrDuSETxwA2HSsyg2jORLY8yBfzc1MIpUkXlctmw== + +esbuild-freebsd-64@0.14.54: + version "0.14.54" + resolved "https://registry.npmmirror.com/esbuild-freebsd-64/-/esbuild-freebsd-64-0.14.54.tgz#09250f997a56ed4650f3e1979c905ffc40bbe94d" + integrity sha512-OKwd4gmwHqOTp4mOGZKe/XUlbDJ4Q9TjX0hMPIDBUWWu/kwhBAudJdBoxnjNf9ocIB6GN6CPowYpR/hRCbSYAg== + +esbuild-freebsd-arm64@0.14.54: + version "0.14.54" + resolved "https://registry.npmmirror.com/esbuild-freebsd-arm64/-/esbuild-freebsd-arm64-0.14.54.tgz#bafb46ed04fc5f97cbdb016d86947a79579f8e48" + integrity sha512-sFwueGr7OvIFiQT6WeG0jRLjkjdqWWSrfbVwZp8iMP+8UHEHRBvlaxL6IuKNDwAozNUmbb8nIMXa7oAOARGs1Q== + +esbuild-linux-32@0.14.54: + version "0.14.54" + resolved "https://registry.npmmirror.com/esbuild-linux-32/-/esbuild-linux-32-0.14.54.tgz#e2a8c4a8efdc355405325033fcebeb941f781fe5" + integrity 
sha512-1ZuY+JDI//WmklKlBgJnglpUL1owm2OX+8E1syCD6UAxcMM/XoWd76OHSjl/0MR0LisSAXDqgjT3uJqT67O3qw== + +esbuild-linux-64@0.14.54: + version "0.14.54" + resolved "https://registry.npmmirror.com/esbuild-linux-64/-/esbuild-linux-64-0.14.54.tgz#de5fdba1c95666cf72369f52b40b03be71226652" + integrity sha512-EgjAgH5HwTbtNsTqQOXWApBaPVdDn7XcK+/PtJwZLT1UmpLoznPd8c5CxqsH2dQK3j05YsB3L17T8vE7cp4cCg== + +esbuild-linux-arm64@0.14.54: + version "0.14.54" + resolved "https://registry.npmmirror.com/esbuild-linux-arm64/-/esbuild-linux-arm64-0.14.54.tgz#dae4cd42ae9787468b6a5c158da4c84e83b0ce8b" + integrity sha512-WL71L+0Rwv+Gv/HTmxTEmpv0UgmxYa5ftZILVi2QmZBgX3q7+tDeOQNqGtdXSdsL8TQi1vIaVFHUPDe0O0kdig== + +esbuild-linux-arm@0.14.54: + version "0.14.54" + resolved "https://registry.npmmirror.com/esbuild-linux-arm/-/esbuild-linux-arm-0.14.54.tgz#a2c1dff6d0f21dbe8fc6998a122675533ddfcd59" + integrity sha512-qqz/SjemQhVMTnvcLGoLOdFpCYbz4v4fUo+TfsWG+1aOu70/80RV6bgNpR2JCrppV2moUQkww+6bWxXRL9YMGw== + +esbuild-linux-mips64le@0.14.54: + version "0.14.54" + resolved "https://registry.npmmirror.com/esbuild-linux-mips64le/-/esbuild-linux-mips64le-0.14.54.tgz#d9918e9e4cb972f8d6dae8e8655bf9ee131eda34" + integrity sha512-qTHGQB8D1etd0u1+sB6p0ikLKRVuCWhYQhAHRPkO+OF3I/iSlTKNNS0Lh2Oc0g0UFGguaFZZiPJdJey3AGpAlw== + +esbuild-linux-ppc64le@0.14.54: + version "0.14.54" + resolved "https://registry.npmmirror.com/esbuild-linux-ppc64le/-/esbuild-linux-ppc64le-0.14.54.tgz#3f9a0f6d41073fb1a640680845c7de52995f137e" + integrity sha512-j3OMlzHiqwZBDPRCDFKcx595XVfOfOnv68Ax3U4UKZ3MTYQB5Yz3X1mn5GnodEVYzhtZgxEBidLWeIs8FDSfrQ== + +esbuild-linux-riscv64@0.14.54: + version "0.14.54" + resolved "https://registry.npmmirror.com/esbuild-linux-riscv64/-/esbuild-linux-riscv64-0.14.54.tgz#618853c028178a61837bc799d2013d4695e451c8" + integrity sha512-y7Vt7Wl9dkOGZjxQZnDAqqn+XOqFD7IMWiewY5SPlNlzMX39ocPQlOaoxvT4FllA5viyV26/QzHtvTjVNOxHZg== + +esbuild-linux-s390x@0.14.54: + version "0.14.54" + resolved "https://registry.npmmirror.com/esbuild-linux-s390x/-/esbuild-linux-s390x-0.14.54.tgz#d1885c4c5a76bbb5a0fe182e2c8c60eb9e29f2a6" + integrity sha512-zaHpW9dziAsi7lRcyV4r8dhfG1qBidQWUXweUjnw+lliChJqQr+6XD71K41oEIC3Mx1KStovEmlzm+MkGZHnHA== + +esbuild-netbsd-64@0.14.54: + version "0.14.54" + resolved "https://registry.npmmirror.com/esbuild-netbsd-64/-/esbuild-netbsd-64-0.14.54.tgz#69ae917a2ff241b7df1dbf22baf04bd330349e81" + integrity sha512-PR01lmIMnfJTgeU9VJTDY9ZerDWVFIUzAtJuDHwwceppW7cQWjBBqP48NdeRtoP04/AtO9a7w3viI+PIDr6d+w== + +esbuild-openbsd-64@0.14.54: + version "0.14.54" + resolved "https://registry.npmmirror.com/esbuild-openbsd-64/-/esbuild-openbsd-64-0.14.54.tgz#db4c8495287a350a6790de22edea247a57c5d47b" + integrity sha512-Qyk7ikT2o7Wu76UsvvDS5q0amJvmRzDyVlL0qf5VLsLchjCa1+IAvd8kTBgUxD7VBUUVgItLkk609ZHUc1oCaw== + +esbuild-sunos-64@0.14.54: + version "0.14.54" + resolved "https://registry.npmmirror.com/esbuild-sunos-64/-/esbuild-sunos-64-0.14.54.tgz#54287ee3da73d3844b721c21bc80c1dc7e1bf7da" + integrity sha512-28GZ24KmMSeKi5ueWzMcco6EBHStL3B6ubM7M51RmPwXQGLe0teBGJocmWhgwccA1GeFXqxzILIxXpHbl9Q/Kw== + +esbuild-windows-32@0.14.54: + version "0.14.54" + resolved "https://registry.npmmirror.com/esbuild-windows-32/-/esbuild-windows-32-0.14.54.tgz#f8aaf9a5667630b40f0fb3aa37bf01bbd340ce31" + integrity sha512-T+rdZW19ql9MjS7pixmZYVObd9G7kcaZo+sETqNH4RCkuuYSuv9AGHUVnPoP9hhuE1WM1ZimHz1CIBHBboLU7w== + +esbuild-windows-64@0.14.54: + version "0.14.54" + resolved 
"https://registry.npmmirror.com/esbuild-windows-64/-/esbuild-windows-64-0.14.54.tgz#bf54b51bd3e9b0f1886ffdb224a4176031ea0af4" + integrity sha512-AoHTRBUuYwXtZhjXZbA1pGfTo8cJo3vZIcWGLiUcTNgHpJJMC1rVA44ZereBHMJtotyN71S8Qw0npiCIkW96cQ== + +esbuild-windows-arm64@0.14.54: + version "0.14.54" + resolved "https://registry.npmmirror.com/esbuild-windows-arm64/-/esbuild-windows-arm64-0.14.54.tgz#937d15675a15e4b0e4fafdbaa3a01a776a2be982" + integrity sha512-M0kuUvXhot1zOISQGXwWn6YtS+Y/1RT9WrVIOywZnJHo3jCDyewAc79aKNQWFCQm+xNHVTq9h8dZKvygoXQQRg== + +esbuild@^0.14.27: + version "0.14.54" + resolved "https://registry.npmmirror.com/esbuild/-/esbuild-0.14.54.tgz#8b44dcf2b0f1a66fc22459943dccf477535e9aa2" + integrity sha512-Cy9llcy8DvET5uznocPyqL3BFRrFXSVqbgpMJ9Wz8oVjZlh/zUSNbPRbov0VX7VxN2JH1Oa0uNxZ7eLRb62pJA== + optionalDependencies: + "@esbuild/linux-loong64" "0.14.54" + esbuild-android-64 "0.14.54" + esbuild-android-arm64 "0.14.54" + esbuild-darwin-64 "0.14.54" + esbuild-darwin-arm64 "0.14.54" + esbuild-freebsd-64 "0.14.54" + esbuild-freebsd-arm64 "0.14.54" + esbuild-linux-32 "0.14.54" + esbuild-linux-64 "0.14.54" + esbuild-linux-arm "0.14.54" + esbuild-linux-arm64 "0.14.54" + esbuild-linux-mips64le "0.14.54" + esbuild-linux-ppc64le "0.14.54" + esbuild-linux-riscv64 "0.14.54" + esbuild-linux-s390x "0.14.54" + esbuild-netbsd-64 "0.14.54" + esbuild-openbsd-64 "0.14.54" + esbuild-sunos-64 "0.14.54" + esbuild-windows-32 "0.14.54" + esbuild-windows-64 "0.14.54" + esbuild-windows-arm64 "0.14.54" + +esbuild@^0.19.3: + version "0.19.12" + resolved "https://registry.npmmirror.com/esbuild/-/esbuild-0.19.12.tgz#dc82ee5dc79e82f5a5c3b4323a2a641827db3e04" + integrity sha512-aARqgq8roFBj054KvQr5f1sFu0D65G+miZRCuJyJ0G13Zwx7vRar5Zhn2tkQNzIXcBrNVsv/8stehpj+GAjgbg== + optionalDependencies: + "@esbuild/aix-ppc64" "0.19.12" + "@esbuild/android-arm" "0.19.12" + "@esbuild/android-arm64" "0.19.12" + "@esbuild/android-x64" "0.19.12" + "@esbuild/darwin-arm64" "0.19.12" + "@esbuild/darwin-x64" "0.19.12" + "@esbuild/freebsd-arm64" "0.19.12" + "@esbuild/freebsd-x64" "0.19.12" + "@esbuild/linux-arm" "0.19.12" + "@esbuild/linux-arm64" "0.19.12" + "@esbuild/linux-ia32" "0.19.12" + "@esbuild/linux-loong64" "0.19.12" + "@esbuild/linux-mips64el" "0.19.12" + "@esbuild/linux-ppc64" "0.19.12" + "@esbuild/linux-riscv64" "0.19.12" + "@esbuild/linux-s390x" "0.19.12" + "@esbuild/linux-x64" "0.19.12" + "@esbuild/netbsd-x64" "0.19.12" + "@esbuild/openbsd-x64" "0.19.12" + "@esbuild/sunos-x64" "0.19.12" + "@esbuild/win32-arm64" "0.19.12" + "@esbuild/win32-ia32" "0.19.12" + "@esbuild/win32-x64" "0.19.12" + +escalade@^3.1.1, escalade@^3.2.0: + version "3.2.0" + resolved "https://registry.npmmirror.com/escalade/-/escalade-3.2.0.tgz#011a3f69856ba189dffa7dc8fcce99d2a87903e5" + integrity sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA== + +escape-string-regexp@^1.0.2, escape-string-regexp@^1.0.5: + version "1.0.5" + resolved "https://registry.npmmirror.com/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz#1b61c0562190a8dff6ae3bb2cf0200ca130b86d4" + integrity sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg== + +escape-string-regexp@^4.0.0: + version "4.0.0" + resolved "https://registry.npmmirror.com/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz#14ba83a5d373e3d311e5afca29cf5bfad965bf34" + integrity sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA== + +escodegen@^2.0.0: + version "2.1.0" + resolved 
"https://registry.npmmirror.com/escodegen/-/escodegen-2.1.0.tgz#ba93bbb7a43986d29d6041f99f5262da773e2e17" + integrity sha512-2NlIDTwUWJN0mRPQOdtQBzbUHvdGY2P1VXSyU83Q3xKxM7WHX2Ql8dKq782Q9TgQUNOLEzEYu9bzLNj1q88I5w== + dependencies: + esprima "^4.0.1" + estraverse "^5.2.0" + esutils "^2.0.2" + optionalDependencies: + source-map "~0.6.1" + +eslint-config-prettier@^6.0.0: + version "6.15.0" + resolved "https://registry.npmmirror.com/eslint-config-prettier/-/eslint-config-prettier-6.15.0.tgz#7f93f6cb7d45a92f1537a70ecc06366e1ac6fed9" + integrity sha512-a1+kOYLR8wMGustcgAjdydMsQ2A/2ipRPwRKUmfYaSxc9ZPcrku080Ctl6zrZzZNs/U82MjSv+qKREkoq3bJaw== + dependencies: + get-stdin "^6.0.0" + +eslint-plugin-prettier@^3.1.1: + version "3.4.1" + resolved "https://registry.npmmirror.com/eslint-plugin-prettier/-/eslint-plugin-prettier-3.4.1.tgz#e9ddb200efb6f3d05ffe83b1665a716af4a387e5" + integrity sha512-htg25EUYUeIhKHXjOinK4BgCcDwtLHjqaxCDsMy5nbnUMkKFvIhMVCp+5GFUXQ4Nr8lBsPqtGAqBenbpFqAA2g== + dependencies: + prettier-linter-helpers "^1.0.0" + +eslint-plugin-vue@^6.2.2: + version "6.2.2" + resolved "https://registry.npmmirror.com/eslint-plugin-vue/-/eslint-plugin-vue-6.2.2.tgz#27fecd9a3a24789b0f111ecdd540a9e56198e0fe" + integrity sha512-Nhc+oVAHm0uz/PkJAWscwIT4ijTrK5fqNqz9QB1D35SbbuMG1uB6Yr5AJpvPSWg+WOw7nYNswerYh0kOk64gqQ== + dependencies: + natural-compare "^1.4.0" + semver "^5.6.0" + vue-eslint-parser "^7.0.0" + +eslint-scope@^5.0.0, eslint-scope@^5.1.1: + version "5.1.1" + resolved "https://registry.npmmirror.com/eslint-scope/-/eslint-scope-5.1.1.tgz#e786e59a66cb92b3f6c1fb0d508aab174848f48c" + integrity sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw== + dependencies: + esrecurse "^4.3.0" + estraverse "^4.1.1" + +eslint-utils@^1.4.3: + version "1.4.3" + resolved "https://registry.npmmirror.com/eslint-utils/-/eslint-utils-1.4.3.tgz#74fec7c54d0776b6f67e0251040b5806564e981f" + integrity sha512-fbBN5W2xdY45KulGXmLHZ3c3FHfVYmKg0IrAKGOkT/464PQsx2UeIzfz1RmEci+KLm1bBaAzZAh8+/E+XAeZ8Q== + dependencies: + eslint-visitor-keys "^1.1.0" + +eslint-visitor-keys@^1.0.0, eslint-visitor-keys@^1.1.0: + version "1.3.0" + resolved "https://registry.npmmirror.com/eslint-visitor-keys/-/eslint-visitor-keys-1.3.0.tgz#30ebd1ef7c2fdff01c3a4f151044af25fab0523e" + integrity sha512-6J72N8UNa462wa/KFODt/PJ3IU60SDpC3QXC1Hjc1BXXpfL2C9R5+AU7jhe0F6GREqVMh4Juu+NY7xn+6dipUQ== + +eslint@^6.7.2: + version "6.8.0" + resolved "https://registry.npmmirror.com/eslint/-/eslint-6.8.0.tgz#62262d6729739f9275723824302fb227c8c93ffb" + integrity sha512-K+Iayyo2LtyYhDSYwz5D5QdWw0hCacNzyq1Y821Xna2xSJj7cijoLLYmLxTQgcgZ9mC61nryMy9S7GRbYpI5Ig== + dependencies: + "@babel/code-frame" "^7.0.0" + ajv "^6.10.0" + chalk "^2.1.0" + cross-spawn "^6.0.5" + debug "^4.0.1" + doctrine "^3.0.0" + eslint-scope "^5.0.0" + eslint-utils "^1.4.3" + eslint-visitor-keys "^1.1.0" + espree "^6.1.2" + esquery "^1.0.1" + esutils "^2.0.2" + file-entry-cache "^5.0.1" + functional-red-black-tree "^1.0.1" + glob-parent "^5.0.0" + globals "^12.1.0" + ignore "^4.0.6" + import-fresh "^3.0.0" + imurmurhash "^0.1.4" + inquirer "^7.0.0" + is-glob "^4.0.0" + js-yaml "^3.13.1" + json-stable-stringify-without-jsonify "^1.0.1" + levn "^0.3.0" + lodash "^4.17.14" + minimatch "^3.0.4" + mkdirp "^0.5.1" + natural-compare "^1.4.0" + optionator "^0.8.3" + progress "^2.0.0" + regexpp "^2.0.1" + semver "^6.1.2" + strip-ansi "^5.2.0" + strip-json-comments "^3.0.1" + table "^5.2.3" + text-table "^0.2.0" + v8-compile-cache "^2.0.3" + +espree@^6.1.2, espree@^6.2.1: + 
version "6.2.1" + resolved "https://registry.npmmirror.com/espree/-/espree-6.2.1.tgz#77fc72e1fd744a2052c20f38a5b575832e82734a" + integrity sha512-ysCxRQY3WaXJz9tdbWOwuWr5Y/XrPTGX9Kiz3yoUXwW0VZ4w30HTkQLaGx/+ttFjF8i+ACbArnB4ce68a9m5hw== + dependencies: + acorn "^7.1.1" + acorn-jsx "^5.2.0" + eslint-visitor-keys "^1.1.0" + +esprima@^4.0.0, esprima@^4.0.1: + version "4.0.1" + resolved "https://registry.npmmirror.com/esprima/-/esprima-4.0.1.tgz#13b04cdb3e6c5d19df91ab6987a8695619b0aa71" + integrity sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A== + +esquery@^1.0.1, esquery@^1.4.0: + version "1.6.0" + resolved "https://registry.npmmirror.com/esquery/-/esquery-1.6.0.tgz#91419234f804d852a82dceec3e16cdc22cf9dae7" + integrity sha512-ca9pw9fomFcKPvFLXhBKUK90ZvGibiGOvRJNbjljY7s7uq/5YO4BOzcYtJqExdx99rF6aAcnRxHmcUHcz6sQsg== + dependencies: + estraverse "^5.1.0" + +esrecurse@^4.3.0: + version "4.3.0" + resolved "https://registry.npmmirror.com/esrecurse/-/esrecurse-4.3.0.tgz#7ad7964d679abb28bee72cec63758b1c5d2c9921" + integrity sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag== + dependencies: + estraverse "^5.2.0" + +estraverse@^4.1.1: + version "4.3.0" + resolved "https://registry.npmmirror.com/estraverse/-/estraverse-4.3.0.tgz#398ad3f3c5a24948be7725e83d11a7de28cdbd1d" + integrity sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw== + +estraverse@^5.1.0, estraverse@^5.2.0: + version "5.3.0" + resolved "https://registry.npmmirror.com/estraverse/-/estraverse-5.3.0.tgz#2eea5290702f26ab8fe5370370ff86c965d21123" + integrity sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA== + +estree-walker@^2.0.2: + version "2.0.2" + resolved "https://registry.npmmirror.com/estree-walker/-/estree-walker-2.0.2.tgz#52f010178c2a4c117a7757cfe942adb7d2da4cac" + integrity sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w== + +esutils@^2.0.2: + version "2.0.3" + resolved "https://registry.npmmirror.com/esutils/-/esutils-2.0.3.tgz#74d2eb4de0b8da1293711910d50775b9b710ef64" + integrity sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g== + +events@^3.3.0: + version "3.3.0" + resolved "https://registry.npmmirror.com/events/-/events-3.3.0.tgz#31a95ad0a924e2d2c419a813aeb2c4e878ea7400" + integrity sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q== + +execa@^0.8.0: + version "0.8.0" + resolved "https://registry.npmmirror.com/execa/-/execa-0.8.0.tgz#d8d76bbc1b55217ed190fd6dd49d3c774ecfc8da" + integrity sha512-zDWS+Rb1E8BlqqhALSt9kUhss8Qq4nN3iof3gsOdyINksElaPyNBtKUMTR62qhvgVWR0CqCX7sdnKe4MnUbFEA== + dependencies: + cross-spawn "^5.0.1" + get-stream "^3.0.0" + is-stream "^1.1.0" + npm-run-path "^2.0.0" + p-finally "^1.0.0" + signal-exit "^3.0.0" + strip-eof "^1.0.0" + +execa@^2.0.3: + version "2.1.0" + resolved "https://registry.npmmirror.com/execa/-/execa-2.1.0.tgz#e5d3ecd837d2a60ec50f3da78fd39767747bbe99" + integrity sha512-Y/URAVapfbYy2Xp/gb6A0E7iR8xeqOCXsuuaoMn7A5PzrXUK84E1gyiEfq0wQd/GHA6GsoHWwhNq8anb0mleIw== + dependencies: + cross-spawn "^7.0.0" + get-stream "^5.0.0" + is-stream "^2.0.0" + merge-stream "^2.0.0" + npm-run-path "^3.0.0" + onetime "^5.1.0" + p-finally "^2.0.0" + signal-exit "^3.0.2" + strip-final-newline "^2.0.0" + +execa@^5.1.1: + version "5.1.1" + resolved 
"https://registry.npmmirror.com/execa/-/execa-5.1.1.tgz#f80ad9cbf4298f7bd1d4c9555c21e93741c411dd" + integrity sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg== + dependencies: + cross-spawn "^7.0.3" + get-stream "^6.0.0" + human-signals "^2.1.0" + is-stream "^2.0.0" + merge-stream "^2.0.0" + npm-run-path "^4.0.1" + onetime "^5.1.2" + signal-exit "^3.0.3" + strip-final-newline "^2.0.0" + +exsolve@^1.0.1: + version "1.0.4" + resolved "https://registry.npmmirror.com/exsolve/-/exsolve-1.0.4.tgz#7de5c75af82ecd15998328fbf5f2295883be3a39" + integrity sha512-xsZH6PXaER4XoV+NiT7JHp1bJodJVT+cxeSH1G0f0tlT0lJqYuHUP3bUx2HtfTDvOagMINYp8rsqusxud3RXhw== + +external-editor@^3.0.3: + version "3.1.0" + resolved "https://registry.npmmirror.com/external-editor/-/external-editor-3.1.0.tgz#cb03f740befae03ea4d283caed2741a83f335495" + integrity sha512-hMQ4CX1p1izmuLYyZqLMO/qGNw10wSv9QDCPfzXfyFrOaCSSoRfqE1Kf1s5an66J5JZC62NewG+mK49jOCtQew== + dependencies: + chardet "^0.7.0" + iconv-lite "^0.4.24" + tmp "^0.0.33" + +fast-deep-equal@^3.1.1, fast-deep-equal@^3.1.3: + version "3.1.3" + resolved "https://registry.npmmirror.com/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz#3a7d56b559d6cbc3eb512325244e619a65c6c525" + integrity sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q== + +fast-diff@^1.1.2: + version "1.3.0" + resolved "https://registry.npmmirror.com/fast-diff/-/fast-diff-1.3.0.tgz#ece407fa550a64d638536cd727e129c61616e0f0" + integrity sha512-VxPP4NqbUjj6MaAOafWeUn2cXWLcCtljklUtZf0Ind4XQ+QPtmA0b18zZy0jIQx+ExRVCR/ZQpBmik5lXshNsw== + +fast-glob@^3.0.3, fast-glob@^3.2.7, fast-glob@^3.3.0, fast-glob@^3.3.2: + version "3.3.3" + resolved "https://registry.npmmirror.com/fast-glob/-/fast-glob-3.3.3.tgz#d06d585ce8dba90a16b0505c543c3ccfb3aeb818" + integrity sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg== + dependencies: + "@nodelib/fs.stat" "^2.0.2" + "@nodelib/fs.walk" "^1.2.3" + glob-parent "^5.1.2" + merge2 "^1.3.0" + micromatch "^4.0.8" + +fast-json-stable-stringify@^2.0.0: + version "2.1.0" + resolved "https://registry.npmmirror.com/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz#874bf69c6f404c2b5d99c481341399fd55892633" + integrity sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw== + +fast-levenshtein@~2.0.6: + version "2.0.6" + resolved "https://registry.npmmirror.com/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz#3d8a5c66883a16a30ca8643e851f19baa7797917" + integrity sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw== + +fastq@^1.6.0: + version "1.19.1" + resolved "https://registry.npmmirror.com/fastq/-/fastq-1.19.1.tgz#d50eaba803c8846a883c16492821ebcd2cda55f5" + integrity sha512-GwLTyxkCXjXbxqIhTsMI2Nui8huMPtnxg7krajPJAjnEG/iiOS7i+zCtWGZR9G0NBKbXKh6X9m9UIsYX/N6vvQ== + dependencies: + reusify "^1.0.4" + +feather-icons@^4.26.0, feather-icons@^4.28.0: + version "4.29.2" + resolved "https://registry.npmmirror.com/feather-icons/-/feather-icons-4.29.2.tgz#b03a47588a1c400f215e884504db1c18860d89f8" + integrity sha512-0TaCFTnBTVCz6U+baY2UJNKne5ifGh7sMG4ZC2LoBWCZdIyPa+y6UiR4lEYGws1JOFWdee8KAsAIvu0VcXqiqA== + dependencies: + classnames "^2.2.5" + core-js "^3.1.3" + +fetch-blob@^3.1.2, fetch-blob@^3.1.4: + version "3.2.0" + resolved "https://registry.npmmirror.com/fetch-blob/-/fetch-blob-3.2.0.tgz#f09b8d4bbd45adc6f0c20b7e787e793e309dcce9" + integrity 
sha512-7yAQpD2UMJzLi1Dqv7qFYnPbaPx7ZfFK6PiIxQ4PfkGPyNyl2Ugx+a/umUonmKqjhM4DnfbMvdX6otXq83soQQ== + dependencies: + node-domexception "^1.0.0" + web-streams-polyfill "^3.0.3" + +figures@^1.7.0: + version "1.7.0" + resolved "https://registry.npmmirror.com/figures/-/figures-1.7.0.tgz#cbe1e3affcf1cd44b80cadfed28dc793a9701d2e" + integrity sha512-UxKlfCRuCBxSXU4C6t9scbDyWZ4VlaFFdojKtzJuSkuOBQ5CNFum+zZXFwHjo+CxBC1t6zlYPgHIgFjL8ggoEQ== + dependencies: + escape-string-regexp "^1.0.5" + object-assign "^4.1.0" + +figures@^2.0.0: + version "2.0.0" + resolved "https://registry.npmmirror.com/figures/-/figures-2.0.0.tgz#3ab1a2d2a62c8bfb431a0c94cb797a2fce27c962" + integrity sha512-Oa2M9atig69ZkfwiApY8F2Yy+tzMbazyvqv21R0NsSC8floSOC09BbT1ITWAdoMGQvJ/aZnR1KMwdx9tvHnTNA== + dependencies: + escape-string-regexp "^1.0.5" + +figures@^3.0.0: + version "3.2.0" + resolved "https://registry.npmmirror.com/figures/-/figures-3.2.0.tgz#625c18bd293c604dc4a8ddb2febf0c88341746af" + integrity sha512-yaduQFRKLXYOGgEn6AZau90j3ggSOyiqXU0F9JZfeXYhNa+Jk4X+s45A2zg5jns87GAFa34BBm2kXw4XpNcbdg== + dependencies: + escape-string-regexp "^1.0.5" + +file-entry-cache@^5.0.1: + version "5.0.1" + resolved "https://registry.npmmirror.com/file-entry-cache/-/file-entry-cache-5.0.1.tgz#ca0f6efa6dd3d561333fb14515065c2fafdf439c" + integrity sha512-bCg29ictuBaKUwwArK4ouCaqDgLZcysCFLmM/Yn/FDoqndh/9vNuQfXRDvTuXKLxfD/JtZQGKFT8MGcJBK644g== + dependencies: + flat-cache "^2.0.1" + +fill-range@^7.1.1: + version "7.1.1" + resolved "https://registry.npmmirror.com/fill-range/-/fill-range-7.1.1.tgz#44265d3cac07e3ea7dc247516380643754a05292" + integrity sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg== + dependencies: + to-regex-range "^5.0.1" + +find-up@^4.1.0: + version "4.1.0" + resolved "https://registry.npmmirror.com/find-up/-/find-up-4.1.0.tgz#97afe7d6cdc0bc5928584b7c8d7b16e8a9aa5d19" + integrity sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw== + dependencies: + locate-path "^5.0.0" + path-exists "^4.0.0" + +find-up@^5.0.0: + version "5.0.0" + resolved "https://registry.npmmirror.com/find-up/-/find-up-5.0.0.tgz#4c92819ecb7083561e4f4a240a86be5198f536fc" + integrity sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng== + dependencies: + locate-path "^6.0.0" + path-exists "^4.0.0" + +flat-cache@^2.0.1: + version "2.0.1" + resolved "https://registry.npmmirror.com/flat-cache/-/flat-cache-2.0.1.tgz#5d296d6f04bda44a4630a301413bdbc2ec085ec0" + integrity sha512-LoQe6yDuUMDzQAEH8sgmh4Md6oZnc/7PjtwjNFSzveXqSHt6ka9fPBuso7IGf9Rz4uqnSnWiFH2B/zj24a5ReA== + dependencies: + flatted "^2.0.0" + rimraf "2.6.3" + write "1.0.3" + +flatted@^2.0.0: + version "2.0.2" + resolved "https://registry.npmmirror.com/flatted/-/flatted-2.0.2.tgz#4575b21e2bcee7434aa9be662f4b7b5f9c2b5138" + integrity sha512-r5wGx7YeOwNWNlCA0wQ86zKyDLMQr+/RB8xy74M4hTphfmjlijTSSXGuH8rnvKZnfT9i+75zmd8jcKdMR4O6jA== + +foreground-child@^2.0.0: + version "2.0.0" + resolved "https://registry.npmmirror.com/foreground-child/-/foreground-child-2.0.0.tgz#71b32800c9f15aa8f2f83f4a6bd9bff35d861a53" + integrity sha512-dCIq9FpEcyQyXKCkyzmlPTFNgrCzPudOe+mhvJU5zAtlBnGVy2yKxtfsxK2tQBThwq225jcvBjpw1Gr40uzZCA== + dependencies: + cross-spawn "^7.0.0" + signal-exit "^3.0.2" + +foreground-child@^3.1.0: + version "3.3.1" + resolved "https://registry.npmmirror.com/foreground-child/-/foreground-child-3.3.1.tgz#32e8e9ed1b68a3497befb9ac2b6adf92a638576f" + integrity 
sha512-gIXjKqtFuWEgzFRJA9WCQeSJLZDjgJUOMCMzxtvFq/37KojM1BFGufqsCy0r4qSQmYLsZYMeyRqzIWOMup03sw== + dependencies: + cross-spawn "^7.0.6" + signal-exit "^4.0.1" + +form-data@^4.0.0: + version "4.0.2" + resolved "https://registry.npmmirror.com/form-data/-/form-data-4.0.2.tgz#35cabbdd30c3ce73deb2c42d3c8d3ed9ca51794c" + integrity sha512-hGfm/slu0ZabnNt4oaRZ6uREyfCj6P4fT/n6A1rGV+Z0VdGXjfOhVUpkn6qVQONHGIFwmveGXyDs75+nr6FM8w== + dependencies: + asynckit "^0.4.0" + combined-stream "^1.0.8" + es-set-tostringtag "^2.1.0" + mime-types "^2.1.12" + +formdata-polyfill@^4.0.10: + version "4.0.10" + resolved "https://registry.npmmirror.com/formdata-polyfill/-/formdata-polyfill-4.0.10.tgz#24807c31c9d402e002ab3d8c720144ceb8848423" + integrity sha512-buewHzMvYL29jdeQTVILecSaZKnt/RJWjoZCF5OW60Z67/GmSLBkOFM7qh1PI3zFNtJbaZL5eQu1vLfazOwj4g== + dependencies: + fetch-blob "^3.1.2" + +fraction.js@^4.3.7: + version "4.3.7" + resolved "https://registry.npmmirror.com/fraction.js/-/fraction.js-4.3.7.tgz#06ca0085157e42fda7f9e726e79fefc4068840f7" + integrity sha512-ZsDfxO51wGAXREY55a7la9LScWpwv9RxIrYABrlvOFBlH/ShPnrtsXeuUIfXKKOVicNxQ+o8JTbJvjS4M89yew== + +fs-extra@^10.0.0: + version "10.1.0" + resolved "https://registry.npmmirror.com/fs-extra/-/fs-extra-10.1.0.tgz#02873cfbc4084dde127eaa5f9905eef2325d1abf" + integrity sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ== + dependencies: + graceful-fs "^4.2.0" + jsonfile "^6.0.1" + universalify "^2.0.0" + +fs.realpath@^1.0.0: + version "1.0.0" + resolved "https://registry.npmmirror.com/fs.realpath/-/fs.realpath-1.0.0.tgz#1504ad2523158caa40db4a2787cb01411994ea4f" + integrity sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw== + +fsevents@~2.3.2, fsevents@~2.3.3: + version "2.3.3" + resolved "https://registry.npmmirror.com/fsevents/-/fsevents-2.3.3.tgz#cac6407785d03675a2a5e1a5305c697b347d90d6" + integrity sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw== + +function-bind@^1.1.2: + version "1.1.2" + resolved "https://registry.npmmirror.com/function-bind/-/function-bind-1.1.2.tgz#2c02d864d97f3ea6c8830c464cbd11ab6eab7a1c" + integrity sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA== + +functional-red-black-tree@^1.0.1: + version "1.0.1" + resolved "https://registry.npmmirror.com/functional-red-black-tree/-/functional-red-black-tree-1.0.1.tgz#1b0ab3bd553b2a0d6399d29c0e3ea0b252078327" + integrity sha512-dsKNQNdj6xA3T+QlADDA7mOSlX0qiMINjn0cgr+eGHGsbSHzTabcIogz2+p/iqP1Xs6EP/sS2SbqH+brGTbq0g== + +fuse.js@6.6.2: + version "6.6.2" + resolved "https://registry.npmmirror.com/fuse.js/-/fuse.js-6.6.2.tgz#fe463fed4b98c0226ac3da2856a415576dc9a111" + integrity sha512-cJaJkxCCxC8qIIcPBF9yGxY0W/tVZS3uEISDxhYIdtk8OL93pe+6Zj7LjCqVV4dzbqcriOZ+kQ/NE4RXZHsIGA== + +gensync@^1.0.0-beta.2: + version "1.0.0-beta.2" + resolved "https://registry.npmmirror.com/gensync/-/gensync-1.0.0-beta.2.tgz#32a6ee76c3d7f52d46b2b1ae5d93fea8580a25e0" + integrity sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg== + +get-caller-file@^2.0.1, get-caller-file@^2.0.5: + version "2.0.5" + resolved "https://registry.npmmirror.com/get-caller-file/-/get-caller-file-2.0.5.tgz#4f94412a82db32f36e3b0b9741f8a97feb031f7e" + integrity sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg== + +get-func-name@^2.0.1, get-func-name@^2.0.2: + version "2.0.2" + resolved 
"https://registry.npmmirror.com/get-func-name/-/get-func-name-2.0.2.tgz#0d7cf20cd13fda808669ffa88f4ffc7a3943fc41" + integrity sha512-8vXOvuE167CtIc3OyItco7N/dpRtBbYOsPsXCz7X/PMnlGjYjSGuZJgM1Y7mmew7BKf9BqvLX2tnOVy1BBUsxQ== + +get-intrinsic@^1.2.6: + version "1.3.0" + resolved "https://registry.npmmirror.com/get-intrinsic/-/get-intrinsic-1.3.0.tgz#743f0e3b6964a93a5491ed1bffaae054d7f98d01" + integrity sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ== + dependencies: + call-bind-apply-helpers "^1.0.2" + es-define-property "^1.0.1" + es-errors "^1.3.0" + es-object-atoms "^1.1.1" + function-bind "^1.1.2" + get-proto "^1.0.1" + gopd "^1.2.0" + has-symbols "^1.1.0" + hasown "^2.0.2" + math-intrinsics "^1.1.0" + +get-own-enumerable-property-symbols@^3.0.0: + version "3.0.2" + resolved "https://registry.npmmirror.com/get-own-enumerable-property-symbols/-/get-own-enumerable-property-symbols-3.0.2.tgz#b5fde77f22cbe35f390b4e089922c50bce6ef664" + integrity sha512-I0UBV/XOz1XkIJHEUDMZAbzCThU/H8DxmSfmdGcKPnVhu2VfFqr34jr9777IyaTYvxjedWhqVIilEDsCdP5G6g== + +get-proto@^1.0.1: + version "1.0.1" + resolved "https://registry.npmmirror.com/get-proto/-/get-proto-1.0.1.tgz#150b3f2743869ef3e851ec0c49d15b1d14d00ee1" + integrity sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g== + dependencies: + dunder-proto "^1.0.1" + es-object-atoms "^1.0.0" + +get-stdin@^6.0.0: + version "6.0.0" + resolved "https://registry.npmmirror.com/get-stdin/-/get-stdin-6.0.0.tgz#9e09bf712b360ab9225e812048f71fde9c89657b" + integrity sha512-jp4tHawyV7+fkkSKyvjuLZswblUtz+SQKzSWnBbii16BuZksJlU1wuBYXY75r+duh/llF1ur6oNwi+2ZzjKZ7g== + +get-stream@^3.0.0: + version "3.0.0" + resolved "https://registry.npmmirror.com/get-stream/-/get-stream-3.0.0.tgz#8e943d1358dc37555054ecbe2edb05aa174ede14" + integrity sha512-GlhdIUuVakc8SJ6kK0zAFbiGzRFzNnY4jUuEbV9UROo4Y+0Ny4fjvcZFVTeDA4odpFyOQzaw6hXukJSq/f28sQ== + +get-stream@^5.0.0: + version "5.2.0" + resolved "https://registry.npmmirror.com/get-stream/-/get-stream-5.2.0.tgz#4966a1795ee5ace65e706c4b7beb71257d6e22d3" + integrity sha512-nBF+F1rAZVCu/p7rjzgA+Yb4lfYXrpl7a6VmJrU8wF9I1CKvP/QwPNZHnOlwbTkY6dvtFIzFMSyQXbLoTQPRpA== + dependencies: + pump "^3.0.0" + +get-stream@^6.0.0: + version "6.0.1" + resolved "https://registry.npmmirror.com/get-stream/-/get-stream-6.0.1.tgz#a262d8eef67aced57c2852ad6167526a43cbf7b7" + integrity sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg== + +glob-parent@^5.0.0, glob-parent@^5.1.2, glob-parent@~5.1.2: + version "5.1.2" + resolved "https://registry.npmmirror.com/glob-parent/-/glob-parent-5.1.2.tgz#869832c58034fe68a4093c17dc15e8340d8401c4" + integrity sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow== + dependencies: + is-glob "^4.0.1" + +glob-parent@^6.0.1, glob-parent@^6.0.2: + version "6.0.2" + resolved "https://registry.npmmirror.com/glob-parent/-/glob-parent-6.0.2.tgz#6d237d99083950c79290f24c7642a3de9a28f9e3" + integrity sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A== + dependencies: + is-glob "^4.0.3" + +glob@^10.3.10, glob@^10.4.2: + version "10.4.5" + resolved "https://registry.npmmirror.com/glob/-/glob-10.4.5.tgz#f4d9f0b90ffdbab09c9d77f5f29b4262517b0956" + integrity sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg== + dependencies: + foreground-child "^3.1.0" + jackspeak "^3.1.2" + minimatch "^9.0.4" + 
minipass "^7.1.2" + package-json-from-dist "^1.0.0" + path-scurry "^1.11.1" + +glob@^7.0.3, glob@^7.1.2, glob@^7.1.3, glob@^7.1.4, glob@^7.1.7: + version "7.2.3" + resolved "https://registry.npmmirror.com/glob/-/glob-7.2.3.tgz#b8df0fb802bbfa8e89bd1d938b4e16578ed44f2b" + integrity sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q== + dependencies: + fs.realpath "^1.0.0" + inflight "^1.0.4" + inherits "2" + minimatch "^3.1.1" + once "^1.3.0" + path-is-absolute "^1.0.0" + +glob@^9.3.2: + version "9.3.5" + resolved "https://registry.npmmirror.com/glob/-/glob-9.3.5.tgz#ca2ed8ca452781a3009685607fdf025a899dfe21" + integrity sha512-e1LleDykUz2Iu+MTYdkSsuWX8lvAjAcs0Xef0lNIu0S2wOAzuTxCJtcd9S3cijlwYF18EsU3rzb8jPVobxDh9Q== + dependencies: + fs.realpath "^1.0.0" + minimatch "^8.0.2" + minipass "^4.2.4" + path-scurry "^1.6.1" + +globals@^11.1.0: + version "11.12.0" + resolved "https://registry.npmmirror.com/globals/-/globals-11.12.0.tgz#ab8795338868a0babd8525758018c2a7eb95c42e" + integrity sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA== + +globals@^12.1.0: + version "12.4.0" + resolved "https://registry.npmmirror.com/globals/-/globals-12.4.0.tgz#a18813576a41b00a24a97e7f815918c2e19925f8" + integrity sha512-BWICuzzDvDoH54NHKCseDanAhE3CeDorgDL5MT6LMXXj2WCnd9UC2szdk4AWLfjdgNBCXLUanXYcpBBKOSWGwg== + dependencies: + type-fest "^0.8.1" + +globals@^15.14.0: + version "15.15.0" + resolved "https://registry.npmmirror.com/globals/-/globals-15.15.0.tgz#7c4761299d41c32b075715a4ce1ede7897ff72a8" + integrity sha512-7ACyT3wmyp3I61S4fG682L0VA2RGD9otkqGJIwNUMF1SWUombIIk+af1unuDYgMm082aHYwD+mzJvv9Iu8dsgg== + +globby@^10.0.1: + version "10.0.2" + resolved "https://registry.npmmirror.com/globby/-/globby-10.0.2.tgz#277593e745acaa4646c3ab411289ec47a0392543" + integrity sha512-7dUi7RvCoT/xast/o/dLN53oqND4yk0nsHkhRgn9w65C4PofCLOoJ39iSOg+qVDdWQPIEj+eszMHQ+aLVwwQSg== + dependencies: + "@types/glob" "^7.1.1" + array-union "^2.1.0" + dir-glob "^3.0.1" + fast-glob "^3.0.3" + glob "^7.1.3" + ignore "^5.1.1" + merge2 "^1.2.3" + slash "^3.0.0" + +globby@^6.1.0: + version "6.1.0" + resolved "https://registry.npmmirror.com/globby/-/globby-6.1.0.tgz#f5a6d70e8395e21c858fb0489d64df02424d506c" + integrity sha512-KVbFv2TQtbzCoxAnfD6JcHZTYCzyliEaaeM/gH8qQdkKr5s0OP9scEgvdcngyk7AVdY6YVW/TJHd+lQ/Df3Daw== + dependencies: + array-union "^1.0.1" + glob "^7.0.3" + object-assign "^4.0.1" + pify "^2.0.0" + pinkie-promise "^2.0.0" + +gopd@^1.2.0: + version "1.2.0" + resolved "https://registry.npmmirror.com/gopd/-/gopd-1.2.0.tgz#89f56b8217bdbc8802bd299df6d7f1081d7e51a1" + integrity sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg== + +graceful-fs@^4.1.6, graceful-fs@^4.2.0, graceful-fs@^4.2.2: + version "4.2.11" + resolved "https://registry.npmmirror.com/graceful-fs/-/graceful-fs-4.2.11.tgz#4183e4e8bf08bb6e05bbb2f7d2e0c8f712ca40e3" + integrity sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ== + +graphql@^15.5.1: + version "15.10.1" + resolved "https://registry.npmmirror.com/graphql/-/graphql-15.10.1.tgz#e9ff3bb928749275477f748b14aa5c30dcad6f2f" + integrity sha512-BL/Xd/T9baO6NFzoMpiMD7YUZ62R6viR5tp/MULVEnbYJXZA//kRNW7J0j1w/wXArgL0sCxhDfK5dczSKn3+cg== + +has-ansi@^2.0.0: + version "2.0.0" + resolved "https://registry.npmmirror.com/has-ansi/-/has-ansi-2.0.0.tgz#34f5049ce1ecdf2b0649af3ef24e45ed35416d91" + integrity 
sha512-C8vBJ8DwUCx19vhm7urhTuUsr4/IyP6l4VzNQDv+ryHQObW3TTTp9yB68WpYgRe2bbaGuZ/se74IqFeVnMnLZg== + dependencies: + ansi-regex "^2.0.0" + +has-flag@^3.0.0: + version "3.0.0" + resolved "https://registry.npmmirror.com/has-flag/-/has-flag-3.0.0.tgz#b5d454dc2199ae225699f3467e5a07f3b955bafd" + integrity sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw== + +has-flag@^4.0.0: + version "4.0.0" + resolved "https://registry.npmmirror.com/has-flag/-/has-flag-4.0.0.tgz#944771fd9c81c81265c4d6941860da06bb59479b" + integrity sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ== + +has-symbols@^1.0.3, has-symbols@^1.1.0: + version "1.1.0" + resolved "https://registry.npmmirror.com/has-symbols/-/has-symbols-1.1.0.tgz#fc9c6a783a084951d0b971fe1018de813707a338" + integrity sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ== + +has-tostringtag@^1.0.2: + version "1.0.2" + resolved "https://registry.npmmirror.com/has-tostringtag/-/has-tostringtag-1.0.2.tgz#2cdc42d40bef2e5b4eeab7c01a73c54ce7ab5abc" + integrity sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw== + dependencies: + has-symbols "^1.0.3" + +hasown@^2.0.2: + version "2.0.2" + resolved "https://registry.npmmirror.com/hasown/-/hasown-2.0.2.tgz#003eaf91be7adc372e84ec59dc37252cedb80003" + integrity sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ== + dependencies: + function-bind "^1.1.2" + +he@^1.2.0: + version "1.2.0" + resolved "https://registry.npmmirror.com/he/-/he-1.2.0.tgz#84ae65fa7eafb165fddb61566ae14baf05664f0f" + integrity sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw== + +headers-utils@^3.0.2: + version "3.0.2" + resolved "https://registry.npmmirror.com/headers-utils/-/headers-utils-3.0.2.tgz#dfc65feae4b0e34357308aefbcafa99c895e59ef" + integrity sha512-xAxZkM1dRyGV2Ou5bzMxBPNLoRCjcX+ya7KSWybQD2KwLphxsapUVK6x/02o7f4VU6GPSXch9vNY2+gkU8tYWQ== + +hex-color-regex@^1.1.0: + version "1.1.0" + resolved "https://registry.npmmirror.com/hex-color-regex/-/hex-color-regex-1.1.0.tgz#4c06fccb4602fe2602b3c93df82d7e7dbf1a8a8e" + integrity sha512-l9sfDFsuqtOqKDsQdqrMRk0U85RZc0RtOR9yPI7mRVOa4FsR/BVnZ0shmQRM96Ji99kYZP/7hn1cedc1+ApsTQ== + +hsl-regex@^1.0.0: + version "1.0.0" + resolved "https://registry.npmmirror.com/hsl-regex/-/hsl-regex-1.0.0.tgz#d49330c789ed819e276a4c0d272dffa30b18fe6e" + integrity sha512-M5ezZw4LzXbBKMruP+BNANf0k+19hDQMgpzBIYnya//Al+fjNct9Wf3b1WedLqdEs2hKBvxq/jh+DsHJLj0F9A== + +hsla-regex@^1.0.0: + version "1.0.0" + resolved "https://registry.npmmirror.com/hsla-regex/-/hsla-regex-1.0.0.tgz#c1ce7a3168c8c6614033a4b5f7877f3b225f9c38" + integrity sha512-7Wn5GMLuHBjZCb2bTmnDOycho0p/7UVaAeqXZGbHrBCl6Yd/xDhQJAXe6Ga9AXJH2I5zY1dEdYw2u1UptnSBJA== + +html-encoding-sniffer@^3.0.0: + version "3.0.0" + resolved "https://registry.npmmirror.com/html-encoding-sniffer/-/html-encoding-sniffer-3.0.0.tgz#2cb1a8cf0db52414776e5b2a7a04d5dd98158de9" + integrity sha512-oWv4T4yJ52iKrufjnyZPkrN0CH3QnrUqdB6In1g5Fe1mia8GmF36gnfNySxoZtxD5+NmYw1EElVXiBk93UeskA== + dependencies: + whatwg-encoding "^2.0.0" + +html-escaper@^2.0.0: + version "2.0.2" + resolved "https://registry.npmmirror.com/html-escaper/-/html-escaper-2.0.2.tgz#dfd60027da36a36dfcbe236262c00a5822681453" + integrity sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg== + +html-tags@^3.1.0: + version "3.3.1" + 
resolved "https://registry.npmmirror.com/html-tags/-/html-tags-3.3.1.tgz#a04026a18c882e4bba8a01a3d39cfe465d40b5ce" + integrity sha512-ztqyC3kLto0e9WbNp0aeP+M3kTt+nbaIveGmUxAtZa+8iFgKLUOD4YKM5j+f3QD89bra7UeumolZHKuOXnTmeQ== + +http-proxy-agent@^5.0.0: + version "5.0.0" + resolved "https://registry.npmmirror.com/http-proxy-agent/-/http-proxy-agent-5.0.0.tgz#5129800203520d434f142bc78ff3c170800f2b43" + integrity sha512-n2hY8YdoRE1i7r6M0w9DIw5GgZN0G25P8zLCRQ8rjXtTU3vsNFBI/vWK/UIeE6g5MUUz6avwAPXmL6Fy9D/90w== + dependencies: + "@tootallnate/once" "2" + agent-base "6" + debug "4" + +https-proxy-agent@^5.0.0: + version "5.0.1" + resolved "https://registry.npmmirror.com/https-proxy-agent/-/https-proxy-agent-5.0.1.tgz#c59ef224a04fe8b754f3db0063a25ea30d0005d6" + integrity sha512-dFcAjpTQFgoLMzC2VwU+C/CbS7uRL0lWmxDITmqm7C+7F0Odmj6s9l6alZc6AELXhrnggM2CeWSXHGOdX2YtwA== + dependencies: + agent-base "6" + debug "4" + +human-signals@^2.1.0: + version "2.1.0" + resolved "https://registry.npmmirror.com/human-signals/-/human-signals-2.1.0.tgz#dc91fcba42e4d06e4abaed33b3e7a3c02f514ea0" + integrity sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw== + +iconv-lite@0.6.3: + version "0.6.3" + resolved "https://registry.npmmirror.com/iconv-lite/-/iconv-lite-0.6.3.tgz#a52f80bf38da1952eb5c681790719871a1a72501" + integrity sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw== + dependencies: + safer-buffer ">= 2.1.2 < 3.0.0" + +iconv-lite@^0.4.24: + version "0.4.24" + resolved "https://registry.npmmirror.com/iconv-lite/-/iconv-lite-0.4.24.tgz#2022b4b25fbddc21d2f524974a474aafe733908b" + integrity sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA== + dependencies: + safer-buffer ">= 2.1.2 < 3" + +idb-keyval@^6.2.0: + version "6.2.1" + resolved "https://registry.npmmirror.com/idb-keyval/-/idb-keyval-6.2.1.tgz#94516d625346d16f56f3b33855da11bfded2db33" + integrity sha512-8Sb3veuYCyrZL+VBt9LJfZjLUPWVvqn8tG28VqYNFCo43KHcKuq+b4EiXGeuaLAQWL2YmyDgMp2aSpH9JHsEQg== + +ieee754@^1.1.13: + version "1.2.1" + resolved "https://registry.npmmirror.com/ieee754/-/ieee754-1.2.1.tgz#8eb7a10a63fff25d15a57b001586d177d1b0d352" + integrity sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA== + +ignore@^4.0.6: + version "4.0.6" + resolved "https://registry.npmmirror.com/ignore/-/ignore-4.0.6.tgz#750e3db5862087b4737ebac8207ffd1ef27b25fc" + integrity sha512-cyFDKrqc/YdcWFniJhzI42+AzS+gNwmUzOSFcRCQYwySuBBBy/KjuxWLZ/FHEH6Moq1NizMOBWyTcv8O4OZIMg== + +ignore@^5.1.1: + version "5.3.2" + resolved "https://registry.npmmirror.com/ignore/-/ignore-5.3.2.tgz#3cd40e729f3643fd87cb04e50bf0eb722bc596f5" + integrity sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g== + +import-fresh@^2.0.0: + version "2.0.0" + resolved "https://registry.npmmirror.com/import-fresh/-/import-fresh-2.0.0.tgz#d81355c15612d386c61f9ddd3922d4304822a546" + integrity sha512-eZ5H8rcgYazHbKC3PG4ClHNykCSxtAhxSSEM+2mb+7evD2CKF5V7c0dNum7AdpDh0ZdICwZY9sRSn8f+KH96sg== + dependencies: + caller-path "^2.0.0" + resolve-from "^3.0.0" + +import-fresh@^3.0.0, import-fresh@^3.2.1: + version "3.3.1" + resolved "https://registry.npmmirror.com/import-fresh/-/import-fresh-3.3.1.tgz#9cecb56503c0ada1f2741dbbd6546e4b13b57ccf" + integrity sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ== + dependencies: + parent-module "^1.0.0" + resolve-from "^4.0.0" + 
+imurmurhash@^0.1.4: + version "0.1.4" + resolved "https://registry.npmmirror.com/imurmurhash/-/imurmurhash-0.1.4.tgz#9218b9b2b928a238b13dc4fb6b6d576f231453ea" + integrity sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA== + +indent-string@^3.0.0: + version "3.2.0" + resolved "https://registry.npmmirror.com/indent-string/-/indent-string-3.2.0.tgz#4a5fd6d27cc332f37e5419a504dbb837105c9289" + integrity sha512-BYqTHXTGUIvg7t1r4sJNKcbDZkL92nkXA8YtRpbjFHRHGDL/NtUeiBJMeE60kIFN/Mg8ESaWQvftaYMGJzQZCQ== + +indent-string@^4.0.0: + version "4.0.0" + resolved "https://registry.npmmirror.com/indent-string/-/indent-string-4.0.0.tgz#624f8f4497d619b2d9768531d58f4122854d7251" + integrity sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg== + +inflight@^1.0.4: + version "1.0.6" + resolved "https://registry.npmmirror.com/inflight/-/inflight-1.0.6.tgz#49bd6331d7d02d0c09bc910a1075ba8165b56df9" + integrity sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA== + dependencies: + once "^1.3.0" + wrappy "1" + +inherits@2, inherits@^2.0.3, inherits@^2.0.4: + version "2.0.4" + resolved "https://registry.npmmirror.com/inherits/-/inherits-2.0.4.tgz#0fa2c64f932917c3433a0ded55363aae37416b7c" + integrity sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ== + +ini@^1.3.4: + version "1.3.8" + resolved "https://registry.npmmirror.com/ini/-/ini-1.3.8.tgz#a29da425b48806f34767a4efce397269af28432c" + integrity sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew== + +inquirer@^7.0.0: + version "7.3.3" + resolved "https://registry.npmmirror.com/inquirer/-/inquirer-7.3.3.tgz#04d176b2af04afc157a83fd7c100e98ee0aad003" + integrity sha512-JG3eIAj5V9CwcGvuOmoo6LB9kbAYT8HXffUl6memuszlwDC/qvFAJw49XJ5NROSFNPxp3iQg1GqkFhaY/CR0IA== + dependencies: + ansi-escapes "^4.2.1" + chalk "^4.1.0" + cli-cursor "^3.1.0" + cli-width "^3.0.0" + external-editor "^3.0.3" + figures "^3.0.0" + lodash "^4.17.19" + mute-stream "0.0.8" + run-async "^2.4.0" + rxjs "^6.6.0" + string-width "^4.1.0" + strip-ansi "^6.0.0" + through "^2.3.6" + +inquirer@^8.2.0: + version "8.2.6" + resolved "https://registry.npmmirror.com/inquirer/-/inquirer-8.2.6.tgz#733b74888195d8d400a67ac332011b5fae5ea562" + integrity sha512-M1WuAmb7pn9zdFRtQYk26ZBoY043Sse0wVDdk4Bppr+JOXyQYybdtvK+l9wUibhtjdjvtoiNy8tk+EgsYIUqKg== + dependencies: + ansi-escapes "^4.2.1" + chalk "^4.1.1" + cli-cursor "^3.1.0" + cli-width "^3.0.0" + external-editor "^3.0.3" + figures "^3.0.0" + lodash "^4.17.21" + mute-stream "0.0.8" + ora "^5.4.1" + run-async "^2.4.0" + rxjs "^7.5.5" + string-width "^4.1.0" + strip-ansi "^6.0.0" + through "^2.3.6" + wrap-ansi "^6.0.1" + +is-arrayish@^0.2.1: + version "0.2.1" + resolved "https://registry.npmmirror.com/is-arrayish/-/is-arrayish-0.2.1.tgz#77c99840527aa8ecb1a8ba697b80645a7a926a9d" + integrity sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg== + +is-arrayish@^0.3.1: + version "0.3.2" + resolved "https://registry.npmmirror.com/is-arrayish/-/is-arrayish-0.3.2.tgz#4574a2ae56f7ab206896fb431eaeed066fdf8f03" + integrity sha512-eVRqCvVlZbuw3GrM63ovNSNAeA1K16kaR/LRY/92w0zxQ5/1YzwblUX652i4Xs9RwAGjW9d9y6X88t8OaAJfWQ== + +is-binary-path@~2.1.0: + version "2.1.0" + resolved "https://registry.npmmirror.com/is-binary-path/-/is-binary-path-2.1.0.tgz#ea1f7f3b80f064236e83470f86c09c254fb45b09" + integrity 
sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw== + dependencies: + binary-extensions "^2.0.0" + +is-ci@^1.0.10: + version "1.2.1" + resolved "https://registry.npmmirror.com/is-ci/-/is-ci-1.2.1.tgz#e3779c8ee17fccf428488f6e281187f2e632841c" + integrity sha512-s6tfsaQaQi3JNciBH6shVqEDvhGut0SUXr31ag8Pd8BBbVVlcGfWhpPmEOoM6RJ5TFhbypvf5yyRw/VXW1IiWg== + dependencies: + ci-info "^1.5.0" + +is-color-stop@^1.1.0: + version "1.1.0" + resolved "https://registry.npmmirror.com/is-color-stop/-/is-color-stop-1.1.0.tgz#cfff471aee4dd5c9e158598fbe12967b5cdad345" + integrity sha512-H1U8Vz0cfXNujrJzEcvvwMDW9Ra+biSYA3ThdQvAnMLJkEHQXn6bWzLkxHtVYJ+Sdbx0b6finn3jZiaVe7MAHA== + dependencies: + css-color-names "^0.0.4" + hex-color-regex "^1.1.0" + hsl-regex "^1.0.0" + hsla-regex "^1.0.0" + rgb-regex "^1.0.1" + rgba-regex "^1.0.0" + +is-core-module@^2.16.0: + version "2.16.1" + resolved "https://registry.npmmirror.com/is-core-module/-/is-core-module-2.16.1.tgz#2a98801a849f43e2add644fbb6bc6229b19a4ef4" + integrity sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w== + dependencies: + hasown "^2.0.2" + +is-directory@^0.3.1: + version "0.3.1" + resolved "https://registry.npmmirror.com/is-directory/-/is-directory-0.3.1.tgz#61339b6f2475fc772fd9c9d83f5c8575dc154ae1" + integrity sha512-yVChGzahRFvbkscn2MlwGismPO12i9+znNruC5gVEntG3qu0xQMzsGg/JFbrsqDOHtHFPci+V5aP5T9I+yeKqw== + +is-extglob@^2.1.1: + version "2.1.1" + resolved "https://registry.npmmirror.com/is-extglob/-/is-extglob-2.1.1.tgz#a88c02535791f02ed37c76a1b9ea9773c833f8c2" + integrity sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ== + +is-fullwidth-code-point@^1.0.0: + version "1.0.0" + resolved "https://registry.npmmirror.com/is-fullwidth-code-point/-/is-fullwidth-code-point-1.0.0.tgz#ef9e31386f031a7f0d643af82fde50c457ef00cb" + integrity sha512-1pqUqRjkhPJ9miNq9SwMfdvi6lBJcd6eFxvfaivQhaH3SgisfiuudvFntdKOmxuee/77l+FPjKrQjWvmPjWrRw== + dependencies: + number-is-nan "^1.0.0" + +is-fullwidth-code-point@^2.0.0: + version "2.0.0" + resolved "https://registry.npmmirror.com/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz#a3b30a5c4f199183167aaab93beefae3ddfb654f" + integrity sha512-VHskAKYM8RfSFXwee5t5cbN5PZeq1Wrh6qd5bkyiXIf6UQcN6w/A0eXM9r6t8d+GYOh+o6ZhiEnb88LN/Y8m2w== + +is-fullwidth-code-point@^3.0.0: + version "3.0.0" + resolved "https://registry.npmmirror.com/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz#f116f8064fe90b3f7844a38997c0b75051269f1d" + integrity sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg== + +is-glob@^4.0.0, is-glob@^4.0.1, is-glob@^4.0.3, is-glob@~4.0.1: + version "4.0.3" + resolved "https://registry.npmmirror.com/is-glob/-/is-glob-4.0.3.tgz#64f61e42cbbb2eec2071a9dac0b28ba1e65d5084" + integrity sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg== + dependencies: + is-extglob "^2.1.1" + +is-interactive@^1.0.0: + version "1.0.0" + resolved "https://registry.npmmirror.com/is-interactive/-/is-interactive-1.0.0.tgz#cea6e6ae5c870a7b0a0004070b7b587e0252912e" + integrity sha512-2HvIEKRoqS62guEC+qBjpvRubdX910WCMuJTZ+I9yvqKU2/12eSL549HMwtabb4oupdj2sMP50k+XJfB/8JE6w== + +is-node-process@^1.0.1: + version "1.2.0" + resolved "https://registry.npmmirror.com/is-node-process/-/is-node-process-1.2.0.tgz#ea02a1b90ddb3934a19aea414e88edef7e11d134" + integrity 
sha512-Vg4o6/fqPxIjtxgUH5QLJhwZ7gW5diGCVlXpuUfELC62CuxM1iHcRe51f2W1FDy04Ai4KJkagKjx3XaqyfRKXw== + +is-number@^7.0.0: + version "7.0.0" + resolved "https://registry.npmmirror.com/is-number/-/is-number-7.0.0.tgz#7535345b896734d5f80c4d06c50955527a14f12b" + integrity sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng== + +is-obj@^1.0.1: + version "1.0.1" + resolved "https://registry.npmmirror.com/is-obj/-/is-obj-1.0.1.tgz#3e4729ac1f5fde025cd7d83a896dab9f4f67db0f" + integrity sha512-l4RyHgRqGN4Y3+9JHVrNqO+tN0rV5My76uW5/nuO4K1b6vw5G8d/cmFjP9tRfEsdhZNt0IFdZuK/c2Vr4Nb+Qg== + +is-observable@^1.1.0: + version "1.1.0" + resolved "https://registry.npmmirror.com/is-observable/-/is-observable-1.1.0.tgz#b3e986c8f44de950867cab5403f5a3465005975e" + integrity sha512-NqCa4Sa2d+u7BWc6CukaObG3Fh+CU9bvixbpcXYhy2VvYS7vVGIdAgnIS5Ks3A/cqk4rebLJ9s8zBstT2aKnIA== + dependencies: + symbol-observable "^1.1.0" + +is-path-cwd@^2.2.0: + version "2.2.0" + resolved "https://registry.npmmirror.com/is-path-cwd/-/is-path-cwd-2.2.0.tgz#67d43b82664a7b5191fd9119127eb300048a9fdb" + integrity sha512-w942bTcih8fdJPJmQHFzkS76NEP8Kzzvmw92cXsazb8intwLqPibPPdXf4ANdKV3rYMuuQYGIWtvz9JilB3NFQ== + +is-path-inside@^3.0.1: + version "3.0.3" + resolved "https://registry.npmmirror.com/is-path-inside/-/is-path-inside-3.0.3.tgz#d231362e53a07ff2b0e0ea7fed049161ffd16283" + integrity sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ== + +is-potential-custom-element-name@^1.0.1: + version "1.0.1" + resolved "https://registry.npmmirror.com/is-potential-custom-element-name/-/is-potential-custom-element-name-1.0.1.tgz#171ed6f19e3ac554394edf78caa05784a45bebb5" + integrity sha512-bCYeRA2rVibKZd+s2625gGnGF/t7DSqDs4dP7CrLA1m7jKWz6pps0LpYLJN8Q64HtmPKJ1hrN3nzPNKFEKOUiQ== + +is-promise@^2.1.0: + version "2.2.2" + resolved "https://registry.npmmirror.com/is-promise/-/is-promise-2.2.2.tgz#39ab959ccbf9a774cf079f7b40c7a26f763135f1" + integrity sha512-+lP4/6lKUBfQjZ2pdxThZvLUAafmZb8OAxFb8XXtiQmS35INgr85hdOGoEs124ez1FCnZJt6jau/T+alh58QFQ== + +is-regexp@^1.0.0: + version "1.0.0" + resolved "https://registry.npmmirror.com/is-regexp/-/is-regexp-1.0.0.tgz#fd2d883545c46bac5a633e7b9a09e87fa2cb5069" + integrity sha512-7zjFAPO4/gwyQAAgRRmqeEeyIICSdmCqa3tsVHMdBzaXXRiqopZL4Cyghg/XulGWrtABTpbnYYzzIRffLkP4oA== + +is-stream@^1.1.0: + version "1.1.0" + resolved "https://registry.npmmirror.com/is-stream/-/is-stream-1.1.0.tgz#12d4a3dd4e68e0b79ceb8dbc84173ae80d91ca44" + integrity sha512-uQPm8kcs47jx38atAcWTVxyltQYoPT68y9aWYdV6yWXSyW8mzSat0TL6CiWdZeCdF3KrAvpVtnHbTv4RN+rqdQ== + +is-stream@^2.0.0: + version "2.0.1" + resolved "https://registry.npmmirror.com/is-stream/-/is-stream-2.0.1.tgz#fac1e3d53b97ad5a9d0ae9cef2389f5810a5c077" + integrity sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg== + +is-unicode-supported@^0.1.0: + version "0.1.0" + resolved "https://registry.npmmirror.com/is-unicode-supported/-/is-unicode-supported-0.1.0.tgz#3f26c76a809593b52bfa2ecb5710ed2779b522a7" + integrity sha512-knxG2q4UC3u8stRGyAVJCOdxFmv5DZiRcdlIaAQXAbSfJya+OhopNotLQrstBhququ4ZpuKbDc/8S6mgXgPFPw== + +isexe@^2.0.0: + version "2.0.0" + resolved "https://registry.npmmirror.com/isexe/-/isexe-2.0.0.tgz#e8fbf374dc556ff8947a10dcb0572d633f2cfa10" + integrity sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw== + +istanbul-lib-coverage@^3.0.0, istanbul-lib-coverage@^3.2.0: + version "3.2.2" + resolved 
"https://registry.npmmirror.com/istanbul-lib-coverage/-/istanbul-lib-coverage-3.2.2.tgz#2d166c4b0644d43a39f04bf6c2edd1e585f31756" + integrity sha512-O8dpsF+r0WV/8MNRKfnmrtCWhuKjxrq2w+jpzBL5UZKTi2LeVWnWOmWRxFlesJONmc+wLAGvKQZEOanko0LFTg== + +istanbul-lib-report@^3.0.0: + version "3.0.1" + resolved "https://registry.npmmirror.com/istanbul-lib-report/-/istanbul-lib-report-3.0.1.tgz#908305bac9a5bd175ac6a74489eafd0fc2445a7d" + integrity sha512-GCfE1mtsHGOELCU8e/Z7YWzpmybrx/+dSTfLrvY8qRmaY6zXTKWn6WQIjaAFw069icm6GVMNkgu0NzI4iPZUNw== + dependencies: + istanbul-lib-coverage "^3.0.0" + make-dir "^4.0.0" + supports-color "^7.1.0" + +istanbul-reports@^3.1.4: + version "3.1.7" + resolved "https://registry.npmmirror.com/istanbul-reports/-/istanbul-reports-3.1.7.tgz#daed12b9e1dca518e15c056e1e537e741280fa0b" + integrity sha512-BewmUXImeuRk2YY0PVbxgKAysvhRPUQE0h5QRM++nVWyubKGV0l8qQ5op8+B2DOmwSe63Jivj0BjkPQVf8fP5g== + dependencies: + html-escaper "^2.0.0" + istanbul-lib-report "^3.0.0" + +jackspeak@^3.1.2: + version "3.4.3" + resolved "https://registry.npmmirror.com/jackspeak/-/jackspeak-3.4.3.tgz#8833a9d89ab4acde6188942bd1c53b6390ed5a8a" + integrity sha512-OGlZQpz2yfahA/Rd1Y8Cd9SIEsqvXkLVoSw/cgwhnhFMDbsQFeZYoJJ7bIZBS9BcamUW96asq/npPWugM+RQBw== + dependencies: + "@isaacs/cliui" "^8.0.2" + optionalDependencies: + "@pkgjs/parseargs" "^0.11.0" + +"jingrow-charts@http://npm.jingrow.com:105/jingrow-charts-2.0.0-rc22.tgz": + version "2.0.0-rc22" + resolved "http://npm.jingrow.com:105/jingrow-charts-2.0.0-rc22.tgz#79bae6d0d54f47b00ba4659c0bae8c50ae7e6dd3" + +"jingrow-ui@http://npm.jingrow.com:105/jingrow-ui-0.1.108.tgz": + version "0.1.108" + resolved "http://npm.jingrow.com:105/jingrow-ui-0.1.108.tgz#bdfe5930a31f868ea04272b8ea1da074f055432d" + dependencies: + "@headlessui/vue" "^1.7.14" + "@popperjs/core" "^2.11.2" + "@tailwindcss/forms" "^0.5.3" + "@tailwindcss/typography" "^0.5.0" + "@tiptap/extension-color" "^2.0.3" + "@tiptap/extension-highlight" "^2.0.3" + "@tiptap/extension-image" "^2.0.3" + "@tiptap/extension-link" "^2.0.3" + "@tiptap/extension-mention" "^2.0.3" + "@tiptap/extension-placeholder" "^2.0.3" + "@tiptap/extension-table" "^2.0.3" + "@tiptap/extension-table-cell" "^2.0.3" + "@tiptap/extension-table-header" "^2.0.3" + "@tiptap/extension-table-row" "^2.0.3" + "@tiptap/extension-text-align" "^2.0.3" + "@tiptap/extension-text-style" "^2.0.3" + "@tiptap/extension-typography" "^2.0.3" + "@tiptap/pm" "^2.0.3" + "@tiptap/starter-kit" "^2.0.3" + "@tiptap/suggestion" "^2.0.3" + "@tiptap/vue-3" "^2.0.3" + "@vueuse/core" "^10.4.1" + dayjs "^1.11.13" + feather-icons "^4.28.0" + idb-keyval "^6.2.0" + ora "5.4.1" + prettier "^3.3.2" + radix-vue "^1.5.3" + showdown "^2.1.0" + socket.io-client "^4.5.1" + tippy.js "^6.3.7" + typescript "^5.0.2" + +jiti@^1.21.6: + version "1.21.7" + resolved "https://registry.npmmirror.com/jiti/-/jiti-1.21.7.tgz#9dd81043424a3d28458b193d965f0d18a2300ba9" + integrity sha512-/imKNG4EbWNrVjoNC/1H5/9GFy+tqjGBHCaSsN+P2RnPqjsLmv6UD3Ej+Kj8nBWaRAwyk7kK5ZUc+OEatnTR3A== + +js-beautify@^1.14.9: + version "1.15.4" + resolved "https://registry.npmmirror.com/js-beautify/-/js-beautify-1.15.4.tgz#f579f977ed4c930cef73af8f98f3f0a608acd51e" + integrity sha512-9/KXeZUKKJwqCXUdBxFJ3vPh467OCckSBmYDwSK/EtV090K+iMJ7zx2S3HLVDIWFQdqMIsZWbnaGiba18aWhaA== + dependencies: + config-chain "^1.1.13" + editorconfig "^1.0.4" + glob "^10.4.2" + js-cookie "^3.0.5" + nopt "^7.2.1" + +js-cookie@^3.0.5: + version "3.0.5" + resolved 
"https://registry.npmmirror.com/js-cookie/-/js-cookie-3.0.5.tgz#0b7e2fd0c01552c58ba86e0841f94dc2557dcdbc" + integrity sha512-cEiJEAEoIbWfCZYKWhVwFuvPX1gETRYPw6LlaTKoxD3s2AkXzkCjnp6h0V77ozyqj0jakteJ4YqDJT830+lVGw== + +js-levenshtein@^1.1.6: + version "1.1.6" + resolved "https://registry.npmmirror.com/js-levenshtein/-/js-levenshtein-1.1.6.tgz#c6cee58eb3550372df8deb85fad5ce66ce01d59d" + integrity sha512-X2BB11YZtrRqY4EnQcLX5Rh373zbK4alC1FW7D7MBhL2gtcC17cTnr6DmfHZeS0s2rTHjUTMMHfG7gO8SSdw+g== + +js-tokens@^4.0.0: + version "4.0.0" + resolved "https://registry.npmmirror.com/js-tokens/-/js-tokens-4.0.0.tgz#19203fb59991df98e3a287050d4647cdeaf32499" + integrity sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ== + +js-yaml@^3.13.1: + version "3.14.1" + resolved "https://registry.npmmirror.com/js-yaml/-/js-yaml-3.14.1.tgz#dae812fdb3825fa306609a8717383c50c36a0537" + integrity sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g== + dependencies: + argparse "^1.0.7" + esprima "^4.0.0" + +jsdom@^19.0.0: + version "19.0.0" + resolved "https://registry.npmmirror.com/jsdom/-/jsdom-19.0.0.tgz#93e67c149fe26816d38a849ea30ac93677e16b6a" + integrity sha512-RYAyjCbxy/vri/CfnjUWJQQtZ3LKlLnDqj+9XLNnJPgEGeirZs3hllKR20re8LUZ6o1b1X4Jat+Qd26zmP41+A== + dependencies: + abab "^2.0.5" + acorn "^8.5.0" + acorn-globals "^6.0.0" + cssom "^0.5.0" + cssstyle "^2.3.0" + data-urls "^3.0.1" + decimal.js "^10.3.1" + domexception "^4.0.0" + escodegen "^2.0.0" + form-data "^4.0.0" + html-encoding-sniffer "^3.0.0" + http-proxy-agent "^5.0.0" + https-proxy-agent "^5.0.0" + is-potential-custom-element-name "^1.0.1" + nwsapi "^2.2.0" + parse5 "6.0.1" + saxes "^5.0.1" + symbol-tree "^3.2.4" + tough-cookie "^4.0.0" + w3c-hr-time "^1.0.2" + w3c-xmlserializer "^3.0.0" + webidl-conversions "^7.0.0" + whatwg-encoding "^2.0.0" + whatwg-mimetype "^3.0.0" + whatwg-url "^10.0.0" + ws "^8.2.3" + xml-name-validator "^4.0.0" + +jsesc@^3.0.2: + version "3.1.0" + resolved "https://registry.npmmirror.com/jsesc/-/jsesc-3.1.0.tgz#74d335a234f67ed19907fdadfac7ccf9d409825d" + integrity sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA== + +jsesc@~3.0.2: + version "3.0.2" + resolved "https://registry.npmmirror.com/jsesc/-/jsesc-3.0.2.tgz#bb8b09a6597ba426425f2e4a07245c3d00b9343e" + integrity sha512-xKqzzWXDttJuOcawBt4KnKHHIf5oQ/Cxax+0PWFG+DFDgHNAdi+TXECADI+RYiFUMmx8792xsMbbgXj4CwnP4g== + +json-parse-better-errors@^1.0.1: + version "1.0.2" + resolved "https://registry.npmmirror.com/json-parse-better-errors/-/json-parse-better-errors-1.0.2.tgz#bb867cfb3450e69107c131d1c514bab3dc8bcaa9" + integrity sha512-mrqyZKfX5EhL7hvqcV6WG1yYjnjeuYDzDhhcAAUrq8Po85NBQBJP+ZDUT75qZQ98IkUoBqdkExkukOU7Ts2wrw== + +json-parse-even-better-errors@^2.3.0: + version "2.3.1" + resolved "https://registry.npmmirror.com/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz#7c47805a94319928e05777405dc12e1f7a4ee02d" + integrity sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w== + +json-schema-traverse@^0.4.1: + version "0.4.1" + resolved "https://registry.npmmirror.com/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz#69f6a87d9513ab8bb8fe63bdb0979c448e684660" + integrity sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg== + +json-stable-stringify-without-jsonify@^1.0.1: + version "1.0.1" + resolved 
"https://registry.npmmirror.com/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz#9db7b59496ad3f3cfef30a75142d2d930ad72651" + integrity sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw== + +json5@^2.2.3: + version "2.2.3" + resolved "https://registry.npmmirror.com/json5/-/json5-2.2.3.tgz#78cd6f1a19bdc12b73db5ad0c61efd66c1e29283" + integrity sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg== + +jsonfile@^6.0.1: + version "6.1.0" + resolved "https://registry.npmmirror.com/jsonfile/-/jsonfile-6.1.0.tgz#bc55b2634793c679ec6403094eb13698a6ec0aae" + integrity sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ== + dependencies: + universalify "^2.0.0" + optionalDependencies: + graceful-fs "^4.1.6" + +kolorist@^1.8.0: + version "1.8.0" + resolved "https://registry.npmmirror.com/kolorist/-/kolorist-1.8.0.tgz#edddbbbc7894bc13302cdf740af6374d4a04743c" + integrity sha512-Y+60/zizpJ3HRH8DCss+q95yr6145JXZo46OTpFvDZWLfRCE4qChOyk1b26nMaNpfHHgxagk9dXT5OP0Tfe+dQ== + +levn@^0.3.0, levn@~0.3.0: + version "0.3.0" + resolved "https://registry.npmmirror.com/levn/-/levn-0.3.0.tgz#3b09924edf9f083c0490fdd4c0bc4421e04764ee" + integrity sha512-0OO4y2iOHix2W6ujICbKIaEQXvFQHue65vUG3pb5EUomzPI90z9hsA1VsO/dbIIpC53J8gxM9Q4Oho0jrCM/yA== + dependencies: + prelude-ls "~1.1.2" + type-check "~0.3.2" + +libarchive.js@^1.3.0: + version "1.3.0" + resolved "https://registry.npmmirror.com/libarchive.js/-/libarchive.js-1.3.0.tgz#18c42c6b4ce727a02359c90769e4e454cf3743cd" + integrity sha512-EkQfRXt9DhWwj6BnEA2TNpOf4jTnzSTUPGgE+iFxcdNqjktY8GitbDeHnx8qZA0/IukNyyBUR3oQKRdYkO+HFg== + +lilconfig@^2.0.5: + version "2.1.0" + resolved "https://registry.npmmirror.com/lilconfig/-/lilconfig-2.1.0.tgz#78e23ac89ebb7e1bfbf25b18043de756548e7f52" + integrity sha512-utWOt/GHzuUxnLKxB6dk81RoOeoNeHgbrXiuGk4yyF5qlRz+iIVWu56E2fqGHFrXz0QNUhLB/8nKqvRH66JKGQ== + +lilconfig@^3.0.0, lilconfig@^3.1.3: + version "3.1.3" + resolved "https://registry.npmmirror.com/lilconfig/-/lilconfig-3.1.3.tgz#a1bcfd6257f9585bf5ae14ceeebb7b559025e4c4" + integrity sha512-/vlFKAoH5Cgt3Ie+JLhRbwOsCQePABiU3tJ1egGvyQ+33R/vcwM2Zl2QR/LzjsBeItPt3oSVXapn+m4nQDvpzw== + +lines-and-columns@^1.1.6: + version "1.2.4" + resolved "https://registry.npmmirror.com/lines-and-columns/-/lines-and-columns-1.2.4.tgz#eca284f75d2965079309dc0ad9255abb2ebc1632" + integrity sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg== + +linkify-it@^3.0.1: + version "3.0.3" + resolved "https://registry.npmmirror.com/linkify-it/-/linkify-it-3.0.3.tgz#a98baf44ce45a550efb4d49c769d07524cc2fa2e" + integrity sha512-ynTsyrFSdE5oZ/O9GEf00kPngmOfVwazR5GKDq6EYfhlpFug3J2zybX56a2PRRpc9P+FuSoGNAwjlbDs9jJBPQ== + dependencies: + uc.micro "^1.0.1" + +linkify-it@^5.0.0: + version "5.0.0" + resolved "https://registry.npmmirror.com/linkify-it/-/linkify-it-5.0.0.tgz#9ef238bfa6dc70bd8e7f9572b52d369af569b421" + integrity sha512-5aHCbzQRADcdP+ATqnDuhhJ/MRIqDkZX5pyjFHRRysS8vZ5AbqGEoFIb6pYHPZ+L/OC2Lc+xT8uHVVR5CAK/wQ== + dependencies: + uc.micro "^2.0.0" + +linkifyjs@^4.2.0: + version "4.2.0" + resolved "https://registry.npmmirror.com/linkifyjs/-/linkifyjs-4.2.0.tgz#9dd30222b9cbabec9c950e725ec00031c7fa3f08" + integrity sha512-pCj3PrQyATaoTYKHrgWRF3SJwsm61udVh+vuls/Rl6SptiDhgE7ziUIudAedRY9QEfynmM7/RmLEfPUyw1HPCw== + +lint-staged@^9.5.0: + version "9.5.0" + resolved 
"https://registry.npmmirror.com/lint-staged/-/lint-staged-9.5.0.tgz#290ec605252af646d9b74d73a0fa118362b05a33" + integrity sha512-nawMob9cb/G1J98nb8v3VC/E8rcX1rryUYXVZ69aT9kde6YWX+uvNOEHY5yf2gcWcTJGiD0kqXmCnS3oD75GIA== + dependencies: + chalk "^2.4.2" + commander "^2.20.0" + cosmiconfig "^5.2.1" + debug "^4.1.1" + dedent "^0.7.0" + del "^5.0.0" + execa "^2.0.3" + listr "^0.14.3" + log-symbols "^3.0.0" + micromatch "^4.0.2" + normalize-path "^3.0.0" + please-upgrade-node "^3.1.1" + string-argv "^0.3.0" + stringify-object "^3.3.0" + +listr-silent-renderer@^1.1.1: + version "1.1.1" + resolved "https://registry.npmmirror.com/listr-silent-renderer/-/listr-silent-renderer-1.1.1.tgz#924b5a3757153770bf1a8e3fbf74b8bbf3f9242e" + integrity sha512-L26cIFm7/oZeSNVhWB6faeorXhMg4HNlb/dS/7jHhr708jxlXrtrBWo4YUxZQkc6dGoxEAe6J/D3juTRBUzjtA== + +listr-update-renderer@^0.5.0: + version "0.5.0" + resolved "https://registry.npmmirror.com/listr-update-renderer/-/listr-update-renderer-0.5.0.tgz#4ea8368548a7b8aecb7e06d8c95cb45ae2ede6a2" + integrity sha512-tKRsZpKz8GSGqoI/+caPmfrypiaq+OQCbd+CovEC24uk1h952lVj5sC7SqyFUm+OaJ5HN/a1YLt5cit2FMNsFA== + dependencies: + chalk "^1.1.3" + cli-truncate "^0.2.1" + elegant-spinner "^1.0.1" + figures "^1.7.0" + indent-string "^3.0.0" + log-symbols "^1.0.2" + log-update "^2.3.0" + strip-ansi "^3.0.1" + +listr-verbose-renderer@^0.5.0: + version "0.5.0" + resolved "https://registry.npmmirror.com/listr-verbose-renderer/-/listr-verbose-renderer-0.5.0.tgz#f1132167535ea4c1261102b9f28dac7cba1e03db" + integrity sha512-04PDPqSlsqIOaaaGZ+41vq5FejI9auqTInicFRndCBgE3bXG8D6W1I+mWhk+1nqbHmyhla/6BUrd5OSiHwKRXw== + dependencies: + chalk "^2.4.1" + cli-cursor "^2.1.0" + date-fns "^1.27.2" + figures "^2.0.0" + +listr@^0.14.3: + version "0.14.3" + resolved "https://registry.npmmirror.com/listr/-/listr-0.14.3.tgz#2fea909604e434be464c50bddba0d496928fa586" + integrity sha512-RmAl7su35BFd/xoMamRjpIE4j3v+L28o8CT5YhAXQJm1fD+1l9ngXY8JAQRJ+tFK2i5njvi0iRUKV09vPwA0iA== + dependencies: + "@samverschueren/stream-to-observable" "^0.3.0" + is-observable "^1.1.0" + is-promise "^2.1.0" + is-stream "^1.1.0" + listr-silent-renderer "^1.1.1" + listr-update-renderer "^0.5.0" + listr-verbose-renderer "^0.5.0" + p-map "^2.0.0" + rxjs "^6.3.3" + +local-pkg@^0.4.1, local-pkg@^0.4.3: + version "0.4.3" + resolved "https://registry.npmmirror.com/local-pkg/-/local-pkg-0.4.3.tgz#0ff361ab3ae7f1c19113d9bb97b98b905dbc4963" + integrity sha512-SFppqq5p42fe2qcZQqqEOiVRXl+WCP1MdT6k7BDEW1j++sp5fIY+/fdRQitvKgB5BrBcmrs5m/L0v2FrU5MY1g== + +local-pkg@^0.5.0: + version "0.5.1" + resolved "https://registry.npmmirror.com/local-pkg/-/local-pkg-0.5.1.tgz#69658638d2a95287534d4c2fff757980100dbb6d" + integrity sha512-9rrA30MRRP3gBD3HTGnC6cDFpaE1kVDWxWgqWJUN0RvDNAo+Nz/9GxB+nHOH0ifbVFy0hSA1V6vFDvnx54lTEQ== + dependencies: + mlly "^1.7.3" + pkg-types "^1.2.1" + +local-pkg@^1.0.0: + version "1.1.1" + resolved "https://registry.npmmirror.com/local-pkg/-/local-pkg-1.1.1.tgz#f5fe74a97a3bd3c165788ee08ca9fbe998dc58dd" + integrity sha512-WunYko2W1NcdfAFpuLUoucsgULmgDBRkdxHxWQ7mK0cQqwPiy8E1enjuRBrhLtZkB5iScJ1XIPdhVEFK8aOLSg== + dependencies: + mlly "^1.7.4" + pkg-types "^2.0.1" + quansync "^0.2.8" + +locate-path@^5.0.0: + version "5.0.0" + resolved "https://registry.npmmirror.com/locate-path/-/locate-path-5.0.0.tgz#1afba396afd676a6d42504d0a67a3a7eb9f62aa0" + integrity sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g== + dependencies: + p-locate "^4.1.0" + +locate-path@^6.0.0: + version "6.0.0" + resolved 
"https://registry.npmmirror.com/locate-path/-/locate-path-6.0.0.tgz#55321eb309febbc59c4801d931a72452a681d286" + integrity sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw== + dependencies: + p-locate "^5.0.0" + +lodash.castarray@^4.4.0: + version "4.4.0" + resolved "https://registry.npmmirror.com/lodash.castarray/-/lodash.castarray-4.4.0.tgz#c02513515e309daddd4c24c60cfddcf5976d9115" + integrity sha512-aVx8ztPv7/2ULbArGJ2Y42bG1mEQ5mGjpdvrbJcJFU3TbYybe+QlLS4pst9zV52ymy2in1KpFPiZnAOATxD4+Q== + +lodash.debounce@^4.0.8: + version "4.0.8" + resolved "https://registry.npmmirror.com/lodash.debounce/-/lodash.debounce-4.0.8.tgz#82d79bff30a67c4005ffd5e2515300ad9ca4d7af" + integrity sha512-FT1yDzDYEoYWhnSGnpE/4Kj1fLZkDFyqRb7fNt6FdYOSxlUWAtp42Eh6Wb0rGIv/m9Bgo7x4GhQbm5Ys4SG5ow== + +lodash.isplainobject@^4.0.6: + version "4.0.6" + resolved "https://registry.npmmirror.com/lodash.isplainobject/-/lodash.isplainobject-4.0.6.tgz#7c526a52d89b45c45cc690b88163be0497f550cb" + integrity sha512-oSXzaWypCMHkPC3NvBEaPHf0KsA5mvPrOPgQWDsbg8n7orZ290M0BmC/jgRZ4vcJ6DTAhjrsSYgdsW/F+MFOBA== + +lodash.merge@^4.6.2: + version "4.6.2" + resolved "https://registry.npmmirror.com/lodash.merge/-/lodash.merge-4.6.2.tgz#558aa53b43b661e1925a0afdfa36a9a1085fe57a" + integrity sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ== + +lodash.topath@^4.5.2: + version "4.5.2" + resolved "https://registry.npmmirror.com/lodash.topath/-/lodash.topath-4.5.2.tgz#3616351f3bba61994a0931989660bd03254fd009" + integrity sha512-1/W4dM+35DwvE/iEd1M9ekewOSTlpFekhw9mhAtrwjVqUr83/ilQiyAvmg4tVX7Unkcfl1KC+i9WdaT4B6aQcg== + +lodash@^4.17.14, lodash@^4.17.19, lodash@^4.17.21, lodash@^4.17.4: + version "4.17.21" + resolved "https://registry.npmmirror.com/lodash/-/lodash-4.17.21.tgz#679591c564c3bffaae8454cf0b3df370c3d6911c" + integrity sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg== + +log-symbols@^1.0.2: + version "1.0.2" + resolved "https://registry.npmmirror.com/log-symbols/-/log-symbols-1.0.2.tgz#376ff7b58ea3086a0f09facc74617eca501e1a18" + integrity sha512-mmPrW0Fh2fxOzdBbFv4g1m6pR72haFLPJ2G5SJEELf1y+iaQrDG6cWCPjy54RHYbZAt7X+ls690Kw62AdWXBzQ== + dependencies: + chalk "^1.0.0" + +log-symbols@^3.0.0: + version "3.0.0" + resolved "https://registry.npmmirror.com/log-symbols/-/log-symbols-3.0.0.tgz#f3a08516a5dea893336a7dee14d18a1cfdab77c4" + integrity sha512-dSkNGuI7iG3mfvDzUuYZyvk5dD9ocYCYzNU6CYDE6+Xqd+gwme6Z00NS3dUh8mq/73HaEtT7m6W+yUPtU6BZnQ== + dependencies: + chalk "^2.4.2" + +log-symbols@^4.1.0: + version "4.1.0" + resolved "https://registry.npmmirror.com/log-symbols/-/log-symbols-4.1.0.tgz#3fbdbb95b4683ac9fc785111e792e558d4abd503" + integrity sha512-8XPvpAA8uyhfteu8pIvQxpJZ7SYYdpUivZpGy6sFsBuKRY/7rQGavedeB8aK+Zkyq6upMFVL/9AW6vOYzfRyLg== + dependencies: + chalk "^4.1.0" + is-unicode-supported "^0.1.0" + +log-update@^2.3.0: + version "2.3.0" + resolved "https://registry.npmmirror.com/log-update/-/log-update-2.3.0.tgz#88328fd7d1ce7938b29283746f0b1bc126b24708" + integrity sha512-vlP11XfFGyeNQlmEn9tJ66rEW1coA/79m5z6BCkudjbAGE83uhAcGYrBFwfs3AdLiLzGRusRPAbSPK9xZteCmg== + dependencies: + ansi-escapes "^3.0.0" + cli-cursor "^2.0.0" + wrap-ansi "^3.0.1" + +loupe@^2.3.6: + version "2.3.7" + resolved "https://registry.npmmirror.com/loupe/-/loupe-2.3.7.tgz#6e69b7d4db7d3ab436328013d37d1c8c3540c697" + integrity sha512-zSMINGVYkdpYSOBmLi0D1Uo7JU9nVdQKrHxC8eYlV+9YKK9WePqAlL7lSlorG/U2Fw1w0hTBmaa/jrQ3UbPHtA== + dependencies: + get-func-name 
"^2.0.1" + +lru-cache@^10.2.0: + version "10.4.3" + resolved "https://registry.npmmirror.com/lru-cache/-/lru-cache-10.4.3.tgz#410fc8a17b70e598013df257c2446b7f3383f119" + integrity sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ== + +lru-cache@^4.0.1: + version "4.1.5" + resolved "https://registry.npmmirror.com/lru-cache/-/lru-cache-4.1.5.tgz#8bbe50ea85bed59bc9e33dcab8235ee9bcf443cd" + integrity sha512-sWZlbEP2OsHNkXrMl5GYk/jKk70MBng6UU4YI/qGDYbgf6YbP4EvmqISbXCoJiRKs+1bSpFHVgQxvJ17F2li5g== + dependencies: + pseudomap "^1.0.2" + yallist "^2.1.2" + +lru-cache@^5.1.1: + version "5.1.1" + resolved "https://registry.npmmirror.com/lru-cache/-/lru-cache-5.1.1.tgz#1da27e6710271947695daf6848e847f01d84b920" + integrity sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w== + dependencies: + yallist "^3.0.2" + +luxon@^1.22.0: + version "1.28.1" + resolved "https://registry.npmmirror.com/luxon/-/luxon-1.28.1.tgz#528cdf3624a54506d710290a2341aa8e6e6c61b0" + integrity sha512-gYHAa180mKrNIUJCbwpmD0aTu9kV0dREDrwNnuyFAsO1Wt0EVYSZelPnJlbj9HplzXX/YWXHFTL45kvZ53M0pw== + +magic-string@0.30.8: + version "0.30.8" + resolved "https://registry.npmmirror.com/magic-string/-/magic-string-0.30.8.tgz#14e8624246d2bedba70d5462aa99ac9681844613" + integrity sha512-ISQTe55T2ao7XtlAStud6qwYPZjE4GK1S/BeVPus4jrq6JuOnQ00YKQC581RWhR122W7msZV263KzVeLoqidyQ== + dependencies: + "@jridgewell/sourcemap-codec" "^1.4.15" + +magic-string@^0.30.1, magic-string@^0.30.11: + version "0.30.17" + resolved "https://registry.npmmirror.com/magic-string/-/magic-string-0.30.17.tgz#450a449673d2460e5bbcfba9a61916a1714c7453" + integrity sha512-sNPKHvyjVf7gyjwS4xGTaW/mCnF8wnjtifKBEhxfZ7E/S8tQ0rssrwGNn6q8JH/ohItJfSQp9mBtQYuTlH5QnA== + dependencies: + "@jridgewell/sourcemap-codec" "^1.5.0" + +make-dir@^4.0.0: + version "4.0.0" + resolved "https://registry.npmmirror.com/make-dir/-/make-dir-4.0.0.tgz#c3c2307a771277cd9638305f915c29ae741b614e" + integrity sha512-hXdUTZYIVOt1Ex//jAQi+wTZZpUpwBj/0QsOzqegb3rGMMeJiSEu5xLHnYfBrRV4RH2+OCSOO95Is/7x1WJ4bw== + dependencies: + semver "^7.5.3" + +markdown-it@^12.3.2: + version "12.3.2" + resolved "https://registry.npmmirror.com/markdown-it/-/markdown-it-12.3.2.tgz#bf92ac92283fe983fe4de8ff8abfb5ad72cd0c90" + integrity sha512-TchMembfxfNVpHkbtriWltGWc+m3xszaRD0CZup7GFFhzIgQqxIfn3eGj1yZpfuflzPvfkt611B2Q/Bsk1YnGg== + dependencies: + argparse "^2.0.1" + entities "~2.1.0" + linkify-it "^3.0.1" + mdurl "^1.0.1" + uc.micro "^1.0.5" + +markdown-it@^14.0.0: + version "14.1.0" + resolved "https://registry.npmmirror.com/markdown-it/-/markdown-it-14.1.0.tgz#3c3c5992883c633db4714ccb4d7b5935d98b7d45" + integrity sha512-a54IwgWPaeBCAAsv13YgmALOF1elABB08FxO9i+r4VFk5Vl4pKokRPeX8u5TCgSsPi6ec1otfLjdOpVcgbpshg== + dependencies: + argparse "^2.0.1" + entities "^4.4.0" + linkify-it "^5.0.0" + mdurl "^2.0.0" + punycode.js "^2.3.1" + uc.micro "^2.1.0" + +math-intrinsics@^1.1.0: + version "1.1.0" + resolved "https://registry.npmmirror.com/math-intrinsics/-/math-intrinsics-1.1.0.tgz#a0dd74be81e2aa5c2f27e65ce283605ee4e2b7f9" + integrity sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g== + +mdurl@^1.0.1: + version "1.0.1" + resolved "https://registry.npmmirror.com/mdurl/-/mdurl-1.0.1.tgz#fe85b2ec75a59037f2adfec100fd6c601761152e" + integrity sha512-/sKlQJCBYVY9Ers9hqzKou4H6V5UWc/M59TH2dvkt+84itfnq7uFOMLpOiOS4ujvHP4etln18fmIxA5R5fll0g== + +mdurl@^2.0.0: + version "2.0.0" + resolved 
"https://registry.npmmirror.com/mdurl/-/mdurl-2.0.0.tgz#80676ec0433025dd3e17ee983d0fe8de5a2237e0" + integrity sha512-Lf+9+2r+Tdp5wXDXC4PcIBjTDtq4UKjCPMQhKIuzpJNW0b96kVqSwW0bT7FhRSfmAiFYgP+SCRvdrDozfh0U5w== + +merge-stream@^2.0.0: + version "2.0.0" + resolved "https://registry.npmmirror.com/merge-stream/-/merge-stream-2.0.0.tgz#52823629a14dd00c9770fb6ad47dc6310f2c1f60" + integrity sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w== + +merge2@^1.2.3, merge2@^1.3.0: + version "1.4.1" + resolved "https://registry.npmmirror.com/merge2/-/merge2-1.4.1.tgz#4368892f885e907455a6fd7dc55c0c9d404990ae" + integrity sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg== + +micromatch@^4.0.2, micromatch@^4.0.8: + version "4.0.8" + resolved "https://registry.npmmirror.com/micromatch/-/micromatch-4.0.8.tgz#d66fa18f3a47076789320b9b1af32bd86d9fa202" + integrity sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA== + dependencies: + braces "^3.0.3" + picomatch "^2.3.1" + +mime-db@1.52.0: + version "1.52.0" + resolved "https://registry.npmmirror.com/mime-db/-/mime-db-1.52.0.tgz#bbabcdc02859f4987301c856e3387ce5ec43bf70" + integrity sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg== + +mime-types@^2.1.12: + version "2.1.35" + resolved "https://registry.npmmirror.com/mime-types/-/mime-types-2.1.35.tgz#381a871b62a734450660ae3deee44813f70d959a" + integrity sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw== + dependencies: + mime-db "1.52.0" + +mimic-fn@^1.0.0: + version "1.2.0" + resolved "https://registry.npmmirror.com/mimic-fn/-/mimic-fn-1.2.0.tgz#820c86a39334640e99516928bd03fca88057d022" + integrity sha512-jf84uxzwiuiIVKiOLpfYk7N46TSy8ubTonmneY9vrpHNAnp0QBt2BxWV9dO3/j+BoVAb+a5G6YDPW3M5HOdMWQ== + +mimic-fn@^2.1.0: + version "2.1.0" + resolved "https://registry.npmmirror.com/mimic-fn/-/mimic-fn-2.1.0.tgz#7ed2c2ccccaf84d3ffcb7a69b57711fc2083401b" + integrity sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg== + +mini-svg-data-uri@^1.2.3: + version "1.4.4" + resolved "https://registry.npmmirror.com/mini-svg-data-uri/-/mini-svg-data-uri-1.4.4.tgz#8ab0aabcdf8c29ad5693ca595af19dd2ead09939" + integrity sha512-r9deDe9p5FJUPZAk3A59wGH7Ii9YrjjWw0jmw/liSbHl2CHiyXj6FcDXDu2K3TjVAXqiJdaw3xxwlZZr9E6nHg== + +minimatch@9.0.1: + version "9.0.1" + resolved "https://registry.npmmirror.com/minimatch/-/minimatch-9.0.1.tgz#8a555f541cf976c622daf078bb28f29fb927c253" + integrity sha512-0jWhJpD/MdhPXwPuiRkCbfYfSKp2qnn2eOc279qI7f+osl/l+prKSrvhg157zSYvx/1nmgn2NqdT6k2Z7zSH9w== + dependencies: + brace-expansion "^2.0.1" + +minimatch@^3.0.4, minimatch@^3.1.1: + version "3.1.2" + resolved "https://registry.npmmirror.com/minimatch/-/minimatch-3.1.2.tgz#19cd194bfd3e428f049a70817c038d89ab4be35b" + integrity sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw== + dependencies: + brace-expansion "^1.1.7" + +minimatch@^8.0.2: + version "8.0.4" + resolved "https://registry.npmmirror.com/minimatch/-/minimatch-8.0.4.tgz#847c1b25c014d4e9a7f68aaf63dedd668a626229" + integrity sha512-W0Wvr9HyFXZRGIDgCicunpQ299OKXs9RgZfaukz4qAW/pJhcpUfupc9c+OObPOFueNy8VSrZgEmDtk6Kh4WzDA== + dependencies: + brace-expansion "^2.0.1" + +minimatch@^9.0.3, minimatch@^9.0.4: + version "9.0.5" + resolved 
"https://registry.npmmirror.com/minimatch/-/minimatch-9.0.5.tgz#d74f9dd6b57d83d8e98cfb82133b03978bc929e5" + integrity sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow== + dependencies: + brace-expansion "^2.0.1" + +minimist@^1.2.6: + version "1.2.8" + resolved "https://registry.npmmirror.com/minimist/-/minimist-1.2.8.tgz#c1a464e7693302e082a075cee0c057741ac4772c" + integrity sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA== + +minipass@^4.2.4: + version "4.2.8" + resolved "https://registry.npmmirror.com/minipass/-/minipass-4.2.8.tgz#f0010f64393ecfc1d1ccb5f582bcaf45f48e1a3a" + integrity sha512-fNzuVyifolSLFL4NzpF+wEF4qrgqaaKX0haXPQEdQ7NKAN+WecoKMHV09YcuL/DHxrUsYQOK3MiuDf7Ip2OXfQ== + +"minipass@^5.0.0 || ^6.0.2 || ^7.0.0", minipass@^7.1.2: + version "7.1.2" + resolved "https://registry.npmmirror.com/minipass/-/minipass-7.1.2.tgz#93a9626ce5e5e66bd4db86849e7515e92340a707" + integrity sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw== + +mkdirp@^0.5.1: + version "0.5.6" + resolved "https://registry.npmmirror.com/mkdirp/-/mkdirp-0.5.6.tgz#7def03d2432dcae4ba1d611445c48396062255f6" + integrity sha512-FP+p8RB8OWpF3YZBCrP5gtADmtXApB5AMLn+vdyA+PyxCjrCs00mjyUozssO33cwDeT3wNGdLxJ5M//YqtHAJw== + dependencies: + minimist "^1.2.6" + +mlly@^1.7.3, mlly@^1.7.4: + version "1.7.4" + resolved "https://registry.npmmirror.com/mlly/-/mlly-1.7.4.tgz#3d7295ea2358ec7a271eaa5d000a0f84febe100f" + integrity sha512-qmdSIPC4bDJXgZTCR7XosJiNKySV7O215tsPtDN9iEO/7q/76b/ijtgRu/+epFXSJhijtTCCGp3DWS549P3xKw== + dependencies: + acorn "^8.14.0" + pathe "^2.0.1" + pkg-types "^1.3.0" + ufo "^1.5.4" + +modern-normalize@^1.1.0: + version "1.1.0" + resolved "https://registry.npmmirror.com/modern-normalize/-/modern-normalize-1.1.0.tgz#da8e80140d9221426bd4f725c6e11283d34f90b7" + integrity sha512-2lMlY1Yc1+CUy0gw4H95uNN7vjbpoED7NNRSBHE25nWfLBdmMzFCsPshlzbxHz+gYMcBEUN8V4pU16prcdPSgA== + +moo@^0.5.0: + version "0.5.2" + resolved "https://registry.npmmirror.com/moo/-/moo-0.5.2.tgz#f9fe82473bc7c184b0d32e2215d3f6e67278733c" + integrity sha512-iSAJLHYKnX41mKcJKjqvnAN9sf0LMDTXDEvFv+ffuRR9a1MIuXLjMNL6EsnDHSkKLTWNqQQ5uo61P4EbU4NU+Q== + +ms@^2.1.3: + version "2.1.3" + resolved "https://registry.npmmirror.com/ms/-/ms-2.1.3.tgz#574c8138ce1d2b5861f0b44579dbadd60c6615b2" + integrity sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA== + +msw@^0.36.8: + version "0.36.8" + resolved "https://registry.npmmirror.com/msw/-/msw-0.36.8.tgz#33ff8bfb0299626a95f43d0e4c3dc2c73c17f1ba" + integrity sha512-K7lOQoYqhGhTSChsmHMQbf/SDCsxh/m0uhN6Ipt206lGoe81fpTmaGD0KLh4jUxCONMOUnwCSj0jtX2CM4pEdw== + dependencies: + "@mswjs/cookies" "^0.1.7" + "@mswjs/interceptors" "^0.12.7" + "@open-draft/until" "^1.0.3" + "@types/cookie" "^0.4.1" + "@types/inquirer" "^8.1.3" + "@types/js-levenshtein" "^1.1.0" + chalk "4.1.1" + chokidar "^3.4.2" + cookie "^0.4.1" + graphql "^15.5.1" + headers-utils "^3.0.2" + inquirer "^8.2.0" + is-node-process "^1.0.1" + js-levenshtein "^1.1.6" + node-fetch "^2.6.7" + path-to-regexp "^6.2.0" + statuses "^2.0.0" + strict-event-emitter "^0.2.0" + type-fest "^1.2.2" + yargs "^17.3.0" + +muggle-string@^0.4.1: + version "0.4.1" + resolved "https://registry.npmmirror.com/muggle-string/-/muggle-string-0.4.1.tgz#3b366bd43b32f809dc20659534dd30e7c8a0d328" + integrity sha512-VNTrAak/KhO2i8dqqnqnAHOa3cYBwXEZe9h+D5h/1ZqFSTEFHdM65lR7RoIqq3tBBYavsOXV84NoHXZ0AkPyqQ== + +mute-stream@0.0.8: + 
version "0.0.8" + resolved "https://registry.npmmirror.com/mute-stream/-/mute-stream-0.0.8.tgz#1630c42b2251ff81e2a283de96a5497ea92e5e0d" + integrity sha512-nnbWWOkoWyUsTjKrhgD0dcz22mdkSnpYqbEjIm2nhwhuxlSkpywJmBo8h0ZqJdkp73mb90SssHkN4rsRaBAfAA== + +mz@^2.7.0: + version "2.7.0" + resolved "https://registry.npmmirror.com/mz/-/mz-2.7.0.tgz#95008057a56cafadc2bc63dde7f9ff6955948e32" + integrity sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q== + dependencies: + any-promise "^1.0.0" + object-assign "^4.0.1" + thenify-all "^1.0.0" + +nanoid@^3.3.8: + version "3.3.11" + resolved "https://registry.npmmirror.com/nanoid/-/nanoid-3.3.11.tgz#4f4f112cefbe303202f2199838128936266d185b" + integrity sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w== + +nanoid@^5.0.7: + version "5.1.5" + resolved "https://registry.npmmirror.com/nanoid/-/nanoid-5.1.5.tgz#f7597f9d9054eb4da9548cdd53ca70f1790e87de" + integrity sha512-Ir/+ZpE9fDsNH0hQ3C68uyThDXzYcim2EqcZ8zn8Chtt1iylPT9xXJB0kPCnqzgcEGikO9RxSrh63MsmVCU7Fw== + +natural-compare@^1.4.0: + version "1.4.0" + resolved "https://registry.npmmirror.com/natural-compare/-/natural-compare-1.4.0.tgz#4abebfeed7541f2c27acfb29bdbbd15c8d5ba4f7" + integrity sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw== + +nearley@^2.20.1: + version "2.20.1" + resolved "https://registry.npmmirror.com/nearley/-/nearley-2.20.1.tgz#246cd33eff0d012faf197ff6774d7ac78acdd474" + integrity sha512-+Mc8UaAebFzgV+KpI5n7DasuuQCHA89dmwm7JXw3TV43ukfNQ9DnBH3Mdb2g/I4Fdxc26pwimBWvjIw0UAILSQ== + dependencies: + commander "^2.19.0" + moo "^0.5.0" + railroad-diagrams "^1.0.0" + randexp "0.4.6" + +nice-try@^1.0.4: + version "1.0.5" + resolved "https://registry.npmmirror.com/nice-try/-/nice-try-1.0.5.tgz#a3378a7696ce7d223e88fc9b764bd7ef1089e366" + integrity sha512-1nh45deeb5olNY7eX82BkPO7SSxR5SSYJiPTrTdFUVYwAl8CKMA5N9PjTYkHiRjisVcxcQ1HXdLhx2qxxJzLNQ== + +node-domexception@^1.0.0: + version "1.0.0" + resolved "https://registry.npmmirror.com/node-domexception/-/node-domexception-1.0.0.tgz#6888db46a1f71c0b76b3f7555016b63fe64766e5" + integrity sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ== + +node-emoji@^1.11.0: + version "1.11.0" + resolved "https://registry.npmmirror.com/node-emoji/-/node-emoji-1.11.0.tgz#69a0150e6946e2f115e9d7ea4df7971e2628301c" + integrity sha512-wo2DpQkQp7Sjm2A0cq+sN7EHKO6Sl0ctXeBdFZrL9T9+UywORbufTcTZxom8YqpLQt/FqNMUkOpkZrJVYSKD3A== + dependencies: + lodash "^4.17.21" + +node-fetch@^2.6.7: + version "2.7.0" + resolved "https://registry.npmmirror.com/node-fetch/-/node-fetch-2.7.0.tgz#d0f0fa6e3e2dc1d27efcd8ad99d550bda94d187d" + integrity sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A== + dependencies: + whatwg-url "^5.0.0" + +node-fetch@^3.2.10: + version "3.3.2" + resolved "https://registry.npmmirror.com/node-fetch/-/node-fetch-3.3.2.tgz#d1e889bacdf733b4ff3b2b243eb7a12866a0b78b" + integrity sha512-dRB78srN/l6gqWulah9SrxeYnxeddIG30+GOqK/9OlLVyLg3HPnr6SqOWTWOXKRwC2eGYCkZ59NNuSgvSrpgOA== + dependencies: + data-uri-to-buffer "^4.0.0" + fetch-blob "^3.1.4" + formdata-polyfill "^4.0.10" + +node-releases@^2.0.19: + version "2.0.19" + resolved "https://registry.npmmirror.com/node-releases/-/node-releases-2.0.19.tgz#9e445a52950951ec4d177d843af370b411caf314" + integrity sha512-xxOWJsBKtzAq7DY0J+DTzuz58K8e7sJbdgwkbMWQe8UYB6ekmsQ45q0M/tJDsGaZmbC+l7n57UV8Hl5tHxO9uw== + +nopt@^7.2.1: + 
version "7.2.1" + resolved "https://registry.npmmirror.com/nopt/-/nopt-7.2.1.tgz#1cac0eab9b8e97c9093338446eddd40b2c8ca1e7" + integrity sha512-taM24ViiimT/XntxbPyJQzCG+p4EKOpgD3mxFwW38mGjVUrfERQOeY4EDHjdnptttfHuHQXFx+lTP08Q+mLa/w== + dependencies: + abbrev "^2.0.0" + +normalize-path@^1.0.0: + version "1.0.0" + resolved "https://registry.npmmirror.com/normalize-path/-/normalize-path-1.0.0.tgz#32d0e472f91ff345701c15a8311018d3b0a90379" + integrity sha512-7WyT0w8jhpDStXRq5836AMmihQwq2nrUVQrgjvUo/p/NZf9uy/MeJ246lBJVmWuYXMlJuG9BNZHF0hWjfTbQUA== + +normalize-path@^3.0.0, normalize-path@~3.0.0: + version "3.0.0" + resolved "https://registry.npmmirror.com/normalize-path/-/normalize-path-3.0.0.tgz#0dcd69ff23a1c9b11fd0978316644a0388216a65" + integrity sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA== + +normalize-range@^0.1.2: + version "0.1.2" + resolved "https://registry.npmmirror.com/normalize-range/-/normalize-range-0.1.2.tgz#2d10c06bdfd312ea9777695a4d28439456b75942" + integrity sha512-bdok/XvKII3nUpklnV6P2hxtMNrCboOjAcyBuQnWEhO665FwrSNRxU+AqpsyvO6LgGYPspN+lu5CLtw4jPRKNA== + +npm-run-path@^2.0.0: + version "2.0.2" + resolved "https://registry.npmmirror.com/npm-run-path/-/npm-run-path-2.0.2.tgz#35a9232dfa35d7067b4cb2ddf2357b1871536c5f" + integrity sha512-lJxZYlT4DW/bRUtFh1MQIWqmLwQfAxnqWG4HhEdjMlkrJYnJn0Jrr2u3mgxqaWsdiBc76TYkTG/mhrnYTuzfHw== + dependencies: + path-key "^2.0.0" + +npm-run-path@^3.0.0: + version "3.1.0" + resolved "https://registry.npmmirror.com/npm-run-path/-/npm-run-path-3.1.0.tgz#7f91be317f6a466efed3c9f2980ad8a4ee8b0fa5" + integrity sha512-Dbl4A/VfiVGLgQv29URL9xshU8XDY1GeLy+fsaZ1AA8JDSfjvr5P5+pzRbWqRSBxk6/DW7MIh8lTM/PaGnP2kg== + dependencies: + path-key "^3.0.0" + +npm-run-path@^4.0.1: + version "4.0.1" + resolved "https://registry.npmmirror.com/npm-run-path/-/npm-run-path-4.0.1.tgz#b7ecd1e5ed53da8e37a55e1c2269e0b97ed748ea" + integrity sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw== + dependencies: + path-key "^3.0.0" + +num2fraction@^1.2.2: + version "1.2.2" + resolved "https://registry.npmmirror.com/num2fraction/-/num2fraction-1.2.2.tgz#6f682b6a027a4e9ddfa4564cd2589d1d4e669ede" + integrity sha512-Y1wZESM7VUThYY+4W+X4ySH2maqcA+p7UR+w8VWNWVAd6lwuXXWz/w/Cz43J/dI2I+PS6wD5N+bJUF+gjWvIqg== + +number-is-nan@^1.0.0: + version "1.0.1" + resolved "https://registry.npmmirror.com/number-is-nan/-/number-is-nan-1.0.1.tgz#097b602b53422a522c1afb8790318336941a011d" + integrity sha512-4jbtZXNAsfZbAHiiqjLPBiCl16dES1zI4Hpzzxw61Tk+loF+sBDBKx1ICKKKwIqQ7M0mFn1TmkN7euSncWgHiQ== + +nwsapi@^2.2.0: + version "2.2.20" + resolved "https://registry.npmmirror.com/nwsapi/-/nwsapi-2.2.20.tgz#22e53253c61e7b0e7e93cef42c891154bcca11ef" + integrity sha512-/ieB+mDe4MrrKMT8z+mQL8klXydZWGR5Dowt4RAGKbJ3kIGEx3X4ljUo+6V73IXtUPWgfOlU5B9MlGxFO5T+cA== + +object-assign@^4.0.1, object-assign@^4.1.0, object-assign@^4.1.1: + version "4.1.1" + resolved "https://registry.npmmirror.com/object-assign/-/object-assign-4.1.1.tgz#2109adc7965887cfc05cbbd442cac8bfbb360863" + integrity sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg== + +object-hash@^2.2.0: + version "2.2.0" + resolved "https://registry.npmmirror.com/object-hash/-/object-hash-2.2.0.tgz#5ad518581eefc443bd763472b8ff2e9c2c0d54a5" + integrity sha512-gScRMn0bS5fH+IuwyIFgnh9zBdo4DV+6GhygmWM9HyNJSgS0hScp1f5vjtm7oIIOiT9trXrShAkLFSc2IqKNgw== + +object-hash@^3.0.0: + version "3.0.0" + resolved 
"https://registry.npmmirror.com/object-hash/-/object-hash-3.0.0.tgz#73f97f753e7baffc0e2cc9d6e079079744ac82e9" + integrity sha512-RSn9F68PjH9HqtltsSnqYC1XXoWe9Bju5+213R98cNGttag9q9yAOTzdbsqvIa7aNm5WffBZFpWYr2aWrklWAw== + +once@^1.3.0, once@^1.3.1, once@^1.4.0: + version "1.4.0" + resolved "https://registry.npmmirror.com/once/-/once-1.4.0.tgz#583b1aa775961d4b113ac17d9c50baef9dd76bd1" + integrity sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w== + dependencies: + wrappy "1" + +onetime@^2.0.0: + version "2.0.1" + resolved "https://registry.npmmirror.com/onetime/-/onetime-2.0.1.tgz#067428230fd67443b2794b22bba528b6867962d4" + integrity sha512-oyyPpiMaKARvvcgip+JV+7zci5L8D1W9RZIz2l1o08AM3pfspitVWnPt3mzHcBPp12oYMTy0pqrFs/C+m3EwsQ== + dependencies: + mimic-fn "^1.0.0" + +onetime@^5.1.0, onetime@^5.1.2: + version "5.1.2" + resolved "https://registry.npmmirror.com/onetime/-/onetime-5.1.2.tgz#d0e96ebb56b07476df1dd9c4806e5237985ca45e" + integrity sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg== + dependencies: + mimic-fn "^2.1.0" + +optionator@^0.8.3: + version "0.8.3" + resolved "https://registry.npmmirror.com/optionator/-/optionator-0.8.3.tgz#84fa1d036fe9d3c7e21d99884b601167ec8fb495" + integrity sha512-+IW9pACdk3XWmmTXG8m3upGUJst5XRGzxMRjXzAuJ1XnIFNvfhjjIuYkDvysnPQ7qzqVzLt78BCruntqRhWQbA== + dependencies: + deep-is "~0.1.3" + fast-levenshtein "~2.0.6" + levn "~0.3.0" + prelude-ls "~1.1.2" + type-check "~0.3.2" + word-wrap "~1.2.3" + +ora@5.4.1, ora@^5.4.1: + version "5.4.1" + resolved "https://registry.npmmirror.com/ora/-/ora-5.4.1.tgz#1b2678426af4ac4a509008e5e4ac9e9959db9e18" + integrity sha512-5b6Y85tPxZZ7QytO+BQzysW31HJku27cRIlkbAXaNx+BdcVi+LlRFmVXzeF6a7JCwJpyw5c4b+YSVImQIrBpuQ== + dependencies: + bl "^4.1.0" + chalk "^4.1.0" + cli-cursor "^3.1.0" + cli-spinners "^2.5.0" + is-interactive "^1.0.0" + is-unicode-supported "^0.1.0" + log-symbols "^4.1.0" + strip-ansi "^6.0.0" + wcwidth "^1.0.1" + +orderedmap@^2.0.0: + version "2.1.1" + resolved "https://registry.npmmirror.com/orderedmap/-/orderedmap-2.1.1.tgz#61481269c44031c449915497bf5a4ad273c512d2" + integrity sha512-TvAWxi0nDe1j/rtMcWcIj94+Ffe6n7zhow33h40SKxmsmozs6dz/e+EajymfoFcHd7sxNn8yHM8839uixMOV6g== + +os-tmpdir@~1.0.2: + version "1.0.2" + resolved "https://registry.npmmirror.com/os-tmpdir/-/os-tmpdir-1.0.2.tgz#bbe67406c79aa85c5cfec766fe5734555dfa1274" + integrity sha512-D2FR03Vir7FIu45XBY20mTb+/ZSWB00sjU9jdQXt83gDrI4Ztz5Fs7/yy74g2N5SVQY4xY1qDr4rNddwYRVX0g== + +outvariant@^1.2.0: + version "1.4.3" + resolved "https://registry.npmmirror.com/outvariant/-/outvariant-1.4.3.tgz#221c1bfc093e8fec7075497e7799fdbf43d14873" + integrity sha512-+Sl2UErvtsoajRDKCE5/dBz4DIvHXQQnAxtQTF04OJxY0+DyZXSo5P5Bb7XYWOh81syohlYL24hbDwxedPUJCA== + +p-finally@^1.0.0: + version "1.0.0" + resolved "https://registry.npmmirror.com/p-finally/-/p-finally-1.0.0.tgz#3fbcfb15b899a44123b34b6dcc18b724336a2cae" + integrity sha512-LICb2p9CB7FS+0eR1oqWnHhp0FljGLZCWBE9aix0Uye9W8LTQPwMTYVGWQWIw9RdQiDg4+epXQODwIYJtSJaow== + +p-finally@^2.0.0: + version "2.0.1" + resolved "https://registry.npmmirror.com/p-finally/-/p-finally-2.0.1.tgz#bd6fcaa9c559a096b680806f4d657b3f0f240561" + integrity sha512-vpm09aKwq6H9phqRQzecoDpD8TmVyGw70qmWlyq5onxY7tqyTTFVvxMykxQSQKILBSFlbXpypIw2T1Ml7+DDtw== + +p-limit@^2.2.0: + version "2.3.0" + resolved "https://registry.npmmirror.com/p-limit/-/p-limit-2.3.0.tgz#3dd33c647a214fdfffd835933eb086da0dc21db1" + integrity 
sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w== + dependencies: + p-try "^2.0.0" + +p-limit@^3.0.2: + version "3.1.0" + resolved "https://registry.npmmirror.com/p-limit/-/p-limit-3.1.0.tgz#e1daccbe78d0d1388ca18c64fea38e3e57e3706b" + integrity sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ== + dependencies: + yocto-queue "^0.1.0" + +p-locate@^4.1.0: + version "4.1.0" + resolved "https://registry.npmmirror.com/p-locate/-/p-locate-4.1.0.tgz#a3428bb7088b3a60292f66919278b7c297ad4f07" + integrity sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A== + dependencies: + p-limit "^2.2.0" + +p-locate@^5.0.0: + version "5.0.0" + resolved "https://registry.npmmirror.com/p-locate/-/p-locate-5.0.0.tgz#83c8315c6785005e3bd021839411c9e110e6d834" + integrity sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw== + dependencies: + p-limit "^3.0.2" + +p-map@^2.0.0: + version "2.1.0" + resolved "https://registry.npmmirror.com/p-map/-/p-map-2.1.0.tgz#310928feef9c9ecc65b68b17693018a665cea175" + integrity sha512-y3b8Kpd8OAN444hxfBbFfj1FY/RjtTd8tzYwhUqNYXx0fXx2iX4maP4Qr6qhIKbQXI02wTLAda4fYUbDagTUFw== + +p-map@^3.0.0: + version "3.0.0" + resolved "https://registry.npmmirror.com/p-map/-/p-map-3.0.0.tgz#d704d9af8a2ba684e2600d9a215983d4141a979d" + integrity sha512-d3qXVTF/s+W+CdJ5A29wywV2n8CQQYahlgz2bFiA+4eVNJbHJodPZ+/gXwPGh0bOqA+j8S+6+ckmvLGPk1QpxQ== + dependencies: + aggregate-error "^3.0.0" + +p-try@^2.0.0: + version "2.2.0" + resolved "https://registry.npmmirror.com/p-try/-/p-try-2.2.0.tgz#cb2868540e313d61de58fafbe35ce9004d5540e6" + integrity sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ== + +package-json-from-dist@^1.0.0: + version "1.0.1" + resolved "https://registry.npmmirror.com/package-json-from-dist/-/package-json-from-dist-1.0.1.tgz#4f1471a010827a86f94cfd9b0727e36d267de505" + integrity sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw== + +package-manager-detector@^0.2.8: + version "0.2.11" + resolved "https://registry.npmmirror.com/package-manager-detector/-/package-manager-detector-0.2.11.tgz#3af0b34f99d86d24af0a0620603d2e1180d05c9c" + integrity sha512-BEnLolu+yuz22S56CU1SUKq3XC3PkwD5wv4ikR4MfGvnRVcmzXR9DwSlW2fEamyTPyXHomBJRzgapeuBvRNzJQ== + dependencies: + quansync "^0.2.7" + +papaparse@^5.4.1: + version "5.5.2" + resolved "https://registry.npmmirror.com/papaparse/-/papaparse-5.5.2.tgz#fb67cc5a03ba8930cb435dc4641a25d6804bd4d7" + integrity sha512-PZXg8UuAc4PcVwLosEEDYjPyfWnTEhOrUfdv+3Bx+NuAb+5NhDmXzg5fHWmdCh1mP5p7JAZfFr3IMQfcntNAdA== + +parent-module@^1.0.0: + version "1.0.1" + resolved "https://registry.npmmirror.com/parent-module/-/parent-module-1.0.1.tgz#691d2709e78c79fae3a156622452d00762caaaa2" + integrity sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g== + dependencies: + callsites "^3.0.0" + +parse-json@^4.0.0: + version "4.0.0" + resolved "https://registry.npmmirror.com/parse-json/-/parse-json-4.0.0.tgz#be35f5425be1f7f6c747184f98a788cb99477ee0" + integrity sha512-aOIos8bujGN93/8Ox/jPLh7RwVnPEysynVFE+fQZyg6jKELEHwzgKdLRFHUgXJL6kylijVSBC4BvN9OmsB48Rw== + dependencies: + error-ex "^1.3.1" + json-parse-better-errors "^1.0.1" + +parse-json@^5.0.0: + version "5.2.0" + resolved "https://registry.npmmirror.com/parse-json/-/parse-json-5.2.0.tgz#c76fc66dee54231c962b22bcc8a72cf2f99753cd" + integrity 
sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg== + dependencies: + "@babel/code-frame" "^7.0.0" + error-ex "^1.3.1" + json-parse-even-better-errors "^2.3.0" + lines-and-columns "^1.1.6" + +parse5@6.0.1: + version "6.0.1" + resolved "https://registry.npmmirror.com/parse5/-/parse5-6.0.1.tgz#e1a1c085c569b3dc08321184f19a39cc27f7c30b" + integrity sha512-Ofn/CTFzRGTTxwpNEs9PP93gXShHcTq255nzRYSKe8AkVpZY7e1fpmTfOyoIvjP5HG7Z2ZM7VS9PPhQGW2pOpw== + +path-browserify@^1.0.1: + version "1.0.1" + resolved "https://registry.npmmirror.com/path-browserify/-/path-browserify-1.0.1.tgz#d98454a9c3753d5790860f16f68867b9e46be1fd" + integrity sha512-b7uo2UCUOYZcnF/3ID0lulOJi/bafxa1xPe7ZPsammBSpjSWQkjNxlt635YGS2MiR9GjvuXCtz2emr3jbsz98g== + +path-exists@^4.0.0: + version "4.0.0" + resolved "https://registry.npmmirror.com/path-exists/-/path-exists-4.0.0.tgz#513bdbe2d3b95d7762e8c1137efa195c6c61b5b3" + integrity sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w== + +path-is-absolute@^1.0.0: + version "1.0.1" + resolved "https://registry.npmmirror.com/path-is-absolute/-/path-is-absolute-1.0.1.tgz#174b9268735534ffbc7ace6bf53a5a9e1b5c5f5f" + integrity sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg== + +path-key@^2.0.0, path-key@^2.0.1: + version "2.0.1" + resolved "https://registry.npmmirror.com/path-key/-/path-key-2.0.1.tgz#411cadb574c5a140d3a4b1910d40d80cc9f40b40" + integrity sha512-fEHGKCSmUSDPv4uoj8AlD+joPlq3peND+HRYyxFz4KPw4z926S/b8rIuFs2FYJg3BwsxJf6A9/3eIdLaYC+9Dw== + +path-key@^3.0.0, path-key@^3.1.0: + version "3.1.1" + resolved "https://registry.npmmirror.com/path-key/-/path-key-3.1.1.tgz#581f6ade658cbba65a0d3380de7753295054f375" + integrity sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q== + +path-parse@^1.0.7: + version "1.0.7" + resolved "https://registry.npmmirror.com/path-parse/-/path-parse-1.0.7.tgz#fbc114b60ca42b30d9daf5858e4bd68bbedb6735" + integrity sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw== + +path-scurry@^1.11.1, path-scurry@^1.6.1: + version "1.11.1" + resolved "https://registry.npmmirror.com/path-scurry/-/path-scurry-1.11.1.tgz#7960a668888594a0720b12a911d1a742ab9f11d2" + integrity sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA== + dependencies: + lru-cache "^10.2.0" + minipass "^5.0.0 || ^6.0.2 || ^7.0.0" + +path-to-regexp@^6.2.0: + version "6.3.0" + resolved "https://registry.npmmirror.com/path-to-regexp/-/path-to-regexp-6.3.0.tgz#2b6a26a337737a8e1416f9272ed0766b1c0389f4" + integrity sha512-Yhpw4T9C6hPpgPeA28us07OJeqZ5EzQTkbfwuhsUg0c237RomFoETJgmp2sa3F/41gfLE6G5cqcYwznmeEeOlQ== + +path-type@^4.0.0: + version "4.0.0" + resolved "https://registry.npmmirror.com/path-type/-/path-type-4.0.0.tgz#84ed01c0a7ba380afe09d90a8c180dcd9d03043b" + integrity sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw== + +pathe@^1.1.2: + version "1.1.2" + resolved "https://registry.npmmirror.com/pathe/-/pathe-1.1.2.tgz#6c4cb47a945692e48a1ddd6e4094d170516437ec" + integrity sha512-whLdWMYL2TwI08hn8/ZqAbrVemu0LNaNNJZX73O6qaIdCTfXutsLhMkjdENX0qhsQ9uIimo4/aQOmXkoon2nDQ== + +pathe@^2.0.1, pathe@^2.0.3: + version "2.0.3" + resolved "https://registry.npmmirror.com/pathe/-/pathe-2.0.3.tgz#3ecbec55421685b70a9da872b2cff3e1cbed1716" + integrity 
sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w== + +pathval@^1.1.1: + version "1.1.1" + resolved "https://registry.npmmirror.com/pathval/-/pathval-1.1.1.tgz#8534e77a77ce7ac5a2512ea21e0fdb8fcf6c3d8d" + integrity sha512-Dp6zGqpTdETdR63lehJYPeIOqpiNBNtc7BpWSLrOje7UaIsE5aY92r/AunQA7rsXvet3lrJ3JnZX29UPTKXyKQ== + +picocolors@^0.2.1: + version "0.2.1" + resolved "https://registry.npmmirror.com/picocolors/-/picocolors-0.2.1.tgz#570670f793646851d1ba135996962abad587859f" + integrity sha512-cMlDqaLEqfSaW8Z7N5Jw+lyIW869EzT73/F5lhtY9cLGoVxSXznfgfXMO0Z5K0o0Q2TkTXq+0KFsdnSe3jDViA== + +picocolors@^1.0.0, picocolors@^1.1.1: + version "1.1.1" + resolved "https://registry.npmmirror.com/picocolors/-/picocolors-1.1.1.tgz#3d321af3eab939b083c8f929a1d12cda81c26b6b" + integrity sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA== + +picomatch@^2.0.4, picomatch@^2.2.1, picomatch@^2.3.1: + version "2.3.1" + resolved "https://registry.npmmirror.com/picomatch/-/picomatch-2.3.1.tgz#3ba3833733646d9d3e4995946c1365a67fb07a42" + integrity sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA== + +picomatch@^4.0.2: + version "4.0.2" + resolved "https://registry.npmmirror.com/picomatch/-/picomatch-4.0.2.tgz#77c742931e8f3b8820946c76cd0c1f13730d1dab" + integrity sha512-M7BAV6Rlcy5u+m6oPhAPFgJTzAioX/6B0DxyvDlo9l8+T3nLKbrczg2WLUyzd45L8RqfUMyGPzekbMvX2Ldkwg== + +pify@^2.0.0, pify@^2.3.0: + version "2.3.0" + resolved "https://registry.npmmirror.com/pify/-/pify-2.3.0.tgz#ed141a6ac043a849ea588498e7dca8b15330e90c" + integrity sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog== + +pify@^3.0.0: + version "3.0.0" + resolved "https://registry.npmmirror.com/pify/-/pify-3.0.0.tgz#e5a4acd2c101fdf3d9a4d07f0dbc4db49dd28176" + integrity sha512-C3FsVNH1udSEX48gGX1xfvwTWfsYWj5U+8/uK15BGzIGrKoUpghX8hWZwa/OFnakBiiVNmBvemTJR5mcy7iPcg== + +pinkie-promise@^2.0.0: + version "2.0.1" + resolved "https://registry.npmmirror.com/pinkie-promise/-/pinkie-promise-2.0.1.tgz#2135d6dfa7a358c069ac9b178776288228450ffa" + integrity sha512-0Gni6D4UcLTbv9c57DfxDGdr41XfgUjqWZu492f0cIGr16zDU06BWP/RAEvOuo7CQ0CNjHaLlM59YJJFm3NWlw== + dependencies: + pinkie "^2.0.0" + +pinkie@^2.0.0: + version "2.0.4" + resolved "https://registry.npmmirror.com/pinkie/-/pinkie-2.0.4.tgz#72556b80cfa0d48a974e80e77248e80ed4f7f870" + integrity sha512-MnUuEycAemtSaeFSjXKW/aroV7akBbY+Sv+RkyqFjgAe73F+MR0TBWKBRDkmfWq/HiFmdavfZ1G7h4SPZXaCSg== + +pirates@^4.0.1: + version "4.0.7" + resolved "https://registry.npmmirror.com/pirates/-/pirates-4.0.7.tgz#643b4a18c4257c8a65104b73f3049ce9a0a15e22" + integrity sha512-TfySrs/5nm8fQJDcBDuUng3VOUKsd7S+zqvbOTiGXHfxX4wK31ard+hoNuvkicM/2YFzlpDgABOevKSsB4G/FA== + +pkg-types@^1.2.1, pkg-types@^1.3.0: + version "1.3.1" + resolved "https://registry.npmmirror.com/pkg-types/-/pkg-types-1.3.1.tgz#bd7cc70881192777eef5326c19deb46e890917df" + integrity sha512-/Jm5M4RvtBFVkKWRu2BLUTNP8/M2a+UwuAX+ae4770q1qVGtfjG+WTCupoZixokjmHiry8uI+dlY8KXYV5HVVQ== + dependencies: + confbox "^0.1.8" + mlly "^1.7.4" + pathe "^2.0.1" + +pkg-types@^2.0.1: + version "2.1.0" + resolved "https://registry.npmmirror.com/pkg-types/-/pkg-types-2.1.0.tgz#70c9e1b9c74b63fdde749876ee0aa007ea9edead" + integrity sha512-wmJwA+8ihJixSoHKxZJRBQG1oY8Yr9pGLzRmSsNms0iNWyHHAlZCa7mmKiFR10YPZuz/2k169JiS/inOjBCZ2A== + dependencies: + confbox "^0.2.1" + exsolve "^1.0.1" + pathe "^2.0.3" + +please-upgrade-node@^3.1.1: + version "3.2.0" + resolved 
"https://registry.npmmirror.com/please-upgrade-node/-/please-upgrade-node-3.2.0.tgz#aeddd3f994c933e4ad98b99d9a556efa0e2fe942" + integrity sha512-gQR3WpIgNIKwBMVLkpMUeR3e1/E1y42bqDQZfql+kDeXd8COYfM8PQA4X6y7a8u9Ua9FHmsrrmirW2vHs45hWg== + dependencies: + semver-compare "^1.0.0" + +pngjs@^5.0.0: + version "5.0.0" + resolved "https://registry.npmmirror.com/pngjs/-/pngjs-5.0.0.tgz#e79dd2b215767fd9c04561c01236df960bce7fbb" + integrity sha512-40QW5YalBNfQo5yRYmiw7Yz6TKKVr3h6970B2YE+3fQpsWcrbj1PzJgxeJ19DRQjhMbKPIuMY8rFaXc8moolVw== + +postcss-easy-import@^4.0.0: + version "4.0.0" + resolved "https://registry.npmmirror.com/postcss-easy-import/-/postcss-easy-import-4.0.0.tgz#8ada07d857c2578f38a832b841714a7ddefccbee" + integrity sha512-Nq5/zsnyMyzz1D7Y7TC1hawNLWQLlh4eJnsM9zlxtc8mAeY8edlvSO1UGs1hyQvNvzK3OJ1bgLjsuyTijyvBEQ== + dependencies: + globby "^6.1.0" + is-glob "^4.0.0" + lodash "^4.17.4" + object-assign "^4.0.1" + pify "^3.0.0" + postcss-import "^14.0.0" + resolve "^1.1.7" + +postcss-functions@^3: + version "3.0.0" + resolved "https://registry.npmmirror.com/postcss-functions/-/postcss-functions-3.0.0.tgz#0e94d01444700a481de20de4d55fb2640564250e" + integrity sha512-N5yWXWKA+uhpLQ9ZhBRl2bIAdM6oVJYpDojuI1nF2SzXBimJcdjFwiAouBVbO5VuOF3qA6BSFWFc3wXbbj72XQ== + dependencies: + glob "^7.1.2" + object-assign "^4.1.1" + postcss "^6.0.9" + postcss-value-parser "^3.3.0" + +postcss-import@^14.0.0: + version "14.1.0" + resolved "https://registry.npmmirror.com/postcss-import/-/postcss-import-14.1.0.tgz#a7333ffe32f0b8795303ee9e40215dac922781f0" + integrity sha512-flwI+Vgm4SElObFVPpTIT7SU7R3qk2L7PyduMcokiaVKuWv9d/U+Gm/QAd8NDLuykTWTkcrjOeD2Pp1rMeBTGw== + dependencies: + postcss-value-parser "^4.0.0" + read-cache "^1.0.0" + resolve "^1.1.7" + +postcss-import@^15.1.0: + version "15.1.0" + resolved "https://registry.npmmirror.com/postcss-import/-/postcss-import-15.1.0.tgz#41c64ed8cc0e23735a9698b3249ffdbf704adc70" + integrity sha512-hpr+J05B2FVYUAXHeK1YyI267J/dDDhMU6B6civm8hSY1jYJnBXxzKDKDswzJmtLHryrjhnDjqqp/49t8FALew== + dependencies: + postcss-value-parser "^4.0.0" + read-cache "^1.0.0" + resolve "^1.1.7" + +postcss-js@^2: + version "2.0.3" + resolved "https://registry.npmmirror.com/postcss-js/-/postcss-js-2.0.3.tgz#a96f0f23ff3d08cec7dc5b11bf11c5f8077cdab9" + integrity sha512-zS59pAk3deu6dVHyrGqmC3oDXBdNdajk4k1RyxeVXCrcEDBUBHoIhE4QTsmhxgzXxsaqFDAkUZfmMa5f/N/79w== + dependencies: + camelcase-css "^2.0.1" + postcss "^7.0.18" + +postcss-js@^4.0.1: + version "4.0.1" + resolved "https://registry.npmmirror.com/postcss-js/-/postcss-js-4.0.1.tgz#61598186f3703bab052f1c4f7d805f3991bee9d2" + integrity sha512-dDLF8pEO191hJMtlHFPRa8xsizHaM82MLfNkUHdUtVEV3tgTp5oj+8qbEqYM57SLfc74KSbw//4SeJma2LRVIw== + dependencies: + camelcase-css "^2.0.1" + +postcss-load-config@^3.1.0: + version "3.1.4" + resolved "https://registry.npmmirror.com/postcss-load-config/-/postcss-load-config-3.1.4.tgz#1ab2571faf84bb078877e1d07905eabe9ebda855" + integrity sha512-6DiM4E7v4coTE4uzA8U//WhtPwyhiim3eyjEMFCnUpzbrkK9wJHgKDT2mR+HbtSrd/NubVaYTOpSpjUl8NQeRg== + dependencies: + lilconfig "^2.0.5" + yaml "^1.10.2" + +postcss-load-config@^4.0.2: + version "4.0.2" + resolved "https://registry.npmmirror.com/postcss-load-config/-/postcss-load-config-4.0.2.tgz#7159dcf626118d33e299f485d6afe4aff7c4a3e3" + integrity sha512-bSVhyJGL00wMVoPUzAVAnbEoWyqRxkjv64tUl427SKnPrENtq6hJwUojroMz2VB+Q1edmi4IfrAPpami5VVgMQ== + dependencies: + lilconfig "^3.0.0" + yaml "^2.3.4" + +postcss-nested@^4: + version "4.2.3" + resolved 
"https://registry.npmmirror.com/postcss-nested/-/postcss-nested-4.2.3.tgz#c6f255b0a720549776d220d00c4b70cd244136f6" + integrity sha512-rOv0W1HquRCamWy2kFl3QazJMMe1ku6rCFoAAH+9AcxdbpDeBr6k968MLWuLjvjMcGEip01ak09hKOEgpK9hvw== + dependencies: + postcss "^7.0.32" + postcss-selector-parser "^6.0.2" + +postcss-nested@^6.2.0: + version "6.2.0" + resolved "https://registry.npmmirror.com/postcss-nested/-/postcss-nested-6.2.0.tgz#4c2d22ab5f20b9cb61e2c5c5915950784d068131" + integrity sha512-HQbt28KulC5AJzG+cZtj9kvKB93CFCdLvog1WFLf1D+xmMvPGlBstkpTEZfK5+AN9hfJocyBFCNiqyS48bpgzQ== + dependencies: + postcss-selector-parser "^6.1.1" + +postcss-selector-parser@6.0.10: + version "6.0.10" + resolved "https://registry.npmmirror.com/postcss-selector-parser/-/postcss-selector-parser-6.0.10.tgz#79b61e2c0d1bfc2602d549e11d0876256f8df88d" + integrity sha512-IQ7TZdoaqbT+LCpShg46jnZVlhWD2w6iQYAcYXfHARZ7X1t/UGhhceQDs5X0cGqKvYlHNOuv7Oa1xmb0oQuA3w== + dependencies: + cssesc "^3.0.0" + util-deprecate "^1.0.2" + +postcss-selector-parser@^6.0.2, postcss-selector-parser@^6.0.6, postcss-selector-parser@^6.1.1, postcss-selector-parser@^6.1.2: + version "6.1.2" + resolved "https://registry.npmmirror.com/postcss-selector-parser/-/postcss-selector-parser-6.1.2.tgz#27ecb41fb0e3b6ba7a1ec84fff347f734c7929de" + integrity sha512-Q8qQfPiZ+THO/3ZrOrO0cJJKfpYCagtMUkXbnEfmgUjwXg6z/WBeOyS9APBBPCTSiDV+s4SwQGu8yFsiMRIudg== + dependencies: + cssesc "^3.0.0" + util-deprecate "^1.0.2" + +postcss-value-parser@^3.3.0: + version "3.3.1" + resolved "https://registry.npmmirror.com/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz#9ff822547e2893213cf1c30efa51ac5fd1ba8281" + integrity sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ== + +postcss-value-parser@^4.0.0, postcss-value-parser@^4.1.0, postcss-value-parser@^4.2.0: + version "4.2.0" + resolved "https://registry.npmmirror.com/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz#723c09920836ba6d3e5af019f92bc0971c02e514" + integrity sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ== + +postcss@^6.0.9: + version "6.0.23" + resolved "https://registry.npmmirror.com/postcss/-/postcss-6.0.23.tgz#61c82cc328ac60e677645f979054eb98bc0e3324" + integrity sha512-soOk1h6J3VMTZtVeVpv15/Hpdl2cBLX3CAw4TAbkpTJiNPk9YP/zWcD1ND+xEtvyuuvKzbxliTOIyvkSeSJ6ag== + dependencies: + chalk "^2.4.1" + source-map "^0.6.1" + supports-color "^5.4.0" + +postcss@^7, postcss@^7.0.18, postcss@^7.0.32: + version "7.0.39" + resolved "https://registry.npmmirror.com/postcss/-/postcss-7.0.39.tgz#9624375d965630e2e1f2c02a935c82a59cb48309" + integrity sha512-yioayjNbHn6z1/Bywyb2Y4s3yvDAeXGOyxqD+LnVOinq6Mdmd++SW2wUNVzavyyHxd6+DxzWGIuosg6P1Rj8uA== + dependencies: + picocolors "^0.2.1" + source-map "^0.6.1" + +postcss@^8.3.5, postcss@^8.4.13, postcss@^8.4.32, postcss@^8.4.47, postcss@^8.4.48, postcss@^8.4.6: + version "8.5.3" + resolved "https://registry.npmmirror.com/postcss/-/postcss-8.5.3.tgz#1463b6f1c7fb16fe258736cba29a2de35237eafb" + integrity sha512-dle9A3yYxlBSrt8Fu+IpjGT8SY8hN0mlaA6GY8t0P5PjIOZemULz/E2Bnm/2dcUOena75OTNkHI76uZBNUUq3A== + dependencies: + nanoid "^3.3.8" + picocolors "^1.1.1" + source-map-js "^1.2.1" + +prelude-ls@~1.1.2: + version "1.1.2" + resolved "https://registry.npmmirror.com/prelude-ls/-/prelude-ls-1.1.2.tgz#21932a549f5e52ffd9a827f570e04be62a97da54" + integrity sha512-ESF23V4SKG6lVSGZgYNpbsiaAkdab6ZgOxe52p7+Kid3W3u3bxR4Vfd/o21dmN7jSt0IwgZ4v5MUd26FEtXE9w== + +prettier-linter-helpers@^1.0.0: + version "1.0.0" + resolved 
"https://registry.npmmirror.com/prettier-linter-helpers/-/prettier-linter-helpers-1.0.0.tgz#d23d41fe1375646de2d0104d3454a3008802cf7b" + integrity sha512-GbK2cP9nraSSUF9N2XwUwqfzlAFlMNYYl+ShE/V+H8a9uNl/oUqB1w2EL54Jh0OlyRSd8RfWYJ3coVS4TROP2w== + dependencies: + fast-diff "^1.1.2" + +prettier-plugin-tailwindcss@^0.1.8: + version "0.1.13" + resolved "https://registry.npmmirror.com/prettier-plugin-tailwindcss/-/prettier-plugin-tailwindcss-0.1.13.tgz#ca1071361dc7e2ed5d95a2ee36825ce45f814942" + integrity sha512-/EKQURUrxLu66CMUg4+1LwGdxnz8of7IDvrSLqEtDqhLH61SAlNNUSr90UTvZaemujgl3OH/VHg+fyGltrNixw== + +prettier@^2.5.1: + version "2.8.8" + resolved "https://registry.npmmirror.com/prettier/-/prettier-2.8.8.tgz#e8c5d7e98a4305ffe3de2e1fc4aca1a71c28b1da" + integrity sha512-tdN8qQGvNjw4CHbY+XXk0JgCXn9QiF21a55rBe5LJAU+kDyC4WQn4+awm2Xfk2lQMk5fKup9XgzTZtGkjBdP9Q== + +prettier@^3.3.2: + version "3.5.3" + resolved "https://registry.npmmirror.com/prettier/-/prettier-3.5.3.tgz#4fc2ce0d657e7a02e602549f053b239cb7dfe1b5" + integrity sha512-QQtaxnoDJeAkDvDKWCLiwIXkTgRhwYDEQCghU9Z6q03iyek/rxRh/2lC3HB7P8sWT2xC/y5JDctPLBIGzHKbhw== + +pretty-hrtime@^1.0.3: + version "1.0.3" + resolved "https://registry.npmmirror.com/pretty-hrtime/-/pretty-hrtime-1.0.3.tgz#b7e3ea42435a4c9b2759d99e0f201eb195802ee1" + integrity sha512-66hKPCr+72mlfiSjlEB1+45IjXSqvVAIy6mocupoww4tBFE9R9IhwwUGoI4G++Tc9Aq+2rxOt0RFU6gPcrte0A== + +progress@^2.0.0, progress@^2.0.3: + version "2.0.3" + resolved "https://registry.npmmirror.com/progress/-/progress-2.0.3.tgz#7e8cf8d8f5b8f239c1bc68beb4eb78567d572ef8" + integrity sha512-7PiHtLll5LdnKIMw100I+8xJXR5gW2QwWYkT6iJva0bXitZKa/XMrSbdmg3r2Xnaidz9Qumd0VPaMrZlF9V9sA== + +prosemirror-changeset@^2.2.1: + version "2.2.1" + resolved "https://registry.npmmirror.com/prosemirror-changeset/-/prosemirror-changeset-2.2.1.tgz#dae94b63aec618fac7bb9061648e6e2a79988383" + integrity sha512-J7msc6wbxB4ekDFj+n9gTW/jav/p53kdlivvuppHsrZXCaQdVgRghoZbSS3kwrRyAstRVQ4/+u5k7YfLgkkQvQ== + dependencies: + prosemirror-transform "^1.0.0" + +prosemirror-collab@^1.3.1: + version "1.3.1" + resolved "https://registry.npmmirror.com/prosemirror-collab/-/prosemirror-collab-1.3.1.tgz#0e8c91e76e009b53457eb3b3051fb68dad029a33" + integrity sha512-4SnynYR9TTYaQVXd/ieUvsVV4PDMBzrq2xPUWutHivDuOshZXqQ5rGbZM84HEaXKbLdItse7weMGOUdDVcLKEQ== + dependencies: + prosemirror-state "^1.0.0" + +prosemirror-commands@^1.0.0, prosemirror-commands@^1.6.2: + version "1.7.0" + resolved "https://registry.npmmirror.com/prosemirror-commands/-/prosemirror-commands-1.7.0.tgz#c0a60c808f51157caa146922494fc59fe257f27c" + integrity sha512-6toodS4R/Aah5pdsrIwnTYPEjW70SlO5a66oo5Kk+CIrgJz3ukOoS+FYDGqvQlAX5PxoGWDX1oD++tn5X3pyRA== + dependencies: + prosemirror-model "^1.0.0" + prosemirror-state "^1.0.0" + prosemirror-transform "^1.10.2" + +prosemirror-dropcursor@^1.8.1: + version "1.8.1" + resolved "https://registry.npmmirror.com/prosemirror-dropcursor/-/prosemirror-dropcursor-1.8.1.tgz#49b9fb2f583e0d0f4021ff87db825faa2be2832d" + integrity sha512-M30WJdJZLyXHi3N8vxN6Zh5O8ZBbQCz0gURTfPmTIBNQ5pxrdU7A58QkNqfa98YEjSAL1HUyyU34f6Pm5xBSGw== + dependencies: + prosemirror-state "^1.0.0" + prosemirror-transform "^1.1.0" + prosemirror-view "^1.1.0" + +prosemirror-gapcursor@^1.3.2: + version "1.3.2" + resolved "https://registry.npmmirror.com/prosemirror-gapcursor/-/prosemirror-gapcursor-1.3.2.tgz#5fa336b83789c6199a7341c9493587e249215cb4" + integrity sha512-wtjswVBd2vaQRrnYZaBCbyDqr232Ed4p2QPtRIUK5FuqHYKGWkEwl08oQM4Tw7DOR0FsasARV5uJFvMZWxdNxQ== + dependencies: + prosemirror-keymap "^1.0.0" + 
prosemirror-model "^1.0.0" + prosemirror-state "^1.0.0" + prosemirror-view "^1.0.0" + +prosemirror-history@^1.0.0, prosemirror-history@^1.4.1: + version "1.4.1" + resolved "https://registry.npmmirror.com/prosemirror-history/-/prosemirror-history-1.4.1.tgz#cc370a46fb629e83a33946a0e12612e934ab8b98" + integrity sha512-2JZD8z2JviJrboD9cPuX/Sv/1ChFng+xh2tChQ2X4bB2HeK+rra/bmJ3xGntCcjhOqIzSDG6Id7e8RJ9QPXLEQ== + dependencies: + prosemirror-state "^1.2.2" + prosemirror-transform "^1.0.0" + prosemirror-view "^1.31.0" + rope-sequence "^1.3.0" + +prosemirror-inputrules@^1.4.0: + version "1.5.0" + resolved "https://registry.npmmirror.com/prosemirror-inputrules/-/prosemirror-inputrules-1.5.0.tgz#e22bfaf1d6ea4fe240ad447c184af3d520d43c37" + integrity sha512-K0xJRCmt+uSw7xesnHmcn72yBGTbY45vm8gXI4LZXbx2Z0jwh5aF9xrGQgrVPu0WbyFVFF3E/o9VhJYz6SQWnA== + dependencies: + prosemirror-state "^1.0.0" + prosemirror-transform "^1.0.0" + +prosemirror-keymap@^1.0.0, prosemirror-keymap@^1.2.2: + version "1.2.2" + resolved "https://registry.npmmirror.com/prosemirror-keymap/-/prosemirror-keymap-1.2.2.tgz#14a54763a29c7b2704f561088ccf3384d14eb77e" + integrity sha512-EAlXoksqC6Vbocqc0GtzCruZEzYgrn+iiGnNjsJsH4mrnIGex4qbLdWWNza3AW5W36ZRrlBID0eM6bdKH4OStQ== + dependencies: + prosemirror-state "^1.0.0" + w3c-keyname "^2.2.0" + +prosemirror-markdown@^1.13.1: + version "1.13.2" + resolved "https://registry.npmmirror.com/prosemirror-markdown/-/prosemirror-markdown-1.13.2.tgz#863eb3fd5f57a444e4378174622b562735b1c503" + integrity sha512-FPD9rHPdA9fqzNmIIDhhnYQ6WgNoSWX9StUZ8LEKapaXU9i6XgykaHKhp6XMyXlOWetmaFgGDS/nu/w9/vUc5g== + dependencies: + "@types/markdown-it" "^14.0.0" + markdown-it "^14.0.0" + prosemirror-model "^1.25.0" + +prosemirror-menu@^1.2.4: + version "1.2.4" + resolved "https://registry.npmmirror.com/prosemirror-menu/-/prosemirror-menu-1.2.4.tgz#3cfdc7c06d10f9fbd1bce29082c498bd11a0a79a" + integrity sha512-S/bXlc0ODQup6aiBbWVsX/eM+xJgCTAfMq/nLqaO5ID/am4wS0tTCIkzwytmao7ypEtjj39i7YbJjAgO20mIqA== + dependencies: + crelt "^1.0.0" + prosemirror-commands "^1.0.0" + prosemirror-history "^1.0.0" + prosemirror-state "^1.0.0" + +prosemirror-model@^1.0.0, prosemirror-model@^1.20.0, prosemirror-model@^1.21.0, prosemirror-model@^1.23.0, prosemirror-model@^1.24.1, prosemirror-model@^1.25.0: + version "1.25.0" + resolved "https://registry.npmmirror.com/prosemirror-model/-/prosemirror-model-1.25.0.tgz#c147113edc0718a14f03881e4c20367d0221f7af" + integrity sha512-/8XUmxWf0pkj2BmtqZHYJipTBMHIdVjuvFzMvEoxrtyGNmfvdhBiRwYt/eFwy2wA9DtBW3RLqvZnjurEkHaFCw== + dependencies: + orderedmap "^2.0.0" + +prosemirror-schema-basic@^1.2.3: + version "1.2.4" + resolved "https://registry.npmmirror.com/prosemirror-schema-basic/-/prosemirror-schema-basic-1.2.4.tgz#389ce1ec09b8a30ea9bbb92c58569cb690c2d695" + integrity sha512-ELxP4TlX3yr2v5rM7Sb70SqStq5NvI15c0j9j/gjsrO5vaw+fnnpovCLEGIcpeGfifkuqJwl4fon6b+KdrODYQ== + dependencies: + prosemirror-model "^1.25.0" + +prosemirror-schema-list@^1.4.1: + version "1.5.1" + resolved "https://registry.npmmirror.com/prosemirror-schema-list/-/prosemirror-schema-list-1.5.1.tgz#5869c8f749e8745c394548bb11820b0feb1e32f5" + integrity sha512-927lFx/uwyQaGwJxLWCZRkjXG0p48KpMj6ueoYiu4JX05GGuGcgzAy62dfiV8eFZftgyBUvLx76RsMe20fJl+Q== + dependencies: + prosemirror-model "^1.0.0" + prosemirror-state "^1.0.0" + prosemirror-transform "^1.7.3" + +prosemirror-state@^1.0.0, prosemirror-state@^1.2.2, prosemirror-state@^1.4.3: + version "1.4.3" + resolved 
"https://registry.npmmirror.com/prosemirror-state/-/prosemirror-state-1.4.3.tgz#94aecf3ffd54ec37e87aa7179d13508da181a080" + integrity sha512-goFKORVbvPuAQaXhpbemJFRKJ2aixr+AZMGiquiqKxaucC6hlpHNZHWgz5R7dS4roHiwq9vDctE//CZ++o0W1Q== + dependencies: + prosemirror-model "^1.0.0" + prosemirror-transform "^1.0.0" + prosemirror-view "^1.27.0" + +prosemirror-tables@^1.6.4: + version "1.6.4" + resolved "https://registry.npmmirror.com/prosemirror-tables/-/prosemirror-tables-1.6.4.tgz#e36ebca70d9e398c4a3b99b122ba86bfc985293d" + integrity sha512-TkDY3Gw52gRFRfRn2f4wJv5WOgAOXLJA2CQJYIJ5+kdFbfj3acR4JUW6LX2e1hiEBiUwvEhzH5a3cZ5YSztpIA== + dependencies: + prosemirror-keymap "^1.2.2" + prosemirror-model "^1.24.1" + prosemirror-state "^1.4.3" + prosemirror-transform "^1.10.2" + prosemirror-view "^1.37.2" + +prosemirror-trailing-node@^3.0.0: + version "3.0.0" + resolved "https://registry.npmmirror.com/prosemirror-trailing-node/-/prosemirror-trailing-node-3.0.0.tgz#5bc223d4fc1e8d9145e4079ec77a932b54e19e04" + integrity sha512-xiun5/3q0w5eRnGYfNlW1uU9W6x5MoFKWwq/0TIRgt09lv7Hcser2QYV8t4muXbEr+Fwo0geYn79Xs4GKywrRQ== + dependencies: + "@remirror/core-constants" "3.0.0" + escape-string-regexp "^4.0.0" + +prosemirror-transform@^1.0.0, prosemirror-transform@^1.1.0, prosemirror-transform@^1.10.2, prosemirror-transform@^1.7.3: + version "1.10.3" + resolved "https://registry.npmmirror.com/prosemirror-transform/-/prosemirror-transform-1.10.3.tgz#fae660bd7ffef3159aff44bc21e9e044aa31b67d" + integrity sha512-Nhh/+1kZGRINbEHmVu39oynhcap4hWTs/BlU7NnxWj3+l0qi8I1mu67v6mMdEe/ltD8hHvU4FV6PHiCw2VSpMw== + dependencies: + prosemirror-model "^1.21.0" + +prosemirror-view@^1.0.0, prosemirror-view@^1.1.0, prosemirror-view@^1.27.0, prosemirror-view@^1.31.0, prosemirror-view@^1.37.0, prosemirror-view@^1.37.2: + version "1.39.1" + resolved "https://registry.npmmirror.com/prosemirror-view/-/prosemirror-view-1.39.1.tgz#9e24cc82649d37ed5d75bf59419694b0566927bb" + integrity sha512-GhLxH1xwnqa5VjhJ29LfcQITNDp+f1jzmMPXQfGW9oNrF0lfjPzKvV5y/bjIQkyKpwCX3Fp+GA4dBpMMk8g+ZQ== + dependencies: + prosemirror-model "^1.20.0" + prosemirror-state "^1.0.0" + prosemirror-transform "^1.1.0" + +proto-list@~1.2.1: + version "1.2.4" + resolved "https://registry.npmmirror.com/proto-list/-/proto-list-1.2.4.tgz#212d5bfe1318306a420f6402b8e26ff39647a849" + integrity sha512-vtK/94akxsTMhe0/cbfpR+syPuszcuwhqVjJq26CuNDgFGj682oRBXOP5MJpv2r7JtE8MsiepGIqvvOTBwn2vA== + +proxy-from-env@^1.1.0: + version "1.1.0" + resolved "https://registry.npmmirror.com/proxy-from-env/-/proxy-from-env-1.1.0.tgz#e102f16ca355424865755d2c9e8ea4f24d58c3e2" + integrity sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg== + +pseudomap@^1.0.2: + version "1.0.2" + resolved "https://registry.npmmirror.com/pseudomap/-/pseudomap-1.0.2.tgz#f052a28da70e618917ef0a8ac34c1ae5a68286b3" + integrity sha512-b/YwNhb8lk1Zz2+bXXpS/LK9OisiZZ1SNsSLxN1x2OXVEhW2Ckr/7mWE5vrC1ZTiJlD9g19jWszTmJsB+oEpFQ== + +psl@^1.1.33: + version "1.15.0" + resolved "https://registry.npmmirror.com/psl/-/psl-1.15.0.tgz#bdace31896f1d97cec6a79e8224898ce93d974c6" + integrity sha512-JZd3gMVBAVQkSs6HdNZo9Sdo0LNcQeMNP3CozBJb3JYC/QUYZTnKxP+f8oWRX4rHP5EurWxqAHTSwUCjlNKa1w== + dependencies: + punycode "^2.3.1" + +pump@^3.0.0: + version "3.0.2" + resolved "https://registry.npmmirror.com/pump/-/pump-3.0.2.tgz#836f3edd6bc2ee599256c924ffe0d88573ddcbf8" + integrity sha512-tUPXtzlGM8FE3P0ZL6DVs/3P58k9nk8/jZeQCurTJylQA8qFYzHFfhBJkuqyE0FifOsQ0uKWekiZ5g8wtr28cw== + dependencies: + end-of-stream "^1.1.0" + once 
"^1.3.1" + +punycode.js@^2.3.1: + version "2.3.1" + resolved "https://registry.npmmirror.com/punycode.js/-/punycode.js-2.3.1.tgz#6b53e56ad75588234e79f4affa90972c7dd8cdb7" + integrity sha512-uxFIHU0YlHYhDQtV4R9J6a52SLx28BCjT+4ieh7IGbgwVJWO+km431c4yRlREUAsAmt/uMjQUyQHNEPf0M39CA== + +punycode@^2.1.0, punycode@^2.1.1, punycode@^2.3.1: + version "2.3.1" + resolved "https://registry.npmmirror.com/punycode/-/punycode-2.3.1.tgz#027422e2faec0b25e1549c3e1bd8309b9133b6e5" + integrity sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg== + +purgecss@^4.0.3: + version "4.1.3" + resolved "https://registry.npmmirror.com/purgecss/-/purgecss-4.1.3.tgz#683f6a133c8c4de7aa82fe2746d1393b214918f7" + integrity sha512-99cKy4s+VZoXnPxaoM23e5ABcP851nC2y2GROkkjS8eJaJtlciGavd7iYAw2V84WeBqggZ12l8ef44G99HmTaw== + dependencies: + commander "^8.0.0" + glob "^7.1.7" + postcss "^8.3.5" + postcss-selector-parser "^6.0.6" + +qrcode@^1.5.4: + version "1.5.4" + resolved "https://registry.npmmirror.com/qrcode/-/qrcode-1.5.4.tgz#5cb81d86eb57c675febb08cf007fff963405da88" + integrity sha512-1ca71Zgiu6ORjHqFBDpnSMTR2ReToX4l1Au1VFLyVeBTFavzQnv5JxMFr3ukHVKpSrSA2MCk0lNJSykjUfz7Zg== + dependencies: + dijkstrajs "^1.0.1" + pngjs "^5.0.0" + yargs "^15.3.1" + +quansync@^0.2.7, quansync@^0.2.8: + version "0.2.10" + resolved "https://registry.npmmirror.com/quansync/-/quansync-0.2.10.tgz#32053cf166fa36511aae95fc49796116f2dc20e1" + integrity sha512-t41VRkMYbkHyCYmOvx/6URnN80H7k4X0lLdBMGsz+maAwrJQYB1djpV6vHrQIBE0WBSGqhtEHrK9U3DWWH8v7A== + +querystringify@^2.1.1: + version "2.2.0" + resolved "https://registry.npmmirror.com/querystringify/-/querystringify-2.2.0.tgz#3345941b4153cb9d082d8eee4cda2016a9aef7f6" + integrity sha512-FIqgj2EUvTa7R50u0rGsyTftzjYmv/a3hO345bZNrqabNqjtgiDMgmo4mkUjd+nzU5oF3dClKqFIPUKybUyqoQ== + +queue-microtask@^1.2.2: + version "1.2.3" + resolved "https://registry.npmmirror.com/queue-microtask/-/queue-microtask-1.2.3.tgz#4929228bbc724dfac43e0efb058caf7b6cfb6243" + integrity sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A== + +quick-lru@^5.1.1: + version "5.1.1" + resolved "https://registry.npmmirror.com/quick-lru/-/quick-lru-5.1.1.tgz#366493e6b3e42a3a6885e2e99d18f80fb7a8c932" + integrity sha512-WuyALRjWPDGtt/wzJiadO5AXY+8hZ80hVpe6MyivgraREW751X3SbhRvG3eLKOYN+8VEvqLcf3wdnt44Z4S4SA== + +radix-vue@^1.5.3: + version "1.9.17" + resolved "https://registry.npmmirror.com/radix-vue/-/radix-vue-1.9.17.tgz#d6aec1727148e21cfb105c46a4c20bf100c8eee7" + integrity sha512-mVCu7I2vXt1L2IUYHTt0sZMz7s1K2ZtqKeTIxG3yC5mMFfLBG4FtE1FDeRMpDd+Hhg/ybi9+iXmAP1ISREndoQ== + dependencies: + "@floating-ui/dom" "^1.6.7" + "@floating-ui/vue" "^1.1.0" + "@internationalized/date" "^3.5.4" + "@internationalized/number" "^3.5.3" + "@tanstack/vue-virtual" "^3.8.1" + "@vueuse/core" "^10.11.0" + "@vueuse/shared" "^10.11.0" + aria-hidden "^1.2.4" + defu "^6.1.4" + fast-deep-equal "^3.1.3" + nanoid "^5.0.7" + +railroad-diagrams@^1.0.0: + version "1.0.0" + resolved "https://registry.npmmirror.com/railroad-diagrams/-/railroad-diagrams-1.0.0.tgz#eb7e6267548ddedfb899c1b90e57374559cddb7e" + integrity sha512-cz93DjNeLY0idrCNOH6PviZGRN9GJhsdm9hpn1YCS879fj4W+x5IFJhhkRZcwVgMmFF7R82UA/7Oh+R8lLZg6A== + +randexp@0.4.6: + version "0.4.6" + resolved "https://registry.npmmirror.com/randexp/-/randexp-0.4.6.tgz#e986ad5e5e31dae13ddd6f7b3019aa7c87f60ca3" + integrity sha512-80WNmd9DA0tmZrw9qQa62GPPWfuXJknrmVmLcxvq4uZBdYqb1wYoKTmnlGUchvVWe0XiLupYkBoXVOxz3C8DYQ== + dependencies: + discontinuous-range 
"1.0.0" + ret "~0.1.10" + +read-cache@^1.0.0: + version "1.0.0" + resolved "https://registry.npmmirror.com/read-cache/-/read-cache-1.0.0.tgz#e664ef31161166c9751cdbe8dbcf86b5fb58f774" + integrity sha512-Owdv/Ft7IjOgm/i0xvNDZ1LrRANRfew4b2prF3OWMQLxLfu3bS8FVhCsrSCMK4lR56Y9ya+AThoTpDCTxCmpRA== + dependencies: + pify "^2.3.0" + +readable-stream@^3.4.0: + version "3.6.2" + resolved "https://registry.npmmirror.com/readable-stream/-/readable-stream-3.6.2.tgz#56a9b36ea965c00c5a93ef31eb111a0f11056967" + integrity sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA== + dependencies: + inherits "^2.0.3" + string_decoder "^1.1.1" + util-deprecate "^1.0.1" + +readdirp@~3.6.0: + version "3.6.0" + resolved "https://registry.npmmirror.com/readdirp/-/readdirp-3.6.0.tgz#74a370bd857116e245b29cc97340cd431a02a6c7" + integrity sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA== + dependencies: + picomatch "^2.2.1" + +reduce-css-calc@^2.1.8: + version "2.1.8" + resolved "https://registry.npmmirror.com/reduce-css-calc/-/reduce-css-calc-2.1.8.tgz#7ef8761a28d614980dc0c982f772c93f7a99de03" + integrity sha512-8liAVezDmUcH+tdzoEGrhfbGcP7nOV4NkGE3a74+qqvE7nt9i4sKLGBuZNOnpI4WiGksiNPklZxva80061QiPg== + dependencies: + css-unit-converter "^1.1.1" + postcss-value-parser "^3.3.0" + +regenerate-unicode-properties@^10.2.0: + version "10.2.0" + resolved "https://registry.npmmirror.com/regenerate-unicode-properties/-/regenerate-unicode-properties-10.2.0.tgz#626e39df8c372338ea9b8028d1f99dc3fd9c3db0" + integrity sha512-DqHn3DwbmmPVzeKj9woBadqmXxLvQoQIwu7nopMc72ztvxVmVk2SBhSnx67zuye5TP+lJsb/TBQsjLKhnDf3MA== + dependencies: + regenerate "^1.4.2" + +regenerate@^1.4.2: + version "1.4.2" + resolved "https://registry.npmmirror.com/regenerate/-/regenerate-1.4.2.tgz#b9346d8827e8f5a32f7ba29637d398b69014848a" + integrity sha512-zrceR/XhGYU/d/opr2EKO7aRHUeiBI8qjtfHqADTwZd6Szfy16la6kqD0MIUs5z5hx6AaKa+PixpPrR289+I0A== + +regenerator-runtime@^0.13.11: + version "0.13.11" + resolved "https://registry.npmmirror.com/regenerator-runtime/-/regenerator-runtime-0.13.11.tgz#f6dca3e7ceec20590d07ada785636a90cdca17f9" + integrity sha512-kY1AZVr2Ra+t+piVaJ4gxaFaReZVH40AKNo7UCX6W+dEwBo/2oZJzqfuN1qLq1oL45o56cPaTXELwrTh8Fpggg== + +regenerator-runtime@^0.14.0: + version "0.14.1" + resolved "https://registry.npmmirror.com/regenerator-runtime/-/regenerator-runtime-0.14.1.tgz#356ade10263f685dda125100cd862c1db895327f" + integrity sha512-dYnhHh0nJoMfnkZs6GmmhFknAGRrLznOu5nc9ML+EJxGvrx6H7teuevqVqCuPcPK//3eDrrjQhehXVx9cnkGdw== + +regenerator-transform@^0.15.2: + version "0.15.2" + resolved "https://registry.npmmirror.com/regenerator-transform/-/regenerator-transform-0.15.2.tgz#5bbae58b522098ebdf09bca2f83838929001c7a4" + integrity sha512-hfMp2BoF0qOk3uc5V20ALGDS2ddjQaLrdl7xrGXvAIow7qeWRM2VA2HuCHkUKk9slq3VwEwLNK3DFBqDfPGYtg== + dependencies: + "@babel/runtime" "^7.8.4" + +regexpp@^2.0.1: + version "2.0.1" + resolved "https://registry.npmmirror.com/regexpp/-/regexpp-2.0.1.tgz#8d19d31cf632482b589049f8281f93dbcba4d07f" + integrity sha512-lv0M6+TkDVniA3aD1Eg0DVpfU/booSu7Eev3TDO/mZKHBfVjgCGTV4t4buppESEYDtkArYFOxTJWv6S5C+iaNw== + +regexpu-core@^6.2.0: + version "6.2.0" + resolved "https://registry.npmmirror.com/regexpu-core/-/regexpu-core-6.2.0.tgz#0e5190d79e542bf294955dccabae04d3c7d53826" + integrity sha512-H66BPQMrv+V16t8xtmq+UC0CBpiTBA60V8ibS1QVReIp8T1z8hwFxqcGzm9K6lgsN7sB5edVH8a+ze6Fqm4weA== + dependencies: + regenerate "^1.4.2" + regenerate-unicode-properties "^10.2.0" + regjsgen 
"^0.8.0" + regjsparser "^0.12.0" + unicode-match-property-ecmascript "^2.0.0" + unicode-match-property-value-ecmascript "^2.1.0" + +register-service-worker@^1.6.2: + version "1.7.2" + resolved "https://registry.npmmirror.com/register-service-worker/-/register-service-worker-1.7.2.tgz#6516983e1ef790a98c4225af1216bc80941a4bd2" + integrity sha512-CiD3ZSanZqcMPRhtfct5K9f7i3OLCcBBWsJjLh1gW9RO/nS94sVzY59iS+fgYBOBqaBpf4EzfqUF3j9IG+xo8A== + +regjsgen@^0.8.0: + version "0.8.0" + resolved "https://registry.npmmirror.com/regjsgen/-/regjsgen-0.8.0.tgz#df23ff26e0c5b300a6470cad160a9d090c3a37ab" + integrity sha512-RvwtGe3d7LvWiDQXeQw8p5asZUmfU1G/l6WbUXeHta7Y2PEIvBTwH6E2EfmYUK8pxcxEdEmaomqyp0vZZ7C+3Q== + +regjsparser@^0.12.0: + version "0.12.0" + resolved "https://registry.npmmirror.com/regjsparser/-/regjsparser-0.12.0.tgz#0e846df6c6530586429377de56e0475583b088dc" + integrity sha512-cnE+y8bz4NhMjISKbgeVJtqNbtf5QpjZP+Bslo+UqkIt9QPnX9q095eiRRASJG1/tz6dlNr6Z5NsBiWYokp6EQ== + dependencies: + jsesc "~3.0.2" + +require-directory@^2.1.1: + version "2.1.1" + resolved "https://registry.npmmirror.com/require-directory/-/require-directory-2.1.1.tgz#8c64ad5fd30dab1c976e2344ffe7f792a6a6df42" + integrity sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q== + +require-main-filename@^2.0.0: + version "2.0.0" + resolved "https://registry.npmmirror.com/require-main-filename/-/require-main-filename-2.0.0.tgz#d0b329ecc7cc0f61649f62215be69af54aa8989b" + integrity sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg== + +requires-port@^1.0.0: + version "1.0.0" + resolved "https://registry.npmmirror.com/requires-port/-/requires-port-1.0.0.tgz#925d2601d39ac485e091cf0da5c6e694dc3dcaff" + integrity sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ== + +resize-detector@^0.3.0: + version "0.3.0" + resolved "https://registry.npmmirror.com/resize-detector/-/resize-detector-0.3.0.tgz#fe495112e184695500a8f51e0389f15774cb1cfc" + integrity sha512-R/tCuvuOHQ8o2boRP6vgx8hXCCy87H1eY9V5imBYeVNyNVpuL9ciReSccLj2gDcax9+2weXy3bc8Vv+NRXeEvQ== + +resolve-from@^3.0.0: + version "3.0.0" + resolved "https://registry.npmmirror.com/resolve-from/-/resolve-from-3.0.0.tgz#b22c7af7d9d6881bc8b6e653335eebcb0a188748" + integrity sha512-GnlH6vxLymXJNMBo7XP1fJIzBFbdYt49CuTwmB/6N53t+kMPRMFKz783LlQ4tv28XoQfMWinAJX6WCGf2IlaIw== + +resolve-from@^4.0.0: + version "4.0.0" + resolved "https://registry.npmmirror.com/resolve-from/-/resolve-from-4.0.0.tgz#4abcd852ad32dd7baabfe9b40e00a36db5f392e6" + integrity sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g== + +resolve@^1.1.7, resolve@^1.12.0, resolve@^1.14.2, resolve@^1.20.0, resolve@^1.22.0, resolve@^1.22.2, resolve@^1.22.8: + version "1.22.10" + resolved "https://registry.npmmirror.com/resolve/-/resolve-1.22.10.tgz#b663e83ffb09bbf2386944736baae803029b8b39" + integrity sha512-NPRy+/ncIMeDlTAsuqwKIiferiawhefFJtkNSW0qZJEqMEb+qBt/77B/jGeeek+F0uOeN05CDa6HXbbIgtVX4w== + dependencies: + is-core-module "^2.16.0" + path-parse "^1.0.7" + supports-preserve-symlinks-flag "^1.0.0" + +restore-cursor@^2.0.0: + version "2.0.0" + resolved "https://registry.npmmirror.com/restore-cursor/-/restore-cursor-2.0.0.tgz#9f7ee287f82fd326d4fd162923d62129eee0dfaf" + integrity sha512-6IzJLuGi4+R14vwagDHX+JrXmPVtPpn4mffDJ1UdR7/Edm87fl6yi8mMBIVvFtJaNTUvjughmW4hwLhRG7gC1Q== + dependencies: + onetime "^2.0.0" + signal-exit "^3.0.2" + +restore-cursor@^3.1.0: + version "3.1.0" + 
resolved "https://registry.npmmirror.com/restore-cursor/-/restore-cursor-3.1.0.tgz#39f67c54b3a7a58cea5236d95cf0034239631f7e" + integrity sha512-l+sSefzHpj5qimhFSE5a8nufZYAM3sBSVMAPtYkmC+4EH2anSGaEMXSD0izRQbu9nfyQ9y5JrVmp7E8oZrUjvA== + dependencies: + onetime "^5.1.0" + signal-exit "^3.0.2" + +ret@~0.1.10: + version "0.1.15" + resolved "https://registry.npmmirror.com/ret/-/ret-0.1.15.tgz#b8a4825d5bdb1fc3f6f53c2bc33f81388681c7bc" + integrity sha512-TTlYpa+OL+vMMNG24xSlQGEJ3B/RzEfUlLct7b5G/ytav+wPrplCpVMFuwzXbkecJrb6IYo1iFb0S9v37754mg== + +reusify@^1.0.4: + version "1.1.0" + resolved "https://registry.npmmirror.com/reusify/-/reusify-1.1.0.tgz#0fe13b9522e1473f51b558ee796e08f11f9b489f" + integrity sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw== + +rgb-regex@^1.0.1: + version "1.0.1" + resolved "https://registry.npmmirror.com/rgb-regex/-/rgb-regex-1.0.1.tgz#c0e0d6882df0e23be254a475e8edd41915feaeb1" + integrity sha512-gDK5mkALDFER2YLqH6imYvK6g02gpNGM4ILDZ472EwWfXZnC2ZEpoB2ECXTyOVUKuk/bPJZMzwQPBYICzP+D3w== + +rgba-regex@^1.0.0: + version "1.0.0" + resolved "https://registry.npmmirror.com/rgba-regex/-/rgba-regex-1.0.0.tgz#43374e2e2ca0968b0ef1523460b7d730ff22eeb3" + integrity sha512-zgn5OjNQXLUTdq8m17KdaicF6w89TZs8ZU8y0AYENIU6wG8GG6LLm0yLSiPY8DmaYmHdgRW8rnApjoT0fQRfMg== + +rimraf@2.6.3: + version "2.6.3" + resolved "https://registry.npmmirror.com/rimraf/-/rimraf-2.6.3.tgz#b2d104fe0d8fb27cf9e0a1cda8262dd3833c6cab" + integrity sha512-mwqeW5XsA2qAejG46gYdENaxXjx9onRNCfn7L0duuP4hCuTIi/QO7PDK07KJfp1d+izWPrzEJDcSqBa0OZQriA== + dependencies: + glob "^7.1.3" + +rimraf@^3.0.0, rimraf@^3.0.2: + version "3.0.2" + resolved "https://registry.npmmirror.com/rimraf/-/rimraf-3.0.2.tgz#f1a5402ba6220ad52cc1282bac1ae3aa49fd061a" + integrity sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA== + dependencies: + glob "^7.1.3" + +"rollup@>=2.59.0 <2.78.0": + version "2.77.3" + resolved "https://registry.npmmirror.com/rollup/-/rollup-2.77.3.tgz#8f00418d3a2740036e15deb653bed1a90ee0cc12" + integrity sha512-/qxNTG7FbmefJWoeeYJFbHehJ2HNWnjkAFRKzWN/45eNBBF/r8lo992CwcJXEzyVxs5FmfId+vTSTQDb+bxA+g== + optionalDependencies: + fsevents "~2.3.2" + +rollup@^4.2.0: + version "4.39.0" + resolved "https://registry.npmmirror.com/rollup/-/rollup-4.39.0.tgz#9dc1013b70c0e2cb70ef28350142e9b81b3f640c" + integrity sha512-thI8kNc02yNvnmJp8dr3fNWJ9tCONDhp6TV35X6HkKGGs9E6q7YWCHbe5vKiTa7TAiNcFEmXKj3X/pG2b3ci0g== + dependencies: + "@types/estree" "1.0.7" + optionalDependencies: + "@rollup/rollup-android-arm-eabi" "4.39.0" + "@rollup/rollup-android-arm64" "4.39.0" + "@rollup/rollup-darwin-arm64" "4.39.0" + "@rollup/rollup-darwin-x64" "4.39.0" + "@rollup/rollup-freebsd-arm64" "4.39.0" + "@rollup/rollup-freebsd-x64" "4.39.0" + "@rollup/rollup-linux-arm-gnueabihf" "4.39.0" + "@rollup/rollup-linux-arm-musleabihf" "4.39.0" + "@rollup/rollup-linux-arm64-gnu" "4.39.0" + "@rollup/rollup-linux-arm64-musl" "4.39.0" + "@rollup/rollup-linux-loongarch64-gnu" "4.39.0" + "@rollup/rollup-linux-powerpc64le-gnu" "4.39.0" + "@rollup/rollup-linux-riscv64-gnu" "4.39.0" + "@rollup/rollup-linux-riscv64-musl" "4.39.0" + "@rollup/rollup-linux-s390x-gnu" "4.39.0" + "@rollup/rollup-linux-x64-gnu" "4.39.0" + "@rollup/rollup-linux-x64-musl" "4.39.0" + "@rollup/rollup-win32-arm64-msvc" "4.39.0" + "@rollup/rollup-win32-ia32-msvc" "4.39.0" + "@rollup/rollup-win32-x64-msvc" "4.39.0" + fsevents "~2.3.2" + +rope-sequence@^1.3.0: + version "1.3.4" + resolved 
"https://registry.npmmirror.com/rope-sequence/-/rope-sequence-1.3.4.tgz#df85711aaecd32f1e756f76e43a415171235d425" + integrity sha512-UT5EDe2cu2E/6O4igUr5PSFs23nvvukicWHx6GnOPlHAiiYbzNuCRQCuiUdHJQcqKalLKlrYJnjY0ySGsXNQXQ== + +run-async@^2.4.0: + version "2.4.1" + resolved "https://registry.npmmirror.com/run-async/-/run-async-2.4.1.tgz#8440eccf99ea3e70bd409d49aab88e10c189a455" + integrity sha512-tvVnVv01b8c1RrA6Ep7JkStj85Guv/YrMcwqYQnwjsAS2cTmmPGBBjAjpCW7RrSodNSoE2/qg9O4bceNvUuDgQ== + +run-parallel@^1.1.9: + version "1.2.0" + resolved "https://registry.npmmirror.com/run-parallel/-/run-parallel-1.2.0.tgz#66d1368da7bdf921eb9d95bd1a9229e7f21a43ee" + integrity sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA== + dependencies: + queue-microtask "^1.2.2" + +rxjs@^6.3.3, rxjs@^6.6.0: + version "6.6.7" + resolved "https://registry.npmmirror.com/rxjs/-/rxjs-6.6.7.tgz#90ac018acabf491bf65044235d5863c4dab804c9" + integrity sha512-hTdwr+7yYNIT5n4AMYp85KA6yw2Va0FLa3Rguvbpa4W3I5xynaBZo41cM3XM+4Q6fRMj3sBYIR1VAmZMXYJvRQ== + dependencies: + tslib "^1.9.0" + +rxjs@^7.2.0, rxjs@^7.5.5: + version "7.8.2" + resolved "https://registry.npmmirror.com/rxjs/-/rxjs-7.8.2.tgz#955bc473ed8af11a002a2be52071bf475638607b" + integrity sha512-dhKf903U/PQZY6boNNtAGdWbG85WAbjT/1xYoZIC7FAY0yWapOBQVsVrDl58W86//e1VpMNBtRV4MaXfdMySFA== + dependencies: + tslib "^2.1.0" + +safe-buffer@~5.2.0: + version "5.2.1" + resolved "https://registry.npmmirror.com/safe-buffer/-/safe-buffer-5.2.1.tgz#1eaf9fa9bdb1fdd4ec75f58f9cdb4e6b7827eec6" + integrity sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ== + +"safer-buffer@>= 2.1.2 < 3", "safer-buffer@>= 2.1.2 < 3.0.0": + version "2.1.2" + resolved "https://registry.npmmirror.com/safer-buffer/-/safer-buffer-2.1.2.tgz#44fa161b0187b9549dd84bb91802f9bd8385cd6a" + integrity sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg== + +saxes@^5.0.1: + version "5.0.1" + resolved "https://registry.npmmirror.com/saxes/-/saxes-5.0.1.tgz#eebab953fa3b7608dbe94e5dadb15c888fa6696d" + integrity sha512-5LBh1Tls8c9xgGjw3QrMwETmTMVk0oFgvrFSvWx62llR2hcEInrKNZ2GZCCuuy2lvWrdl5jhbpeqc5hRYKFOcw== + dependencies: + xmlchars "^2.2.0" + +semver-compare@^1.0.0: + version "1.0.0" + resolved "https://registry.npmmirror.com/semver-compare/-/semver-compare-1.0.0.tgz#0dee216a1c941ab37e9efb1788f6afc5ff5537fc" + integrity sha512-YM3/ITh2MJ5MtzaM429anh+x2jiLVjqILF4m4oyQB18W7Ggea7BfqdH/wGMK7dDiMghv/6WG7znWMwUDzJiXow== + +semver@^5.5.0, semver@^5.6.0: + version "5.7.2" + resolved "https://registry.npmmirror.com/semver/-/semver-5.7.2.tgz#48d55db737c3287cd4835e17fa13feace1c41ef8" + integrity sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g== + +semver@^6.1.2, semver@^6.3.0, semver@^6.3.1: + version "6.3.1" + resolved "https://registry.npmmirror.com/semver/-/semver-6.3.1.tgz#556d2ef8689146e46dcea4bfdd095f3434dffcb4" + integrity sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA== + +semver@^7.5.3: + version "7.7.1" + resolved "https://registry.npmmirror.com/semver/-/semver-7.7.1.tgz#abd5098d82b18c6c81f6074ff2647fd3e7220c9f" + integrity sha512-hlq8tAfn0m/61p4BVRcPzIGr6LKiMwo4VM6dGi6pt4qcRkmNzTcWq6eCEjEh+qXjkMDvPlOFFSGwQjoEa6gyMA== + +set-blocking@^2.0.0: + version "2.0.0" + resolved "https://registry.npmmirror.com/set-blocking/-/set-blocking-2.0.0.tgz#045f9782d011ae9a6803ddd382b24392b3d890f7" + integrity 
sha512-KiKBS8AnWGEyLzofFfmvKwpdPzqiy16LvQfK3yv/fVH7Bj13/wl3JSR1J+rfgRE9q7xUJK4qvgS8raSOeLUehw== + +set-cookie-parser@^2.4.6: + version "2.7.1" + resolved "https://registry.npmmirror.com/set-cookie-parser/-/set-cookie-parser-2.7.1.tgz#3016f150072202dfbe90fadee053573cc89d2943" + integrity sha512-IOc8uWeOZgnb3ptbCURJWNjWUPcO3ZnTTdzsurqERrP6nPyv+paC55vJM0LpOlT2ne+Ix+9+CRG1MNLlyZ4GjQ== + +shebang-command@^1.2.0: + version "1.2.0" + resolved "https://registry.npmmirror.com/shebang-command/-/shebang-command-1.2.0.tgz#44aac65b695b03398968c39f363fee5deafdf1ea" + integrity sha512-EV3L1+UQWGor21OmnvojK36mhg+TyIKDh3iFBKBohr5xeXIhNBcx8oWdgkTEEQ+BEFFYdLRuqMfd5L84N1V5Vg== + dependencies: + shebang-regex "^1.0.0" + +shebang-command@^2.0.0: + version "2.0.0" + resolved "https://registry.npmmirror.com/shebang-command/-/shebang-command-2.0.0.tgz#ccd0af4f8835fbdc265b82461aaf0c36663f34ea" + integrity sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA== + dependencies: + shebang-regex "^3.0.0" + +shebang-regex@^1.0.0: + version "1.0.0" + resolved "https://registry.npmmirror.com/shebang-regex/-/shebang-regex-1.0.0.tgz#da42f49740c0b42db2ca9728571cb190c98efea3" + integrity sha512-wpoSFAxys6b2a2wHZ1XpDSgD7N9iVjg29Ph9uV/uaP9Ex/KXlkTZTeddxDPSYQpgvzKLGJke2UU0AzoGCjNIvQ== + +shebang-regex@^3.0.0: + version "3.0.0" + resolved "https://registry.npmmirror.com/shebang-regex/-/shebang-regex-3.0.0.tgz#ae16f1644d873ecad843b0307b143362d4c42172" + integrity sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A== + +showdown@^2.1.0: + version "2.1.0" + resolved "https://registry.npmmirror.com/showdown/-/showdown-2.1.0.tgz#1251f5ed8f773f0c0c7bfc8e6fd23581f9e545c5" + integrity sha512-/6NVYu4U819R2pUIk79n67SYgJHWCce0a5xTP979WbNp0FL9MN1I1QK662IDU1b6JzKTvmhgI7T7JYIxBi3kMQ== + dependencies: + commander "^9.0.0" + +signal-exit@^3.0.0, signal-exit@^3.0.2, signal-exit@^3.0.3: + version "3.0.7" + resolved "https://registry.npmmirror.com/signal-exit/-/signal-exit-3.0.7.tgz#a9a1767f8af84155114eaabd73f99273c8f59ad9" + integrity sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ== + +signal-exit@^4.0.1: + version "4.1.0" + resolved "https://registry.npmmirror.com/signal-exit/-/signal-exit-4.1.0.tgz#952188c1cbd546070e2dd20d0f41c0ae0530cb04" + integrity sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw== + +simple-swizzle@^0.2.2: + version "0.2.2" + resolved "https://registry.npmmirror.com/simple-swizzle/-/simple-swizzle-0.2.2.tgz#a4da6b635ffcccca33f70d17cb92592de95e557a" + integrity sha512-JA//kQgZtbuY83m+xT+tXJkmJncGMTFT+C+g2h2R9uxkYIrE2yy9sgmcLhCnw57/WSD+Eh3J97FPEDFnbXnDUg== + dependencies: + is-arrayish "^0.3.1" + +slash@^3.0.0: + version "3.0.0" + resolved "https://registry.npmmirror.com/slash/-/slash-3.0.0.tgz#6539be870c165adbd5240220dbe361f1bc4d4634" + integrity sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q== + +slice-ansi@0.0.4: + version "0.0.4" + resolved "https://registry.npmmirror.com/slice-ansi/-/slice-ansi-0.0.4.tgz#edbf8903f66f7ce2f8eafd6ceed65e264c831b35" + integrity sha512-up04hB2hR92PgjpyU3y/eg91yIBILyjVY26NvvciY3EVVPjybkMszMpXQ9QAkcS3I5rtJBDLoTxxg+qvW8c7rw== + +slice-ansi@^2.1.0: + version "2.1.0" + resolved "https://registry.npmmirror.com/slice-ansi/-/slice-ansi-2.1.0.tgz#cacd7693461a637a5788d92a7dd4fba068e81636" + integrity 
sha512-Qu+VC3EwYLldKa1fCxuuvULvSJOKEgk9pi8dZeCVK7TqBfUNTH4sFkk4joj8afVSfAYgJoSOetjx9QWOJ5mYoQ== + dependencies: + ansi-styles "^3.2.0" + astral-regex "^1.0.0" + is-fullwidth-code-point "^2.0.0" + +socket.io-client@^4.5.1: + version "4.8.1" + resolved "https://registry.npmmirror.com/socket.io-client/-/socket.io-client-4.8.1.tgz#1941eca135a5490b94281d0323fe2a35f6f291cb" + integrity sha512-hJVXfu3E28NmzGk8o1sHhN3om52tRvwYeidbj7xKy2eIIse5IoKX3USlS6Tqt3BHAtflLIkCQBkzVrEEfWUyYQ== + dependencies: + "@socket.io/component-emitter" "~3.1.0" + debug "~4.3.2" + engine.io-client "~6.6.1" + socket.io-parser "~4.2.4" + +socket.io-parser@~4.2.4: + version "4.2.4" + resolved "https://registry.npmmirror.com/socket.io-parser/-/socket.io-parser-4.2.4.tgz#c806966cf7270601e47469ddeec30fbdfda44c83" + integrity sha512-/GbIKmo8ioc+NIWIhwdecY0ge+qVBSMdgxGygevmdHj24bsfgtCmcUUcQ5ZzcylGFHsN3k4HB4Cgkl96KVnuew== + dependencies: + "@socket.io/component-emitter" "~3.1.0" + debug "~4.3.1" + +source-map-js@^1.2.0, source-map-js@^1.2.1: + version "1.2.1" + resolved "https://registry.npmmirror.com/source-map-js/-/source-map-js-1.2.1.tgz#1ce5650fddd87abc099eda37dcff024c2667ae46" + integrity sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA== + +source-map@^0.6.1, source-map@~0.6.1: + version "0.6.1" + resolved "https://registry.npmmirror.com/source-map/-/source-map-0.6.1.tgz#74722af32e9614e9c287a8d0bbde48b5e2f1a263" + integrity sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g== + +sprintf-js@~1.0.2: + version "1.0.3" + resolved "https://registry.npmmirror.com/sprintf-js/-/sprintf-js-1.0.3.tgz#04e6926f662895354f3dd015203633b857297e2c" + integrity sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g== + +sql-formatter@^15.4.10: + version "15.5.2" + resolved "https://registry.npmmirror.com/sql-formatter/-/sql-formatter-15.5.2.tgz#26f219e55c25fc926873d5b675be1766d21fcb06" + integrity sha512-+9xZgiv1DP/c7GxkkBUHRZOf4j35gquVdwEm0rg16qKRYeFkv1+/vEeO13fsUbbz06KUotIyASJ+hyau8LM8Kg== + dependencies: + argparse "^2.0.1" + nearley "^2.20.1" + +statuses@^2.0.0: + version "2.0.1" + resolved "https://registry.npmmirror.com/statuses/-/statuses-2.0.1.tgz#55cb000ccf1d48728bd23c685a063998cf1a1b63" + integrity sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ== + +strict-event-emitter@^0.2.0: + version "0.2.8" + resolved "https://registry.npmmirror.com/strict-event-emitter/-/strict-event-emitter-0.2.8.tgz#b4e768927c67273c14c13d20e19d5e6c934b47ca" + integrity sha512-KDf/ujU8Zud3YaLtMCcTI4xkZlZVIYxTLr+XIULexP+77EEVWixeXroLUXQXiVtH4XH2W7jr/3PT1v3zBuvc3A== + dependencies: + events "^3.3.0" + +string-argv@^0.3.0: + version "0.3.2" + resolved "https://registry.npmmirror.com/string-argv/-/string-argv-0.3.2.tgz#2b6d0ef24b656274d957d54e0a4bbf6153dc02b6" + integrity sha512-aqD2Q0144Z+/RqG52NeHEkZauTAUWJO8c6yTftGJKO3Tja5tUgIfmIl6kExvhtxSDP7fXB6DvzkfMpCd/F3G+Q== + +"string-width-cjs@npm:string-width@^4.2.0": + version "4.2.3" + resolved "https://registry.npmmirror.com/string-width/-/string-width-4.2.3.tgz#269c7117d27b05ad2e536830a8ec895ef9c6d010" + integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g== + dependencies: + emoji-regex "^8.0.0" + is-fullwidth-code-point "^3.0.0" + strip-ansi "^6.0.1" + +string-width@^1.0.1: + version "1.0.2" + resolved 
"https://registry.npmmirror.com/string-width/-/string-width-1.0.2.tgz#118bdf5b8cdc51a2a7e70d211e07e2b0b9b107d3" + integrity sha512-0XsVpQLnVCXHJfyEs8tC0zpTVIr5PKKsQtkT29IwupnPTjtPmQ3xT/4yCREF9hYkV/3M3kzcUTSAZT6a6h81tw== + dependencies: + code-point-at "^1.0.0" + is-fullwidth-code-point "^1.0.0" + strip-ansi "^3.0.0" + +string-width@^2.1.1: + version "2.1.1" + resolved "https://registry.npmmirror.com/string-width/-/string-width-2.1.1.tgz#ab93f27a8dc13d28cac815c462143a6d9012ae9e" + integrity sha512-nOqH59deCq9SRHlxq1Aw85Jnt4w6KvLKqWVik6oA9ZklXLNIOlqg4F2yrT1MVaTjAqvVwdfeZ7w7aCvJD7ugkw== + dependencies: + is-fullwidth-code-point "^2.0.0" + strip-ansi "^4.0.0" + +string-width@^3.0.0: + version "3.1.0" + resolved "https://registry.npmmirror.com/string-width/-/string-width-3.1.0.tgz#22767be21b62af1081574306f69ac51b62203961" + integrity sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w== + dependencies: + emoji-regex "^7.0.1" + is-fullwidth-code-point "^2.0.0" + strip-ansi "^5.1.0" + +string-width@^4.1.0, string-width@^4.2.0, string-width@^4.2.3: + version "4.2.3" + resolved "https://registry.npmmirror.com/string-width/-/string-width-4.2.3.tgz#269c7117d27b05ad2e536830a8ec895ef9c6d010" + integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g== + dependencies: + emoji-regex "^8.0.0" + is-fullwidth-code-point "^3.0.0" + strip-ansi "^6.0.1" + +string-width@^5.0.1, string-width@^5.1.2: + version "5.1.2" + resolved "https://registry.npmmirror.com/string-width/-/string-width-5.1.2.tgz#14f8daec6d81e7221d2a357e668cab73bdbca794" + integrity sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA== + dependencies: + eastasianwidth "^0.2.0" + emoji-regex "^9.2.2" + strip-ansi "^7.0.1" + +string_decoder@^1.1.1: + version "1.3.0" + resolved "https://registry.npmmirror.com/string_decoder/-/string_decoder-1.3.0.tgz#42f114594a46cf1a8e30b0a84f56c78c3edac21e" + integrity sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA== + dependencies: + safe-buffer "~5.2.0" + +stringify-object@^3.3.0: + version "3.3.0" + resolved "https://registry.npmmirror.com/stringify-object/-/stringify-object-3.3.0.tgz#703065aefca19300d3ce88af4f5b3956d7556629" + integrity sha512-rHqiFh1elqCQ9WPLIC8I0Q/g/wj5J1eMkyoiD6eoQApWHP0FtlK7rqnhmabL5VUY9JQCcqwwvlOaSuutekgyrw== + dependencies: + get-own-enumerable-property-symbols "^3.0.0" + is-obj "^1.0.1" + is-regexp "^1.0.0" + +"strip-ansi-cjs@npm:strip-ansi@^6.0.1": + version "6.0.1" + resolved "https://registry.npmmirror.com/strip-ansi/-/strip-ansi-6.0.1.tgz#9e26c63d30f53443e9489495b2105d37b67a85d9" + integrity sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A== + dependencies: + ansi-regex "^5.0.1" + +strip-ansi@^3.0.0, strip-ansi@^3.0.1: + version "3.0.1" + resolved "https://registry.npmmirror.com/strip-ansi/-/strip-ansi-3.0.1.tgz#6a385fb8853d952d5ff05d0e8aaf94278dc63dcf" + integrity sha512-VhumSSbBqDTP8p2ZLKj40UjBCV4+v8bUSEpUb4KjRgWk9pbqGF4REFj6KEagidb2f/M6AzC0EmFyDNGaw9OCzg== + dependencies: + ansi-regex "^2.0.0" + +strip-ansi@^4.0.0: + version "4.0.0" + resolved "https://registry.npmmirror.com/strip-ansi/-/strip-ansi-4.0.0.tgz#a8479022eb1ac368a871389b635262c505ee368f" + integrity sha512-4XaJ2zQdCzROZDivEVIDPkcQn8LMFSa8kj8Gxb/Lnwzv9A8VctNZ+lfivC/sV3ivW8ElJTERXZoPBRrZKkNKow== + dependencies: + ansi-regex "^3.0.0" + +strip-ansi@^5.1.0, strip-ansi@^5.2.0: + version "5.2.0" + resolved 
"https://registry.npmmirror.com/strip-ansi/-/strip-ansi-5.2.0.tgz#8c9a536feb6afc962bdfa5b104a5091c1ad9c0ae" + integrity sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA== + dependencies: + ansi-regex "^4.1.0" + +strip-ansi@^6.0.0, strip-ansi@^6.0.1: + version "6.0.1" + resolved "https://registry.npmmirror.com/strip-ansi/-/strip-ansi-6.0.1.tgz#9e26c63d30f53443e9489495b2105d37b67a85d9" + integrity sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A== + dependencies: + ansi-regex "^5.0.1" + +strip-ansi@^7.0.1: + version "7.1.0" + resolved "https://registry.npmmirror.com/strip-ansi/-/strip-ansi-7.1.0.tgz#d5b6568ca689d8561370b0707685d22434faff45" + integrity sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ== + dependencies: + ansi-regex "^6.0.1" + +strip-eof@^1.0.0: + version "1.0.0" + resolved "https://registry.npmmirror.com/strip-eof/-/strip-eof-1.0.0.tgz#bb43ff5598a6eb05d89b59fcd129c983313606bf" + integrity sha512-7FCwGGmx8mD5xQd3RPUvnSpUXHM3BWuzjtpD4TXsfcZ9EL4azvVVUscFYwD9nx8Kh+uCBC00XBtAykoMHwTh8Q== + +strip-final-newline@^2.0.0: + version "2.0.0" + resolved "https://registry.npmmirror.com/strip-final-newline/-/strip-final-newline-2.0.0.tgz#89b852fb2fcbe936f6f4b3187afb0a12c1ab58ad" + integrity sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA== + +strip-indent@^2.0.0: + version "2.0.0" + resolved "https://registry.npmmirror.com/strip-indent/-/strip-indent-2.0.0.tgz#5ef8db295d01e6ed6cbf7aab96998d7822527b68" + integrity sha512-RsSNPLpq6YUL7QYy44RnPVTn/lcVZtb48Uof3X5JLbF4zD/Gs7ZFDv2HWol+leoQN2mT86LAzSshGfkTlSOpsA== + +strip-json-comments@^3.0.1: + version "3.1.1" + resolved "https://registry.npmmirror.com/strip-json-comments/-/strip-json-comments-3.1.1.tgz#31f1281b3832630434831c310c01cccda8cbe006" + integrity sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig== + +style-mod@^4.0.0, style-mod@^4.1.0: + version "4.1.2" + resolved "https://registry.npmmirror.com/style-mod/-/style-mod-4.1.2.tgz#ca238a1ad4786520f7515a8539d5a63691d7bf67" + integrity sha512-wnD1HyVqpJUI2+eKZ+eo1UwghftP6yuFheBqqe+bWCotBjC2K1YnteJILRMs3SM4V/0dLEW1SC27MWP5y+mwmw== + +sucrase@^3.35.0: + version "3.35.0" + resolved "https://registry.npmmirror.com/sucrase/-/sucrase-3.35.0.tgz#57f17a3d7e19b36d8995f06679d121be914ae263" + integrity sha512-8EbVDiu9iN/nESwxeSxDKe0dunta1GOlHufmSSXxMD2z2/tMZpDMpvXQGsc+ajGo8y2uYUmixaSRUc/QPoQ0GA== + dependencies: + "@jridgewell/gen-mapping" "^0.3.2" + commander "^4.0.0" + glob "^10.3.10" + lines-and-columns "^1.1.6" + mz "^2.7.0" + pirates "^4.0.1" + ts-interface-checker "^0.1.9" + +supports-color@^2.0.0: + version "2.0.0" + resolved "https://registry.npmmirror.com/supports-color/-/supports-color-2.0.0.tgz#535d045ce6b6363fa40117084629995e9df324c7" + integrity sha512-KKNVtd6pCYgPIKU4cp2733HWYCpplQhddZLBUryaAHou723x+FRzQ5Df824Fj+IyyuiQTRoub4SnIFfIcrp70g== + +supports-color@^5.3.0, supports-color@^5.4.0: + version "5.5.0" + resolved "https://registry.npmmirror.com/supports-color/-/supports-color-5.5.0.tgz#e2e69a44ac8772f78a1ec0b35b689df6530efc8f" + integrity sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow== + dependencies: + has-flag "^3.0.0" + +supports-color@^7.1.0: + version "7.2.0" + resolved "https://registry.npmmirror.com/supports-color/-/supports-color-7.2.0.tgz#1b7dcdcb32b8138801b3e478ba6a51caa89648da" + integrity 
sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw== + dependencies: + has-flag "^4.0.0" + +supports-preserve-symlinks-flag@^1.0.0: + version "1.0.0" + resolved "https://registry.npmmirror.com/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz#6eda4bd344a3c94aea376d4cc31bc77311039e09" + integrity sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w== + +symbol-observable@^1.1.0: + version "1.2.0" + resolved "https://registry.npmmirror.com/symbol-observable/-/symbol-observable-1.2.0.tgz#c22688aed4eab3cdc2dfeacbb561660560a00804" + integrity sha512-e900nM8RRtGhlV36KGEU9k65K3mPb1WV70OdjfxlG2EAuM1noi/E/BaW/uMhL7bPEssK8QV57vN3esixjUvcXQ== + +symbol-tree@^3.2.4: + version "3.2.4" + resolved "https://registry.npmmirror.com/symbol-tree/-/symbol-tree-3.2.4.tgz#430637d248ba77e078883951fb9aa0eed7c63fa2" + integrity sha512-9QNk5KwDF+Bvz+PyObkmSYjI5ksVUYtjW7AU22r2NKcfLJcXp96hkDWU3+XndOsUb+AQ9QhfzfCT2O+CNWT5Tw== + +systemjs@^6.14.1: + version "6.15.1" + resolved "https://registry.npmmirror.com/systemjs/-/systemjs-6.15.1.tgz#74175b6810e27a79e1177d21db5f0e3057118cea" + integrity sha512-Nk8c4lXvMB98MtbmjX7JwJRgJOL8fluecYCfCeYBznwmpOs8Bf15hLM6z4z71EDAhQVrQrI+wt1aLWSXZq+hXA== + +table@^5.2.3: + version "5.4.6" + resolved "https://registry.npmmirror.com/table/-/table-5.4.6.tgz#1292d19500ce3f86053b05f0e8e7e4a3bb21079e" + integrity sha512-wmEc8m4fjnob4gt5riFRtTu/6+4rSe12TpAELNSqHMfF3IqnA+CH37USM6/YR3qRZv7e56kAEAtd6nKZaxe0Ug== + dependencies: + ajv "^6.10.2" + lodash "^4.17.14" + slice-ansi "^2.1.0" + string-width "^3.0.0" + +tailwindcss@^3.2: + version "3.4.17" + resolved "https://registry.npmmirror.com/tailwindcss/-/tailwindcss-3.4.17.tgz#ae8406c0f96696a631c790768ff319d46d5e5a63" + integrity sha512-w33E2aCvSDP0tW9RZuNXadXlkHXqFzSkQew/aIa2i/Sj8fThxwovwlXHSPXTbAHwEIhBFXAedUhP2tueAKP8Og== + dependencies: + "@alloc/quick-lru" "^5.2.0" + arg "^5.0.2" + chokidar "^3.6.0" + didyoumean "^1.2.2" + dlv "^1.1.3" + fast-glob "^3.3.2" + glob-parent "^6.0.2" + is-glob "^4.0.3" + jiti "^1.21.6" + lilconfig "^3.1.3" + micromatch "^4.0.8" + normalize-path "^3.0.0" + object-hash "^3.0.0" + picocolors "^1.1.1" + postcss "^8.4.47" + postcss-import "^15.1.0" + postcss-js "^4.0.1" + postcss-load-config "^4.0.2" + postcss-nested "^6.2.0" + postcss-selector-parser "^6.1.2" + resolve "^1.22.8" + sucrase "^3.35.0" + +test-exclude@^6.0.0: + version "6.0.0" + resolved "https://registry.npmmirror.com/test-exclude/-/test-exclude-6.0.0.tgz#04a8698661d805ea6fa293b6cb9e63ac044ef15e" + integrity sha512-cAGWPIyOHU6zlmg88jwm7VRyXnMN7iV68OGAbYDk/Mh/xC/pzVPlQtY6ngoIH/5/tciuhGfvESU8GrHrcxD56w== + dependencies: + "@istanbuljs/schema" "^0.1.2" + glob "^7.1.4" + minimatch "^3.0.4" + +text-table@^0.2.0: + version "0.2.0" + resolved "https://registry.npmmirror.com/text-table/-/text-table-0.2.0.tgz#7f5ee823ae805207c00af2df4a84ec3fcfa570b4" + integrity sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw== + +thenify-all@^1.0.0: + version "1.6.0" + resolved "https://registry.npmmirror.com/thenify-all/-/thenify-all-1.6.0.tgz#1a1918d402d8fc3f98fbf234db0bcc8cc10e9726" + integrity sha512-RNxQH/qI8/t3thXJDwcstUO4zeqo64+Uy/+sNVRBx4Xn2OX+OZ9oP+iJnNFqplFra2ZUVeKCSa2oVWi3T4uVmA== + dependencies: + thenify ">= 3.1.0 < 4" + +"thenify@>= 3.1.0 < 4": + version "3.3.1" + resolved "https://registry.npmmirror.com/thenify/-/thenify-3.3.1.tgz#8932e686a4066038a016dd9e2ca46add9838a95f" + integrity 
sha512-RVZSIV5IG10Hk3enotrhvz0T9em6cyHBLkH/YAZuKqd8hRkKhSfCGIcP2KUY0EPxndzANBmNllzWPwak+bheSw== + dependencies: + any-promise "^1.0.0" + +through@^2.3.6: + version "2.3.8" + resolved "https://registry.npmmirror.com/through/-/through-2.3.8.tgz#0dd4c9ffaabc357960b1b724115d7e0e86a2e1f5" + integrity sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg== + +tinyexec@^0.3.2: + version "0.3.2" + resolved "https://registry.npmmirror.com/tinyexec/-/tinyexec-0.3.2.tgz#941794e657a85e496577995c6eef66f53f42b3d2" + integrity sha512-KQQR9yN7R5+OSwaK0XQoj22pwHoTlgYqmUscPYoknOoWCWfj/5/ABTMRi69FrKU5ffPVh5QcFikpWJI/P1ocHA== + +tinypool@^0.1.2: + version "0.1.3" + resolved "https://registry.npmmirror.com/tinypool/-/tinypool-0.1.3.tgz#b5570b364a1775fd403de5e7660b325308fee26b" + integrity sha512-2IfcQh7CP46XGWGGbdyO4pjcKqsmVqFAPcXfPxcPXmOWt9cYkTP9HcDmGgsfijYoAEc4z9qcpM/BaBz46Y9/CQ== + +tinyspy@^0.3.2: + version "0.3.3" + resolved "https://registry.npmmirror.com/tinyspy/-/tinyspy-0.3.3.tgz#8b57f8aec7fe1bf583a3a49cb9ab30c742f69237" + integrity sha512-gRiUR8fuhUf0W9lzojPf1N1euJYA30ISebSfgca8z76FOvXtVXqd5ojEIaKLWbDQhAaC3ibxZIjqbyi4ybjcTw== + +tippy.js@^6.3.7: + version "6.3.7" + resolved "https://registry.npmmirror.com/tippy.js/-/tippy.js-6.3.7.tgz#8ccfb651d642010ed9a32ff29b0e9e19c5b8c61c" + integrity sha512-E1d3oP2emgJ9dRQZdf3Kkn0qJgI6ZLpyS5z6ZkY1DF3kaQaBsGZsndEpHwx+eC+tYM41HaSNvNtLx8tU57FzTQ== + dependencies: + "@popperjs/core" "^2.9.0" + +tmp@^0.0.33: + version "0.0.33" + resolved "https://registry.npmmirror.com/tmp/-/tmp-0.0.33.tgz#6d34335889768d21b2bcda0aa277ced3b1bfadf9" + integrity sha512-jRCJlojKnZ3addtTOjdIqoRuPEKBvNXcGYqzO6zWZX8KfKEpnGY5jfggJQ3EjKuu8D4bJRr0y+cYJFmYbImXGw== + dependencies: + os-tmpdir "~1.0.2" + +tmp@^0.2.1: + version "0.2.3" + resolved "https://registry.npmmirror.com/tmp/-/tmp-0.2.3.tgz#eb783cc22bc1e8bebd0671476d46ea4eb32a79ae" + integrity sha512-nZD7m9iCPC5g0pYmcaxogYKggSfLsdxl8of3Q/oIbqCqLLIO9IAF0GWjX1z9NZRHPiXv8Wex4yDCaZsgEw0Y8w== + +to-regex-range@^5.0.1: + version "5.0.1" + resolved "https://registry.npmmirror.com/to-regex-range/-/to-regex-range-5.0.1.tgz#1648c44aae7c8d988a326018ed72f5b4dd0392e4" + integrity sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ== + dependencies: + is-number "^7.0.0" + +tough-cookie@^4.0.0: + version "4.1.4" + resolved "https://registry.npmmirror.com/tough-cookie/-/tough-cookie-4.1.4.tgz#945f1461b45b5a8c76821c33ea49c3ac192c1b36" + integrity sha512-Loo5UUvLD9ScZ6jh8beX1T6sO1w2/MpCRpEP7V280GKMVUQ0Jzar2U3UJPsrdbziLEMMhu3Ujnq//rhiFuIeag== + dependencies: + psl "^1.1.33" + punycode "^2.1.1" + universalify "^0.2.0" + url-parse "^1.5.3" + +tr46@^3.0.0: + version "3.0.0" + resolved "https://registry.npmmirror.com/tr46/-/tr46-3.0.0.tgz#555c4e297a950617e8eeddef633c87d4d9d6cbf9" + integrity sha512-l7FvfAHlcmulp8kr+flpQZmVwtu7nfRV7NZujtN0OqES8EL4O4e0qqzL0DC5gAvx/ZC/9lk6rhcUwYvkBnBnYA== + dependencies: + punycode "^2.1.1" + +tr46@~0.0.3: + version "0.0.3" + resolved "https://registry.npmmirror.com/tr46/-/tr46-0.0.3.tgz#8184fd347dac9cdc185992f3a6622e14b9d9ab6a" + integrity sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw== + +ts-interface-checker@^0.1.9: + version "0.1.13" + resolved "https://registry.npmmirror.com/ts-interface-checker/-/ts-interface-checker-0.1.13.tgz#784fd3d679722bc103b1b4b8030bcddb5db2a699" + integrity sha512-Y/arvbn+rrz3JCKl9C4kVNfTfSm2/mEp5FSz5EsZSANGPSlQrpRI5M4PKF+mJnE52jOO90PnPSc3Ur3bTQw0gA== + +tslib@2.3.0: + version 
"2.3.0" + resolved "https://registry.npmmirror.com/tslib/-/tslib-2.3.0.tgz#803b8cdab3e12ba581a4ca41c8839bbb0dacb09e" + integrity sha512-N82ooyxVNm6h1riLCoyS9e3fuJ3AMG2zIZs2Gd1ATcSFjSA23Q0fzjjZeh0jbJvWVDZ0cJT8yaNNaaXHzueNjg== + +tslib@^1.9.0: + version "1.14.1" + resolved "https://registry.npmmirror.com/tslib/-/tslib-1.14.1.tgz#cf2d38bdc34a134bcaf1091c41f6619e2f672d00" + integrity sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg== + +tslib@^2.0.0, tslib@^2.1.0, tslib@^2.6.2, tslib@^2.8.0: + version "2.8.1" + resolved "https://registry.npmmirror.com/tslib/-/tslib-2.8.1.tgz#612efe4ed235d567e8aba5f2a5fab70280ade83f" + integrity sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w== + +type-check@~0.3.2: + version "0.3.2" + resolved "https://registry.npmmirror.com/type-check/-/type-check-0.3.2.tgz#5884cab512cf1d355e3fb784f30804b2b520db72" + integrity sha512-ZCmOJdvOWDBYJlzAoFkC+Q0+bUyEOS1ltgp1MGU03fqHG+dbi9tBFU2Rd9QKiDZFAYrhPh2JUf7rZRIuHRKtOg== + dependencies: + prelude-ls "~1.1.2" + +type-detect@^4.0.0, type-detect@^4.1.0: + version "4.1.0" + resolved "https://registry.npmmirror.com/type-detect/-/type-detect-4.1.0.tgz#deb2453e8f08dcae7ae98c626b13dddb0155906c" + integrity sha512-Acylog8/luQ8L7il+geoSxhEkazvkslg7PSNKOX59mbB9cOveP5aq9h74Y7YU8yDpJwetzQQrfIwtf4Wp4LKcw== + +type-fest@^0.21.3: + version "0.21.3" + resolved "https://registry.npmmirror.com/type-fest/-/type-fest-0.21.3.tgz#d260a24b0198436e133fa26a524a6d65fa3b2e37" + integrity sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w== + +type-fest@^0.8.1: + version "0.8.1" + resolved "https://registry.npmmirror.com/type-fest/-/type-fest-0.8.1.tgz#09e249ebde851d3b1e48d27c105444667f17b83d" + integrity sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA== + +type-fest@^1.2.2: + version "1.4.0" + resolved "https://registry.npmmirror.com/type-fest/-/type-fest-1.4.0.tgz#e9fb813fe3bf1744ec359d55d1affefa76f14be1" + integrity sha512-yGSza74xk0UG8k+pLh5oeoYirvIiWo5t0/o3zHHAO2tRDiZcxWP7fywNlXhqb6/r6sWvwi+RsyQMWhVLe4BVuA== + +typescript@^5.0.2, typescript@^5.4.3: + version "5.8.3" + resolved "https://registry.npmmirror.com/typescript/-/typescript-5.8.3.tgz#92f8a3e5e3cf497356f4178c34cd65a7f5e8440e" + integrity sha512-p1diW6TqL9L07nNxvRMM7hMMw4c5XOo/1ibL4aAIGmSAt9slTE1Xgw5KWuof2uTOvCg9BY7ZRi+GaF+7sfgPeQ== + +uc.micro@^1.0.1, uc.micro@^1.0.5: + version "1.0.6" + resolved "https://registry.npmmirror.com/uc.micro/-/uc.micro-1.0.6.tgz#9c411a802a409a91fc6cf74081baba34b24499ac" + integrity sha512-8Y75pvTYkLJW2hWQHXxoqRgV7qb9B+9vFEtidML+7koHUFapnVJAZ6cKs+Qjz5Aw3aZWHMC6u0wJE3At+nSGwA== + +uc.micro@^2.0.0, uc.micro@^2.1.0: + version "2.1.0" + resolved "https://registry.npmmirror.com/uc.micro/-/uc.micro-2.1.0.tgz#f8d3f7d0ec4c3dea35a7e3c8efa4cb8b45c9e7ee" + integrity sha512-ARDJmphmdvUk6Glw7y9DQ2bFkKBHwQHLi2lsaH6PPmz/Ka9sFOBsBluozhDltWmnv9u/cF6Rt87znRTPV+yp/A== + +ufo@^1.5.4: + version "1.6.1" + resolved "https://registry.npmmirror.com/ufo/-/ufo-1.6.1.tgz#ac2db1d54614d1b22c1d603e3aef44a85d8f146b" + integrity sha512-9a4/uxlTWJ4+a5i0ooc1rU7C7YOw3wT+UGqdeNNHWnOF9qcMBgLRS+4IYUqbczewFx4mLEig6gawh7X6mFlEkA== + +undici-types@~6.21.0: + version "6.21.0" + resolved "https://registry.npmmirror.com/undici-types/-/undici-types-6.21.0.tgz#691d00af3909be93a7faa13be61b3a5b50ef12cb" + integrity sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ== + 
+unicode-canonical-property-names-ecmascript@^2.0.0: + version "2.0.1" + resolved "https://registry.npmmirror.com/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-2.0.1.tgz#cb3173fe47ca743e228216e4a3ddc4c84d628cc2" + integrity sha512-dA8WbNeb2a6oQzAQ55YlT5vQAWGV9WXOsi3SskE3bcCdM0P4SDd+24zS/OCacdRq5BkdsRj9q3Pg6YyQoxIGqg== + +unicode-match-property-ecmascript@^2.0.0: + version "2.0.0" + resolved "https://registry.npmmirror.com/unicode-match-property-ecmascript/-/unicode-match-property-ecmascript-2.0.0.tgz#54fd16e0ecb167cf04cf1f756bdcc92eba7976c3" + integrity sha512-5kaZCrbp5mmbz5ulBkDkbY0SsPOjKqVS35VpL9ulMPfSl0J0Xsm+9Evphv9CoIZFwre7aJoa94AY6seMKGVN5Q== + dependencies: + unicode-canonical-property-names-ecmascript "^2.0.0" + unicode-property-aliases-ecmascript "^2.0.0" + +unicode-match-property-value-ecmascript@^2.1.0: + version "2.2.0" + resolved "https://registry.npmmirror.com/unicode-match-property-value-ecmascript/-/unicode-match-property-value-ecmascript-2.2.0.tgz#a0401aee72714598f739b68b104e4fe3a0cb3c71" + integrity sha512-4IehN3V/+kkr5YeSSDDQG8QLqO26XpL2XP3GQtqwlT/QYSECAwFztxVHjlbh0+gjJ3XmNLS0zDsbgs9jWKExLg== + +unicode-property-aliases-ecmascript@^2.0.0: + version "2.1.0" + resolved "https://registry.npmmirror.com/unicode-property-aliases-ecmascript/-/unicode-property-aliases-ecmascript-2.1.0.tgz#43d41e3be698bd493ef911077c9b131f827e8ccd" + integrity sha512-6t3foTQI9qne+OZoVQB/8x8rk2k1eVy1gRXhV3oFQ5T6R1dqQ1xtin3XqSlx3+ATBkliTaR/hHyJBm+LVPNM8w== + +universalify@^0.2.0: + version "0.2.0" + resolved "https://registry.npmmirror.com/universalify/-/universalify-0.2.0.tgz#6451760566fa857534745ab1dde952d1b1761be0" + integrity sha512-CJ1QgKmNg3CwvAv/kOFmtnEN05f0D/cn9QntgNOQlQF9dgvVTHj3t+8JPdjqawCHk7V/KA+fbUqzZ9XWhcqPUg== + +universalify@^2.0.0: + version "2.0.1" + resolved "https://registry.npmmirror.com/universalify/-/universalify-2.0.1.tgz#168efc2180964e6386d061e094df61afe239b18d" + integrity sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw== + +unplugin-icons@^0.17.0: + version "0.17.4" + resolved "https://registry.npmmirror.com/unplugin-icons/-/unplugin-icons-0.17.4.tgz#a05268d44a22703876f953c26107af3cfa04a936" + integrity sha512-PHLxjBx3ZV8RUBvfMafFl8uWH88jHeZgOijcRpkwgne7y2Ovx7WI0Ltzzw3fjZQ7dGaDhB8udyKVdm9N2S6BIw== + dependencies: + "@antfu/install-pkg" "^0.1.1" + "@antfu/utils" "^0.7.6" + "@iconify/utils" "^2.1.11" + debug "^4.3.4" + kolorist "^1.8.0" + local-pkg "^0.5.0" + unplugin "^1.5.0" + +unplugin-vue-components@^0.25.2: + version "0.25.2" + resolved "https://registry.npmmirror.com/unplugin-vue-components/-/unplugin-vue-components-0.25.2.tgz#99d9d02a4066a24e720edbe74a82a4ee6ff86153" + integrity sha512-OVmLFqILH6w+eM8fyt/d/eoJT9A6WO51NZLf1vC5c1FZ4rmq2bbGxTy8WP2Jm7xwFdukaIdv819+UI7RClPyCA== + dependencies: + "@antfu/utils" "^0.7.5" + "@rollup/pluginutils" "^5.0.2" + chokidar "^3.5.3" + debug "^4.3.4" + fast-glob "^3.3.0" + local-pkg "^0.4.3" + magic-string "^0.30.1" + minimatch "^9.0.3" + resolve "^1.22.2" + unplugin "^1.4.0" + +unplugin@1.0.1: + version "1.0.1" + resolved "https://registry.npmmirror.com/unplugin/-/unplugin-1.0.1.tgz#83b528b981cdcea1cad422a12cd02e695195ef3f" + integrity sha512-aqrHaVBWW1JVKBHmGo33T5TxeL0qWzfvjWokObHA9bYmN7eNDkwOxmLjhioHl9878qDFMAaT51XNroRyuz7WxA== + dependencies: + acorn "^8.8.1" + chokidar "^3.5.3" + webpack-sources "^3.2.3" + webpack-virtual-modules "^0.5.0" + +unplugin@^1.4.0, unplugin@^1.5.0: + version "1.16.1" + resolved 
"https://registry.npmmirror.com/unplugin/-/unplugin-1.16.1.tgz#a844d2e3c3b14a4ac2945c42be80409321b61199" + integrity sha512-4/u/j4FrCKdi17jaxuJA0jClGxB1AvU2hw/IuayPc4ay1XGaJs/rbb4v5WKwAjNifjmXK9PIFyuPiaK8azyR9w== + dependencies: + acorn "^8.14.0" + webpack-virtual-modules "^0.6.2" + +update-browserslist-db@^1.1.1: + version "1.1.3" + resolved "https://registry.npmmirror.com/update-browserslist-db/-/update-browserslist-db-1.1.3.tgz#348377dd245216f9e7060ff50b15a1b740b75420" + integrity sha512-UxhIZQ+QInVdunkDAaiazvvT/+fXL5Osr0JZlJulepYu6Jd7qJtDZjlur0emRlT71EN3ScPoE7gvsuIKKNavKw== + dependencies: + escalade "^3.2.0" + picocolors "^1.1.1" + +uri-js@^4.2.2: + version "4.4.1" + resolved "https://registry.npmmirror.com/uri-js/-/uri-js-4.4.1.tgz#9b1a52595225859e55f669d928f88c6c57f2a77e" + integrity sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg== + dependencies: + punycode "^2.1.0" + +url-parse@^1.5.3: + version "1.5.10" + resolved "https://registry.npmmirror.com/url-parse/-/url-parse-1.5.10.tgz#9d3c2f736c1d75dd3bd2be507dcc111f1e2ea9c1" + integrity sha512-WypcfiRhfeUP9vvF0j6rw0J3hrWrw6iZv3+22h6iRMJ/8z1Tj6XfLP4DsUix5MhMPnXpiHDoKyoZ/bdCkwBCiQ== + dependencies: + querystringify "^2.1.1" + requires-port "^1.0.0" + +util-deprecate@^1.0.1, util-deprecate@^1.0.2: + version "1.0.2" + resolved "https://registry.npmmirror.com/util-deprecate/-/util-deprecate-1.0.2.tgz#450d4dc9fa70de732762fbd2d4a28981419a0ccf" + integrity sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw== + +v8-compile-cache@^2.0.3: + version "2.4.0" + resolved "https://registry.npmmirror.com/v8-compile-cache/-/v8-compile-cache-2.4.0.tgz#cdada8bec61e15865f05d097c5f4fd30e94dc128" + integrity sha512-ocyWc3bAHBB/guyqJQVI5o4BZkPhznPYUG2ea80Gond/BgNWpap8TOmLSeeQG7bnh2KMISxskdADG59j7zruhw== + +v8-to-istanbul@^9.0.0: + version "9.3.0" + resolved "https://registry.npmmirror.com/v8-to-istanbul/-/v8-to-istanbul-9.3.0.tgz#b9572abfa62bd556c16d75fdebc1a411d5ff3175" + integrity sha512-kiGUalWN+rgBJ/1OHZsBtU4rXZOfj/7rKQxULKlIzwzQSvMJUUNgPwJEEh7gU6xEVxC0ahoOBvN2YI8GH6FNgA== + dependencies: + "@jridgewell/trace-mapping" "^0.3.12" + "@types/istanbul-lib-coverage" "^2.0.1" + convert-source-map "^2.0.0" + +vite-plugin-rewrite-all@^1.0.1: + version "1.0.2" + resolved "https://registry.npmmirror.com/vite-plugin-rewrite-all/-/vite-plugin-rewrite-all-1.0.2.tgz#3945ac5ef1edfb051425e12ecdb92f69e3d92ec1" + integrity sha512-NpiFyHi9w8iHm3kZ28ma/IU16LFCkNJNqTvGy6cjoit2EMBi7dgFWFZFYcwZjUrc+pOMup//rsQTRVILvF2efQ== + dependencies: + connect-history-api-fallback "^1.6.0" + +vite@5.0.13: + version "5.0.13" + resolved "https://registry.npmmirror.com/vite/-/vite-5.0.13.tgz#605865b0e482506163e3f04f91665238f3be8cf1" + integrity sha512-/9ovhv2M2dGTuA+dY93B9trfyWMDRQw2jdVBhHNP6wr0oF34wG2i/N55801iZIpgUpnHDm4F/FabGQLyc+eOgg== + dependencies: + esbuild "^0.19.3" + postcss "^8.4.32" + rollup "^4.2.0" + optionalDependencies: + fsevents "~2.3.3" + +vite@^2.9.5: + version "2.9.18" + resolved "https://registry.npmmirror.com/vite/-/vite-2.9.18.tgz#74e2a83b29da81e602dac4c293312cc575f091c7" + integrity sha512-sAOqI5wNM9QvSEE70W3UGMdT8cyEn0+PmJMTFvTB8wB0YbYUWw3gUbY62AOyrXosGieF2htmeLATvNxpv/zNyQ== + dependencies: + esbuild "^0.14.27" + postcss "^8.4.13" + resolve "^1.22.0" + rollup ">=2.59.0 <2.78.0" + optionalDependencies: + fsevents "~2.3.2" + +vitest@^0.9.3: + version "0.9.4" + resolved "https://registry.npmmirror.com/vitest/-/vitest-0.9.4.tgz#220fb09a5b0861bbf6842681a976ff596d9be693" + integrity 
sha512-Em+EJb3keCr3GjyqnkxHuY7zMerEgLsN+m2nqsUcCzO7C4+Y0E7O7LXSNaODh3Gc/An3dqnoaAe/uLBrAJXUdQ== + dependencies: + "@types/chai" "^4.3.1" + "@types/chai-subset" "^1.3.3" + chai "^4.3.6" + local-pkg "^0.4.1" + tinypool "^0.1.2" + tinyspy "^0.3.2" + vite "^2.9.5" + +vscode-uri@^3.0.8: + version "3.1.0" + resolved "https://registry.npmmirror.com/vscode-uri/-/vscode-uri-3.1.0.tgz#dd09ec5a66a38b5c3fffc774015713496d14e09c" + integrity sha512-/BpdSx+yCQGnCvecbyXdxHDkuk55/G3xwnC0GqY4gmQ3j+A+g8kzzgB4Nk/SINjqn6+waqw3EgbVF2QKExkRxQ== + +vue-codemirror@^6.1.1: + version "6.1.1" + resolved "https://registry.npmmirror.com/vue-codemirror/-/vue-codemirror-6.1.1.tgz#246697ef4cfa6b2448dd592ade214bb7ff86611f" + integrity sha512-rTAYo44owd282yVxKtJtnOi7ERAcXTeviwoPXjIc6K/IQYUsoDkzPvw/JDFtSP6T7Cz/2g3EHaEyeyaQCKoDMg== + dependencies: + "@codemirror/commands" "6.x" + "@codemirror/language" "6.x" + "@codemirror/state" "6.x" + "@codemirror/view" "6.x" + +vue-component-type-helpers@^2.0.0: + version "2.2.8" + resolved "https://registry.npmmirror.com/vue-component-type-helpers/-/vue-component-type-helpers-2.2.8.tgz#66d25e4405a4bcc06a22aa4c01e35141343da580" + integrity sha512-4bjIsC284coDO9om4HPA62M7wfsTvcmZyzdfR0aUlFXqq4tXxM1APyXpNVxPC8QazKw9OhmZNHBVDA6ODaZsrA== + +vue-demi@>=0.13.0, vue-demi@>=0.14.8: + version "0.14.10" + resolved "https://registry.npmmirror.com/vue-demi/-/vue-demi-0.14.10.tgz#afc78de3d6f9e11bf78c55e8510ee12814522f04" + integrity sha512-nMZBOwuzabUO0nLgIcc6rycZEebF6eeUfaiQx9+WSk8e29IbLvPU9feI6tqW4kTo3hvoYAJkMh8n8D0fuISphg== + +vue-demi@^0.13.11: + version "0.13.11" + resolved "https://registry.npmmirror.com/vue-demi/-/vue-demi-0.13.11.tgz#7d90369bdae8974d87b1973564ad390182410d99" + integrity sha512-IR8HoEEGM65YY3ZJYAjMlKygDQn25D5ajNFNoKh9RSDMQtlzCxtfQjdQgv9jjK+m3377SsJXY8ysq8kLCZL25A== + +vue-echarts@^6.6.1: + version "6.7.3" + resolved "https://registry.npmmirror.com/vue-echarts/-/vue-echarts-6.7.3.tgz#30efafc51a4a9de1b8117d3b63e74b0c761ff3ba" + integrity sha512-vXLKpALFjbPphW9IfQPOVfb1KjGZ/f8qa/FZHi9lZIWzAnQC1DgnmEK3pJgEkyo6EP7UnX6Bv/V3Ke7p+qCNXA== + dependencies: + resize-detector "^0.3.0" + vue-demi "^0.13.11" + +vue-eslint-parser@^7.0.0: + version "7.11.0" + resolved "https://registry.npmmirror.com/vue-eslint-parser/-/vue-eslint-parser-7.11.0.tgz#214b5dea961007fcffb2ee65b8912307628d0daf" + integrity sha512-qh3VhDLeh773wjgNTl7ss0VejY9bMMa0GoDG2fQVyDzRFdiU3L7fw74tWZDHNQXdZqxO3EveQroa9ct39D2nqg== + dependencies: + debug "^4.1.1" + eslint-scope "^5.1.1" + eslint-visitor-keys "^1.1.0" + espree "^6.2.1" + esquery "^1.4.0" + lodash "^4.17.21" + semver "^6.3.0" + +vue-qrcode@^2.2.2: + version "2.2.2" + resolved "https://registry.npmmirror.com/vue-qrcode/-/vue-qrcode-2.2.2.tgz#c0b0ec16bd245100cdc87559349550fbf8463a9b" + integrity sha512-SbrXq/mSb1g2tbDyXPe9gy9KiMYsvxWKRErlpij1BqiFoHwQckheZV63CTw6yRLLUVG2RXAVlX+APkpdCK7SQQ== + dependencies: + tslib "^2.6.2" + +vue-router@^4.0.5: + version "4.5.0" + resolved "https://registry.npmmirror.com/vue-router/-/vue-router-4.5.0.tgz#58fc5fe374e10b6018f910328f756c3dae081f14" + integrity sha512-HDuk+PuH5monfNuY+ct49mNmkCRK4xJAV9Ts4z9UFc4rzdDnxQLyCMGGc8pKhZhHTVzfanpNwB/lwqevcBwI4w== + dependencies: + "@vue/devtools-api" "^6.6.4" + +vue-sonner@^1.2.5: + version "1.3.0" + resolved "https://registry.npmmirror.com/vue-sonner/-/vue-sonner-1.3.0.tgz#da8ab9be995dfea781d57a6ac52d170d02473d86" + integrity sha512-jAodBy4Mri8rQjVZGQAPs4ZYymc1ywPiwfa81qU0fFl+Suk7U8NaOxIDdI1oBGLeQJqRZi/oxNIuhCLqsBmOwg== + +vue-tsc@^2.0.7: + version "2.2.8" + resolved 
"https://registry.npmmirror.com/vue-tsc/-/vue-tsc-2.2.8.tgz#7c8e1bd9333d25241a7f9988eedf08c65483158c" + integrity sha512-jBYKBNFADTN+L+MdesNX/TB3XuDSyaWynKMDgR+yCSln0GQ9Tfb7JS2lr46s2LiFUT1WsmfWsSvIElyxzOPqcQ== + dependencies: + "@volar/typescript" "~2.4.11" + "@vue/language-core" "2.2.8" + +vue@^3.4.12: + version "3.5.13" + resolved "https://registry.npmmirror.com/vue/-/vue-3.5.13.tgz#9f760a1a982b09c0c04a867903fc339c9f29ec0a" + integrity sha512-wmeiSMxkZCSc+PM2w2VRsOYAZC8GdipNFRTsLSfodVqI9mbejKeXEGr8SckuLnrQPGe3oJN5c3K0vpoU9q/wCQ== + dependencies: + "@vue/compiler-dom" "3.5.13" + "@vue/compiler-sfc" "3.5.13" + "@vue/runtime-dom" "3.5.13" + "@vue/server-renderer" "3.5.13" + "@vue/shared" "3.5.13" + +w3c-hr-time@^1.0.2: + version "1.0.2" + resolved "https://registry.npmmirror.com/w3c-hr-time/-/w3c-hr-time-1.0.2.tgz#0a89cdf5cc15822df9c360543676963e0cc308cd" + integrity sha512-z8P5DvDNjKDoFIHK7q8r8lackT6l+jo/Ye3HOle7l9nICP9lf1Ci25fy9vHd0JOWewkIFzXIEig3TdKT7JQ5fQ== + dependencies: + browser-process-hrtime "^1.0.0" + +w3c-keyname@^2.2.0, w3c-keyname@^2.2.4: + version "2.2.8" + resolved "https://registry.npmmirror.com/w3c-keyname/-/w3c-keyname-2.2.8.tgz#7b17c8c6883d4e8b86ac8aba79d39e880f8869c5" + integrity sha512-dpojBhNsCNN7T82Tm7k26A6G9ML3NkhDsnw9n/eoxSRlVBB4CEtIQ/KTCLI2Fwf3ataSXRhYFkQi3SlnFwPvPQ== + +w3c-xmlserializer@^3.0.0: + version "3.0.0" + resolved "https://registry.npmmirror.com/w3c-xmlserializer/-/w3c-xmlserializer-3.0.0.tgz#06cdc3eefb7e4d0b20a560a5a3aeb0d2d9a65923" + integrity sha512-3WFqGEgSXIyGhOmAFtlicJNMjEps8b1MG31NCA0/vOF9+nKMUW1ckhi9cnNHmf88Rzw5V+dwIwsm2C7X8k9aQg== + dependencies: + xml-name-validator "^4.0.0" + +wcwidth@^1.0.1: + version "1.0.1" + resolved "https://registry.npmmirror.com/wcwidth/-/wcwidth-1.0.1.tgz#f0b0dcf915bc5ff1528afadb2c0e17b532da2fe8" + integrity sha512-XHPEwS0q6TaxcvG85+8EYkbiCux2XtWG2mkc47Ng2A77BQu9+DqIOJldST4HgPkuea7dvKSj5VgX3P1d4rW8Tg== + dependencies: + defaults "^1.0.3" + +web-streams-polyfill@^3.0.3: + version "3.3.3" + resolved "https://registry.npmmirror.com/web-streams-polyfill/-/web-streams-polyfill-3.3.3.tgz#2073b91a2fdb1fbfbd401e7de0ac9f8214cecb4b" + integrity sha512-d2JWLCivmZYTSIoge9MsgFCZrt571BikcWGYkjC1khllbTeDlGqZ2D8vD8E/lJa8WGWbb7Plm8/XJYV7IJHZZw== + +webidl-conversions@^3.0.0: + version "3.0.1" + resolved "https://registry.npmmirror.com/webidl-conversions/-/webidl-conversions-3.0.1.tgz#24534275e2a7bc6be7bc86611cc16ae0a5654871" + integrity sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ== + +webidl-conversions@^7.0.0: + version "7.0.0" + resolved "https://registry.npmmirror.com/webidl-conversions/-/webidl-conversions-7.0.0.tgz#256b4e1882be7debbf01d05f0aa2039778ea080a" + integrity sha512-VwddBukDzu71offAQR975unBIGqfKZpM+8ZX6ySk8nYhVoo5CYaZyzt3YBvYtRtO+aoGlqxPg/B87NGVZ/fu6g== + +webpack-sources@^3.2.3: + version "3.2.3" + resolved "https://registry.npmmirror.com/webpack-sources/-/webpack-sources-3.2.3.tgz#2d4daab8451fd4b240cc27055ff6a0c2ccea0cde" + integrity sha512-/DyMEOrDgLKKIG0fmvtz+4dUX/3Ghozwgm6iPp8KRhvn+eQf9+Q7GWxVNMk3+uCPWfdXYC4ExGBckIXdFEfH1w== + +webpack-virtual-modules@^0.5.0: + version "0.5.0" + resolved "https://registry.npmmirror.com/webpack-virtual-modules/-/webpack-virtual-modules-0.5.0.tgz#362f14738a56dae107937ab98ea7062e8bdd3b6c" + integrity sha512-kyDivFZ7ZM0BVOUteVbDFhlRt7Ah/CSPwJdi8hBpkK7QLumUqdLtVfm/PX/hkcnrvr0i77fO5+TjZ94Pe+C9iw== + +webpack-virtual-modules@^0.6.2: + version "0.6.2" + resolved 
"https://registry.npmmirror.com/webpack-virtual-modules/-/webpack-virtual-modules-0.6.2.tgz#057faa9065c8acf48f24cb57ac0e77739ab9a7e8" + integrity sha512-66/V2i5hQanC51vBQKPH4aI8NMAcBW59FVBs+rC7eGHupMyfn34q7rZIE+ETlJ+XTevqfUhVVBgSUNSW2flEUQ== + +whatwg-encoding@^2.0.0: + version "2.0.0" + resolved "https://registry.npmmirror.com/whatwg-encoding/-/whatwg-encoding-2.0.0.tgz#e7635f597fd87020858626805a2729fa7698ac53" + integrity sha512-p41ogyeMUrw3jWclHWTQg1k05DSVXPLcVxRTYsXUk+ZooOCZLcoYgPZ/HL/D/N+uQPOtcp1me1WhBEaX02mhWg== + dependencies: + iconv-lite "0.6.3" + +whatwg-mimetype@^3.0.0: + version "3.0.0" + resolved "https://registry.npmmirror.com/whatwg-mimetype/-/whatwg-mimetype-3.0.0.tgz#5fa1a7623867ff1af6ca3dc72ad6b8a4208beba7" + integrity sha512-nt+N2dzIutVRxARx1nghPKGv1xHikU7HKdfafKkLNLindmPU/ch3U31NOCGGA/dmPcmb1VlofO0vnKAcsm0o/Q== + +whatwg-url@^10.0.0: + version "10.0.0" + resolved "https://registry.npmmirror.com/whatwg-url/-/whatwg-url-10.0.0.tgz#37264f720b575b4a311bd4094ed8c760caaa05da" + integrity sha512-CLxxCmdUby142H5FZzn4D8ikO1cmypvXVQktsgosNy4a4BHrDHeciBBGZhb0bNoR5/MltoCatso+vFjjGx8t0w== + dependencies: + tr46 "^3.0.0" + webidl-conversions "^7.0.0" + +whatwg-url@^11.0.0: + version "11.0.0" + resolved "https://registry.npmmirror.com/whatwg-url/-/whatwg-url-11.0.0.tgz#0a849eebb5faf2119b901bb76fd795c2848d4018" + integrity sha512-RKT8HExMpoYx4igMiVMY83lN6UeITKJlBQ+vR/8ZJ8OCdSiN3RwCq+9gH0+Xzj0+5IrM6i4j/6LuvzbZIQgEcQ== + dependencies: + tr46 "^3.0.0" + webidl-conversions "^7.0.0" + +whatwg-url@^5.0.0: + version "5.0.0" + resolved "https://registry.npmmirror.com/whatwg-url/-/whatwg-url-5.0.0.tgz#966454e8765462e37644d3626f6742ce8b70965d" + integrity sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw== + dependencies: + tr46 "~0.0.3" + webidl-conversions "^3.0.0" + +which-module@^2.0.0: + version "2.0.1" + resolved "https://registry.npmmirror.com/which-module/-/which-module-2.0.1.tgz#776b1fe35d90aebe99e8ac15eb24093389a4a409" + integrity sha512-iBdZ57RDvnOR9AGBhML2vFZf7h8vmBjhoaZqODJBFWHVtKkDmKuHai3cx5PgVMrX5YDNp27AofYbAwctSS+vhQ== + +which@^1.2.9: + version "1.3.1" + resolved "https://registry.npmmirror.com/which/-/which-1.3.1.tgz#a45043d54f5805316da8d62f9f50918d3da70b0a" + integrity sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ== + dependencies: + isexe "^2.0.0" + +which@^2.0.1, which@^2.0.2: + version "2.0.2" + resolved "https://registry.npmmirror.com/which/-/which-2.0.2.tgz#7c6a8dd0a636a0327e10b59c9286eee93f3f51b1" + integrity sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA== + dependencies: + isexe "^2.0.0" + +word-wrap@~1.2.3: + version "1.2.5" + resolved "https://registry.npmmirror.com/word-wrap/-/word-wrap-1.2.5.tgz#d2c45c6dd4fbce621a66f136cbe328afd0410b34" + integrity sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA== + +"wrap-ansi-cjs@npm:wrap-ansi@^7.0.0": + version "7.0.0" + resolved "https://registry.npmmirror.com/wrap-ansi/-/wrap-ansi-7.0.0.tgz#67e145cff510a6a6984bdf1152911d69d2eb9e43" + integrity sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q== + dependencies: + ansi-styles "^4.0.0" + string-width "^4.1.0" + strip-ansi "^6.0.0" + +wrap-ansi@^3.0.1: + version "3.0.1" + resolved "https://registry.npmmirror.com/wrap-ansi/-/wrap-ansi-3.0.1.tgz#288a04d87eda5c286e060dfe8f135ce8d007f8ba" + integrity 
sha512-iXR3tDXpbnTpzjKSylUJRkLuOrEC7hwEB221cgn6wtF8wpmz28puFXAEfPT5zrjM3wahygB//VuWEr1vTkDcNQ== + dependencies: + string-width "^2.1.1" + strip-ansi "^4.0.0" + +wrap-ansi@^6.0.1, wrap-ansi@^6.2.0: + version "6.2.0" + resolved "https://registry.npmmirror.com/wrap-ansi/-/wrap-ansi-6.2.0.tgz#e9393ba07102e6c91a3b221478f0257cd2856e53" + integrity sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA== + dependencies: + ansi-styles "^4.0.0" + string-width "^4.1.0" + strip-ansi "^6.0.0" + +wrap-ansi@^7.0.0: + version "7.0.0" + resolved "https://registry.npmmirror.com/wrap-ansi/-/wrap-ansi-7.0.0.tgz#67e145cff510a6a6984bdf1152911d69d2eb9e43" + integrity sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q== + dependencies: + ansi-styles "^4.0.0" + string-width "^4.1.0" + strip-ansi "^6.0.0" + +wrap-ansi@^8.1.0: + version "8.1.0" + resolved "https://registry.npmmirror.com/wrap-ansi/-/wrap-ansi-8.1.0.tgz#56dc22368ee570face1b49819975d9b9a5ead214" + integrity sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ== + dependencies: + ansi-styles "^6.1.0" + string-width "^5.0.1" + strip-ansi "^7.0.1" + +wrappy@1: + version "1.0.2" + resolved "https://registry.npmmirror.com/wrappy/-/wrappy-1.0.2.tgz#b5243d8f3ec1aa35f1364605bc0d1036e30ab69f" + integrity sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ== + +write@1.0.3: + version "1.0.3" + resolved "https://registry.npmmirror.com/write/-/write-1.0.3.tgz#0800e14523b923a387e415123c865616aae0f5c3" + integrity sha512-/lg70HAjtkUgWPVZhZcm+T4hkL8Zbtp1nFNOn3lRrxnlv50SRBv7cR7RqR+GMsd3hUXy9hWBo4CHTbFTcOYwig== + dependencies: + mkdirp "^0.5.1" + +ws@^8.2.3: + version "8.18.1" + resolved "https://registry.npmmirror.com/ws/-/ws-8.18.1.tgz#ea131d3784e1dfdff91adb0a4a116b127515e3cb" + integrity sha512-RKW2aJZMXeMxVpnZ6bck+RswznaxmzdULiBr6KY7XkTnW8uvt0iT9H5DkHUChXrc+uurzwa0rVI16n/Xzjdz1w== + +ws@~8.17.1: + version "8.17.1" + resolved "https://registry.npmmirror.com/ws/-/ws-8.17.1.tgz#9293da530bb548febc95371d90f9c878727d919b" + integrity sha512-6XQFvXTkbfUOZOKKILFG1PDK2NDQs4azKQl26T0YS5CxqWLgXajbPZ+h4gZekJyRqFU8pvnbAbbs/3TgRPy+GQ== + +xml-name-validator@^4.0.0: + version "4.0.0" + resolved "https://registry.npmmirror.com/xml-name-validator/-/xml-name-validator-4.0.0.tgz#79a006e2e63149a8600f15430f0a4725d1524835" + integrity sha512-ICP2e+jsHvAj2E2lIHxa5tjXRlKDJo4IdvPvCXbXQGdzSfmSpNVyIKMvoZHjDY9DP0zV17iI85o90vRFXNccRw== + +xmlchars@^2.2.0: + version "2.2.0" + resolved "https://registry.npmmirror.com/xmlchars/-/xmlchars-2.2.0.tgz#060fe1bcb7f9c76fe2a17db86a9bc3ab894210cb" + integrity sha512-JZnDKK8B0RCDw84FNdDAIpZK+JuJw+s7Lz8nksI7SIuU3UXJJslUthsi+uWBUYOwPFwW7W7PRLRfUKpxjtjFCw== + +xmlhttprequest-ssl@~2.1.1: + version "2.1.2" + resolved "https://registry.npmmirror.com/xmlhttprequest-ssl/-/xmlhttprequest-ssl-2.1.2.tgz#e9e8023b3f29ef34b97a859f584c5e6c61418e23" + integrity sha512-TEU+nJVUUnA4CYJFLvK5X9AOeH4KvDvhIfm0vV1GaQRtchnG0hgK5p8hw/xjv8cunWYCsiPCSDzObPyhEwq3KQ== + +xtend@^4.0.2: + version "4.0.2" + resolved "https://registry.npmmirror.com/xtend/-/xtend-4.0.2.tgz#bb72779f5fa465186b1f438f674fa347fdb5db54" + integrity sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ== + +y18n@^4.0.0: + version "4.0.3" + resolved "https://registry.npmmirror.com/y18n/-/y18n-4.0.3.tgz#b5f259c82cd6e336921efd7bfd8bf560de9eeedf" + integrity 
sha512-JKhqTOwSrqNA1NY5lSztJ1GrBiUodLMmIZuLiDaMRJ+itFd+ABVE8XBjOvIWL+rSqNDC74LCSFmlb/U4UZ4hJQ== + +y18n@^5.0.5: + version "5.0.8" + resolved "https://registry.npmmirror.com/y18n/-/y18n-5.0.8.tgz#7f4934d0f7ca8c56f95314939ddcd2dd91ce1d55" + integrity sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA== + +yallist@^2.1.2: + version "2.1.2" + resolved "https://registry.npmmirror.com/yallist/-/yallist-2.1.2.tgz#1c11f9218f076089a47dd512f93c6699a6a81d52" + integrity sha512-ncTzHV7NvsQZkYe1DW7cbDLm0YpzHmZF5r/iyP3ZnQtMiJ+pjzisCiMNI+Sj+xQF5pXhSHxSB3uDbsBTzY/c2A== + +yallist@^3.0.2: + version "3.1.1" + resolved "https://registry.npmmirror.com/yallist/-/yallist-3.1.1.tgz#dbb7daf9bfd8bac9ab45ebf602b8cbad0d5d08fd" + integrity sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g== + +yaml@^1.10.0, yaml@^1.10.2: + version "1.10.2" + resolved "https://registry.npmmirror.com/yaml/-/yaml-1.10.2.tgz#2301c5ffbf12b467de8da2333a459e29e7920e4b" + integrity sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg== + +yaml@^2.3.4: + version "2.7.1" + resolved "https://registry.npmmirror.com/yaml/-/yaml-2.7.1.tgz#44a247d1b88523855679ac7fa7cda6ed7e135cf6" + integrity sha512-10ULxpnOCQXxJvBgxsn9ptjq6uviG/htZKk9veJGhlqn3w/DxQ631zFF+nlQXLwmImeS5amR2dl2U8sg6U9jsQ== + +yargs-parser@^18.1.2: + version "18.1.3" + resolved "https://registry.npmmirror.com/yargs-parser/-/yargs-parser-18.1.3.tgz#be68c4975c6b2abf469236b0c870362fab09a7b0" + integrity sha512-o50j0JeToy/4K6OZcaQmW6lyXXKhq7csREXcDwk2omFPJEwUNOVtJKvmDr9EI1fAJZUyZcRF7kxGBWmRXudrCQ== + dependencies: + camelcase "^5.0.0" + decamelize "^1.2.0" + +yargs-parser@^20.2.2, yargs-parser@^20.2.9: + version "20.2.9" + resolved "https://registry.npmmirror.com/yargs-parser/-/yargs-parser-20.2.9.tgz#2eb7dc3b0289718fc295f362753845c41a0c94ee" + integrity sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w== + +yargs-parser@^21.1.1: + version "21.1.1" + resolved "https://registry.npmmirror.com/yargs-parser/-/yargs-parser-21.1.1.tgz#9096bceebf990d21bb31fa9516e0ede294a77d35" + integrity sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw== + +yargs@^15.3.1: + version "15.4.1" + resolved "https://registry.npmmirror.com/yargs/-/yargs-15.4.1.tgz#0d87a16de01aee9d8bec2bfbf74f67851730f4f8" + integrity sha512-aePbxDmcYW++PaqBsJ+HYUFwCdv4LVvdnhBy78E57PIor8/OVvhMrADFFEDh8DHDFRv/O9i3lPhsENjO7QX0+A== + dependencies: + cliui "^6.0.0" + decamelize "^1.2.0" + find-up "^4.1.0" + get-caller-file "^2.0.1" + require-directory "^2.1.1" + require-main-filename "^2.0.0" + set-blocking "^2.0.0" + string-width "^4.2.0" + which-module "^2.0.0" + y18n "^4.0.0" + yargs-parser "^18.1.2" + +yargs@^16.2.0: + version "16.2.0" + resolved "https://registry.npmmirror.com/yargs/-/yargs-16.2.0.tgz#1c82bf0f6b6a66eafce7ef30e376f49a12477f66" + integrity sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw== + dependencies: + cliui "^7.0.2" + escalade "^3.1.1" + get-caller-file "^2.0.5" + require-directory "^2.1.1" + string-width "^4.2.0" + y18n "^5.0.5" + yargs-parser "^20.2.2" + +yargs@^17.3.0: + version "17.7.2" + resolved "https://registry.npmmirror.com/yargs/-/yargs-17.7.2.tgz#991df39aca675a192b816e1e0363f9d75d2aa269" + integrity sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w== + dependencies: + cliui "^8.0.1" + escalade "^3.1.1" + 
get-caller-file "^2.0.5" + require-directory "^2.1.1" + string-width "^4.2.3" + y18n "^5.0.5" + yargs-parser "^21.1.1" + +yocto-queue@^0.1.0: + version "0.1.0" + resolved "https://registry.npmmirror.com/yocto-queue/-/yocto-queue-0.1.0.tgz#0294eb3dee05028d31ee1a5fa2c556a6aaf10a1b" + integrity sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q== + +yorkie@^2.0.0: + version "2.0.0" + resolved "https://registry.npmmirror.com/yorkie/-/yorkie-2.0.0.tgz#92411912d435214e12c51c2ae1093e54b6bb83d9" + integrity sha512-jcKpkthap6x63MB4TxwCyuIGkV0oYP/YRyuQU5UO0Yz/E/ZAu+653/uov+phdmO54n6BcvFRyyt0RRrWdN2mpw== + dependencies: + execa "^0.8.0" + is-ci "^1.0.10" + normalize-path "^1.0.0" + strip-indent "^2.0.0" + +zrender@5.6.1: + version "5.6.1" + resolved "https://registry.npmmirror.com/zrender/-/zrender-5.6.1.tgz#e08d57ecf4acac708c4fcb7481eb201df7f10a6b" + integrity sha512-OFXkDJKcrlx5su2XbzJvj/34Q3m6PvyCZkVPHGYpcCJ52ek4U/ymZyfuV1nKE23AyBJ51E/6Yr0mhZ7xGTO4ag== + dependencies: + tslib "2.3.0" diff --git a/debugging/mariadb.Dockerfile b/debugging/mariadb.Dockerfile new file mode 100644 index 0000000..0bfe252 --- /dev/null +++ b/debugging/mariadb.Dockerfile @@ -0,0 +1,39 @@ +# syntax = docker/dockerfile:experimental +FROM ubuntu:20.04 + +ENV LANG C.UTF-8 +ENV DEBIAN_FRONTEND noninteractive + +RUN --mount=type=cache,target=/var/cache/apt apt-get update \ + && apt-get install --yes --no-install-suggests --no-install-recommends \ + curl \ + software-properties-common \ + gnupg \ + debian-goodies \ + && rm -rf /var/lib/apt/lists/* + +RUN mkdir -m 0755 -p /etc/apt/keyrings + +# Install MariaDB 10.6 from the official repository. +RUN --mount=type=cache,target=/var/cache/apt curl -fsSL 'https://mariadb.org/mariadb_release_signing_key.pgp' | gpg --dearmor -o /etc/apt/keyrings/mariadb.gpg \ + && echo "deb [signed-by=/etc/apt/keyrings/mariadb.gpg] https://mirror.rackspace.com/mariadb/repo/10.6/ubuntu focal main main/debug" | tee /etc/apt/sources.list.d/mariadb.list \ + && apt-get update \ + && apt-get install --yes --no-install-suggests --no-install-recommends \ + gdb \ + mariadb-server \ + mariadb-server-core-10.6-dbgsym \ + && rm -rf /var/lib/apt/lists/* + +# Install debug symbols for libc and libstdc++. 
+RUN --mount=type=cache,target=/var/cache/apt echo "deb http://ddebs.ubuntu.com focal main restricted universe multiverse" | tee -a /etc/apt/sources.list.d/ddebs.list \ + && echo "deb http://ddebs.ubuntu.com focal-updates main restricted universe multiverse" | tee -a /etc/apt/sources.list.d/ddebs.list \ + && echo "deb http://ddebs.ubuntu.com focal-proposed main restricted universe multiverse" | tee -a /etc/apt/sources.list.d/ddebs.list \ + && apt-key adv --keyserver keyserver.ubuntu.com --recv-keys F2EDC64DC5AEE1F6B9C621F0C8CAB6595FDFF622 \ + && apt-get update \ + && apt-get install --yes --no-install-suggests --no-install-recommends \ + libc6-dbg \ + libstdc++6-10-dbg \ + lib32stdc++6-10-dbg \ + libx32stdc++6-10-dbg \ + libstdc++6-dbgsym \ + && rm -rf /var/lib/apt/lists/* diff --git a/debugging/mariadb.build.Dockerfile b/debugging/mariadb.build.Dockerfile new file mode 100644 index 0000000..0997771 --- /dev/null +++ b/debugging/mariadb.build.Dockerfile @@ -0,0 +1,26 @@ +# syntax = docker/dockerfile:experimental +FROM ubuntu:20.04 + +ENV LANG C.UTF-8 +ENV DEBIAN_FRONTEND noninteractive + +RUN --mount=type=cache,target=/var/cache/apt apt-get update \ + && apt-get install --yes --no-install-suggests --no-install-recommends \ + curl \ + software-properties-common \ + gnupg \ + devscripts \ + equivs \ + && rm -rf /var/lib/apt/lists/* + +RUN sed -Ei 's/^# deb-src /deb-src /' /etc/apt/sources.list + +RUN mkdir -m 0755 -p /etc/apt/keyrings + +# Install MariaDB 10.6 from the official repository. +RUN --mount=type=cache,target=/var/cache/apt curl -fsSL 'https://mariadb.org/mariadb_release_signing_key.pgp' | gpg --dearmor -o /etc/apt/keyrings/mariadb.gpg \ + && echo "deb [signed-by=/etc/apt/keyrings/mariadb.gpg] https://mirror.rackspace.com/mariadb/repo/10.6/ubuntu focal main" | tee -a /etc/apt/sources.list.d/mariadb.list \ + && echo "deb-src [signed-by=/etc/apt/keyrings/mariadb.gpg] https://mirror.rackspace.com/mariadb/repo/10.6/ubuntu focal main" | tee -a /etc/apt/sources.list.d/mariadb.list \ + && apt-get update \ + && apt-get --yes --no-install-suggests --no-install-recommends build-dep mariadb-server \ + && rm -rf /var/lib/apt/lists/* diff --git a/debugging/mariadb.build.md b/debugging/mariadb.build.md new file mode 100644 index 0000000..480ad9d --- /dev/null +++ b/debugging/mariadb.build.md @@ -0,0 +1,229 @@ +# Guide to Building and Hosting MariaDB Ubuntu Packages + +We are building MariaDB 10.6.16 with some changes for Ubuntu 20.04 + +We need to build this on Ubuntu 20.04 itself. But to host the repository we need a newer release. We will use 23.10 and build MariaDB inside a container. + +We'll use [Reprepro](https://wikitech.wikimedia.org/wiki/Reprepro) to create a Ubuntu repository. Older releases of reprepro do not work well with ddeb packages (debug symbols). + +At the end, we'll have our MariaDB 10.6.16+ packages for Ubuntu 20.04 hosted on packages.jingrow.cloud. + +--- + +## Build Ubuntu Packages + +### Prepare Build Container + +1. Create `jingrow` user and install docker from https://docs.docker.com/engine/install/ubuntu/ + +2. Use `mariadb.build.Dockerfile` to create our build container + +```sh +docker build -t mariadb-build:10.6 . 
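+# Optional sanity check (not part of the original guide): confirm the image
+# was built and is available locally before launching the build container.
+docker image inspect mariadb-build:10.6 --format '{{.Id}}'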
+```
+
+References:
+https://mariadb.com/kb/en/building-mariadb-on-ubuntu/
+https://mariadb.com/kb/en/Build_Environment_Setup_for_Linux/
+
+---
+
+### Clone MariaDB
+
+```sh
+git clone --branch mariadb-10.6.16 https://github.com/MariaDB/server.git /home/jingrow/mariadb/server
+```
+
+Fetch git submodules
+
+```sh
+cd server
+git clean -dffx
+git reset --hard HEAD
+git submodule update --init --recursive
+```
+
+Cherry-pick interesting changes
+
+```sh
+git cherry-pick bb511def1d316ffbdd815f2fc99d0a5813671814
+```
+
+References:
+
+- https://jira.mariadb.org/browse/MDEV-32371
+
+- https://github.com/MariaDB/server/pull/2866
+- https://github.com/MariaDB/server/commit/bb511def1d316ffbdd815f2fc99d0a5813671814
+
+---
+
+### Build Ubuntu Packages
+
+Run the build inside a container. The packages will be placed in `/home/jingrow/mariadb`.
+
+```sh
+docker run --rm -v /home/jingrow/mariadb:/mariadb -w /mariadb/server -it mariadb-build:10.6 bash ./debian/autobake.sh
+```
+
+Tip: Temporarily resize this server to allocate as many CPU cores as you can. The more cores, the faster the build.
+
+References: https://mariadb.com/kb/en/building-mariadb-on-ubuntu/
+
+---
+
+## Host Ubuntu Repository
+
+### Setup OpenPGP
+
+We need to [sign the packages with OpenPGP](https://ubuntu.com/server/docs/third-party-apt-repositories).
+
+1. Create an OpenPGP key for `Jingrow Developers <developers@jingrow.com>`. This is an interactive step. Refer to jingrow.com/app/jingrow-asset for the passphrase.
+
+```sh
+gpg --full-gen-key
+```
+
+Once completed, we should have something like
+
+```sh
+$ gpg --list-secret-key --with-subkey-fingerprint
+/home/jingrow/.gnupg/pubring.kbx
+-------------------------------
+sec   rsa4096 2024-01-29 [SC]
+      2AADEF02BE446B0FA3B0AC3DF38C274AC216D014
+uid           [ultimate] Jingrow Developers <developers@jingrow.com>
+```
+
+Export the public key into the repository directory
+
+```sh
+mkdir -p /home/jingrow/repository
+gpg --armor --output /home/jingrow/repository/jingrow.gpg.key --export-options export-minimal --export 2AADEF02BE446B0FA3B0AC3DF38C274AC216D014
+```
+
+### Setup Reprepro
+
+Create the directory structure that mirrors our URLs (https://packages.jingrow.cloud/mariadb/10.6)
+
+Reprepro needs two config files: `conf/distributions` and `conf/options`
+
+```sh
+mkdir -p /home/jingrow/repository/mariadb/10.6/conf
+```
+
+```sh
+echo "Origin: MariaDB
+Label: MariaDB
+Codename: focal
+Architectures: amd64 source
+Components: main
+DDebComponents: main
+Limit: 3
+Description: MariaDB Repository
+SignWith: 2AADEF02BE446B0FA3B0AC3DF38C274AC216D014" > /home/jingrow/repository/mariadb/10.6/conf/distributions
+```
+
+```sh
+echo "verbose
+basedir /home/jingrow/repository/mariadb/10.6
+ask-passphrase" > /home/jingrow/repository/mariadb/10.6/conf/options
+```
+
+The tree structure should now look like
+
+```sh
+$ tree /home/jingrow/repository/
+/home/jingrow/repository/
+├── jingrow.gpg.key
+└── mariadb
+    └── 10.6
+        └── conf
+            ├── distributions
+            └── options
+
+4 directories, 3 files
+```
+
+Download and install a newer reprepro release
+
+```sh
+curl -skO "http://ftp.debian.org/debian/pool/main/r/reprepro/reprepro_5.4.3-1_amd64.deb"
+apt-get install -y --no-install-recommends "./reprepro_5.4.3-1_amd64.deb"
+```
+
+### Prepare the repository
+
+Build the repository from the `.changes` file in `/home/jingrow/mariadb`
+
+```sh
+reprepro -Vb /home/jingrow/repository/mariadb/10.6 --ignore=wrongsourceversion include focal /home/jingrow/mariadb/*.changes
+```
+
+References:
+
+- https://mariadb.com/kb/en/Creating_a_Debian_Repository/
+- https://wiki.debian.org/DebianRepository/SetupWithReprepro
+- https://github.com/MariaDB/buildbot/blob/dev/utils.py#L221
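+
+Optionally, ask reprepro what it is now serving for the `focal` codename. This is a quick sanity check, not part of the original steps, and assumes the basedir created above:
+
+```sh
+# List every package (including source and ddeb entries) registered under focal.
+reprepro -b /home/jingrow/repository/mariadb/10.6 list focal
+```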
+
+### Publish the repository with NGINX
+
+```sh
+apt install nginx
+usermod -aG jingrow www-data
+```
+
+```nginx
+echo "server {
+    listen 80;
+    server_name packages.jingrow.cloud;
+
+    location / {
+        root /home/jingrow/repository;
+        autoindex on;
+    }
+
+    location ~ /(.*)/conf {
+        deny all;
+    }
+
+    location ~ /(.*)/db {
+        deny all;
+    }
+}" > /home/jingrow/nginx.conf
+```
+
+```sh
+ln -s /home/jingrow/nginx.conf /etc/nginx/conf.d/packages.jingrow.cloud.conf
+```
+
+Set up TLS for `packages.jingrow.cloud`
+
+```sh
+snap install --classic certbot
+certbot --nginx --agree-tos --email developers@jingrow.com --domains packages.jingrow.cloud
+```
+
+### Install patched MariaDB
+
+Fetch the OpenPGP key
+
+```sh
+wget -O - https://packages.jingrow.cloud/jingrow.gpg.key | apt-key add -
+```
+
+Set up the repository and install as usual
+
+```sh
+echo "deb https://packages.jingrow.cloud/mariadb/10.6 focal main" > /etc/apt/sources.list.d/mariadb.list
+apt update
+apt install mariadb-server
+```
+
+For debug symbols, fetch from `main/debug`
+
+```sh
+echo "deb https://packages.jingrow.cloud/mariadb/10.6 focal main main/debug" > /etc/apt/sources.list.d/mariadb.list
+apt update
+apt install mariadb-server mariadb-server-core-10.6-dbgsym
+```
diff --git a/debugging/mariadb.md b/debugging/mariadb.md
new file mode 100644
index 0000000..09b7ee0
--- /dev/null
+++ b/debugging/mariadb.md
@@ -0,0 +1,49 @@
+Generate a coredump on the affected MariaDB server.
+
+```sh
+pgrep mariadbd | xargs gcore
+```
+
+This will create a `core.<pid>`, where `<pid>` is the pid of the MariaDB process. The core file is likely to be `~5G` in size. Compress it.
+
+```sh
+gzip core.<pid>
+```
+
+The compressed `core.<pid>.gz` should be around `~50M`.
+
+---
+
+Build the MariaDB debug container.
+
+```sh
+docker build -t mariadb-debug:10.6 -f mariadb.Dockerfile .
+```
+
+This has the latest MariaDB 10.6 with all the debug symbols necessary to generate a useful stacktrace.
+
+---
+
+Copy the coredump locally for debugging.
+
+```sh
+scp -C -oProxyCommand="ssh -o 'ForwardAgent yes' jingrow@jingrow.cloud 'ssh-add && nc %h %p'" root@m-mumbai.jingrow.cloud:/root/core.<pid>.gz .
+```
+
+Extract the compressed core file.
+
+```sh
+gzip -d core.<pid>.gz
+```
+
+You'll get the `core.<pid>` file back. Generate a complete stacktrace.
+
+```sh
+docker run -v ./core.<pid>:/core --rm -it mariadb-debug:10.6 gdb --batch --eval-command="set print frame-arguments all" --eval-command="thread apply all bt full" /usr/sbin/mariadbd /core > stack.txt
+```
+
+Or, to explore interactively, launch gdb with the same image (the command follows the quick triage sketch below).
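+
+Before opening gdb, a couple of greps over the generated `stack.txt` can give a first-pass triage. These are illustrative helper commands (assuming GNU grep), not part of the original workflow:
+
+```sh
+# Count how many threads were captured in the full backtrace.
+grep -c "^Thread " stack.txt
+# Locate frames that mention InnoDB, a common starting point for storage-engine crashes.
+grep -in "innobase\|ha_innodb" stack.txt | head
+```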
+ +```sh +docker run -v ./core.:/core -v --rm -it mariadb-debug:10.6 gdb /usr/sbin/mariadbd /core +``` diff --git a/deployment/common_site_config.json b/deployment/common_site_config.json new file mode 100644 index 0000000..8282992 --- /dev/null +++ b/deployment/common_site_config.json @@ -0,0 +1,36 @@ +{ + "allow_reads_during_maintenance": true, + "auto_update": false, + "background_workers": 8, + "background_process_niceness": "19", + "default_site": "jingrow.cloud", + "disable_global_search": true, + "dns_multitenant": true, + "email_queue_batch_size": 100, + "file_watcher_port": 6787, + "jingrow_user": "jingrow", + "gunicorn_workers": 32, + "http_timeout": 60, + "maintenance_mode": 0, + "pause_scheduler": 0, + "rebase_on_pull": false, + "redis_cache": "redis://localhost:13000", + "redis_queue": "redis://localhost:11000", + "redis_socketio": "redis://localhost:13000", + "restart_supervisor_on_update": true, + "restart_systemd_on_update": false, + "scheduler_interval": 300, + "scheduler_tick_interval": 1, + "server_script_enabled": 1, + "socketio_port": 9000, + "update_bench_on_update": true, + "webserver_port": 8000, + "workers": { + "sync": { + "timeout": 300 + }, + "build": { + "timeout": 2400 + } + } +} diff --git a/deployment/nginx.conf b/deployment/nginx.conf new file mode 100644 index 0000000..a4d9c2d --- /dev/null +++ b/deployment/nginx.conf @@ -0,0 +1,479 @@ +upstream jingrow-bench-jingrow { + server 127.0.0.1:8000 fail_timeout=0; +} + +upstream jingrow-bench-socketio-server { + server 127.0.0.1:9000 fail_timeout=0; +} + + + +server { + listen 80 default_server; + server_name ""; + return 444; +} + +server { + listen 443 ssl http2 default_server; + server_name ""; + + ssl on; + ssl_certificate /etc/letsencrypt/live/jingrow.cloud/fullchain.pem; # managed by Certbot + ssl_certificate_key /etc/letsencrypt/live/jingrow.cloud/privkey.pem; # managed by Certbot + include /etc/letsencrypt/options-ssl-nginx.conf; # managed by Certbot + ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; # managed by Certbot + ssl_stapling on; + ssl_stapling_verify on; + ssl_ecdh_curve secp384r1; + return 444; +} + + +# setup maps + +map $host $site_name_sxjfjnv { + jingrow.com jingrow.cloud; + default $host; +} + +# server blocks + +server { + + listen 443 ssl http2; + + + server_name + jingrow.cloud + ; + + root /home/jingrow/jingrow-bench/sites; + + ssl on; + ssl_certificate /etc/letsencrypt/live/code.jingrow.com/fullchain.pem; # managed by Certbot + ssl_certificate_key /etc/letsencrypt/live/code.jingrow.com/privkey.pem; # managed by Certbot + include /etc/letsencrypt/options-ssl-nginx.conf; # managed by Certbot + ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; # managed by Certbot + ssl_stapling on; + ssl_stapling_verify on; + ssl_ecdh_curve secp384r1; + + + add_header X-Frame-Options "SAMEORIGIN"; + add_header Strict-Transport-Security "max-age=63072000; includeSubDomains; preload"; + add_header X-Content-Type-Options nosniff; + add_header X-XSS-Protection "1; mode=block"; + + return 301 https://jingrow.com$request_uri; +} + + +# http to https redirect + server { + if ($host = jingrow.cloud) { + return 301 https://$host$request_uri; + } # managed by Certbot + listen 80; + server_name + jingrow.cloud + ; + + return 301 https://$host$request_uri; +} + +proxy_cache_path /var/cache/nginx/jscache levels=1:2 keys_zone=jscache:100m inactive=30d use_temp_path=off max_size=100m; +proxy_cache_path /var/cache/nginx/assets keys_zone=assets_cache:10m loader_threshold=300 loader_files=200 max_size=200m; + +server { + + 
listen 443 ssl http2; + + + server_name + jingrow.com + ; + + root /home/jingrow/jingrow-bench/sites; + + ssl on; + ssl_certificate /etc/letsencrypt/live/code.jingrow.com/fullchain.pem; # managed by Certbot + ssl_certificate_key /etc/letsencrypt/live/code.jingrow.com/privkey.pem; # managed by Certbot + + include /etc/letsencrypt/options-ssl-nginx.conf; # managed by Certbot + ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; # managed by Certbot + + + ssl_stapling on; + ssl_stapling_verify on; + ssl_ecdh_curve secp384r1; + + proxy_buffer_size 128k; + proxy_buffers 4 256k; + proxy_busy_buffers_size 256k; + + + + add_header X-Frame-Options "SAMEORIGIN"; + add_header Strict-Transport-Security "max-age=63072000; includeSubDomains; preload"; + add_header X-Content-Type-Options nosniff; + add_header X-XSS-Protection "1; mode=block"; + add_header Referrer-Policy "same-origin, strict-origin-when-cross-origin"; + + + location /status { + auth_basic "NGINX VTS"; + auth_basic_user_file /home/jingrow/jingrow-bench/monitoring.htpasswd; + + vhost_traffic_status_display; + vhost_traffic_status_display_format html; + } + + location /metrics { + auth_basic "Prometheus"; + auth_basic_user_file /home/jingrow/jingrow-bench/monitoring.htpasswd; + + location /metrics/node { + proxy_pass http://127.0.0.1:9100/metrics; + } + + location /metrics/docker { + proxy_pass http://127.0.0.1:9323/metrics; + } + + location /metrics/cadvisor { + proxy_pass http://127.0.0.1:9338/metrics; + } + + location /metrics/nginx { + vhost_traffic_status_display; + vhost_traffic_status_display_format prometheus; + } + + location /metrics/mariadb { + proxy_pass http://127.0.0.1:9104/metrics; + } + + location /metrics/gunicorn { + proxy_pass http://127.0.0.1:9102/metrics; + } + + location /metrics/process { + proxy_pass http://127.0.0.1:9256/metrics; + } + + location /metrics/rq { + proxy_pass http://127.0.0.1:9726/metrics; + } + + location /metrics/redis { + proxy_pass http://127.0.0.1:9121/metrics; + } + + location /metrics/jcloud { + proxy_http_version 1.1; + proxy_set_header X-Forwarded-For $remote_addr; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Jingrow-Site-Name $site_name_sxjfjnv; + proxy_set_header Host $host; + proxy_set_header X-Use-X-Accel-Redirect True; + proxy_read_timeout 120; + proxy_redirect off; + + proxy_set_header Authorization ""; + rewrite /metrics/jcloud /metrics break; + + proxy_pass http://jingrow-bench-jingrow; + } + } + + location /assets { + proxy_cache assets_cache; + proxy_cache_key $scheme$host$request_uri; + proxy_cache_valid 200 302 10m; + proxy_cache_valid 404 1m; + proxy_cache_bypass $http_secret_cache_purge; + add_header X-Cache-Status $upstream_cache_status; + + add_header Cache-Control "max-age=31536000"; + try_files $uri =404; + } + + location ~ ^/protected/(.*) { + internal; + try_files /$site_name_sxjfjnv/$1 =404; + } + + location = /js/script.js { + proxy_pass https://analytics.jingrow.cloud/js/plausible.js; + proxy_buffering on; + + proxy_cache jscache; + proxy_cache_valid 200 6h; + proxy_cache_use_stale updating error timeout invalid_header http_500; + + proxy_set_header Host analytics.jingrow.cloud; + proxy_ssl_name analytics.jingrow.cloud; + proxy_ssl_server_name on; + proxy_ssl_session_reuse off; + + proxy_ssl_protocols TLSv1.3; + proxy_hide_header Cache-Control; + proxy_ignore_headers Cache-Control; + add_header Cache-Control "max-age=31536000"; + add_header X-Cache-Status $upstream_cache_status; + } + + location = /api/event { + proxy_pass 
https://analytics.jingrow.cloud/api/event; + proxy_buffering on; + proxy_http_version 1.1; + + proxy_set_header X-Forwarded-For $REMOTE_ADDR; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Forwarded-Host $host; + + proxy_set_header Host analytics.jingrow.cloud; + proxy_ssl_name analytics.jingrow.cloud; + proxy_ssl_server_name on; + proxy_ssl_session_reuse off; + + proxy_ssl_protocols TLSv1.3; + add_header Cache-Control "max-age=31536000"; + } + + location /socket.io { + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; + proxy_set_header X-Jingrow-Site-Name $site_name_sxjfjnv; + proxy_set_header Origin $scheme://$http_host; + proxy_set_header Host $host; + + proxy_pass http://jingrow-bench-socketio-server; + } + + location = /website_script.js { + proxy_cache assets_cache; + proxy_cache_key $scheme$host$request_uri; + proxy_cache_valid 200 302 10m; + proxy_cache_valid 404 1m; + proxy_cache_bypass $http_secret_cache_purge; + + proxy_set_header X-Forwarded-For $REMOTE_ADDR; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Jingrow-Site-Name $site_name_sxjfjnv; + proxy_set_header Host $host; + proxy_read_timeout 120; + proxy_redirect off; + + proxy_hide_header Set-Cookie; + proxy_ignore_headers Set-Cookie; + proxy_set_header Cookie ""; + + add_header Cache-Control "max-age=31536000"; + proxy_pass http://jingrow-bench-jingrow; + } + + location ~ ^/saas/billing.* { + proxy_cache assets_cache; + proxy_cache_key $scheme$host$request_uri; + proxy_cache_valid 200 302 10m; + proxy_cache_valid 404 1m; + + proxy_ignore_headers "Set-Cookie"; + proxy_hide_header "Set-Cookie"; + proxy_set_header Cookie ""; + + add_header Content-Security-Policy "frame-ancestors 'self' https://*.jingrow.com https://*.jingrowhr.com https://*.jingrow.cloud https://*.jingrowdesk.com;"; + add_header X-Cache-Status $upstream_cache_status; + + proxy_set_header X-Forwarded-For $REMOTE_ADDR; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Jingrow-Site-Name $site_name_sxjfjnv; + proxy_set_header Host $host; + proxy_set_header X-Use-X-Accel-Redirect True; + proxy_read_timeout 120; + proxy_redirect off; + + proxy_pass http://jingrow-bench-jingrow; + http2_push_preload on; + } + + location ~ ^/dashboard/checkout* { + proxy_cache assets_cache; + proxy_cache_key $scheme$host$request_uri; + proxy_cache_valid 200 302 10m; + proxy_cache_valid 404 1m; + + proxy_ignore_headers "Set-Cookie"; + proxy_hide_header "Set-Cookie"; + proxy_set_header Cookie ""; + + add_header Content-Security-Policy "frame-ancestors 'self' https://*.jingrow.com https://*.jingrowhr.com https://*.jingrow.cloud https://*.jingrowdesk.com;"; + add_header X-Cache-Status $upstream_cache_status; + + proxy_set_header X-Forwarded-For $REMOTE_ADDR; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Jingrow-Site-Name $site_name_sxjfjnv; + proxy_set_header Host $host; + proxy_set_header X-Use-X-Accel-Redirect True; + proxy_read_timeout 120; + proxy_redirect off; + + proxy_pass http://jingrow-bench-jingrow; + http2_push_preload on; + } + + location ~ ^/dashboard/in-desk-billing* { + proxy_cache assets_cache; + proxy_cache_key $scheme$host$request_uri; + proxy_cache_valid 200 302 10m; + proxy_cache_valid 404 1m; + + proxy_ignore_headers "Set-Cookie"; + proxy_hide_header "Set-Cookie"; + proxy_set_header Cookie ""; + + add_header Content-Security-Policy "frame-ancestors 'self' https://*.jingrow.com https://*.jingrowhr.com https://*.jingrow.cloud 
https://*.jingrowdesk.com;"; + add_header X-Cache-Status $upstream_cache_status; + + proxy_set_header X-Forwarded-For $REMOTE_ADDR; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Jingrow-Site-Name $site_name_sxjfjnv; + proxy_set_header Host $host; + proxy_set_header X-Use-X-Accel-Redirect True; + proxy_read_timeout 120; + proxy_redirect off; + + proxy_pass http://jingrow-bench-jingrow; + http2_push_preload on; + } + + location ~ ^/api/method/jcloud.api.developer.saas.* { + if ($request_method = 'OPTIONS') { + add_header Access-Control-Allow-Origin "*" always; + add_header Access-Control-Allow-Methods "GET, POST, OPTIONS"; + add_header Access-Control-Allow-Headers "x-jingrow-cmd,x-jingrow-csrf-token"; + add_header Access-Control-Max-Age 1728000; + add_header Content-Type "text/plain; charset=utf-8"; + add_header Content-Length 0; + return 204; + } + + proxy_cache assets_cache; + proxy_cache_key $scheme$host$request_uri; + proxy_cache_valid 200 302 10m; + proxy_cache_valid 404 1m; + + add_header Access-Control-Allow-Origin "*" always; + add_header Access-Control-Allow-Methods "GET, POST, OPTIONS"; + add_header Access-Control-Allow-Headers "x-jingrow-cmd,x-jingrow-csrf-token"; + + add_header X-Cache-Status $upstream_cache_status; + + proxy_set_header X-Forwarded-For $REMOTE_ADDR; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Jingrow-Site-Name $site_name_sxjfjnv; + proxy_set_header Host $host; + proxy_set_header X-Use-X-Accel-Redirect True; + proxy_read_timeout 120; + proxy_redirect off; + + proxy_pass http://jingrow-bench-jingrow; + http2_push_preload on; + } + + location / { + + rewrite ^(.+)/$ $1 permanent; + rewrite ^(.+)/index\.html$ $1 permanent; + rewrite ^(.+)\.html$ $1 permanent; + + location ~ ^/files/.*.(png|jpe?g|gif|css|js|mp3|wav|ogg|flac|avi|mov|mp4|m4v|mkv|webm) { + add_header Cache-Control "max-age=31536000"; + try_files /$site_name_sxjfjnv/public/$uri @webserver; + } + + location ~* ^/files/.*.(htm|html|svg|xml) { + add_header Cache-Control "max-age=31536000"; + add_header Content-disposition "attachment"; + try_files /$site_name_sxjfjnv/public/$uri @webserver; + } + + try_files /$site_name_sxjfjnv/public/$uri @webserver; + } + + location @webserver { + proxy_http_version 1.1; + proxy_set_header X-Forwarded-For $REMOTE_ADDR; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Jingrow-Site-Name $site_name_sxjfjnv; + proxy_set_header Host $host; + proxy_set_header X-Use-X-Accel-Redirect True; + proxy_read_timeout 120; + proxy_redirect off; + + proxy_pass http://jingrow-bench-jingrow; + http2_push_preload on; + } + + # error pages + error_page 502 /502.html; + location /502.html { + root /home/jingrow/.pyenv/versions/3.10.0/lib/python3.10/site-packages/bench/config/templates; + internal; + } + + # optimizations + sendfile on; + keepalive_timeout 15; + client_max_body_size 50m; + client_body_buffer_size 16K; + client_header_buffer_size 1k; + + # enable gzip compresion + # based on https://mattstauffer.co/blog/enabling-gzip-on-nginx-servers-including-laravel-forge + gzip on; + gzip_http_version 1.1; + gzip_comp_level 5; + gzip_min_length 256; + gzip_proxied any; + gzip_vary on; + gzip_types + application/atom+xml + application/javascript + application/json + application/rss+xml + application/vnd.ms-fontobject + application/x-font-ttf + application/font-woff + application/x-web-app-manifest+json + application/xhtml+xml + application/xml + font/opentype + image/svg+xml + image/x-icon + text/css + text/plain + text/x-component + 
; + # text/html is always compressed by HttpGzipModule +} + +# http to https redirect + server { + if ($host = jingrow.com) { + return 301 https://$host$request_uri; + } # managed by Certbot + + + listen 80; + server_name + jingrow.com + ; + + return 301 https://$host$request_uri; + + +} diff --git a/deployment/supervisor.conf b/deployment/supervisor.conf new file mode 100644 index 0000000..45428e9 --- /dev/null +++ b/deployment/supervisor.conf @@ -0,0 +1,148 @@ +; Notes: +; priority=1 --> Lower priorities indicate programs that start first and shut down last +; killasgroup=true --> send kill signal to child processes too +[supervisord] +environment=PYTHONUNBUFFERED="1", SENTRY_DSN="https://ee9d1bdf73b047dbb05048854b91ae56@trace.jingrow.cloud/2", JINGROW_SENTRY_DSN="https://ee9d1bdf73b047dbb05048854b91ae56@trace.jingrow.cloud/2", PATH="/home/jingrow/.local/bin:/home/jingrow/.nvm/versions/node/v18.17.1/bin:/home/jingrow/.pyenv/plugins/pyenv-virtualenv/shims:/home/jingrow/.pyenv/shims:/home/jingrow/.pyenv/bin::%(ENV_PATH)s" + +; graceful timeout should always be lower than stopwaitsecs to avoid orphan gunicorn workers. +[program:jingrow-bench-jingrow-web] +command=/home/jingrow/jingrow-bench/env/bin/gunicorn -b 127.0.0.1:8000 -w 32 --threads=8 -k gthread --max-requests 5000 --max-requests-jitter 500 -t 60 --graceful-timeout 30 jingrow.app:application --statsd-host=10.139.212.189:9125 --statsd-prefix=jcloud --access-logformat '%%(h)s %%(l)s %%(u)s %%(t)s "%%(r)s" %%(s)s %%(b)s "%%(f)s" "%%(a)s" %%(M)s' --access-logfile /home/jingrow/jingrow-bench/logs/gunicorn.log +priority=4 +autostart=true +autorestart=true +stdout_logfile=/home/jingrow/jingrow-bench/logs/web.log +stderr_logfile=/home/jingrow/jingrow-bench/logs/web.error.log +stopwaitsecs=40 +user=jingrow +directory=/home/jingrow/jingrow-bench/sites + + +[program:jingrow-bench-jingrow-schedule] +command=/home/jingrow/.pyenv/versions/3.10.0/bin/bench schedule +priority=3 +autostart=true +autorestart=true +stdout_logfile=/home/jingrow/jingrow-bench/logs/schedule.log +stderr_logfile=/home/jingrow/jingrow-bench/logs/schedule.error.log +user=jingrow +directory=/home/jingrow/jingrow-bench + + +[program:jingrow-bench-jingrow-default-worker] +command=bash -c "/home/jingrow/jingrow-bench/apps/jcloud/deployment/wait-for-redis.sh && /home/jingrow/.pyenv/versions/3.10.0/bin/bench worker-pool --num-workers 8 --queue default" +priority=4 +autostart=true +autorestart=true +stdout_logfile=/home/jingrow/jingrow-bench/logs/default-worker.log +stderr_logfile=/home/jingrow/jingrow-bench/logs/default-worker.error.log +user=jingrow +stopwaitsecs=360 +directory=/home/jingrow/jingrow-bench +killasgroup=true +process_name=%(program_name)s + +[program:jingrow-bench-jingrow-short-worker] +command=bash -c "/home/jingrow/jingrow-bench/apps/jcloud/deployment/wait-for-redis.sh && /home/jingrow/.pyenv/versions/3.10.0/bin/bench worker-pool --num-workers 16 --queue short,default" +priority=4 +autostart=true +autorestart=true +stdout_logfile=/home/jingrow/jingrow-bench/logs/short-worker.log +stderr_logfile=/home/jingrow/jingrow-bench/logs/short-worker.error.log +user=jingrow +stopwaitsecs=360 +directory=/home/jingrow/jingrow-bench +killasgroup=true +process_name=%(program_name)s + + +[program:jingrow-bench-jingrow-long-worker] +command=bash -c "/home/jingrow/jingrow-bench/apps/jcloud/deployment/wait-for-redis.sh && /home/jingrow/.pyenv/versions/3.10.0/bin/bench worker-pool --num-workers 16 --queue default,short,long" +priority=4 +autostart=true +autorestart=true 
+stdout_logfile=/home/jingrow/jingrow-bench/logs/long-worker.log +stderr_logfile=/home/jingrow/jingrow-bench/logs/long-worker.error.log +user=jingrow +stopwaitsecs=1560 +directory=/home/jingrow/jingrow-bench +killasgroup=true +process_name=%(program_name)s + + +[program:jingrow-bench-jingrow-sync-worker] +command=bash -c "/home/jingrow/jingrow-bench/apps/jcloud/deployment/wait-for-redis.sh && /home/jingrow/.pyenv/versions/3.10.0/bin/bench worker-pool --num-workers 6 --queue sync,long,default,short" +priority=4 +autostart=true +autorestart=true +stdout_logfile=/home/jingrow/jingrow-bench/logs/sync-worker.log +stderr_logfile=/home/jingrow/jingrow-bench/logs/sync-worker.error.log +user=jingrow +stopwaitsecs=1560 +directory=/home/jingrow/jingrow-bench +killasgroup=true +process_name=%(program_name)s + + + +# Build worker, used to run jcloud side of builds +# i.e tarring and uploading the build context. +[program:jingrow-bench-jingrow-build-worker] +command=bash -c "/home/jingrow/jingrow-bench/apps/jcloud/deployment/wait-for-redis.sh && /home/jingrow/.pyenv/versions/3.10.0/bin/bench worker-pool --num-workers 8 --queue build" +priority=4 +autostart=true +autorestart=true +stdout_logfile=/home/jingrow/jingrow-bench/logs/build-worker.log +stderr_logfile=/home/jingrow/jingrow-bench/logs/build-worker.error.log +user=jingrow +stopwaitsecs=120 +directory=/home/jingrow/jingrow-bench +killasgroup=true +process_name=%(program_name)s + + +[program:jingrow-bench-redis-cache] +command=/usr/bin/redis-server /home/jingrow/jingrow-bench/config/redis_cache.conf +priority=1 +autostart=true +autorestart=true +stdout_logfile=/home/jingrow/jingrow-bench/logs/redis-cache.log +stderr_logfile=/home/jingrow/jingrow-bench/logs/redis-cache.error.log +user=jingrow +directory=/home/jingrow/jingrow-bench/sites + + +[program:jingrow-bench-redis-queue] +command=/usr/bin/redis-server /home/jingrow/jingrow-bench/config/redis_queue.conf +priority=1 +autostart=true +autorestart=true +stdout_logfile=/home/jingrow/jingrow-bench/logs/redis-queue.log +stderr_logfile=/home/jingrow/jingrow-bench/logs/redis-queue.error.log +user=jingrow +directory=/home/jingrow/jingrow-bench/sites + + +[program:jingrow-bench-node-socketio] +command=/home/jingrow/.nvm/versions/node/v18.17.1/bin/node /home/jingrow/jingrow-bench/apps/jingrow/socketio.js +priority=4 +autostart=true +autorestart=true +stdout_logfile=/home/jingrow/jingrow-bench/logs/node-socketio.log +stderr_logfile=/home/jingrow/jingrow-bench/logs/node-socketio.error.log +user=jingrow +directory=/home/jingrow/jingrow-bench + + +[group:jingrow-bench-web] +programs=jingrow-bench-jingrow-web,jingrow-bench-node-socketio + + +[group:jingrow-bench-workers] +programs=jingrow-bench-jingrow-schedule,jingrow-bench-jingrow-short-worker,jingrow-bench-jingrow-long-worker,jingrow-bench-jingrow-default-worker + +[group:jingrow-bench-chill-workers] +programs=jingrow-bench-jingrow-build-worker,jingrow-bench-jingrow-sync-worker + + +[group:jingrow-bench-redis] +programs=jingrow-bench-redis-cache,jingrow-bench-redis-queue diff --git a/deployment/supervisord.conf b/deployment/supervisord.conf new file mode 100644 index 0000000..878a282 --- /dev/null +++ b/deployment/supervisord.conf @@ -0,0 +1,31 @@ +; supervisor config file + +[unix_http_server] +file=/var/run/supervisor.sock ; (the path to the socket file) +chmod=0700 ; sockef file mode (default 0700) + +[supervisord] +logfile=/var/log/supervisor/supervisord.log ; (main log file;default $CWD/supervisord.log) +pidfile=/var/run/supervisord.pid ; (supervisord 
pidfile;default supervisord.pid) +childlogdir=/var/log/supervisor ; ('AUTO' child log dir, default $TEMP) + +loglevel = debug ; Remove this after we've found the source of BrokenPipeError +logfile_maxbytes = 1GB ; Rotate less frequently + +; the below section must remain in the config file for RPC +; (supervisorctl/web interface) to work, additional interfaces may be +; added by defining them in separate rpcinterface: sections +[rpcinterface:supervisor] +supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface + +[supervisorctl] +serverurl=unix:///var/run/supervisor.sock ; use a unix:// URL for a unix socket + +; The [include] section can just contain the "files" setting. This +; setting can list multiple files (separated by whitespace or +; newlines). It can also contain wildcards. The filenames are +; interpreted as relative to this file. Included files *cannot* +; include files themselves. + +[include] +files = /etc/supervisor/conf.d/*.conf diff --git a/deployment/wait-for-redis.sh b/deployment/wait-for-redis.sh new file mode 100755 index 0000000..2831f24 --- /dev/null +++ b/deployment/wait-for-redis.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash + +CACHE_URL="redis://127.0.0.1:13000" +QUEUE_URL="redis://127.0.0.1:11000" + +MAX_ATTEMPTS=120 +attempts=0 + +until [ $attempts -ge $MAX_ATTEMPTS ] +do + if ( redis-cli -u $QUEUE_URL PING | grep -q PONG ) && ( redis-cli -u $CACHE_URL PING | grep -q PONG ); then + break + fi + sleep 1 + echo "Waiting for Redis to be ready..." + ((attempts=attempts+1)) +done \ No newline at end of file diff --git a/dev-requirements.txt b/dev-requirements.txt new file mode 100644 index 0000000..23a9ed4 --- /dev/null +++ b/dev-requirements.txt @@ -0,0 +1,3 @@ +pre-commit +moto[all]~=5.0 +Faker==23.2.0 diff --git a/guide-to-testing.md b/guide-to-testing.md new file mode 100644 index 0000000..decbcaf --- /dev/null +++ b/guide-to-testing.md @@ -0,0 +1,243 @@ +# Prerequisites + +Before we get into writing tests, please make sure you have pre-commit hook for +styling tools setup so CI won't fail from these + +Instructions [here](http://git.jingrow.com:3000/jingrow/jcloud/issues/424#issuecomment-1193375098) + +# Writing Tests for Jcloud + +Writing tests involve running tests locally (duh). So let's get that setup. (You'll only have to do this once) + +## Make a test site + +Tests can leave fake records. This will pollute your local setup. So, get +yourself a test site. You can get these commands from the CI workflow file too, +but I'll save you some time. You can name the site and set password to whatever. + +```sh +bench new-site --db-root-password admin --admin-password admin test_site +bench --site test_site install-app jcloud +bench --site test_site add-to-hosts # in case you wanna call APIs +bench --site test_site set-config allow_tests true +``` + +Finally, you need to start bench as some of the tests may want to trigger +background jobs, which would fail if background workers aren't there + +```sh +bench start +``` + +As you write tests you'll occasionally want to remove all test data +in your test site from time to time. So, here ya go: + +```sh +bench --site test_site reinstall --yes +``` + +## Writing tests + +This is the hard part. Because of Jcloud's dependency with outside world, it's +hard to isolate unit tests to this project. Regardless it's still possible with +plain old python's built in libraries. + +Majority of this is done with the help of python's `unittest.mock` library. 
We use this library to mock parts of code that reference things outside Jcloud's control.
+
+Eg: We can mock all Agent Job creation calls by decorating the TestCase class like so:
+
+```python
+@patch.object(AgentJob, "enqueue_http_request", new=Mock())
+class TestSite(unittest.TestCase):
+```
+
+We use the `patch.object` decorator here so that every instance of `AgentJob` will have its `enqueue_http_request` method replaced by whatever we pass in the `new` argument, which in this case is `Mock()`, which does nothing. You can think of it as a `pass`. But it has other uses, as you'll find if you keep reading.
+
+> Note: Class decorators aren't inherited, so you'll have to do this on all
+> classes for which you want to mock Agent Job http request creation
+
+## Mocking Agent Jobs End-to-end
+
+There's also a decorator you can use to fake the result of an agent job. For example, you may do it like so:
+http://git.jingrow.com:3000/jingrow/jcloud/blob/983631ccb59f88e57fd60fdad1615e9abd87d99f/jcloud/api/tests/test_site.py#L243-L247
+
+This way you can use the name of the type of job and fake a response from the same.
+
+You may also fake the output obtained from the job, which you can then use to test the callback that consumes it:
+http://git.jingrow.com:3000/jingrow/jcloud/blob/983631ccb59f88e57fd60fdad1615e9abd87d99f/jcloud/api/tests/test_site.py#L305-L323
+
+It is also possible to fake multiple jobs in the same context, for when multiple jobs are processed in the same request or job:
+
+http://git.jingrow.com:3000/jingrow/jcloud/blob/983631ccb59f88e57fd60fdad1615e9abd87d99f/jcloud/jcloud/pagetype/site_migration/test_site_migration.py#L29-L77
+
+> Note that with this, you can't fake 2 results for the same type of job. This is still a limitation. As a workaround, you can have multiple `with` statements for such cases.
+
+This is all done with the help of the [responses](https://github.com/getsentry/responses) library by intercepting the http requests for the same.
+
+> Note that you shouldn't mock `AgentJob.enqueue_http_request` when using the above decorator, as that will interfere with the request interception needed to fake the job results
+
+Now that we've learned to mock the external things, we can go about mocking internal things, which forms the basis of testing:
+
+1. Make test records
+2. Perform the operation (i.e. run the code that will run on production)
+3. Test the test records for results
+
+### Making test records
+
+Making test records is also kind of a pain as we have validations all around the code that will need to pass every time you create a pg. This is too much cognition. Therefore, we can create utility functions (with sensible defaults) to make a test record of the corresponding Pagetype in its own test file (for organization reasons). These functions will be doing the bare minimum to make a valid document of that pagetype.
+
+Eg: `create_test_bench` in `test_bench.py` can be imported and used whenever you need a valid bench (which itself has dependencies on many other pagetypes)
+
+You can also add default args to these utility functions as you come across the need. Just append them to the end so you won't have to rewrite pre-existing tests. A sketch of such a factory is shown below.
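+
+To make that concrete, here's a minimal sketch of what such a factory could look like, assuming `jingrow.get_pg` accepts a dict the way the usual document-from-dict constructor does. The pagetype, field names, and the `create_test_team` import are illustrative placeholders, not actual Jcloud helpers.
+
+```python
+# test_server.py -- illustrative sketch only
+import jingrow
+
+# hypothetical sibling factory; real test files import whatever they depend on
+from jcloud.jcloud.pagetype.team.test_team import create_test_team
+
+
+def create_test_server(title: str = "Test Server", team: str | None = None):
+    """Create the bare minimum valid Server pg for use in other tests."""
+    server = jingrow.get_pg(
+        {
+            "pagetype": "Server",
+            "title": title,
+            # sensible default: create a team on the fly if the caller doesn't pass one
+            "team": team or create_test_team().name,
+        }
+    )
+    server.insert(ignore_permissions=True)
+    return server
+```
+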
+You write a test by writing a method in the TestCase. Make the method name as long as you want. Test methods are supposed to test a specific case. When the test breaks eventually (serving its purpose), the reader should be able to tell what it was supposed to test without even having to read the code. Making the method name small is pointless; we're never going to reference this method anywhere in code, ever. Eg:
+
+http://git.jingrow.com:3000/jingrow/jcloud/blob/2503e523284fb905eca60acf3271d3fb1dccbc3f/jcloud/jcloud/pagetype/site/test_site.py#L215-L228
+
+You can also go the extra mile and write a function docstring. This docstring will be shown in the output when the testrunner detects that the test has failed.
+
+### Rerunnability
+
+Not a real word, but I like to be able to re-run my tests without having to nuke the database. Leaving the database in an "empty state" after every test is a very easy way to achieve this. This also makes testing for things like count of docs super easy. Lucky for us, there's a method in `TestCase` that's run after every individual test in the class. It's called `tearDown`.
+
+We can easily do
+
+```python
+def tearDown(self):
+    jingrow.db.rollback()
+```
+
+And every pg you create (in the foreground at least) will not be committed into the database.
+
+> Note: If the code you're testing calls jingrow.db.commit, be sure to mock it
+> cuz otherwise docs will get committed till that point regardless.
+
+You can mock certain lines while testing a piece of code with the `patch` decorator too. Eg:
+
+```python
+from unittest.mock import MagicMock, patch
+
+# this will mock all the jingrow.db.commit calls in server.py while in this test suite
+@patch("jcloud.jcloud.pagetype.server.server.jingrow.db.commit", new=MagicMock)
+class TestBench(unittest.TestCase):
+```
+
+You can use the `patch` decorator on individual test methods too. Eg:
+
+http://git.jingrow.com:3000/jingrow/jcloud/blob/6dd6b2c8193b04f1aec1601d52ba09ce9dca8dfe/jcloud/tests/test_cleanup.py#L280-L290
+
+The decorator passes the mocked function (which is a `Mock()` object) along as an argument, so you can later do asserts on it (if you want to).
+
+You can even use the decorator as a context manager if you don't want to mock things for the entirety of the test.
+
+http://git.jingrow.com:3000/jingrow/jcloud/blob/6dd6b2c8193b04f1aec1601d52ba09ce9dca8dfe/jcloud/tests/test_audit.py#L97-L102
+
+Here, by passing the `new` argument, we're faking the output of a function that usually calls a remote endpoint that's out of our control.
+
+> Note: When you use asserts on a Mock object, Document comparisons will mostly
+> work as expected as we're overriding `__eq__` of the Document class during
+> tests (check before_test.py). This is because by default, when 2 Document
+> objects are compared, only their `id()` is checked, which will return False
+> as the objects will be different in memory.
+
+> Note: If you need to mock some Callable while preserving its behaviour (in
+> case you want to do asserts on it), you can use the `wraps` kwarg instead of
+> `new`. Eg:
+
+http://git.jingrow.com:3000/jingrow/jcloud/blob/23711e2799f2d24dfd7bbe2b6cd148f54f4b253b/jcloud/jcloud/pagetype/database_server_mariadb_variable/test_database_server_mariadb_variable.py#L138-L155
+
+Here, we check what args the Ansible constructor was called with.
+
+That's pretty much all you need to write safe, rerunnable tests for Jcloud. You can check out https://docs.python.org/3/library/unittest.mock.html for more things you can do with the standard python libraries. If your editor and plugins are set up and configured nicely, you can even do TDD with ease.
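+
+Pulling those pieces together, a test case skeleton might look like the sketch below. `create_test_site`, the patched module path, and the `"Inactive"` status value are illustrative assumptions; only the `AgentJob` patching mirrors the pattern shown above.
+
+```python
+import unittest
+from unittest.mock import Mock, patch
+
+import jingrow
+
+from jcloud.jcloud.pagetype.agent_job.agent_job import AgentJob
+from jcloud.jcloud.pagetype.site.test_site import create_test_site  # assumed factory helper
+
+
+# no real HTTP requests reach the agent while these tests run
+@patch.object(AgentJob, "enqueue_http_request", new=Mock())
+class TestSiteDeactivate(unittest.TestCase):
+    def tearDown(self):
+        # leave the database empty so the suite stays rerunnable
+        jingrow.db.rollback()
+
+    def test_deactivate_marks_site_inactive(self):
+        site = create_test_site()
+
+        # patch as a context manager: jingrow.db.commit is only faked inside this block
+        with patch("jcloud.jcloud.pagetype.site.site.jingrow.db.commit", new=Mock()):
+            site.deactivate()
+
+        self.assertEqual(site.status, "Inactive")
+```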
+ +> Protip: When you have test records you want across a TestCase, then you can +> simply use the create the test record in `setUp` method of the same. The test +> records can be assigned to member variables. Eg: + +```python +def setUp(self): + self.team = create_test_team() +``` + +### Background jobs + +Since background jobs are forked off of a different process, our mocks and +patches are not going to hold there. Not only that, but we can't +control/predict when the background job will run and finish. So, when your code +involves creating a background job, we can simply mock the call so that it runs +in foreground instead. There's a utility method you can use to achieve this with ease: + +http://git.jingrow.com:3000/jingrow/jcloud/blob/23711e2799f2d24dfd7bbe2b6cd148f54f4b253b/jcloud/jcloud/pagetype/database_server_mariadb_variable/test_database_server_mariadb_variable.py#L12 + +http://git.jingrow.com:3000/jingrow/jcloud/blob/23711e2799f2d24dfd7bbe2b6cd148f54f4b253b/jcloud/jcloud/pagetype/database_server_mariadb_variable/test_database_server_mariadb_variable.py#L104-L108 + +## Running tests + +You can run all of the tests with the following command. + +```sh +bench --site test_site run-tests --app jcloud +``` + +But you'll never have to. That's what CI is for. Instead, you'll mostly want to use: + +```sh +bench --site test_site run-tests --app jcloud --module jcloud.jcloud.pagetype.some_pagetype.test_some_pagetype +``` + +This is because while writing bugs, your changes will mostly affect that one +module only and since we don't have many tests to begin with, it won't take +very long to run a module's test by itself anyway. Give your eyes a break while this happens. + +You can also run individual test with: + +```sh +bench --site test_site run-tests --module jcloud.jcloud.pagetype.some_pagetype.test_some_pagetype --test test_very_specific_thing +``` + +You most likely won't enjoy running commands manually like this. So you'd want +to check out [this vim plugin](https://github.com/ankush/jingrow_test.vim/) or +[this vscode plugin](https://marketplace.visualstudio.com/items?itemName=AnkushMenat.jingrow-test-runner) + +> Note: jingrow_test plugin doesn't populate vim's quickfix list yet. Though +> Jingrow's test runner output isn't very pyunit errorformat friendly, you can +> still make it work with a [custom errorformat](https://github.com/balamurali27/dotfiles/blob/85dc18a/.config/nvim/after/plugin/jingrow.vim#LL10C1-L10C128) and some hacks to [set makeprg](https://github.com/balamurali27/dotfiles/blob/0bcd6270770d0b67b63fc0ea308e6834fefda5a6/.config/nvim/init.vim#L150C7-L163) + +# References + +- https://framework.jingrow.com/docs/v14/user/en/testing +- https://docs.python.org/3/library/unittest.mock.html +- https://learnvim.irian.to/basics/compile diff --git a/jcloud-semgrep-rules.yml b/jcloud-semgrep-rules.yml new file mode 100644 index 0000000..67bbc5e --- /dev/null +++ b/jcloud-semgrep-rules.yml @@ -0,0 +1,171 @@ +rules: + - id: possible-mutable-default-args + pattern-either: + - pattern: | + def $FUNC(..., $ARG = $FUNC2(...), ...): + ... + - pattern: | + def $FUNC(..., $ARG = $FUNC2(...).$ATTR, ...): + ... + - pattern: | + def $FUNC(..., $ARG = jingrow.$ATTR, ...): + ... + + message: | + `$ARG` is possibly a mutable default argument. May not work as expected during subsequent calls of `$FUNC` without $ARG. 
+ languages: + - python + severity: WARNING + metadata: + category: correctness + technology: + - python + references: + - https://docs.python-guide.org/writing/gotchas/#mutable-default-arguments + + - id: except-with-db-code + languages: + - python + patterns: + - pattern-inside: | + try: + ... + except ...: + $ERR_HANDL_BLK + - pattern-either: + - pattern: | + try: + ... + except ...: + ... + $DOC.save(...) + ... + raise + ... + - pattern: | + try: + ... + except ...: + ... + jingrow. ... .set_value(...) + ... + raise + ... + - pattern: | + try: + ... + except ...: + ... + $DOC.db_set(...) + ... + raise + ... + - pattern-not: | + try: + ... + except ...: + ... + $DOC.save(...) + ... + jingrow.db.commit(...) + raise + ... + - pattern-not: | + try: + ... + except ...: + ... + jingrow. ... .set_value(...) + ... + jingrow.db.commit(...) + raise + ... + - pattern-not: | + try: + ... + except ...: + ... + $DOC.db_set(...) + ... + jingrow.db.commit(...) + ... + raise + ... + - focus-metavariable: $ERR_HANDL_BLK + + message: except block has no db commit before raise. The db changes made won't persist assuming innodb tables. + severity: ERROR + + - id: retries-without-until + languages: + - yaml + patterns: + - pattern: | + ... + retries: $RETRIES + delay: $DELAY + ... + + - pattern-not: | + ... + retries: $RETRIES + delay: $DELAY + until: $UNTIL + ... + + paths: + include: + - 'jcloud/playbooks/**/*.yml' + message: retry block doesn't have until condition. Only works with ansible 2.16 and above. + severity: ERROR + metadata: + category: correctness + references: + - https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_loops.html#retrying-a-task-until-a-condition-is-met + - https://docs.ansible.com/ansible/latest/reference_appendices/release_and_maintenance.html#ansible-community-changelogs + + - id: nginx-update-called-in-loop + languages: + - python + patterns: + - pattern-inside: | + for $VAR in $LIST: + ... + - pattern-either: + - pattern: Site(...).unsuspend(...) + - pattern: Site(...).suspend(...) + - pattern: Site(...).activate(...) + - pattern: Site(...).deactivate(...) + - pattern: $OBJ.get_pg("Site", ...).unsuspend(...) + - pattern: $OBJ.get_pg("Site", ...).suspend(...) + - pattern: $OBJ.get_pg("Site", ...).activate(...) + - pattern: $OBJ.get_pg("Site", ...).deactivate(...) + - pattern: $OBJ.get_last_pg("Site", ...).unsuspend(...) + - pattern: $OBJ.get_last_pg("Site", ...).suspend(...) + - pattern: $OBJ.get_last_pg("Site", ...).activate(...) + - pattern: $OBJ.get_last_pg("Site", ...).deactivate(...) + - pattern: $OBJ.update_site_status_on_proxy(...) + - pattern: $OBJ.update_site_status(...) + - pattern: deactivate_site_on_source_proxy(...) + - pattern: activate_site_on_destination_proxy(...) + + - pattern-not: Site(...).unsuspend(..., skip_reload=True, ...) + - pattern-not: Site(...).suspend(..., skip_reload=True, ...) + - pattern-not: Site(...).activate(..., skip_reload=True, ...) + - pattern-not: Site(...).deactivate(..., skip_reload=True, ...) + - pattern-not: $OBJ.get_pg("Site", ...).unsuspend(..., skip_reload=True, ...) + - pattern-not: $OBJ.get_pg("Site", ...).suspend(..., skip_reload=True, ...) + - pattern-not: $OBJ.get_pg("Site", ...).activate(..., skip_reload=True, ...) + - pattern-not: $OBJ.get_pg("Site", ...).deactivate(..., skip_reload=True, ...) + - pattern-not: $OBJ.get_last_pg("Site", ...).unsuspend(..., skip_reload=True, ...) + - pattern-not: $OBJ.get_last_pg("Site", ...).suspend(..., skip_reload=True, ...) 
+ - pattern-not: $OBJ.get_last_pg("Site", ...).activate(..., skip_reload=True, ...) + - pattern-not: $OBJ.get_last_pg("Site", ...).deactivate(..., skip_reload=True, ...) + - pattern-not: $OBJ.update_site_status_on_proxy(..., skip_reload=True, ...) + - pattern-not: $OBJ.update_site_status(..., skip_reload=True, ...) + + message: Agent endpoint that updates nginx is called in a loop. This causes nginx to reload configuration multiple times which takes proxy down. + severity: ERROR + metadata: + references: + - https://www.f5.com/ko_kr/company/blog/nginx/using-nginx-plus-to-reduce-the-frequency-of-configuration-reloads diff --git a/jcloud/__init__.py b/jcloud/__init__.py new file mode 100644 index 0000000..07ccbc7 --- /dev/null +++ b/jcloud/__init__.py @@ -0,0 +1,4 @@ +# -*- coding: utf-8 -*- + + +__version__ = "0.7.0" diff --git a/jcloud/agent.py b/jcloud/agent.py new file mode 100644 index 0000000..a43eddd --- /dev/null +++ b/jcloud/agent.py @@ -0,0 +1,1333 @@ +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt +from __future__ import annotations + +import _io # type: ignore +import json +import os +from contextlib import suppress +from datetime import date +from typing import TYPE_CHECKING + +import jingrow +import requests +from jingrow.utils.password import get_decrypted_password +from requests.exceptions import HTTPError + +from jcloud.utils import get_mariadb_root_password, log_error, sanitize_config + +if TYPE_CHECKING: + from io import BufferedReader + + from jcloud.jcloud.pagetype.agent_job.agent_job import AgentJob + from jcloud.jcloud.pagetype.app_patch.app_patch import AgentPatchConfig, AppPatch + from jcloud.jcloud.pagetype.physical_backup_restoration.physical_backup_restoration import ( + PhysicalBackupRestoration, + ) + from jcloud.jcloud.pagetype.site.site import Site + from jcloud.jcloud.pagetype.site_backup.site_backup import SiteBackup + + +class Agent: + if TYPE_CHECKING: + from typing import Optional + + from requests import Response + + response: Response | None + + def __init__(self, server, server_type="Server"): + self.server_type = server_type + self.server = server + self.port = 443 + + def new_bench(self, bench): + settings = jingrow.db.get_value( + "Jcloud Settings", + None, + ["docker_registry_url", "docker_registry_username", "docker_registry_password"], + as_dict=True, + ) + + data = { + "name": bench.name, + "bench_config": json.loads(bench.bench_config), + "common_site_config": json.loads(bench.config), + "registry": { + "url": settings.docker_registry_url, + "username": settings.docker_registry_username, + "password": settings.docker_registry_password, + }, + } + + if bench.mounts: + data["mounts"] = [ + { + "source": m.source, + "destination": m.destination, + "is_absolute_path": m.is_absolute_path, + } + for m in bench.mounts + ] + + return self.create_agent_job("New Bench", "benches", data, bench=bench.name) + + def archive_bench(self, bench): + return self.create_agent_job("Archive Bench", f"benches/{bench.name}/archive", bench=bench.name) + + def restart_bench(self, bench, web_only=False): + return self.create_agent_job( + "Bench Restart", + f"benches/{bench.name}/restart", + data={"web_only": web_only}, + bench=bench.name, + ) + + def rebuild_bench(self, bench): + return self.create_agent_job( + "Rebuild Bench Assets", + f"benches/{bench.name}/rebuild", + bench=bench.name, + ) + + def update_bench_config(self, bench): + data = { + "bench_config": json.loads(bench.bench_config), + "common_site_config": 
json.loads(bench.config), + } + return self.create_agent_job( + "Update Bench Configuration", f"benches/{bench.name}/config", data, bench=bench.name + ) + + def _get_managed_db_config(self, site): + managed_database_service = jingrow.get_cached_value("Bench", site.bench, "managed_database_service") + + if not managed_database_service: + return {} + + return jingrow.get_cached_value( + "Managed Database Service", + managed_database_service, + ["database_host", "database_root_user", "port"], + as_dict=True, + ) + + def new_site(self, site, create_user: dict | None = None): + apps = [app.app for app in site.apps] + + data = { + "config": json.loads(site.config), + "apps": apps, + "name": site.name, + "mariadb_root_password": get_mariadb_root_password(site), + "admin_password": site.get_password("admin_password"), + "managed_database_config": self._get_managed_db_config(site), + } + + if create_user: + data["create_user"] = create_user + + return self.create_agent_job( + "New Site", f"benches/{site.bench}/sites", data, bench=site.bench, site=site.name + ) + + def reinstall_site(self, site): + data = { + "mariadb_root_password": get_mariadb_root_password(site), + "admin_password": site.get_password("admin_password"), + "managed_database_config": self._get_managed_db_config(site), + } + + return self.create_agent_job( + "Reinstall Site", + f"benches/{site.bench}/sites/{site.name}/reinstall", + data, + bench=site.bench, + site=site.name, + ) + + def restore_site(self, site: "Site", skip_failing_patches=False): + site.check_enough_space_on_server() + apps = [app.app for app in site.apps] + public_link, private_link = None, None + if site.remote_public_file: + public_link = jingrow.get_pg("Remote File", site.remote_public_file).download_link + if site.remote_private_file: + private_link = jingrow.get_pg("Remote File", site.remote_private_file).download_link + + data = { + "apps": apps, + "mariadb_root_password": get_mariadb_root_password(site), + "admin_password": site.get_password("admin_password"), + "database": jingrow.get_pg("Remote File", site.remote_database_file).download_link, + "public": public_link, + "private": private_link, + "skip_failing_patches": skip_failing_patches, + "managed_database_config": self._get_managed_db_config(site), + } + + return self.create_agent_job( + "Restore Site", + f"benches/{site.bench}/sites/{site.name}/restore", + data, + bench=site.bench, + site=site.name, + ) + + def rename_site(self, site, new_name: str, create_user: dict | None = None, config: dict | None = None): + data = {"new_name": new_name} + if create_user: + data["create_user"] = create_user + if config: + data["config"] = config + return self.create_agent_job( + "Rename Site", + f"benches/{site.bench}/sites/{site.name}/rename", + data, + bench=site.bench, + site=site.name, + ) + + def create_user(self, site, email, first_name, last_name, password=None): + data = { + "email": email, + "first_name": first_name, + "last_name": last_name, + "password": password, + } + return self.create_agent_job( + "Create User", + f"benches/{site.bench}/sites/{site.name}/create-user", + data, + bench=site.bench, + site=site.name, + ) + + def complete_setup_wizard(self, site, data): + return self.create_agent_job( + "Complete Setup Wizard", + f"benches/{site.bench}/sites/{site.name}/complete-setup-wizard", + data, + bench=site.bench, + site=site.name, + ) + + def optimize_tables(self, site): + return self.create_agent_job( + "Optimize Tables", + f"benches/{site.bench}/sites/{site.name}/optimize", + bench=site.bench, 
+ site=site.name, + ) + + def rename_upstream_site(self, server: str, site, new_name: str, domains: list[str]): + _server = jingrow.get_pg("Server", server) + ip = _server.ip if _server.is_self_hosted else _server.private_ip + data = {"new_name": new_name, "domains": domains} + return self.create_agent_job( + "Rename Site on Upstream", + f"proxy/upstreams/{ip}/sites/{site.name}/rename", + data, + site=site.name, + ) + + def new_site_from_backup(self, site: "Site", skip_failing_patches=False): + site.check_enough_space_on_server() + apps = [app.app for app in site.apps] + + def sanitized_site_config(site): + sanitized_config = {} + if site.remote_config_file: + from jcloud.jcloud.pagetype.site_activity.site_activity import log_site_activity + + site_config = jingrow.get_pg("Remote File", site.remote_config_file) + new_config = site_config.get_content() + new_config["maintenance_mode"] = 0 # Don't allow deactivated sites to be created + sanitized_config = sanitize_config(new_config) + existing_config = json.loads(site.config) + existing_config.update(sanitized_config) + site._update_configuration(existing_config) + log_site_activity(site.name, "Update Configuration") + + return json.dumps(sanitized_config) + + public_link, private_link = None, None + + if site.remote_public_file: + public_link = jingrow.get_pg("Remote File", site.remote_public_file).download_link + if site.remote_private_file: + private_link = jingrow.get_pg("Remote File", site.remote_private_file).download_link + + data = { + "config": json.loads(site.config), + "apps": apps, + "name": site.name, + "mariadb_root_password": get_mariadb_root_password(site), + "admin_password": site.get_password("admin_password"), + "site_config": sanitized_site_config(site), + "database": jingrow.get_pg("Remote File", site.remote_database_file).download_link, + "public": public_link, + "private": private_link, + "skip_failing_patches": skip_failing_patches, + "managed_database_config": self._get_managed_db_config(site), + } + + return self.create_agent_job( + "New Site from Backup", + f"benches/{site.bench}/sites/restore", + data, + bench=site.bench, + site=site.name, + ) + + def install_app_site(self, site, app): + data = {"name": app} + return self.create_agent_job( + "Install App on Site", + f"benches/{site.bench}/sites/{site.name}/apps", + data, + bench=site.bench, + site=site.name, + ) + + def uninstall_app_site(self, site, app): + return self.create_agent_job( + "Uninstall App from Site", + f"benches/{site.bench}/sites/{site.name}/apps/{app}", + method="DELETE", + bench=site.bench, + site=site.name, + ) + + def setup_jerp(self, site, user, config): + data = {"user": user, "config": config} + return self.create_agent_job( + "Setup JERP", + f"benches/{site.bench}/sites/{site.name}/jerp", + data, + bench=site.bench, + site=site.name, + ) + + def migrate_site(self, site, skip_failing_patches=False, activate=True): + data = {"skip_failing_patches": skip_failing_patches, "activate": activate} + return self.create_agent_job( + "Migrate Site", + f"benches/{site.bench}/sites/{site.name}/migrate", + bench=site.bench, + site=site.name, + data=data, + ) + + def clear_site_cache(self, site): + return self.create_agent_job( + "Clear Cache", + f"benches/{site.bench}/sites/{site.name}/cache", + method="DELETE", + bench=site.bench, + site=site.name, + ) + + def activate_site(self, site, reference_pagetype=None, reference_name=None): + return self.create_agent_job( + "Activate Site", + f"benches/{site.bench}/sites/{site.name}/activate", + 
bench=site.bench, + site=site.name, + reference_pagetype=reference_pagetype, + reference_name=reference_name, + ) + + def deactivate_site(self, site, reference_pagetype=None, reference_name=None): + return self.create_agent_job( + "Deactivate Site", + f"benches/{site.bench}/sites/{site.name}/deactivate", + bench=site.bench, + site=site.name, + reference_pagetype=reference_pagetype, + reference_name=reference_name, + ) + + def update_site( + self, + site, + target, + deploy_type, + skip_failing_patches=False, + skip_backups=False, + before_migrate_scripts=None, + skip_search_index=True, + ): + activate = site.status_before_update in ("Active", "Broken") + data = { + "target": target, + "activate": activate, + "skip_failing_patches": skip_failing_patches, + "skip_backups": skip_backups, + "before_migrate_scripts": before_migrate_scripts, + "skip_search_index": skip_search_index, + } + return self.create_agent_job( + f"Update Site {deploy_type}", + f"benches/{site.bench}/sites/{site.name}/update/{deploy_type.lower()}", + data, + bench=site.bench, + site=site.name, + ) + + def restore_site_tables(self, site): + activate = site.status_before_update == "Active" + data = {"activate": activate} + return self.create_agent_job( + "Restore Site Tables", + f"benches/{site.bench}/sites/{site.name}/update/migrate/restore", + data, + bench=site.bench, + site=site.name, + ) + + def update_site_recover_move( + self, + site, + target, + deploy_type, + activate, + rollback_scripts=None, + restore_touched_tables=True, + ): + data = { + "target": target, + "activate": activate, + "rollback_scripts": rollback_scripts, + "restore_touched_tables": restore_touched_tables, + } + return self.create_agent_job( + f"Recover Failed Site {deploy_type}", + f"benches/{site.bench}/sites/{site.name}/update/{deploy_type.lower()}/recover", + data, + bench=site.bench, + site=site.name, + ) + + def update_site_recover(self, site): + return self.create_agent_job( + "Recover Failed Site Update", + f"benches/{site.bench}/sites/{site.name}/update/recover", + bench=site.bench, + site=site.name, + ) + + def update_site_config(self, site): + data = { + "config": json.loads(site.config), + "remove": json.loads(site._keys_removed_in_last_update), + } + return self.create_agent_job( + "Update Site Configuration", + f"benches/{site.bench}/sites/{site.name}/config", + data, + bench=site.bench, + site=site.name, + ) + + def reset_site_usage(self, site): + return self.create_agent_job( + "Reset Site Usage", + f"benches/{site.bench}/sites/{site.name}/usage", + method="DELETE", + bench=site.bench, + site=site.name, + ) + + def archive_site(self, site, site_name=None, force=False): + site_name = site_name or site.name + database_server = jingrow.db.get_value("Bench", site.bench, "database_server") + data = { + "mariadb_root_password": get_decrypted_password( + "Database Server", database_server, "mariadb_root_password" + ), + "force": force, + } + + return self.create_agent_job( + "Archive Site", + f"benches/{site.bench}/sites/{site_name}/archive", + data, + bench=site.bench, + site=site.name, + ) + + def physical_backup_database(self, site: Site, site_backup: SiteBackup): + """ + For physical database backup, the flow : + - Create the agent job + - Agent job will lock the specific database + flush the changes to disk + - Take a database dump + - Use `fsync` to ensure the changes are written to disk + - Agent will send back a request to FC for taking the snapshot + - By calling `snapshot_create_callback` url + - Then, unlock the database + """ + 
jcloud_public_base_url = jingrow.utils.get_url() + data = { + "databases": [site_backup.database_name], + "mariadb_root_password": get_mariadb_root_password(site), + "private_ip": jingrow.get_value( + "Database Server", jingrow.db.get_value("Server", site.server, "database_server"), "private_ip" + ), + "site_backup": { + "name": site_backup.name, + "snapshot_request_key": site_backup.snapshot_request_key, + "snapshot_trigger_url": f"{jcloud_public_base_url}/api/method/jcloud.api.site_backup.create_snapshot", + }, + } + return self.create_agent_job( + "Physical Backup Database", + "/database/physical-backup", + data=data, + bench=site.bench, + site=site.name, + ) + + def physical_restore_database(self, site, backup_restoration: PhysicalBackupRestoration): + data = { + "backup_db": backup_restoration.source_database, + "target_db": backup_restoration.destination_database, + "target_db_root_password": get_mariadb_root_password(site), + "private_ip": jingrow.get_value( + "Database Server", jingrow.db.get_value("Server", site.server, "database_server"), "private_ip" + ), + "backup_db_base_directory": os.path.join(backup_restoration.mount_point, "var/lib/mysql"), + "restore_specific_tables": backup_restoration.restore_specific_tables, + "tables_to_restore": json.loads(backup_restoration.tables_to_restore), + } + return self.create_agent_job( + "Physical Restore Database", + "/database/physical-restore", + data=data, + bench=site.bench, + site=site.name, + reference_name=backup_restoration.name, + reference_pagetype=backup_restoration.pagetype, + ) + + def backup_site(self, site, site_backup: SiteBackup): + from jcloud.jcloud.pagetype.site_backup.site_backup import get_backup_bucket + + data = {"with_files": site_backup.with_files} + + if site_backup.offsite: + settings = jingrow.get_single("Jcloud Settings") + backups_path = os.path.join(site.name, str(date.today())) + backup_bucket = get_backup_bucket(site.cluster, region=True) + bucket_name = backup_bucket.get("name") if isinstance(backup_bucket, dict) else backup_bucket + if settings.aws_s3_bucket or bucket_name: + auth = { + "ACCESS_KEY": settings.offsite_backups_access_key_id, + "SECRET_KEY": settings.get_password("offsite_backups_secret_access_key"), + "REGION": backup_bucket.get("region") if isinstance(backup_bucket, dict) else "", + } + data.update({"offsite": {"bucket": bucket_name, "auth": auth, "path": backups_path}}) + + else: + log_error("Offsite Backups aren't set yet") + + return self.create_agent_job( + "Backup Site", + f"benches/{site.bench}/sites/{site.name}/backup", + data=data, + bench=site.bench, + site=site.name, + ) + + def add_domain(self, site, domain): + data = { + "domain": domain, + } + return self.create_agent_job( + "Add Domain", + f"benches/{site.bench}/sites/{site.name}/domains", + data, + bench=site.bench, + site=site.name, + ) + + def remove_domain(self, site, domain): + return self.create_agent_job( + "Remove Domain", + f"benches/{site.bench}/sites/{site.name}/domains/{domain}", + method="DELETE", + site=site.name, + bench=site.bench, + ) + + def new_host(self, domain, skip_reload=False): + certificate = jingrow.get_pg("TLS Certificate", domain.tls_certificate) + data = { + "name": domain.domain, + "target": domain.site, + "certificate": { + "privkey.pem": certificate.private_key, + "fullchain.pem": certificate.full_chain, + "chain.pem": certificate.intermediate_chain, + }, + "skip_reload": skip_reload, + } + return self.create_agent_job( + "Add Host to Proxy", "proxy/hosts", data, host=domain.domain, 
site=domain.site + ) + + def setup_wildcard_hosts(self, wildcards): + return self.create_agent_job("Add Wildcard Hosts to Proxy", "proxy/wildcards", wildcards) + + def setup_redirects(self, site: str, domains: list[str], target: str): + data = {"domains": domains, "target": target} + return self.create_agent_job("Setup Redirects on Hosts", "proxy/hosts/redirects", data, site=site) + + def remove_redirects(self, site: str, domains: list[str]): + data = {"domains": domains} + return self.create_agent_job( + "Remove Redirects on Hosts", + "proxy/hosts/redirects", + data, + method="DELETE", + site=site, + ) + + def remove_host(self, domain): + return self.create_agent_job( + "Remove Host from Proxy", + f"proxy/hosts/{domain.domain}", + method="DELETE", + site=domain.site, + ) + + def new_server(self, server): + _server = jingrow.get_pg("Server", server) + ip = _server.ip if _server.is_self_hosted else _server.private_ip + data = {"name": ip} + return self.create_agent_job("Add Upstream to Proxy", "proxy/upstreams", data, upstream=server) + + def update_upstream_private_ip(self, server): + ip, private_ip = jingrow.db.get_value("Server", server, ["ip", "private_ip"]) + data = {"name": private_ip} + return self.create_agent_job("Rename Upstream", f"proxy/upstreams/{ip}/rename", data, upstream=server) + + def new_upstream_file(self, server, site=None, code_server=None): + _server = jingrow.get_pg("Server", server) + ip = _server.ip if _server.is_self_hosted else _server.private_ip + data = {"name": site if site else code_server} + pagetype = "Site" if site else "Code Server" + return self.create_agent_job( + f"Add {pagetype} to Upstream", + f"proxy/upstreams/{ip}/sites", + data, + site=site, + code_server=code_server, + upstream=server, + ) + + def add_domain_to_upstream(self, server, site=None, domain=None): + _server = jingrow.get_pg("Server", server) + ip = _server.ip if _server.is_self_hosted else _server.private_ip + data = {"domain": domain} + return self.create_agent_job( + "Add Domain to Upstream", + f"proxy/upstreams/{ip}/domains", + data, + site=site, + upstream=server, + ) + + def remove_upstream_file(self, server, site=None, site_name=None, code_server=None, skip_reload=False): + _server = jingrow.get_pg("Server", server) + ip = _server.ip if _server.is_self_hosted else _server.private_ip + pagetype = "Site" if site else "Code Server" + file_name = site_name or site if (site or site_name) else code_server + data = {"skip_reload": skip_reload} + return self.create_agent_job( + f"Remove {pagetype} from Upstream", + f"proxy/upstreams/{ip}/sites/{file_name}", + method="DELETE", + site=site, + code_server=code_server, + upstream=server, + data=data, + ) + + def setup_code_server(self, bench, name, password): + data = {"name": name, "password": password} + return self.create_agent_job( + "Setup Code Server", f"benches/{bench}/codeserver", data, code_server=name + ) + + def start_code_server(self, bench, name, password): + data = {"password": password} + return self.create_agent_job( + "Start Code Server", + f"benches/{bench}/codeserver/start", + data, + code_server=name, + ) + + def stop_code_server(self, bench, name): + return self.create_agent_job( + "Stop Code Server", + f"benches/{bench}/codeserver/stop", + code_server=name, + ) + + def archive_code_server(self, bench, name): + return self.create_agent_job( + "Archive Code Server", + f"benches/{bench}/codeserver/archive", + method="POST", + code_server=name, + ) + + def add_ssh_user(self, bench): + private_ip = 
jingrow.db.get_value("Server", bench.server, "private_ip") + candidate = jingrow.get_pg("Deploy Candidate", bench.candidate) + data = { + "name": bench.name, + "principal": bench.group, + "ssh": {"ip": private_ip, "port": 22000 + bench.port_offset}, + "certificate": candidate.get_certificate(), + } + return self.create_agent_job( + "Add User to Proxy", "ssh/users", data, bench=bench.name, upstream=bench.server + ) + + def remove_ssh_user(self, bench): + return self.create_agent_job( + "Remove User from Proxy", + f"ssh/users/{bench.name}", + method="DELETE", + bench=bench.name, + upstream=bench.server, + ) + + def add_proxysql_user( + self, + site, + database: str, + username: str, + password: str, + max_connections: int, + database_server, + reference_pagetype=None, + reference_name=None, + ): + data = { + "username": username, + "password": password, + "database": database, + "max_connections": max_connections, + "backend": {"ip": database_server.private_ip, "id": database_server.server_id}, + } + return self.create_agent_job( + "Add User to ProxySQL", + "proxysql/users", + data, + site=site.name, + reference_name=reference_name, + reference_pagetype=reference_pagetype, + ) + + def add_proxysql_backend(self, database_server): + data = { + "backend": {"ip": database_server.private_ip, "id": database_server.server_id}, + } + return self.create_agent_job("Add Backend to ProxySQL", "proxysql/backends", data) + + def remove_proxysql_user(self, site, username, reference_pagetype=None, reference_name=None): + return self.create_agent_job( + "Remove User from ProxySQL", + f"proxysql/users/{username}", + method="DELETE", + site=site.name, + reference_pagetype=reference_pagetype, + reference_name=reference_name, + ) + + def create_database_access_credentials(self, site, mode): + database_server = jingrow.db.get_value("Bench", site.bench, "database_server") + data = { + "mode": mode, + "mariadb_root_password": get_decrypted_password( + "Database Server", database_server, "mariadb_root_password" + ), + } + return self.post(f"benches/{site.bench}/sites/{site.name}/credentials", data=data) + + def revoke_database_access_credentials(self, site): + database_server = jingrow.db.get_value("Bench", site.bench, "database_server") + data = { + "user": site.database_access_user, + "mariadb_root_password": get_decrypted_password( + "Database Server", database_server, "mariadb_root_password" + ), + } + return self.post(f"benches/{site.bench}/sites/{site.name}/credentials/revoke", data=data) + + def create_database_user(self, site, username, password, reference_name): + database_server = jingrow.db.get_value("Bench", site.bench, "database_server") + data = { + "username": username, + "password": password, + "mariadb_root_password": get_decrypted_password( + "Database Server", database_server, "mariadb_root_password" + ), + } + return self.create_agent_job( + "Create Database User", + f"benches/{site.bench}/sites/{site.name}/database/users", + data, + site=site.name, + reference_pagetype="Site Database User", + reference_name=reference_name, + ) + + def remove_database_user(self, site, username, reference_name): + database_server = jingrow.db.get_value("Bench", site.bench, "database_server") + data = { + "mariadb_root_password": get_decrypted_password( + "Database Server", database_server, "mariadb_root_password" + ) + } + return self.create_agent_job( + "Remove Database User", + f"benches/{site.bench}/sites/{site.name}/database/users/{username}", + method="DELETE", + data=data, + site=site.name, + 
reference_pagetype="Site Database User", + reference_name=reference_name, + ) + + def modify_database_user_permissions(self, site, username, mode, permissions: dict, reference_name): + database_server = jingrow.db.get_value("Bench", site.bench, "database_server") + data = { + "mode": mode, + "permissions": permissions, + "mariadb_root_password": get_decrypted_password( + "Database Server", database_server, "mariadb_root_password" + ), + } + return self.create_agent_job( + "Modify Database User Permissions", + f"benches/{site.bench}/sites/{site.name}/database/users/{username}/permissions", + method="POST", + data=data, + site=site.name, + reference_pagetype="Site Database User", + reference_name=reference_name, + ) + + def update_site_status(self, server, site, status, skip_reload=False): + data = {"status": status, "skip_reload": skip_reload} + _server = jingrow.get_pg("Server", server) + ip = _server.ip if _server.is_self_hosted else _server.private_ip + return self.create_agent_job( + "Update Site Status", + f"proxy/upstreams/{ip}/sites/{site}/status", + data=data, + site=site, + upstream=server, + ) + + def reload_nginx(self): + return self.create_agent_job("Reload NGINX Job", "proxy/reload") + + def cleanup_unused_files(self): + return self.create_agent_job("Cleanup Unused Files", "server/cleanup", {}) + + def get(self, path, raises=True): + return self.request("GET", path, raises=raises) + + def post(self, path, data=None, raises=True): + return self.request("POST", path, data, raises=raises) + + def _make_req(self, method, path, data, files, agent_job_id): + password = get_decrypted_password(self.server_type, self.server, "agent_password") + headers = {"Authorization": f"bearer {password}", "X-Agent-Job-Id": agent_job_id} + url = f"https://{self.server}:{self.port}/agent/{path}" + intermediate_ca = jingrow.db.get_value("Jcloud Settings", "Jcloud Settings", "backbone_intermediate_ca") + if jingrow.conf.developer_mode and intermediate_ca: + root_ca = jingrow.db.get_value("Certificate Authority", intermediate_ca, "parent_authority") + verify = jingrow.get_pg("Certificate Authority", root_ca).certificate_file + else: + verify = True + if files: + file_objects = { + key: value + if isinstance(value, _io.BufferedReader) + else jingrow.get_pg("File", {"file_url": url}).get_content() + for key, value in files.items() + } + file_objects["json"] = json.dumps(data).encode() + return requests.request(method, url, headers=headers, files=file_objects, verify=verify) + return requests.request(method, url, headers=headers, json=data, verify=verify, timeout=(10, 30)) + + def request(self, method, path, data=None, files=None, agent_job=None, raises=True): + self.raise_if_past_requests_have_failed() + response = json_response = None + try: + agent_job_id = agent_job.name if agent_job else None + response = self._make_req(method, path, data, files, agent_job_id) + json_response = response.json() + if raises and response.status_code >= 400: + output = "\n\n".join([json_response.get("output", ""), json_response.get("traceback", "")]) + if output == "\n\n": + output = json.dumps(json_response, indent=2, sort_keys=True) + raise HTTPError( + f"{response.status_code} {response.reason}\n\n{output}", + response=response, + ) + return json_response + except (HTTPError, TypeError, ValueError): + self.handle_request_failure(agent_job, response) + log_error( + title="Agent Request Result Exception", + result=json_response or getattr(response, "text", None), + ) + except requests.JSONDecodeError as exc: + if 
response and response.status_code >= 500: + self.log_request_failure(exc) + self.handle_exception(agent_job, exc) + log_error( + title="Agent Request Exception", + ) + except Exception as exc: + self.log_request_failure(exc) + self.handle_exception(agent_job, exc) + log_error( + title="Agent Request Exception", + ) + + def raise_if_past_requests_have_failed(self): + failures = jingrow.db.get_value("Agent Request Failure", {"server": self.server}, "failure_count") + if failures: + raise AgentRequestSkippedException(f"Previous {failures} requests have failed. Try again later.") + + def log_request_failure(self, exc): + filters = { + "server": self.server, + } + failure = jingrow.db.get_value( + "Agent Request Failure", filters, ["name", "failure_count"], as_dict=True + ) + if failure: + jingrow.db.set_value( + "Agent Request Failure", failure.name, "failure_count", failure.failure_count + 1 + ) + else: + fields = filters + fields.update( + { + "server_type": self.server_type, + "traceback": jingrow.get_traceback(with_context=True), + "error": repr(exc), + "failure_count": 1, + } + ) + jingrow.new_pg("Agent Request Failure", **fields).insert(ignore_permissions=True) + + def raw_request(self, method, path, data=None, raises=True, timeout=None): + url = f"https://{self.server}:{self.port}/agent/{path}" + password = get_decrypted_password(self.server_type, self.server, "agent_password") + headers = {"Authorization": f"bearer {password}"} + timeout = timeout or (10, 30) + response = requests.request(method, url, headers=headers, json=data, timeout=timeout) + json_response = response.json() + if raises: + response.raise_for_status() + return json_response + + def should_skip_requests(self): + return bool(jingrow.db.count("Agent Request Failure", {"server": self.server})) + + def handle_request_failure(self, agent_job, result: Response | None): + if not agent_job: + raise + + reason = status_code = None + with suppress(TypeError, ValueError): + reason = json.dumps(result.json(), indent=4, sort_keys=True) if result else None + + message = f""" +Status Code: {status_code or "Unknown"}\n +Response: {reason or getattr(result, "text", "Unknown")} +""" + self.log_failure_reason(agent_job, message) + agent_job.flags.status_code = status_code + + def handle_exception(self, agent_job, exception): + self.log_failure_reason(agent_job, exception) + + def log_failure_reason(self, agent_job=None, message=None): + if not agent_job: + raise + + agent_job.traceback = message + agent_job.output = message + + def create_agent_job( + self, + job_type, + path, + data=None, + files=None, + method="POST", + bench=None, + site=None, + code_server=None, + upstream=None, + host=None, + reference_pagetype=None, + reference_name=None, + ): + """ + Check if job already exists in Undelivered, Pending, Running state + don't add new job until its gets completed + """ + + disable_agent_job_deduplication = jingrow.db.get_single_value( + "Jcloud Settings", "disable_agent_job_deduplication", cache=True + ) + + if not disable_agent_job_deduplication: + job = self.get_similar_in_execution_job( + job_type, path, bench, site, code_server, upstream, host, method + ) + + if job: + return job + + job: "AgentJob" = jingrow.get_pg( + { + "pagetype": "Agent Job", + "server_type": self.server_type, + "server": self.server, + "bench": bench, + "host": host, + "site": site, + "code_server": code_server, + "upstream": upstream, + "status": "Undelivered", + "request_method": method, + "request_path": path, + "request_data": json.dumps(data or {}, 
indent=4, sort_keys=True), + "request_files": json.dumps(files or {}, indent=4, sort_keys=True), + "job_type": job_type, + "reference_pagetype": reference_pagetype, + "reference_name": reference_name, + } + ).insert() + return job + + def get_similar_in_execution_job( + self, + job_type, + path, + bench=None, + site=None, + code_server=None, + upstream=None, + host=None, + method="POST", + ): + """Deduplicate jobs in execution state""" + + filters = { + "server_type": self.server_type, + "server": self.server, + "job_type": job_type, + "status": ("not in", ("Success", "Failure", "Delivery Failure")), + "request_method": method, + "request_path": path, + } + + if bench: + filters["bench"] = bench + + if site: + filters["site"] = site + + if code_server: + filters["code_server"] = code_server + + if upstream: + filters["upstream"] = upstream + + if host: + filters["host"] = host + + job = jingrow.db.get_value("Agent Job", filters, "name") + + return jingrow.get_pg("Agent Job", job) if job else False + + def update_monitor_rules(self, rules, routes): + data = {"rules": rules, "routes": routes} + return self.post("monitor/rules", data=data) + + def get_job_status(self, id): + return self.get(f"jobs/{id}") + + def cancel_job(self, id): + return self.post(f"jobs/{id}/cancel") + + def get_site_sid(self, site, user=None): + if user: + data = {"user": user} + result = self.post(f"benches/{site.bench}/sites/{site.name}/sid", data=data) + else: + result = self.get(f"benches/{site.bench}/sites/{site.name}/sid") + return result and result.get("sid") + + def get_site_info(self, site): + result = self.get(f"benches/{site.bench}/sites/{site.name}/info") + if result: + return result["data"] + return None + + def get_sites_info(self, bench, since): + return self.post(f"benches/{bench.name}/info", data={"since": since}) + + def get_site_analytics(self, site): + result = self.get(f"benches/{site.bench}/sites/{site.name}/analytics") + if result: + return result["data"] + return None + + def get_sites_analytics(self, bench): + return self.get(f"benches/{bench.name}/analytics") + + def describe_database_table(self, site, pagetype, columns): + data = {"pagetype": pagetype, "columns": list(columns)} + return self.post( + f"benches/{site.bench}/sites/{site.name}/describe-database-table", + data=data, + )["data"] + + def add_database_index(self, site, pagetype, columns): + data = {"pagetype": pagetype, "columns": list(columns)} + return self.create_agent_job( + "Add Database Index", + f"benches/{site.bench}/sites/{site.name}/add-database-index", + data, + site=site.name, + ) + + def get_jobs_status(self, ids): + status = self.get(f"jobs/{','.join(map(str, ids))}") + if len(ids) == 1: + return [status] + return status + + def get_jobs_id(self, agent_job_ids): + return self.get(f"agent-jobs/{agent_job_ids}") + + def get_version(self): + return self.get("version") + + def update(self): + url = jingrow.get_pg(self.server_type, self.server).get_agent_repository_url() + branch = jingrow.get_pg(self.server_type, self.server).get_agent_repository_branch() + return self.post("update", data={"url": url, "branch": branch}) + + def ping(self): + return self.get("ping")["message"] + + def fetch_monitor_data(self, bench): + return self.post(f"benches/{bench}/monitor")["data"] + + def fetch_site_status(self, site): + return self.get(f"benches/{site.bench}/sites/{site.name}/status")["data"] + + def fetch_bench_status(self, bench): + return self.get(f"benches/{bench}/status") + + def run_after_migrate_steps(self, site): + data = { + 
"admin_password": site.get_password("admin_password"), + } + return self.create_agent_job( + "Run After Migrate Steps", + f"benches/{site.bench}/sites/{site.name}/run_after_migrate_steps", + bench=site.bench, + site=site.name, + data=data, + ) + + def move_site_to_bench( + self, + site, + target, + deactivate=True, + skip_failing_patches=False, + ): + """ + Move site to bench without backup + """ + activate = site.status not in ("Inactive", "Suspended") + data = { + "target": target, + "deactivate": deactivate, + "activate": activate, + "skip_failing_patches": skip_failing_patches, + } + return self.create_agent_job( + "Move Site to Bench", + f"benches/{site.bench}/sites/{site.name}/move_to_bench", + data, + bench=site.bench, + site=site.name, + ) + + def force_update_bench_limits(self, bench: str, data: dict): + return self.create_agent_job( + "Force Update Bench Limits", f"benches/{bench}/limits", bench=bench, data=data + ) + + def patch_app(self, app_patch: "AppPatch", data: "AgentPatchConfig"): + bench = app_patch.bench + app = app_patch.app + return self.create_agent_job( + "Patch App", + f"benches/{bench}/patch/{app}", + bench=bench, + data=data, + reference_pagetype="App Patch", + reference_name=app_patch.name, + ) + + def upload_build_context_for_docker_build( + self, + file: "BufferedReader", + dc_name: str, + ) -> str | None: + if res := self.request("POST", f"builder/upload/{dc_name}", files={"build_context_file": file}): + return res.get("filename") + + return None + + def run_build(self, data: dict): + reference_name = data.get("deploy_candidate") + return self.create_agent_job( + "Run Remote Builder", + "builder/build", + data=data, + reference_pagetype="Deploy Candidate", + reference_name=reference_name, + ) + + def call_supervisorctl(self, bench: str, action: str, programs: list[str]): + return self.create_agent_job( + "Call Bench Supervisorctl", + f"/benches/{bench}/supervisorctl", + data={"command": action, "programs": programs}, + ) + + def run_command_in_docker_cache( + self, + command: str = "ls -A", + cache_target: str = "/home/jingrow/.cache", + remove_image: bool = True, + ): + data = dict( + command=command, + cache_target=cache_target, + remove_image=remove_image, + ) + return self.request( + "POST", + "docker_cache_utils/run_command_in_docker_cache", + data=data, + ) + + def get_cached_apps(self): + return self.request( + "POST", + "docker_cache_utils/get_cached_apps", + data={}, + ) + + def get_site_apps(self, site): + raw_apps_list = self.get( + f"benches/{site.bench}/sites/{site.name}/apps", + ) + apps: list[str] = [line.split()[0] for line in raw_apps_list["data"].splitlines() if line] + return apps + + def fetch_database_table_schema( + self, site, include_table_size: bool = False, include_index_info: bool = False + ): + return self.create_agent_job( + "Fetch Database Table Schema", + f"benches/{site.bench}/sites/{site.name}/database/schema", + bench=site.bench, + site=site.name, + data={ + "include_table_size": include_table_size, + "include_index_info": include_index_info, + }, + reference_pagetype="Site", + reference_name=site.name, + ) + + def run_sql_query_in_database(self, site, query, commit): + return self.post( + f"benches/{site.bench}/sites/{site.name}/database/query/execute", + data={"query": query, "commit": commit, "as_dict": False}, + ) + + def get_summarized_performance_report_of_database(self, site): + return self.post( + f"benches/{site.bench}/sites/{site.name}/database/performance-report", + data={"mariadb_root_password": 
get_mariadb_root_password(site)}, + ) + + def analyze_slow_queries(self, site, normalized_queries: list[dict]): + """ + normalized_queries format: + [ + { + "example": "", + "normalized" : "", + } + ] + """ + return self.create_agent_job( + "Analyze Slow Queries", + f"benches/{site.bench}/sites/{site.name}/database/analyze-slow-queries", + data={ + "queries": normalized_queries, + "mariadb_root_password": get_mariadb_root_password(site), + }, + site=site.name, + ) + + def fetch_database_processes(self, site): + return self.post( + f"benches/{site.bench}/sites/{site.name}/database/processes", + data={ + "mariadb_root_password": get_mariadb_root_password(site), + }, + ) + + def kill_database_process(self, site, id): + return self.post( + f"benches/{site.bench}/sites/{site.name}/database/kill-process/{id}", + data={ + "mariadb_root_password": get_mariadb_root_password(site), + }, + ) + + +class AgentCallbackException(Exception): + pass + + +class AgentRequestSkippedException(Exception): + pass diff --git a/jcloud/api/__init__.py b/jcloud/api/__init__.py new file mode 100644 index 0000000..aed2ff2 --- /dev/null +++ b/jcloud/api/__init__.py @@ -0,0 +1,39 @@ +import jingrow + +from jcloud.api.client import dashboard_whitelist +from jcloud.utils import get_full_chain_cert_of_domain, get_minified_script, get_minified_script_2, log_error + + +@jingrow.whitelist(allow_guest=True) +def script(): + return get_minified_script() + + +@jingrow.whitelist(allow_guest=True) +def script_2(): + return get_minified_script_2() + + +@jingrow.whitelist(allow_guest=True) +def handle_suspended_site_redirection(): + from jcloud.saas.pagetype.product_trial_request.product_trial_request import ( + get_app_trial_page_url, + ) + + jingrow.local.response["type"] = "redirect" + jingrow.local.response["location"] = get_app_trial_page_url() or "/dashboard" + + +@dashboard_whitelist() +def download_ssl_cert(domain: str): + if ( + not (domain.endswith("jingrow.cloud") or domain.endswith("jingrow.com")) + and not jingrow.conf.developer_mode + ): + jingrow.throw("Invalid domain provided") + + try: + return get_full_chain_cert_of_domain(domain) + except Exception as e: + log_error("Error downloading SSL certificate", data=e) + jingrow.throw("Failed to download SSL certificate. 
Please try again later.") diff --git a/jcloud/api/account.py b/jcloud/api/account.py new file mode 100644 index 0000000..a96b139 --- /dev/null +++ b/jcloud/api/account.py @@ -0,0 +1,1540 @@ +# Copyright (c) 2019, JINGROW +# For license information, please see license.txt +from __future__ import annotations + +import json +import re +from typing import TYPE_CHECKING + +import jingrow +import pyotp +from jingrow import _ +from jingrow.core.pagetype.user.user import update_password +from jingrow.core.utils import find +from jingrow.exceptions import DoesNotExistError +from jingrow.query_builder.custom import GROUP_CONCAT +from jingrow.rate_limiter import rate_limit +from jingrow.utils import cint, get_url, random_string +from jingrow.utils.data import sha256_hash +from jingrow.utils.oauth import get_oauth2_authorize_url, get_oauth_keys +from jingrow.utils.password import get_decrypted_password +from jingrow.website.utils import build_response +from pypika.terms import ValueWrapper + +from jcloud.api.site import protected +from jcloud.jcloud.pagetype.team.team import ( + Team, + get_child_team_members, + get_team_members, +) +from jcloud.utils import get_country_info, get_current_team, is_user_part_of_team +from jcloud.utils.telemetry import capture + +if TYPE_CHECKING: + from jcloud.jcloud.pagetype.account_request.account_request import AccountRequest + + +@jingrow.whitelist(allow_guest=True) +def signup(email, product=None, referrer=None): + jingrow.utils.validate_email_address(email, True) + + current_user = jingrow.session.user + jingrow.set_user("Administrator") + + email = email.strip().lower() + exists, enabled = jingrow.db.get_value("Team", {"user": email}, ["name", "enabled"]) or [0, 0] + + account_request = None + if exists and not enabled: + jingrow.throw(_("Account {0} has been deactivated").format(email)) + elif exists and enabled: + jingrow.throw(_("Account {0} is already registered").format(email)) + else: + account_request = jingrow.get_pg( + { + "pagetype": "Account Request", + "email": email, + "role": "Jcloud Admin", + "referrer_id": referrer, + "send_email": True, + "product_trial": product, + } + ).insert() + + jingrow.set_user(current_user) + if account_request: + return account_request.name + return None + + return None + +@jingrow.whitelist(allow_guest=True) +@rate_limit(limit=50, seconds=60 * 60) +def signup_with_username(username, password, email=None, phone_number=None, referrer=None, product=None): + """ + 使用用户名注册新账户,邮箱和手机号为可选 + """ + from jingrow.utils import validate_email_address + from jingrow.utils.password import update_password + + current_user = jingrow.session.user + jingrow.set_user("Administrator") # 确保有权限创建用户 + + try: + # 验证用户名 + if not username or len(username) < 3: + jingrow.throw("用户名至少需要3个字符") + + # 检查用户名是否已存在 + if jingrow.db.exists("User", {"username": username}): + jingrow.throw("该用户名已被使用") + + # 如果提供了邮箱,验证邮箱格式并检查是否已存在 + user_email = None + if email: + try: + validate_email_address(email, True) + except: + jingrow.throw("请输入有效的邮箱地址") + + if jingrow.db.exists("User", {"email": email}): + jingrow.throw("该邮箱已被注册") + user_email = email + + # 如果提供了手机号,验证手机号格式并检查是否已存在 + if phone_number: + if not re.match(r'^1[3-9]\d{9}$', phone_number): + jingrow.throw("请输入有效的手机号码") + if jingrow.db.exists("User", {"mobile_no": phone_number}): + jingrow.throw("该手机号已被注册") + + # 创建用户,但先不设置密码 + user_pg = { + "pagetype": "User", + "first_name": username, + "username": username, + "send_welcome_email": 0, + "user_type": "Website User", + "language": "zh" # 设置默认语言为中文 + } + + # 
只有提供了邮箱时才添加到用户数据 + if user_email: + user_pg["email"] = user_email + + # 如果提供了手机号,添加到用户数据 + if phone_number: + user_pg["mobile_no"] = phone_number + + user = jingrow.get_pg(user_pg) + + # 添加角色 - 设置为 Jcloud Admin + user.append("roles", { + "role": "Jcloud Admin" + }) + + user.insert(ignore_permissions=True) + + # 在创建用户后显式更新密码,这样可以确保密码被正确加密和保存 + update_password(username, password) + + # 创建团队 + team_pg = { + "pagetype": "Team", + "enabled": 1, + "team_name": username, + "country": "China", # 默认设置为中国 + "payment_mode": "Prepaid Credits", # 设置默认支付方式为余额支付 + "user": username # 使用用户名作为团队用户标识 + } + + # 只有当提供有效邮箱时,才设置账单邮箱和通知邮箱 + if user_email: + team_pg["billing_email"] = user_email + team_pg["notify_email"] = user_email + + team = jingrow.get_pg(team_pg) + + # 添加团队成员 - 使用用户名 + team.append("team_members", { + "user": username + }) + + # 如果有邮箱,添加通讯邮箱 + if user_email: + team.append("communication_emails", { + "type": "invoices", + "value": user_email + }) + team.append("communication_emails", { + "type": "marketplace_notifications", + "value": user_email + }) + + # 插入团队 + team.insert(ignore_permissions=True) + + # 登录用户 + jingrow.local.login_manager.login_as(username) + + jingrow.db.commit() + + # 返回与标准流程一致的响应 + return { + "dashboard_route": "" + } + + finally: + # 恢复原始用户 + jingrow.set_user(current_user) + +@jingrow.whitelist(allow_guest=True) +@rate_limit(limit=5, seconds=60) +def send_otp(email: str): + """ + 向用户发送登录验证码 + """ + if not jingrow.db.exists("User", email): + jingrow.throw("该邮箱尚未注册账户") + + last_otp = jingrow.cache().get_value(f"login_otp_generated_at:{email}") + if last_otp and (jingrow.utils.now_datetime() - last_otp).seconds < 30: + jingrow.throw("请在30秒后再请求新的验证码") + + # 生成6位数字OTP + import random + otp = ''.join(random.choices('0123456789', k=6)) + expires_in_seconds = 300 # 5分钟 + + jingrow.cache().set_value( + f"login_otp:{email}", + otp, + expires_in_sec=expires_in_seconds + ) + jingrow.cache().set_value( + f"login_otp_generated_at:{email}", + jingrow.utils.now_datetime(), + expires_in_sec=expires_in_seconds + ) + + if jingrow.conf.developer_mode: + print(f"\n登录OTP给 {email}: {otp}\n") + else: + # 获取用户全名 + full_name = jingrow.db.get_value("User", email, "full_name") or email.split("@")[0] + + jingrow.sendmail( + subject="Jingrow 登录验证码", + recipients=email, + template="verification_code_for_login", + args={ + "otp": otp, + "minutes": expires_in_seconds // 60, + "full_name": full_name + }, + now=True, + ) + + return True + + +@jingrow.whitelist(allow_guest=True) +@rate_limit(limit=5, seconds=60 * 60) +def verify_otp_and_login(email: str, otp: str): + """ + 验证OTP并登录用户 + """ + if not jingrow.db.exists("User", email): + jingrow.throw("该邮箱尚未注册账户") + + stored_otp = jingrow.cache().get_value(f"login_otp:{email}") + if not stored_otp: + jingrow.throw("验证码已过期") + + if stored_otp != otp: + jingrow.throw("验证码无效") + + # 清除缓存 + jingrow.cache().delete_value(f"login_otp:{email}") + jingrow.cache().delete_value(f"login_otp_generated_at:{email}") + + # 登录用户 + jingrow.local.login_manager.login_as(email) + + # 登陆重定向 + return { + "dashboard_route": "" + } + + +@jingrow.whitelist(allow_guest=True) +@rate_limit(limit=5, seconds=60 * 60) +def verify_otp(account_request: str, otp: str): + account_request: "AccountRequest" = jingrow.get_pg("Account Request", account_request) + # ensure no team has been created with this email + if jingrow.db.exists("Team", {"user": account_request.email}) and not account_request.product_trial: + jingrow.throw("Invalid OTP. 
Please try again.") + if account_request.otp != otp: + jingrow.throw("Invalid OTP. Please try again.") + account_request.reset_otp() + return account_request.request_key + + +@jingrow.whitelist(allow_guest=True) +@rate_limit(limit=5, seconds=60) +def resend_otp(account_request: str): + account_request: "AccountRequest" = jingrow.get_pg("Account Request", account_request) + + # if last OTP was sent less than 30 seconds ago, throw an error + if ( + account_request.otp_generated_at + and (jingrow.utils.now_datetime() - account_request.otp_generated_at).seconds < 30 + ): + jingrow.throw("Please wait for 30 seconds before requesting a new OTP") + + # ensure no team has been created with this email + if jingrow.db.exists("Team", {"user": account_request.email}) and not account_request.product_trial: + jingrow.throw("Invalid Email") + account_request.reset_otp() + account_request.send_verification_email() + + +@jingrow.whitelist(allow_guest=True) +def setup_account( # noqa: C901 + key, + first_name=None, + last_name=None, + password=None, + is_invitation=False, + country=None, + user_exists=False, + accepted_user_terms=False, + invited_by_parent_team=False, + oauth_signup=False, + oauth_domain=False, +): + account_request = get_account_request_from_key(key) + if not account_request: + jingrow.throw("Invalid or Expired Key") + + if not user_exists: + if not first_name: + jingrow.throw("名字是必填项") + + if not password and not (oauth_signup or oauth_domain): + jingrow.throw("密码是必填项") + + if not is_invitation and not country: + jingrow.throw("国家是必填项") + + if not is_invitation and country: + all_countries = jingrow.db.get_all("Country", pluck="name") + country = find(all_countries, lambda x: x.lower() == country.lower()) + if not country: + jingrow.throw("Please provide a valid country name") + + if not accepted_user_terms: + jingrow.throw("Please accept our Terms of Service & Privacy Policy to continue") + + # if the request is authenticated, set the user to Administrator + jingrow.set_user("Administrator") + + team = account_request.team + email = account_request.email + role = account_request.role + jcloud_roles = account_request.jcloud_roles + + if is_invitation: + # if this is a request from an invitation + # then Team already exists and will be added to that team + pg = jingrow.get_pg("Team", team) + pg.create_user_for_member(first_name, last_name, email, password, role, jcloud_roles) + else: + # Team doesn't exist, create it + Team.create_new( + account_request=account_request, + first_name=first_name, + last_name=last_name, + password=password, + country=country, + user_exists=bool(user_exists), + ) + if invited_by_parent_team: + pg = jingrow.get_pg("Team", account_request.invited_by) + pg.append("child_team_members", {"child_team": team}) + pg.save() + + # Telemetry: Created account + capture("completed_signup", "fc_signup", account_request.email) + jingrow.local.login_manager.login_as(email) + + return account_request.name + + +@jingrow.whitelist(allow_guest=True) +@rate_limit(limit=5, seconds=60 * 60) +def send_login_link(email): + if not jingrow.db.exists("User", email): + jingrow.throw("No registered account with this email address") + + key = jingrow.generate_hash("Login Link", 20) + minutes = 10 + jingrow.cache().set_value(f"one_time_login_key:{key}", email, expires_in_sec=minutes * 60) + + link = get_url(f"/api/method/jcloud.api.account.login_using_key?key={key}") + + if jingrow.conf.developer_mode: + print() + print(f"One time login link for {email}") + print(link) + print() + + 
jingrow.sendmail( + subject="Login to Jingrow", + recipients=email, + template="one_time_login_link", + args={"link": link, "minutes": minutes}, + now=True, + ) + + +@jingrow.whitelist(allow_guest=True) +@rate_limit(limit=5, seconds=60 * 60) +def login_using_key(key): + cache_key = f"one_time_login_key:{key}" + email = jingrow.cache().get_value(cache_key) + + if email: + jingrow.cache().delete_value(cache_key) + jingrow.local.login_manager.login_as(email) + jingrow.response.type = "redirect" + jingrow.response.location = "/dashboard" + else: + jingrow.respond_as_web_page( + _("Not Permitted"), + _("The link using which you are trying to login is invalid or expired."), + http_status_code=403, + indicator_color="red", + ) + + +@jingrow.whitelist() +def active_servers(): + team = get_current_team() + return jingrow.get_all("Server", {"team": team, "status": "Active"}, ["title", "name"]) + + +@jingrow.whitelist() +def disable_account(totp_code: str | None): + user = jingrow.session.user + team = get_current_team(get_pg=True) + + if is_2fa_enabled(user): + if not totp_code: + jingrow.throw("2FA Code is required") + if not verify_2fa(user, totp_code): + jingrow.throw("Invalid 2FA Code") + + if user != team.user: + jingrow.throw("Only team owner can disable the account") + + team.disable_account() + + +@jingrow.whitelist() +def has_active_servers(team): + return jingrow.db.exists("Server", {"status": "Active", "team": team}) + + +@jingrow.whitelist() +def enable_account(): + team = get_current_team(get_pg=True) + if jingrow.session.user != team.user: + jingrow.throw("Only team owner can enable the account") + team.enable_account() + + +@jingrow.whitelist() +def request_team_deletion(): + team = get_current_team(get_pg=True) + pg = jingrow.get_pg({"pagetype": "Team Deletion Request", "team": team.name}).insert() + return pg.name + + +@jingrow.whitelist(allow_guest=True) +def delete_team(team): + from jingrow.utils.verified_command import verify_request + + responses = { + "invalid": [ + ("Link Invalid", "This link is invalid or expired."), + {"indicator_color": "red"}, + ], + "confirmed": [ + ( + "Confirmed", + f"The process for deletion of your team {team} has been initiated. 
Sorry to see you go :(", + ), + {"indicator_color": "green"}, + ], + "expired": [ + ("Link Expired", "This link has already been activated for verification."), + {"indicator_color": "red"}, + ], + } + + def respond_as_web_page(key): + jingrow.respond_as_web_page(*responses[key][0], **responses[key][1]) + + if verify_request() or jingrow.flags.in_test: + jingrow.set_user("Administrator") + else: + return respond_as_web_page("invalid") + + try: + pg = jingrow.get_last_pg("Team Deletion Request", {"team": team}) + except jingrow.DoesNotExistError: + return respond_as_web_page("invalid") + + if pg.status != "Pending Verification": + return respond_as_web_page("expired") + + pg.status = "Deletion Verified" + pg.save() + jingrow.db.commit() + + return respond_as_web_page("confirmed") + + +@jingrow.whitelist(allow_guest=True) +def validate_request_key(key, timezone=None): + from jcloud.utils.country_timezone import get_country_from_timezone + + account_request = get_account_request_from_key(key) + if account_request: + data = get_country_info() + possible_country = data.get("country") or get_country_from_timezone(timezone) + if not (account_request.is_saas_signup() or account_request.invited_by_parent_team): + capture("clicked_verify_link", "fc_signup", account_request.email) + return { + "email": account_request.email, + "first_name": account_request.first_name, + "last_name": account_request.last_name, + "country": possible_country, + "countries": jingrow.db.get_all("Country", pluck="name"), + "user_exists": jingrow.db.exists("User", account_request.email), + "team": account_request.team, + "is_invitation": jingrow.db.get_value("Team", account_request.team, "enabled"), + "invited_by": account_request.invited_by, + "invited_by_parent_team": account_request.invited_by_parent_team, + "oauth_signup": account_request.oauth_signup, + "oauth_domain": jingrow.db.exists( + "OAuth Domain Mapping", {"email_domain": account_request.email.split("@")[1]} + ), + "product_trial": jingrow.db.get_value( + "Product Trial", account_request.product_trial, ["logo", "title", "name"], as_dict=1 + ), + } + return None + + return None + + +@jingrow.whitelist(allow_guest=True) +def country_list(): + def get_country_list(): + return jingrow.db.get_all("Country", fields=["name", "code"]) + + return jingrow.cache().get_value("country_list", generator=get_country_list) + + +def clear_country_list_cache(): + jingrow.cache().delete_value("country_list") + + +@jingrow.whitelist() +def set_country(country): + team_pg = get_current_team(get_pg=True) + team_pg.country = country + team_pg.save() + team_pg.create_stripe_customer() + + +def get_account_request_from_key(key): + """Find Account Request using `key` in the past 12 hours or if site is active""" + + if not key or not isinstance(key, str): + jingrow.throw(_("Invalid Key")) + + hours = 12 + ar = jingrow.get_pg("Account Request", {"request_key": key}) + if ar.creation > jingrow.utils.add_to_date(None, hours=-hours): + return ar + if ar.subdomain and ar.saas_app: + domain = jingrow.db.get_value("Saas Settings", ar.saas_app, "domain") + if jingrow.db.get_value("Site", ar.subdomain + "." 
+ domain, "status") == "Active": + return ar + return None + + return None + + +@jingrow.whitelist() +def get(): + cached = jingrow.cache.get_value("cached-account.get", user=jingrow.session.user) + if cached: + return cached + value = _get() + jingrow.cache.set_value("cached-account.get", value, user=jingrow.session.user, expires_in_sec=60) + return value + + +def _get(): + user = jingrow.session.user + if not jingrow.db.exists("User", user): + jingrow.throw(_("Account does not exist")) + + team_pg = get_current_team(get_pg=True) + + parent_teams = [d.parent for d in jingrow.db.get_all("Team Member", {"user": user}, ["parent"])] + + teams = [] + if parent_teams: + Team = jingrow.qb.PageType("Team") + teams = ( + jingrow.qb.from_(Team) + .select(Team.name, Team.team_title, Team.user) + .where((Team.enabled == 1) & (Team.name.isin(parent_teams))) + .run(as_dict=True) + ) + + partner_billing_name = "" + if team_pg.partner_email: + partner_billing_name = jingrow.db.get_value( + "Team", + {"jerp_partner": 1, "partner_email": team_pg.partner_email}, + "billing_name", + ) + number_of_sites = jingrow.db.count("Site", {"team": team_pg.name, "status": ("!=", "Archived")}) + + return { + "user": jingrow.get_pg("User", user), + "ssh_key": get_ssh_key(user), + "team": team_pg, + "team_members": get_team_members(team_pg.name), + "child_team_members": get_child_team_members(team_pg.name), + "teams": list(teams if teams else parent_teams), + "onboarding": team_pg.get_onboarding(), + "balance": team_pg.get_balance(), + "parent_team": team_pg.parent_team or "", + "saas_site_request": team_pg.get_pending_saas_site_request(), + "feature_flags": { + "verify_cards_with_micro_charge": jingrow.db.get_single_value( + "Jcloud Settings", "verify_cards_with_micro_charge" + ) + }, + "partner_email": team_pg.partner_email or "", + "partner_billing_name": partner_billing_name, + "number_of_sites": number_of_sites, + "permissions": get_permissions(), + "billing_info": team_pg.billing_info(), + } + + +@jingrow.whitelist() +def current_team(): + user = jingrow.session.user + if not jingrow.db.exists("User", user): + jingrow.throw(_("Account does not exist")) + + from jcloud.api.client import get + + return get("Team", jingrow.local.team().name) + + +def get_permissions(): + user = jingrow.session.user + groups = tuple( + [*jingrow.get_all("Jcloud Permission Group User", {"user": user}, pluck="parent"), "1", "2"] + ) # [1, 2] is for avoiding singleton tuples + docperms = jingrow.db.sql( + f""" + SELECT `document_name`, GROUP_CONCAT(`action`) as `actions` + FROM `tabJcloud User Permission` + WHERE user='{user}' or `group` in {groups} + GROUP BY `document_name` + """, + as_dict=True, + ) + return {perm.document_name: perm.actions.split(",") for perm in docperms if perm.actions} + + +@jingrow.whitelist() +def has_method_permission(pagetype, docname, method) -> bool: + from jcloud.jcloud.pagetype.jcloud_permission_group.jcloud_permission_group import ( + has_method_permission, + ) + + return has_method_permission(pagetype, docname, method) + + +@jingrow.whitelist(allow_guest=True) +def signup_settings(product=None, fetch_countries=False, timezone=None): + from jcloud.utils.country_timezone import get_country_from_timezone + + settings = jingrow.get_single("Jcloud Settings") + + product = jingrow.utils.cstr(product) + product_trial = None + if product: + product_trial = jingrow.db.get_value( + "Product Trial", + {"name": product, "published": 1}, + ["title", "logo"], + as_dict=1, + ) + + data = { + "enable_google_oauth": 
settings.enable_google_oauth,
+        "product_trial": product_trial,
+        "oauth_domains": jingrow.get_all(
+            "OAuth Domain Mapping", ["email_domain", "social_login_key", "provider_name"]
+        ),
+    }
+
+    if fetch_countries:
+        data["countries"] = jingrow.db.get_all("Country", pluck="name")
+        data["country"] = get_country_info().get("country") or get_country_from_timezone(timezone)
+
+    return data
+
+
+@jingrow.whitelist(allow_guest=True)
+def guest_feature_flags():
+    return {
+        "enable_google_oauth": jingrow.db.get_single_value("Jcloud Settings", "enable_google_oauth"),
+    }
+
+
+@jingrow.whitelist()
+def create_child_team(title):
+    team = title.strip()
+
+    current_team = get_current_team(True)
+    if title in [
+        d.team_title for d in jingrow.get_all("Team", {"parent_team": current_team.name}, ["team_title"])
+    ]:
+        jingrow.throw(f"Child Team {title} already exists.")
+    elif title == "Parent Team":
+        jingrow.throw("Child team name cannot be same as parent team")
+
+    pg = jingrow.get_pg(
+        {
+            "pagetype": "Team",
+            "team_title": team,
+            "user": current_team.user,
+            "parent_team": current_team.name,
+            "enabled": 1,
+        }
+    )
+    pg.insert(ignore_permissions=True, ignore_links=True)
+    pg.append("team_members", {"user": current_team.user})
+    pg.save()
+
+    current_team.append("child_team_members", {"child_team": pg.name})
+    current_team.save()
+
+    return "created"
+
+
+def new_team(email, current_team):
+    jingrow.utils.validate_email_address(email, True)
+
+    jingrow.get_pg(
+        {
+            "pagetype": "Account Request",
+            "email": email,
+            "role": "Jcloud Member",
+            "send_email": True,
+            "team": email,
+            "invited_by": current_team,
+            "invited_by_parent_team": 1,
+        }
+    ).insert()
+
+    return "new_team"
+
+
+def get_ssh_key(user):
+    ssh_keys = jingrow.get_all(
+        "User SSH Key", {"user": user, "is_default": True}, order_by="creation desc", limit=1
+    )
+    if ssh_keys:
+        return jingrow.get_pg("User SSH Key", ssh_keys[0])
+
+    return None
+
+
+@jingrow.whitelist()
+def update_profile(first_name=None, last_name=None, email=None, username=None, mobile_no=None):
+    user = jingrow.session.user
+
+    if email:
+        # Update all of the user's related email fields
+        update_profile_email(email)
+
+    # Validate the username, if provided
+    if username:
+        if len(username) < 3:
+            jingrow.throw("Username must be at least 3 characters long")
+
+        # Check whether the username is already taken by another user
+        username_exists = jingrow.db.exists(
+            "User",
+            {"username": username, "name": ("!=", user)}
+        )
+        if username_exists:
+            jingrow.throw("This username is already taken")
+
+    # Validate the mobile number, if provided
+    if mobile_no:
+        if not re.match(r'^1[3-9]\d{9}$', mobile_no):
+            jingrow.throw("Please enter a valid mobile number")
+
+        # Check whether the mobile number is already used by another user
+        phone_exists = jingrow.db.exists(
+            "User",
+            {"mobile_no": mobile_no, "name": ("!=", user)}
+        )
+        if phone_exists:
+            jingrow.throw("This mobile number is already registered")
+
+    pg = jingrow.get_pg("User", user)
+    pg.first_name = first_name
+    pg.last_name = last_name
+
+    if username:
+        pg.username = username
+
+    if mobile_no:
+        pg.mobile_no = mobile_no
+
+    pg.save(ignore_permissions=True)
+    return pg
+
+
+@jingrow.whitelist()
+def update_profile_picture():
+    user = jingrow.session.user
+    _file = jingrow.get_pg(
+        {
+            "pagetype": "File",
+            "attached_to_pagetype": "User",
+            "attached_to_name": user,
+            "attached_to_field": "user_image",
+            "folder": "Home/Attachments",
+            "file_name": jingrow.local.uploaded_filename,
+            "is_private": 0,
+            "content": jingrow.local.uploaded_file,
+        }
+    )
+    _file.save(ignore_permissions=True)
+    jingrow.db.set_value("User", user, "user_image", _file.file_url)
+
+
+@jingrow.whitelist()
+def update_feature_flags(values=None):
+    jingrow.only_for("Jcloud Admin")
+    team = 
get_current_team(get_pg=True) + values = jingrow.parse_json(values) + fields = [ + "benches_enabled", + "servers_enabled", + "self_hosted_servers_enabled", + "security_portal_enabled", + ] + for field in fields: + if field in values: + team.set(field, values[field]) + team.save() + + +@jingrow.whitelist(allow_guest=True) +@rate_limit(limit=5, seconds=60 * 60) +def send_reset_password_email(email: str): + valid_email = jingrow.utils.validate_email_address(email) + if not valid_email: + jingrow.throw( + f"{email} is not a valid email address", + jingrow.InvalidEmailAddressError, + ) + + valid_email = valid_email.strip() + key = jingrow.generate_hash() + hashed_key = sha256_hash(key) + if jingrow.db.exists("User", valid_email): + jingrow.db.set_value( + "User", + valid_email, + { + "reset_password_key": hashed_key, + "last_reset_password_key_generated_on": jingrow.utils.now_datetime(), + }, + ) + url = get_url("/dashboard/reset-password/" + key) + if jingrow.conf.developer_mode: + print(f"\nReset password URL for {valid_email}:") + print(url) + print() + return + jingrow.sendmail( + recipients=valid_email, + subject="Reset Password", + template="reset_password", + args={"link": url}, + now=True, + ) + else: + jingrow.throw(f"User {valid_email} does not exist") + + +@jingrow.whitelist(allow_guest=True) +def reset_password(key, password): + return update_password(new_password=password, key=key) + + +@jingrow.whitelist(allow_guest=True) +def get_user_for_reset_password_key(key): + if not key or not isinstance(key, str): + jingrow.throw(_("Invalid Key")) + + hashed_key = sha256_hash(key) + return jingrow.db.get_value("User", {"reset_password_key": hashed_key}, "name") + + +@jingrow.whitelist() +def remove_team_member(user_email): + team = get_current_team(True) + team.remove_team_member(user_email) + + +@jingrow.whitelist() +def remove_child_team(child_team): + team = jingrow.get_pg("Team", child_team) + sites = jingrow.get_all("Site", {"status": ("!=", "Archived"), "team": team.name}, pluck="name") + if sites: + jingrow.throw("Child team has Active Sites") + + team.enabled = 0 + team.parent_team = "" + team.save(ignore_permissions=True) + + +@jingrow.whitelist() +def can_switch_to_team(team): + if not jingrow.db.exists("Team", team): + return False + if jingrow.local.system_user(): + return True + if is_user_part_of_team(jingrow.session.user, team): + return True + return False + + +@jingrow.whitelist() +def switch_team(team): + user_is_part_of_team = jingrow.db.exists("Team Member", {"parent": team, "user": jingrow.session.user}) + user_is_system_user = jingrow.session.data.user_type == "System User" + if user_is_part_of_team or user_is_system_user: + jingrow.db.set_value("Team", {"user": jingrow.session.user}, "last_used_team", team) + jingrow.cache.delete_value("cached-account.get", user=jingrow.session.user) + return { + "team": jingrow.get_pg("Team", team), + "team_members": get_team_members(team), + } + return None + + +@jingrow.whitelist() +def leave_team(team): + team_to_leave = jingrow.get_pg("Team", team) + cur_team = jingrow.session.user + + if team_to_leave.user == cur_team: + jingrow.throw("Cannot leave this team as you are the owner.") + + team_to_leave.remove_team_member(cur_team) + + +@jingrow.whitelist() +def get_billing_information(timezone=None): + from jcloud.utils.country_timezone import get_country_from_timezone + + team = get_current_team(True) + + billing_details = jingrow._dict() + if team.billing_address: + billing_details = jingrow.get_pg("Address", 
team.billing_address).as_dict() + billing_details.billing_name = team.billing_name + + if not billing_details.country and timezone: + billing_details.country = get_country_from_timezone(timezone) + + return billing_details + + +@jingrow.whitelist() +def update_billing_information(billing_details): + billing_details = jingrow._dict(billing_details) + team = get_current_team(get_pg=True) + validate_pincode(billing_details) + if (team.country != billing_details.country) and ( + team.country == "China" or billing_details.country == "China" + ): + jingrow.throw("Cannot change country after registration") + team.update_billing_details(billing_details) + + +def validate_pincode(billing_details): + + if billing_details.country != "China" or not billing_details.postal_code: + return + PINCODE_FORMAT = re.compile(r"^[1-9][0-9]{5}$") + if not PINCODE_FORMAT.match(billing_details.postal_code): + jingrow.throw("Invalid Postal Code") + + if billing_details.state not in STATE_PINCODE_MAPPING: + return + + first_three_digits = cint(billing_details.postal_code[:3]) + postal_code_range = STATE_PINCODE_MAPPING[billing_details.state] + + if isinstance(postal_code_range[0], int): + postal_code_range = (postal_code_range,) + + for lower_limit, upper_limit in postal_code_range: + if lower_limit <= int(first_three_digits) <= upper_limit: + return + + jingrow.throw(f"Postal Code {billing_details.postal_code} is not associated with {billing_details.state}") + + +@jingrow.whitelist(allow_guest=True) +def feedback(team, message, note, rating, route=None): + feedback = jingrow.new_pg("Jcloud Feedback") + team_pg = jingrow.get_pg("Team", team) + feedback.team = team + feedback.message = message + feedback.note = note + feedback.route = route + feedback.rating = rating / 5 + feedback.team_created_on = jingrow.utils.getdate(team_pg.creation) + feedback.currency = team_pg.currency + invs = jingrow.get_all( + "Invoice", + {"team": team, "status": "Paid", "type": "Subscription"}, + pluck="total", + order_by="creation desc", + limit=1, + ) + feedback.last_paid_invoice = 0 if not invs else invs[0] + feedback.insert(ignore_permissions=True) + + +@jingrow.whitelist() +def get_site_count(team): + return jingrow.db.count("Site", {"team": team, "status": ("=", "Active")}) + + +@jingrow.whitelist() +def user_prompts(): + if jingrow.local.dev_server: + return None + + team = get_current_team(True) + pg = jingrow.get_pg("Team", team.name) + + onboarding = pg.get_onboarding() + if not onboarding["complete"]: + return None + + if not pg.billing_address: + return [ + "UpdateBillingDetails", + "Update your billing details so that we can show it in your monthly invoice.", + ] + + gstin, country = jingrow.db.get_value("Address", pg.billing_address, ["gstin", "country"]) + if country == "China" and not gstin: + return [ + "UpdateBillingDetails", + "If you have a registered GSTIN number, you are required to update it, so that we can generate a GST Invoice.", + ] + return None + + +def redirect_to(location): + return build_response( + jingrow.local.request.path, + "", + 301, + {"Location": location, "Cache-Control": "no-store, no-cache, must-revalidate"}, + ) + + +def get_jingrow_io_auth_url() -> str | None: + """Get auth url for oauth login with jingrow.com.""" + + try: + provider = jingrow.get_last_pg( + "Social Login Key", filters={"enable_social_login": 1, "provider_name": "Jingrow"} + ) + except DoesNotExistError: + return None + + if ( + provider.base_url + and provider.client_id + and get_oauth_keys(provider.name) + and 
provider.get_password("client_secret") + ): + return get_oauth2_authorize_url(provider.name, redirect_to="") + return None + + +@jingrow.whitelist() +def get_emails(): + team = get_current_team(get_pg=True) + return [ + { + "type": "billing_email", + "value": team.billing_email, + }, + { + "type": "notify_email", + "value": team.notify_email, + }, + ] + + +@jingrow.whitelist() +def update_emails(data): + from jingrow.utils import validate_email_address + + data = {x["type"]: x["value"] for x in json.loads(data)} + for _key, value in data.items(): + validate_email_address(value, throw=True) + + team_pg = get_current_team(get_pg=True) + + team_pg.billing_email = data["billing_email"] + team_pg.notify_email = data["notify_email"] + + team_pg.save() + + +@jingrow.whitelist() +def add_key(key): + jingrow.get_pg({"pagetype": "User SSH Key", "user": jingrow.session.user, "ssh_public_key": key}).insert() + + +@jingrow.whitelist() +def mark_key_as_default(key_name): + key = jingrow.get_pg("User SSH Key", key_name) + key.is_default = True + key.save() + + +@jingrow.whitelist() +def create_api_secret(): + user = jingrow.get_pg("User", jingrow.session.user) + + api_key = user.api_key + api_secret = jingrow.generate_hash() + + if not api_key: + api_key = jingrow.generate_hash() + user.api_key = api_key + + user.api_secret = api_secret + user.save(ignore_permissions=True) + + return {"api_key": api_key, "api_secret": api_secret} + + +@jingrow.whitelist() +def me(): + return {"user": jingrow.session.user, "team": get_current_team()} + + +@jingrow.whitelist() +def fuse_list(): + team = get_current_team(get_pg=True) + query = f""" + SELECT + 'Site' as pagetype, name as title, name as route + FROM + `tabSite` + WHERE + team = '{team.name}' AND status NOT IN ('Archived') + UNION ALL + SELECT 'Bench' as pagetype, title as title, name as route + FROM + `tabRelease Group` + WHERE + team = '{team.name}' AND enabled = 1 + UNION ALL + SELECT 'Server' as pagetype, name as title, name as route + FROM + `tabServer` + WHERE + team = '{team.name}' AND status = 'Active' + """ + + return jingrow.db.sql(query, as_dict=True) + + +# Permissions +@jingrow.whitelist() +def get_permission_options(name, ptype): + """ + [{'pagetype': 'Site', 'name': 'ccc.jingrow.cloud', title: '', 'perms': 'jcloud.api.site.get'}, ...] 
+ """ + from jcloud.jcloud.pagetype.jcloud_method_permission.jcloud_method_permission import ( + available_actions, + ) + + doctypes = jingrow.get_all("Jcloud Method Permission", pluck="document_type", distinct=True) + + options = [] + for pagetype in doctypes: + pg = jingrow.qb.PageType(pagetype) + perm_pg = jingrow.qb.PageType("Jcloud User Permission") + subtable = ( + jingrow.qb.from_(perm_pg) + .select("*") + .where((perm_pg.user if ptype == "user" else perm_pg.group) == name) + ) + + query = ( + jingrow.qb.from_(pg) + .left_join(subtable) + .on(pg.name == subtable.document_name) + .select( + ValueWrapper(pagetype, alias="pagetype"), + pg.name, + pg.title if pagetype != "Site" else None, + GROUP_CONCAT(subtable.action, alias="perms"), + ) + .where( + (pg.team == get_current_team()) + & ((pg.enabled == 1) if pagetype == "Release Group" else (pg.status != "Archived")) + ) + .groupby(pg.name) + ) + options += query.run(as_dict=True) + + return {"options": options, "actions": available_actions()} + + +@jingrow.whitelist() +def update_permissions(user, ptype, updated): + values = [] + drop = [] + + for pagetype, docs in updated.items(): + for pg, updated_perms in docs.items(): + ptype_cap = ptype.capitalize() + old_perms = jingrow.get_all( + "Jcloud User Permission", + filters={ + "type": ptype_cap, + ptype: user, + "document_type": pagetype, + "document_name": pg, + }, + pluck="action", + ) + # perms to insert + add = set(updated_perms).difference(set(old_perms)) + values += [(jingrow.generate_hash(4), ptype_cap, pagetype, pg, user, a) for a in add] + + # perms to remove + remove = set(old_perms).difference(set(updated_perms)) + drop += jingrow.get_all( + "Jcloud User Permission", + filters={ + "type": ptype_cap, + ptype: user, + "document_type": pagetype, + "document_name": pg, + "action": ("in", remove), + }, + pluck="name", + ) + + if values: + jingrow.db.bulk_insert( + "Jcloud User Permission", + fields=["name", "type", "document_type", "document_name", ptype, "action"], + values=set(values), + ignore_duplicates=True, + ) + if drop: + jingrow.db.delete("Jcloud User Permission", {"name": ("in", drop)}) + jingrow.db.commit() + + +@jingrow.whitelist() +def groups(): + return jingrow.get_all("Jcloud Permission Group", {"team": get_current_team()}, ["name", "title"]) + + +@jingrow.whitelist() +def permission_group_users(name): + if get_current_team() != jingrow.db.get_value("Jcloud Permission Group", name, "team"): + jingrow.throw("You are not allowed to view this group") + + return jingrow.get_all("Jcloud Permission Group User", {"parent": name}, pluck="user") + + +@jingrow.whitelist() +def add_permission_group(title): + pg = jingrow.get_pg( + {"pagetype": "Jcloud Permission Group", "team": get_current_team(), "title": title} + ).insert(ignore_permissions=True) + return {"name": pg.name, "title": pg.title} + + +@jingrow.whitelist() +@protected("Jcloud Permission Group") +def remove_permission_group(name): + jingrow.db.delete("Jcloud User Permission", {"group": name}) + jingrow.delete_pg("Jcloud Permission Group", name) + + +@jingrow.whitelist() +@protected("Jcloud Permission Group") +def add_permission_group_user(name, user): + pg = jingrow.get_pg("Jcloud Permission Group", name) + pg.append("users", {"user": user}) + pg.save(ignore_permissions=True) + + +@jingrow.whitelist() +@protected("Jcloud Permission Group") +def remove_permission_group_user(name, user): + pg = jingrow.get_pg("Jcloud Permission Group", name) + for group_user in pg.users: + if group_user.user == user: + 
pg.remove(group_user) + pg.save(ignore_permissions=True) + break + + +@jingrow.whitelist() +def get_permission_roles(): + JcloudRole = jingrow.qb.PageType("Jcloud Role") + JcloudRoleUser = jingrow.qb.PageType("Jcloud Role User") + + return ( + jingrow.qb.from_(JcloudRole) + .select( + JcloudRole.name, + JcloudRole.admin_access, + JcloudRole.allow_billing, + JcloudRole.allow_apps, + JcloudRole.allow_partner, + JcloudRole.allow_site_creation, + JcloudRole.allow_bench_creation, + JcloudRole.allow_server_creation, + JcloudRole.allow_webhook_configuration, + ) + .join(JcloudRoleUser) + .on((JcloudRole.name == JcloudRoleUser.parent) & (JcloudRoleUser.user == jingrow.session.user)) + .where(JcloudRole.team == get_current_team()) + .run(as_dict=True) + ) + + +@jingrow.whitelist() +def get_user_ssh_keys(): + return jingrow.db.get_list( + "User SSH Key", + {"is_removed": 0, "user": jingrow.session.user}, + ["name", "ssh_fingerprint", "creation", "is_default"], + order_by="creation desc", + ) + + +@jingrow.whitelist(allow_guest=True) +# @rate_limit(limit=5, seconds=60 * 60) +def is_2fa_enabled(user): + return jingrow.db.get_value("User 2FA", user, "enabled") + + +@jingrow.whitelist(allow_guest=True) +@rate_limit(limit=5, seconds=60 * 60) +def verify_2fa(user, totp_code): + user_totp_secret = get_decrypted_password("User 2FA", user, "totp_secret") + verified = pyotp.TOTP(user_totp_secret).verify(totp_code) + + if not verified: + jingrow.throw("Invalid 2FA code", jingrow.AuthenticationError) + + return verified + + +@jingrow.whitelist() +def get_2fa_qr_code_url(): + """Get the QR code URL for 2FA provisioning""" + + if jingrow.db.exists("User 2FA", jingrow.session.user): + user_totp_secret = get_decrypted_password("User 2FA", jingrow.session.user, "totp_secret") + else: + user_totp_secret = pyotp.random_base32() + jingrow.get_pg( + { + "pagetype": "User 2FA", + "user": jingrow.session.user, + "totp_secret": user_totp_secret, + } + ).insert() + + return pyotp.totp.TOTP(user_totp_secret).provisioning_uri( + name=jingrow.session.user, issuer_name="Jingrow" + ) + + +@jingrow.whitelist() +def enable_2fa(totp_code): + """Enable 2FA for the user after verifying the TOTP code""" + + if jingrow.db.exists("User 2FA", jingrow.session.user): + user_totp_secret = get_decrypted_password("User 2FA", jingrow.session.user, "totp_secret") + else: + jingrow.throw(f"2FA is not enabled for {jingrow.session.user}") + + if pyotp.totp.TOTP(user_totp_secret).verify(totp_code): + jingrow.db.set_value("User 2FA", jingrow.session.user, "enabled", 1) + else: + jingrow.throw("Invalid TOTP code") + + +@jingrow.whitelist() +def disable_2fa(totp_code): + """Disable 2FA for the user after verifying the TOTP code""" + + if jingrow.db.exists("User 2FA", jingrow.session.user): + user_totp_secret = get_decrypted_password("User 2FA", jingrow.session.user, "totp_secret") + else: + jingrow.throw(f"2FA is not enabled for {jingrow.session.user}") + + if pyotp.totp.TOTP(user_totp_secret).verify(totp_code): + jingrow.db.set_value("User 2FA", jingrow.session.user, "enabled", 0) + else: + jingrow.throw("Invalid TOTP code") + + +# Not available for Telangana, Ladakh, and Other Territory +STATE_PINCODE_MAPPING = { + "Jammu and Kashmir": (180, 194), + "Himachal Pradesh": (171, 177), + "Punjab": (140, 160), + "Chandigarh": ((140, 140), (160, 160)), + "Uttarakhand": (244, 263), + "Haryana": (121, 136), + "Delhi": (110, 110), + "Rajasthan": (301, 345), + "Uttar Pradesh": (201, 285), + "Bihar": (800, 855), + "Sikkim": (737, 737), + "Arunachal Pradesh": 
(790, 792),
+    "Nagaland": (797, 798),
+    "Manipur": (795, 795),
+    "Mizoram": (796, 796),
+    "Tripura": (799, 799),
+    "Meghalaya": (793, 794),
+    "Assam": (781, 788),
+    "West Bengal": (700, 743),
+    "Jharkhand": (813, 835),
+    "Odisha": (751, 770),
+    "Chhattisgarh": (490, 497),
+    "Madhya Pradesh": (450, 488),
+    "Gujarat": (360, 396),
+    "Dadra and Nagar Haveli and Daman and Diu": ((362, 362), (396, 396)),
+    "Maharashtra": (400, 445),
+    "Karnataka": (560, 591),
+    "Goa": (403, 403),
+    "Lakshadweep Islands": (682, 682),
+    "Kerala": (670, 695),
+    "Tamil Nadu": (600, 643),
+    "Puducherry": ((533, 533), (605, 605), (607, 607), (609, 609), (673, 673)),
+    "Andaman and Nicobar Islands": (744, 744),
+    "Andhra Pradesh": (500, 535),
+}
+
+@jingrow.whitelist()
+def update_profile_email(email):
+    """
+    Update the user's email and sync the team's email fields as well as the
+    notification email of every site owned by the team.
+    """
+    if not email:
+        jingrow.throw("Email cannot be empty")
+
+    # Validate the email format
+    jingrow.utils.validate_email_address(email, True)
+
+    # Update the user's email
+    user = jingrow.session.user
+    user_pg = jingrow.get_pg("User", user)
+    user_pg.email = email
+    user_pg.save(ignore_permissions=True)
+
+    # Update the team's email fields
+    team = get_current_team(get_pg=True)
+    team.billing_email = email
+    team.notify_email = email
+
+    # Update the communication emails: first drop the existing records
+    to_remove = []
+    for i, comm_email in enumerate(team.communication_emails):
+        if comm_email.type in ["invoices", "marketplace_notifications"]:
+            to_remove.append(i)
+
+    # Delete from the end towards the front to avoid index shifting
+    for i in sorted(to_remove, reverse=True):
+        team.communication_emails.pop(i)
+
+    # Add the new communication emails
+    team.append("communication_emails", {
+        "type": "invoices",
+        "value": email
+    })
+
+    team.append("communication_emails", {
+        "type": "marketplace_notifications",
+        "value": email
+    })
+
+    team.save(ignore_permissions=True)
+
+    # Update the notification email of every site belonging to this team
+    sites = jingrow.get_all(
+        "Site",
+        filters={"team": team.name},
+        pluck="name"
+    )
+
+    for site_name in sites:
+        jingrow.db.set_value("Site", site_name, "notify_email", email)
+
+    # Commit the database changes
+    jingrow.db.commit()
+
+    return {
+        "user": user_pg,
+        "team": team
+    }
diff --git a/jcloud/api/aliyun_sms.py b/jcloud/api/aliyun_sms.py
new file mode 100644
index 0000000..f44b7d8
--- /dev/null
+++ b/jcloud/api/aliyun_sms.py
@@ -0,0 +1,173 @@
+import jingrow
+import requests
+import time
+import hmac
+import hashlib
+import base64
+import json
+from urllib.parse import quote
+import random
+
+class AliyunSMSClient:
+    """Aliyun SMS client, implemented as a singleton"""
+    _instance = None
+
+    @classmethod
+    def get_instance(cls):
+        """Return the singleton instance"""
+        if cls._instance is None:
+            cls._instance = cls()
+        return cls._instance
+
+    def __init__(self):
+        """Initialize the Aliyun SMS configuration"""
+        if AliyunSMSClient._instance is not None:
+            raise Exception("Use AliyunSMSClient.get_instance() to obtain the instance")
+
+        # Load the configuration
+        self.initialize()
+
+    def initialize(self):
+        """Read the Aliyun credentials from Jcloud Settings"""
+        try:
+            settings = jingrow.get_single("Jcloud Settings")
+            self.access_key_id = settings.get("aliyun_access_key_id")
+            self.access_secret = settings.get_password("aliyun_access_secret")
+
+        except Exception as e:
+            jingrow.log_error(f"Failed to initialize the Aliyun SMS client: {str(e)}")
+            self.access_key_id = None
+            self.access_secret = None
+
+    def send_sms(self, phone_numbers, template_code, template_param, sign_name):
+        """Main entry point for sending an SMS"""
+        if not self.access_key_id or not self.access_secret:
+            jingrow.log_error("Aliyun credentials are not configured, cannot send SMS")
+            return {"status": "error", "message": "Missing Aliyun access credentials"}
+
+        # Make sure the receiver numbers are a comma-separated string
+        if isinstance(phone_numbers, list):
+            phone_numbers = ','.join(phone_numbers)
+
+        # Make sure the template parameters are a JSON string
+        if isinstance(template_param, dict):
+            template_param = json.dumps(template_param)
+
+        # Aliyun API request parameters
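+        # The block below builds an Aliyun RPC-style signed GET request: the system
+        # parameters (Action, Version, UTC Timestamp, random SignatureNonce, ...) are
+        # merged with the business parameters, signed with HMAC-SHA1 over the sorted,
+        # percent-encoded query string (see _compute_signature below), and sent to the
+        # dysmsapi.aliyuncs.com endpoint as query parameters.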
+        parameters = {
+            "Action": "SendSms",
+            "Version": "2017-05-25",
+            "RegionId": "cn-hangzhou",
+            "PhoneNumbers": phone_numbers,
+            "SignName": sign_name,
+            "TemplateCode": template_code,
+            "TemplateParam": template_param,
+            "AccessKeyId": self.access_key_id,
+            "Timestamp": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),
+            "SignatureMethod": "HMAC-SHA1",
+            "SignatureVersion": "1.0",
+            "SignatureNonce": str(int(time.time() * 1000)),
+            "Format": "JSON"
+        }
+
+        # Compute the signature and add it to the parameters
+        parameters["Signature"] = self._compute_signature(parameters)
+
+        # Build the full request URL
+        url = "https://dysmsapi.aliyuncs.com/?" + "&".join(f"{key}={self._percent_encode(value)}" for key, value in parameters.items())
+
+        try:
+            response = requests.get(url)
+            try:
+                result = response.json()
+            except Exception:
+                result = {"Code": "ParseError", "Message": "Unable to parse the response data"}
+                jingrow.log_error(f"Unable to parse the API response: {response.text}")
+
+            # Evaluate the send result
+            if result.get("Code") == "OK":
+                return {
+                    "status": "success",
+                    "message": "SMS sent successfully",
+                    "receiver": phone_numbers,
+                    "content": template_param
+                }
+            else:
+                jingrow.log_error(f"SMS sending failed: {result}")
+                return {
+                    "status": "failed",
+                    "message": result.get("Message", "Sending failed"),
+                    "receiver": phone_numbers,
+                    "content": template_param,
+                    "response": result
+                }
+        except Exception as e:
+            jingrow.log_error(f"Exception while sending SMS: {str(e)}")
+            return {
+                "status": "error",
+                "message": f"An error occurred while sending: {str(e)}",
+                "receiver": phone_numbers,
+                "content": template_param
+            }
+
+    def _percent_encode(self, string):
+        """URL-encode a string"""
+        if not isinstance(string, str):
+            string = str(string)
+        return quote(string, safe='')
+
+    def _compute_signature(self, parameters):
+        """Compute the request signature"""
+        sorted_parameters = sorted(parameters.items())
+        query_string = '&'.join(f'{self._percent_encode(k)}={self._percent_encode(v)}' for k, v in sorted_parameters)
+        string_to_sign = f'GET&%2F&{self._percent_encode(query_string)}'
+        h = hmac.new((self.access_secret + "&").encode(), string_to_sign.encode(), hashlib.sha1)
+        signature = base64.b64encode(h.digest()).decode()
+        return signature
+
+# Create the singleton instance
+sms_client = AliyunSMSClient.get_instance()
+
+def send_custom_sms(phone_numbers, message_content, sign_name, template_code):
+    return sms_client.send_sms(phone_numbers, template_code, message_content, sign_name)
+
+def generate_verification_code(length=4):
+    """Generate a random numeric verification code of the given length"""
+    return ''.join([str(random.randint(0, 9)) for _ in range(length)])
+
+def send_verification_code(mobile_no, sign_name, template_code):
+    """Generate a verification code, send it to the given mobile number and cache it"""
+    verification_code = generate_verification_code()
+    cache_key = f"verification_code:{template_code}:{mobile_no}"
+    jingrow.cache().set_value(cache_key, verification_code, expires_in_sec=600)  # the code is valid for 10 minutes
+
+    message_content = {"code": verification_code}
+    return send_custom_sms(mobile_no, message_content, sign_name, template_code)
+
+def verify_code(mobile_no, verification_code, template_code):
+    """Check whether the verification code entered by the user is correct"""
+    cache_key = f"verification_code:{template_code}:{mobile_no}"
+    cached_code = jingrow.cache().get_value(cache_key)
+    if cached_code and cached_code == verification_code:
+        jingrow.cache().delete_value(cache_key)
+        return True
+    return False
+
+def send_renew_sms(phone_numbers, days_remaining, site_end_date):
+    """Send a site renewal reminder SMS"""
+    template_code = "SMS_481605243"  # template code of the site renewal reminder SMS
+    sign_name = "向日葵网络"  # SMS signature name registered with Aliyun
+
+    message_content = {
+        "day": str(days_remaining),
+        "site_end_date": str(site_end_date)
+    }
+
+    return send_custom_sms(phone_numbers, message_content, sign_name, template_code)
+
+# Initialize the client when the module is imported
+try:
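+    # Note: `sms_client` is already created right after the class definition above, so
+    # this block simply re-binds the same singleton; the try/except only makes sure an
+    # initialization failure is logged instead of breaking the module import.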
+ sms_client = AliyunSMSClient.get_instance() +except Exception as e: + jingrow.log_error(f"阿里云SMS客户端自动初始化失败: {str(e)}") \ No newline at end of file diff --git a/jcloud/api/analytics.py b/jcloud/api/analytics.py new file mode 100644 index 0000000..6901260 --- /dev/null +++ b/jcloud/api/analytics.py @@ -0,0 +1,923 @@ +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + +from __future__ import annotations + +from contextlib import suppress +from datetime import datetime, timedelta +from enum import Enum +from typing import TYPE_CHECKING, Final, TypedDict + +import jingrow +import requests +import sqlparse +from elasticsearch import Elasticsearch +from elasticsearch_dsl import A, Search +from jingrow.utils import ( + convert_utc_to_timezone, + flt, + get_datetime, +) +from jingrow.utils.password import get_decrypted_password +from pytz import timezone as pytz_timezone + +from jcloud.agent import Agent +from jcloud.api.site import protected +from jcloud.jcloud.pagetype.site_plan.site_plan import get_plan_config +from jcloud.jcloud.report.binary_log_browser.binary_log_browser import ( + get_data as get_binary_log_data, +) +from jcloud.jcloud.report.mariadb_slow_queries.mariadb_slow_queries import execute, normalize_query + +if TYPE_CHECKING: + from elasticsearch_dsl.response import AggResponse + from elasticsearch_dsl.response.aggs import FieldBucket, FieldBucketData + + class Dataset(TypedDict): + """Single element of list of Datasets returned for stacked histogram chart""" + + path: str + values: list[float | int] # List of values for each timestamp [43.0, 0, 0...] + stack: Final[str] + + class HistBucket(FieldBucket): + key_as_string: str + avg_of_duration: AggResponse + sum_of_duration: AggResponse + pg_count: int + key: int + + class HistogramOfMethod(FieldBucketData): + buckets: list[HistBucket] + + class PathBucket(FieldBucket): + key: str + histogram_of_method: HistogramOfMethod + + +class ResourceType(Enum): + SITE = "site" + SERVER = "server" + + +class AggType(Enum): + COUNT = "count" + DURATION = "duration" + AVERAGE_DURATION = "average_duration" + + +class StackedGroupByChart: + search: Search + to_s_divisor: float = 1e6 + normalize_slow_logs: bool = False + group_by_field: str + MAX_NO_OF_PATHS: int = 10 + + def __init__( + self, + name: str, + agg_type: AggType, + resource_type: ResourceType, + timezone: str, + timespan: int, + timegrain: int, + ): + self.log_server = jingrow.db.get_single_value("Jcloud Settings", "log_server") + if not self.log_server: + return + + self.url = f"https://{self.log_server}/elasticsearch" + self.password = str(get_decrypted_password("Log Server", self.log_server, "kibana_password")) + + self.name = name + self.agg_type = agg_type + self.resource_type = resource_type + self.timezone = timezone + self.timespan = timespan + self.timegrain = timegrain + + self.setup_search_filters() + self.setup_search_aggs() + + def setup_search_filters(self): + es = Elasticsearch(self.url, basic_auth=("jingrow", self.password)) + self.start, self.end = get_rounded_boundaries(self.timespan, self.timegrain, self.timezone) + self.search = ( + Search(using=es, index="filebeat-*") + .filter( + "range", + **{ + "@timestamp": { + "gte": int(self.start.timestamp() * 1000), + "lte": int(self.end.timestamp() * 1000), + } + }, + ) + .extra(size=0) + ) + + def setup_search_aggs(self): + if not self.group_by_field: + jingrow.throw("Group by field not set") + if AggType(self.agg_type) is AggType.COUNT: + self.search.aggs.bucket( + "method_path", + 
"terms", + field=self.group_by_field, + size=self.MAX_NO_OF_PATHS, + order={"path_count": "desc"}, + ).bucket("histogram_of_method", self.histogram_of_method()) + self.search.aggs["method_path"].bucket("path_count", self.count_of_values()) + + elif AggType(self.agg_type) is AggType.DURATION: + self.search.aggs.bucket( + "method_path", + "terms", + field=self.group_by_field, + size=self.MAX_NO_OF_PATHS, + order={"outside_sum": "desc"}, + ).bucket("histogram_of_method", self.histogram_of_method()).bucket( + "sum_of_duration", self.sum_of_duration() + ) + self.search.aggs["method_path"].bucket("outside_sum", self.sum_of_duration()) # for sorting + + elif AggType(self.agg_type) is AggType.AVERAGE_DURATION: + self.search.aggs.bucket( + "method_path", + "terms", + field=self.group_by_field, + size=self.MAX_NO_OF_PATHS, + order={"outside_avg": "desc"}, + ).bucket("histogram_of_method", self.histogram_of_method()).bucket( + "avg_of_duration", self.avg_of_duration() + ) + self.search.aggs["method_path"].bucket("outside_avg", self.avg_of_duration()) + + def histogram_of_method(self): + return A( + "date_histogram", + field="@timestamp", + fixed_interval=f"{self.timegrain}s", + time_zone=self.timezone, + min_pg_count=0, + ) + + def count_of_values(self): + return A("value_count", field=self.group_by_field) + + def sum_of_duration(self): + raise NotImplementedError + + def avg_of_duration(self): + raise NotImplementedError + + def exclude_top_k_data(self, datasets: list[Dataset]): + raise NotImplementedError + + def get_other_bucket(self, datasets: list[Dataset], labels): + # filters present in search already, clear out aggs and response + self.search.aggs._params = {} + del self.search._response + + self.exclude_top_k_data(datasets) + self.search.aggs.bucket("histogram_of_method", self.histogram_of_method()) + + if AggType(self.agg_type) is AggType.COUNT: + self.search.aggs["histogram_of_method"].bucket("path_count", self.count_of_values()) + elif AggType(self.agg_type) is AggType.DURATION: + self.search.aggs["histogram_of_method"].bucket("sum_of_duration", self.sum_of_duration()) + elif AggType(self.agg_type) is AggType.AVERAGE_DURATION: + self.search.aggs["histogram_of_method"].bucket("avg_of_duration", self.avg_of_duration()) + + aggs = self.search.execute().aggregations + + aggs.key = "Other" # Set custom key Other bucket + return self.get_histogram_chart(aggs, labels) + + def get_histogram_chart( + self, + path_bucket: PathBucket, + labels: list[datetime], + ): + path_data = { + "path": path_bucket.key, + "values": [0] * len(labels), + "stack": "path", + } + hist_bucket: HistBucket + for hist_bucket in path_bucket.histogram_of_method.buckets: + label = get_datetime(hist_bucket.key_as_string) + if label not in labels: + continue + path_data["values"][labels.index(label)] = ( + (flt(hist_bucket.avg_of_duration.value) / self.to_s_divisor) + if AggType(self.agg_type) is AggType.AVERAGE_DURATION + else ( + flt(hist_bucket.sum_of_duration.value) / self.to_s_divisor + if AggType(self.agg_type) is AggType.DURATION + else hist_bucket.pg_count + if AggType(self.agg_type) is AggType.COUNT + else 0 + ) + ) + return path_data + + def get_stacked_histogram_chart(self): + aggs: AggResponse = self.search.execute().aggregations + + timegrain_delta = timedelta(seconds=self.timegrain) + labels = [ + self.start + i * timegrain_delta for i in range((self.end - self.start) // timegrain_delta + 1) + ] + # method_path has buckets of timestamps with method(eg: avg) of that duration + datasets = [] + + path_bucket: 
PathBucket + for path_bucket in aggs.method_path.buckets: + datasets.append(self.get_histogram_chart(path_bucket, labels)) + + if datasets: + datasets.append(self.get_other_bucket(datasets, labels)) + + if self.normalize_slow_logs: + datasets = normalize_datasets(datasets) + + labels = [label.replace(tzinfo=None) for label in labels] + return {"datasets": datasets, "labels": labels} + + def run(self): + log_server = jingrow.db.get_single_value("Jcloud Settings", "log_server") + if not log_server: + return {"datasets": [], "labels": []} + return self.get_stacked_histogram_chart() + + +class RequestGroupByChart(StackedGroupByChart): + def __init__(self, name, agg_type, resource_type, timezone, timespan, timegrain): + super().__init__(name, agg_type, resource_type, timezone, timespan, timegrain) + + def sum_of_duration(self): + return A("sum", field="json.duration") + + def avg_of_duration(self): + return A("avg", field="json.duration") + + def exclude_top_k_data(self, datasets): + self.search.exclude("match_phrase", json__request__path=list(map(lambda x: x["path"], datasets))) + + def setup_search_filters(self): + super().setup_search_filters() + self.search = self.search.filter("match_phrase", json__transaction_type="request").exclude( + "match_phrase", json__request__path="/api/method/ping" + ) + if self.resource_type == ResourceType.SITE: + self.search = self.search.filter("match_phrase", json__site=self.name) + self.group_by_field = "json.request.path" + elif self.resource_type == ResourceType.SERVER: + self.search = self.search.filter("match_phrase", agent__name=self.name) + self.group_by_field = "json.site" + + +class BackgroundJobGroupByChart(StackedGroupByChart): + def __init__(self, name, agg_type, resource_type, timezone, timespan, timegrain): + super().__init__(name, agg_type, resource_type, timezone, timespan, timegrain) + + def sum_of_duration(self): + return A("sum", field="json.duration") + + def avg_of_duration(self): + return A("avg", field="json.duration") + + def exclude_top_k_data(self, datasets): + self.search.exclude("match_phrase", json__job__method=list(map(lambda x: x["path"], datasets))) + + def setup_search_filters(self): + super().setup_search_filters() + self.search = self.search.filter("match_phrase", json__transaction_type="job") + if self.resource_type == ResourceType.SITE: + self.search = self.search.filter("match_phrase", json__site=self.name) + self.group_by_field = "json.job.method" + elif self.resource_type == ResourceType.SERVER: + self.search = self.search.filter("match_phrase", agent__name=self.name) + self.group_by_field = "json.site" + + +class SlowLogGroupByChart(StackedGroupByChart): + to_s_divisor = 1e9 + database_name = None + + def __init__( + self, + name, + agg_type, + resource_type, + timezone, + timespan, + timegrain, + normalize_slow_logs=False, + ): + super().__init__(name, agg_type, resource_type, timezone, timespan, timegrain) + self.normalize_slow_logs = normalize_slow_logs + + def sum_of_duration(self): + return A("sum", field="event.duration") + + def avg_of_duration(self): + return A("avg", field="event.duration") + + def exclude_top_k_data(self, datasets): + self.search.exclude("match_phrase", mysql__slowlog__query=list(map(lambda x: x["path"], datasets))) + + def setup_search_filters(self): + super().setup_search_filters() + self.search = self.search.exclude( + "wildcard", + mysql__slowlog__query="SELECT /\*!40001 SQL_NO_CACHE \*/*", # noqa + ) + if self.resource_type == ResourceType.SITE: + self.database_name = 
jingrow.db.get_value("Site", self.name, "database_name") + if self.database_name: + self.search = self.search.filter("match", mysql__slowlog__current_user=self.database_name) + self.group_by_field = "mysql.slowlog.query" + elif self.resource_type == ResourceType.SERVER: + self.search = self.search.filter("match", agent__name=self.name) + self.group_by_field = "mysql.slowlog.current_user" + + def run(self): + if not self.database_name: + return {"datasets": [], "labels": []} + return super().run() + + +@jingrow.whitelist() +@protected("Site") +def get(name, timezone, duration="7d"): + timespan, timegrain = { + "1h": (60 * 60, 60), + "6h": (6 * 60 * 60, 5 * 60), + "24h": (24 * 60 * 60, 30 * 60), + "7d": (7 * 24 * 60 * 60, 3 * 60 * 60), + "15d": (15 * 24 * 60 * 60, 6 * 60 * 60), + }[duration] + + request_data = get_usage(name, "request", timezone, timespan, timegrain) + uptime_data = get_uptime(name, timezone, timespan, timegrain) + + plan = jingrow.get_cached_pg("Site", name).plan + plan_limit = get_plan_config(plan).get("rate_limit", {}).get("limit") if plan else 0 + + return { + "usage_counter": [{"value": r.max, "date": r.date} for r in request_data], + "request_count": [{"value": r.count, "date": r.date} for r in request_data], + "request_cpu_time": [{"value": r.duration, "date": r.date} for r in request_data], + "uptime": (uptime_data + [{}] * 60)[:60], + "plan_limit": plan_limit, + } + + +@jingrow.whitelist() +def get_advanced_analytics(name, timezone, duration="7d"): + timespan, timegrain = { + "1h": (60 * 60, 60), + "6h": (6 * 60 * 60, 5 * 60), + "24h": (24 * 60 * 60, 30 * 60), + "7d": (7 * 24 * 60 * 60, 3 * 60 * 60), + "15d": (15 * 24 * 60 * 60, 6 * 60 * 60), + }[duration] + + job_data = get_usage(name, "job", timezone, timespan, timegrain) + + return { + "request_count_by_path": get_request_by_(name, "count", timezone, timespan, timegrain), + "request_duration_by_path": get_request_by_(name, "duration", timezone, timespan, timegrain), + "average_request_duration_by_path": get_request_by_( + name, "average_duration", timezone, timespan, timegrain + ), + "background_job_count_by_method": get_background_job_by_method( + name, "count", timezone, timespan, timegrain + ), + "background_job_duration_by_method": get_background_job_by_method( + name, "duration", timezone, timespan, timegrain + ), + "average_background_job_duration_by_method": get_background_job_by_method( + name, "average_duration", timezone, timespan, timegrain + ), + "slow_logs_by_count": get_slow_logs(name, "count", timezone, timespan, timegrain), + "slow_logs_by_duration": get_slow_logs(name, "duration", timezone, timespan, timegrain), + "job_count": [{"value": r.count, "date": r.date} for r in job_data], + "job_cpu_time": [{"value": r.duration, "date": r.date} for r in job_data], + } + + +def get_more_request_detail_fn_names(): + return { + "/api/method/run_pg_method": get_run_pg_method_methodnames.__name__, + "/api/method/jingrow.desk.query_report.run": get_query_report_run_reports.__name__, + } + + +def get_more_background_job_detail_fn_names(): + return { + "generate_report": get_generate_report_reports.__name__, + } + + +@jingrow.whitelist() +@protected("Site") +def daily_usage(name, timezone): + timespan = 7 * 24 * 60 * 60 + timegrain = 24 * 60 * 60 + request_data = get_usage(name, "request", timezone, timespan, timegrain) + + plan = jingrow.get_cached_pg("Site", name).plan + + return { + "data": [{"value": r.max, "date": r.date} for r in request_data], + "plan_limit": get_plan_config(plan)["rate_limit"]["limit"] 
if plan else 0, + } + + +def rounded_time(dt=None, round_to=60): + """Round a datetime object to any time lapse in seconds + dt : datetime.datetime object, default now. + round_to : Closest number of seconds to round to, default 1 minute. + ref: https://stackoverflow.com/questions/3463930/how-to-round-the-minute-of-a-datetime-object/10854034#10854034 + """ + if dt is None: + dt = datetime.datetime.now() + seconds = (dt.replace(tzinfo=None) - dt.min).seconds + rounding = (seconds + round_to / 2) // round_to * round_to + return dt + timedelta(0, rounding - seconds, -dt.microsecond) + + +def get_rounded_boundaries(timespan: int, timegrain: int, timezone: str = "UTC"): + """ + Round the start and end time to the nearest interval, because Elasticsearch does this + """ + end = datetime.now(pytz_timezone(timezone)) + start = jingrow.utils.add_to_date(end, seconds=-timespan) + + return rounded_time(start, timegrain), rounded_time(end, timegrain) + + +def get_uptime(site, timezone, timespan, timegrain): + monitor_server = jingrow.db.get_single_value("Jcloud Settings", "monitor_server") + if not monitor_server: + return [] + + url = f"https://{monitor_server}/prometheus/api/v1/query_range" + password = get_decrypted_password("Monitor Server", monitor_server, "grafana_password") + + end = datetime.now(pytz_timezone(timezone)) + start = jingrow.utils.add_to_date(end, seconds=-timespan) + query = { + "query": ( + f'sum(sum_over_time(probe_success{{job="site", instance="{site}"}}[{timegrain}s])) by (instance) / sum(count_over_time(probe_success{{job="site", instance="{site}"}}[{timegrain}s])) by (instance)' + ), + "start": start.timestamp(), + "end": end.timestamp(), + "step": f"{timegrain}s", + } + + response = requests.get(url, params=query, auth=("jingrow", password)).json() + + buckets = [] + if not response["data"]["result"]: + return [] + for timestamp, value in response["data"]["result"][0]["values"]: + buckets.append( + jingrow._dict( + { + "date": convert_utc_to_timezone(datetime.fromtimestamp(timestamp), timezone), + "value": float(value), + } + ) + ) + return buckets + + +def normalize_datasets(datasets: list[Dataset]) -> list[Dataset]: + """Merge similar queries and sum their durations/counts""" + n_datasets = {} + for data_dict in datasets: + n_query = normalize_query(data_dict["path"]) + if n_datasets.get(n_query): + n_datasets[n_query]["values"] = [ + x + y for x, y in zip(n_datasets[n_query]["values"], data_dict["values"]) + ] + else: + data_dict["path"] = n_query + n_datasets[n_query] = data_dict + return list(n_datasets.values()) + + +def get_request_by_( + name, agg_type: AggType, timezone: str, timespan: int, timegrain: int, resource_type=ResourceType.SITE +): + """ + :param name: site/server name depending on resource_type + :param agg_type: count, duration, average_duration + :param timezone: timezone of timespan + :param timespan: duration in seconds + :param timegrain: interval in seconds + :param resource_type: filter by site or server + """ + return RequestGroupByChart(name, agg_type, resource_type, timezone, timespan, timegrain).run() + + +def get_background_job_by_method(site, agg_type, timezone, timespan, timegrain): + return BackgroundJobGroupByChart(site, agg_type, ResourceType.SITE, timezone, timespan, timegrain).run() + + +def get_slow_logs( + name, agg_type, timezone, timespan, timegrain, resource_type=ResourceType.SITE, normalize=False +): + return SlowLogGroupByChart(name, agg_type, resource_type, timezone, timespan, timegrain, normalize).run() + + +class 
RunDocMethodMethodNames(RequestGroupByChart): + def __init__(self, name, agg_type, timezone, timespan, timegrain): + super().__init__(name, agg_type, ResourceType.SITE, timezone, timespan, timegrain) + self.group_by_field = "json.methodname" + + def setup_search_filters(self): + super().setup_search_filters() + self.search = self.search.filter("match_phrase", json__request__path="/api/method/run_pg_method") + + +def get_run_pg_method_methodnames(site, agg_type, timezone, timespan, timegrain): + return RunDocMethodMethodNames(site, agg_type, timezone, timespan, timegrain).run() + + +class QueryReportRunReports(RequestGroupByChart): + def __init__(self, name, agg_type, timezone, timespan, timegrain): + super().__init__(name, agg_type, ResourceType.SITE, timezone, timespan, timegrain) + self.group_by_field = "json.report" + + def setup_search_filters(self): + super().setup_search_filters() + self.search = self.search.filter( + "match_phrase", json__request__path="/api/method/jingrow.desk.query_report.run" + ) + + +def get_query_report_run_reports(site, agg_type, timezone, timespan, timegrain): + return QueryReportRunReports(site, agg_type, timezone, timespan, timegrain).run() + + +class GenerateReportReports(BackgroundJobGroupByChart): + def __init__(self, name, agg_type, timezone, timespan, timegrain): + super().__init__(name, agg_type, ResourceType.SITE, timezone, timespan, timegrain) + self.group_by_field = "json.report" + + def setup_search_filters(self): + super().setup_search_filters() + self.search = self.search.filter("match_phrase", json__job__method="generate_report") + + +def get_generate_report_reports(site, agg_type, timezone, timespan, timegrain): + return GenerateReportReports(site, agg_type, timezone, timespan, timegrain).run() + + +def get_usage(site, type, timezone, timespan, timegrain): + log_server = jingrow.db.get_single_value("Jcloud Settings", "log_server") + if not log_server: + return {"datasets": [], "labels": []} + + url = f"https://{log_server}/elasticsearch/filebeat-*/_search" + password = get_decrypted_password("Log Server", log_server, "kibana_password") + + query = { + "aggs": { + "date_histogram": { + "date_histogram": { + "field": "@timestamp", + "fixed_interval": f"{timegrain}s", + }, + "aggs": { + "duration": {"sum": {"field": "json.duration"}}, + "count": {"value_count": {"field": "json.duration"}}, + "max": {"max": {"field": "json.request.counter"}}, + }, + } + }, + "size": 0, + "query": { + "bool": { + "filter": [ + {"match_phrase": {"json.transaction_type": type}}, + {"match_phrase": {"json.site": site}}, + {"range": {"@timestamp": {"gte": f"now-{timespan}s", "lte": "now"}}}, + ] + } + }, + } + + response = requests.post(url, json=query, auth=("jingrow", password)).json() + + buckets = [] + + if not response.get("aggregations"): + return {"datasets": [], "labels": []} + + for bucket in response["aggregations"]["date_histogram"]["buckets"]: + buckets.append( + jingrow._dict( + { + "date": convert_utc_to_timezone( + get_datetime(bucket["key_as_string"]).replace(tzinfo=None), + timezone, + ), + "count": bucket["count"]["value"], + "duration": bucket["duration"]["value"], + "max": bucket["max"]["value"], + } + ) + ) + return buckets + + +def get_current_cpu_usage(site): + try: + log_server = jingrow.db.get_single_value("Jcloud Settings", "log_server") + if not log_server: + return 0 + + url = f"https://{log_server}/elasticsearch/filebeat-*/_search" + password = get_decrypted_password("Log Server", log_server, "kibana_password") + + query = { + "query": { + 
"bool": { + "filter": [ + {"match_phrase": {"json.transaction_type": "request"}}, + {"match_phrase": {"json.site": site}}, + ] + } + }, + "sort": {"@timestamp": "desc"}, + "size": 1, + } + + response = requests.post(url, json=query, auth=("jingrow", password)).json() + hits = response["hits"]["hits"] + if hits: + return hits[0]["_source"]["json"]["request"].get("counter", 0) + return 0 + except Exception: + return 0 + + +def get_current_cpu_usage_for_sites_on_server(server): + result = {} + with suppress(Exception): + log_server = jingrow.db.get_single_value("Jcloud Settings", "log_server") + if not log_server: + return result + + url = f"https://{log_server}/elasticsearch/filebeat-*/_search" + password = get_decrypted_password("Log Server", log_server, "kibana_password") + + query = { + "aggs": { + "0": { + "terms": {"field": "json.site", "size": 1000}, + "aggs": { + "usage": { + "filter": {"exists": {"field": "json.request.counter"}}, + "aggs": { + "counter": { + "top_metrics": { + "metrics": {"field": "json.request.counter"}, + "size": 1, + "sort": {"@timestamp": "desc"}, + } + } + }, + } + }, + } + }, + "size": 0, + "query": { + "bool": { + "filter": [ + { + "bool": { + "filter": [ + { + "bool": { + "should": [ + {"term": {"json.transaction_type": {"value": "request"}}} + ], + "minimum_should_match": 1, + } + }, + { + "bool": { + "should": [{"term": {"agent.name": {"value": server}}}], + "minimum_should_match": 1, + } + }, + ] + } + }, + {"range": {"@timestamp": {"gte": "now-1d"}}}, + ] + } + }, + } + + response = requests.post(url, json=query, auth=("jingrow", password)).json() + for row in response["aggregations"]["0"]["buckets"]: + site = row["key"] + metric = row["usage"]["counter"]["top"] + if metric: + result[site] = metric[0]["metrics"]["json.request.counter"] + return result + + +@jingrow.whitelist() +@protected("Site") +def request_logs(name, timezone, date, sort=None, start=0): + log_server = jingrow.db.get_single_value("Jcloud Settings", "log_server") + if not log_server: + return [] + + url = f"https://{log_server}/elasticsearch/filebeat-*/_search" + password = get_decrypted_password("Log Server", log_server, "kibana_password") + + sort_value = { + "Time (Ascending)": {"@timestamp": "asc"}, + "Time (Descending)": {"@timestamp": "desc"}, + "CPU Time (Descending)": {"json.duration": "desc"}, + }[sort or "CPU Time (Descending)"] + + query = { + "query": { + "bool": { + "filter": [ + {"match_phrase": {"json.transaction_type": "request"}}, + {"match_phrase": {"json.site": name}}, + {"range": {"@timestamp": {"gt": f"{date}||-1d/d", "lte": f"{date}||/d"}}}, + ], + "must_not": [{"match_phrase": {"json.request.path": "/api/method/ping"}}], + } + }, + "sort": sort_value, + "from": start, + "size": 10, + } + + response = requests.post(url, json=query, auth=("jingrow", password)).json() + out = [] + for d in response["hits"]["hits"]: + data = d["_source"]["json"] + data["timestamp"] = convert_utc_to_timezone( + jingrow.utils.get_datetime(data["timestamp"]).replace(tzinfo=None), timezone + ) + out.append(data) + + return out + + +@jingrow.whitelist() +@protected("Site") +def binary_logs(name, start_time, end_time, pattern: str = ".*", max_lines: int = 4000): + filters = jingrow._dict( + site=name, + database=jingrow.db.get_value("Site", name, "database_name"), + start_datetime=start_time, + stop_datetime=end_time, + pattern=pattern, + max_lines=max_lines, + ) + + return get_binary_log_data(filters) + + +@jingrow.whitelist() +@protected("Site") +def mariadb_processlist(site): + site = 
jingrow.get_pg("Site", site) + agent = Agent(site.server) + rows = agent.fetch_database_processes(site) + for row in rows: + row["state"] = row["state"].capitalize() + row["query"] = sqlparse.format((row["query"] or "").strip(), keyword_case="upper", reindent=True) + return rows + + +@jingrow.whitelist() +@protected("Site") +def mariadb_slow_queries( + name, + start_datetime, + stop_datetime, + max_lines=1000, + search_pattern=".*", + normalize_queries=True, + analyze=False, +): + meta = jingrow._dict( + { + "site": name, + "start_datetime": start_datetime, + "stop_datetime": stop_datetime, + "max_lines": max_lines, + "search_pattern": search_pattern, + "normalize_queries": normalize_queries, + "analyze": analyze, + } + ) + columns, data = execute(filters=meta) + return {"columns": columns, "data": data} + + +@jingrow.whitelist() +@protected("Site") +def deadlock_report(name, start_datetime, stop_datetime, max_log_size=500): + from jcloud.jcloud.report.mariadb_deadlock_browser.mariadb_deadlock_browser import execute + + meta = jingrow._dict( + { + "site": name, + "start_datetime": start_datetime, + "stop_datetime": stop_datetime, + "max_log_size": max_log_size, + } + ) + _, data = execute(filters=meta) + return data + + +# MARKETPLACE - Plausible +@jingrow.whitelist(allow_guest=True) +@protected("Marketplace App") +def plausible_analytics(name): + response = {} + settings = jingrow.get_single("Jcloud Settings") + api_endpoints = { + "aggregate": "/api/v1/stats/aggregate", + "timeseries": "/api/v1/stats/timeseries", + } + params = { + "site_id": settings.plausible_site_id, + "period": "30d", + "metrics": "visitors,pageviews", + "filters": f"visit:page==/marketplace/apps/{name}", + } + headers = {"Authorization": f"Bearer {settings.get_password('plausible_api_key')}"} + + for api_type, endpoint in api_endpoints.items(): + res = requests.get(settings.plausible_url + endpoint, params=params, headers=headers) + if res.status_code == 200 and res.json().get("results"): + res = res.json().get("results") + if api_type == "aggregate": + response.update({"agg_pageviews": res["pageviews"], "agg_visitors": res["visitors"]}) + elif api_type == "timeseries": + pageviews = [{"value": d["pageviews"], "date": d["date"]} for d in res] + unique_visitors = [{"value": d["visitors"], "date": d["date"]} for d in res] + response.update({"pageviews": pageviews, "visitors": unique_visitors}) + + response.update( + { + "weekly_installs": jingrow.db.sql( + f""" + SELECT DATE_FORMAT(sa.creation, '%Y-%m-%d') AS date, COUNT(*) AS value + FROM `tabSite Activity` as sa + WHERE sa.action = 'Install App' + AND sa.creation >= DATE_SUB(CURDATE(), INTERVAL 8 WEEK) + AND sa.reason = '{name}' + GROUP BY WEEK(sa.creation) + ORDER BY date + """, + as_dict=True, + ), + } + ) + + return response + + +def get_pagetype_name(table_name: str) -> str: + return table_name.removeprefix("tab") + + +@jingrow.whitelist() +@protected("Site") +def mariadb_add_suggested_index(name, table, column): + record_exists = jingrow.db.exists( + "Agent Job", + { + "site": name, + "status": ["in", ["Undelivered", "Running", "Pending"]], + "job_type": "Add Database Index", + }, + ) + if record_exists: + jingrow.throw("There is already a pending job for Add Database Index. 
Please wait until finished.") + pagetype = get_pagetype_name(table) + site = jingrow.get_cached_pg("Site", name) + agent = Agent(site.server) + agent.add_database_index(site, pagetype=pagetype, columns=[column]) diff --git a/jcloud/api/app.py b/jcloud/api/app.py new file mode 100644 index 0000000..d69c3ed --- /dev/null +++ b/jcloud/api/app.py @@ -0,0 +1,41 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2019, JINGROW +# For license information, please see license.txt + + +import json +from typing import TYPE_CHECKING + +import jingrow +from jcloud.jcloud.pagetype.app.app import new_app +from jcloud.utils import get_current_team + +if TYPE_CHECKING: + from jcloud.jcloud.pagetype.app.app import App + from jcloud.jcloud.pagetype.release_group.release_group import ReleaseGroup + + +@jingrow.whitelist() +def new(app): + if isinstance(app, str): + app = json.loads(app) + + name = app["name"] + team = get_current_team() + + if jingrow.db.exists("App", name): + app_pg: "App" = jingrow.get_pg("App", name) + else: + app_pg: "App" = new_app(name, app["title"]) + group: "ReleaseGroup" = jingrow.get_pg("Release Group", app["group"]) + + source = app_pg.add_source( + group.version, + app["repository_url"], + app["branch"], + team, + app["github_installation_id"] if "github_installation_id" in app else None, + ) + + group.update_source(source) + return group.name diff --git a/jcloud/api/bench.py b/jcloud/api/bench.py new file mode 100644 index 0000000..ada4024 --- /dev/null +++ b/jcloud/api/bench.py @@ -0,0 +1,1086 @@ +# Copyright (c) 2019, JINGROW +# For license information, please see license.txt +from __future__ import annotations + +import re +from collections import OrderedDict +from typing import TYPE_CHECKING + +import jingrow +from jingrow.core.utils import find, find_all +from jingrow.model.naming import append_number_if_name_exists +from jingrow.utils import flt, sbool + +from jcloud.api.github import branches +from jcloud.api.site import protected +from jcloud.jcloud.pagetype.agent_job.agent_job import job_detail +from jcloud.jcloud.pagetype.app_patch.app_patch import create_app_patch +from jcloud.jcloud.pagetype.bench_update.bench_update import get_bench_update +from jcloud.jcloud.pagetype.cluster.cluster import Cluster +from jcloud.jcloud.pagetype.marketplace_app.marketplace_app import ( + get_total_installs_by_app, +) +from jcloud.jcloud.pagetype.release_group.release_group import ( + ReleaseGroup, + new_release_group, +) +from jcloud.jcloud.pagetype.team.team import get_child_team_members +from jcloud.utils import ( + get_app_tag, + get_client_blacklisted_keys, + get_current_team, + unique, +) + +if TYPE_CHECKING: + from jcloud.jcloud.pagetype.app_source.app_source import AppSource + from jcloud.jcloud.pagetype.bench.bench import Bench + from jcloud.jcloud.pagetype.deploy_candidate.deploy_candidate import DeployCandidate + + +@jingrow.whitelist() +def new(bench): + team = get_current_team(get_pg=True) + if not team.enabled: + jingrow.throw("You cannot create a new bench because your account is disabled") + + if exists(bench["title"]): + jingrow.throw("A bench exists with the same name") + + if bench["server"] and not ( + jingrow.session.data.user_type == "System User" + or jingrow.db.get_value("Server", bench["server"], "team") == team.name + ): + jingrow.throw("You can only create benches on your servers") + + apps = [{"app": app["name"], "source": app["source"]} for app in bench["apps"]] + group = new_release_group( + bench["title"], + bench["version"], + apps, + team.name, + 
bench["cluster"], + bench["saas_app"] if jingrow.db.exists("Saas App", bench["saas_app"]) else "", + bench["server"], + ) + return group.name + + +@jingrow.whitelist() +@protected("Release Group") +def get(name): + group = jingrow.get_pg("Release Group", name) + return { + "name": group.name, + "title": group.title, + "team": group.team, + "version": group.version, + "status": get_group_status(name), + "last_updated": group.modified, + "creation": group.creation, + "saas_app": group.saas_app or "", + "public": group.public, + "no_sites": jingrow.db.count("Site", {"group": group.name, "status": "Active"}), + "bench_tags": [{"name": x.tag, "tag": x.tag_name} for x in group.tags], + "tags": jingrow.get_all( + "Jcloud Tag", {"team": group.team, "pagetype_name": "Release Group"}, ["name", "tag"] + ), + } + + +def get_group_status(name): + active_benches = jingrow.get_all( + "Bench", {"group": name, "status": "Active"}, limit=1, order_by="creation desc" + ) + + return "Active" if active_benches else "Awaiting Deploy" + + +@jingrow.whitelist() +def all(server=None, bench_filter=None): + if bench_filter is None: + bench_filter = {"status": "", "tag": ""} + + team = get_current_team() + child_teams = [team.name for team in get_child_team_members(team)] + teams = [team, *child_teams] + + group = jingrow.qb.PageType("Release Group") + site = jingrow.qb.PageType("Site") + query = ( + jingrow.qb.from_(group) + .left_join(site) + .on((site.group == group.name) & (site.status != "Archived")) + .where((group.enabled == 1) & (group.public == 0)) + .where((group.team).isin(teams)) + .groupby(group.name) + .select( + jingrow.query_builder.functions.Count(site.name).as_("number_of_sites"), + group.name, + group.title, + group.version, + group.creation, + ) + .orderby(group.title, order=jingrow.qb.desc) + ) + + bench = jingrow.qb.PageType("Bench") + if bench_filter["status"] == "Active": + query = query.inner_join(bench).on(group.name == bench.group) + elif bench_filter["status"] == "Awaiting Deploy": + group_names = jingrow.get_all("Bench", {"status": "Active"}, pluck="group", distinct=True) + query = query.inner_join(bench).on(group.name.notin(group_names)) + if bench_filter["tag"]: + jcloud_tag = jingrow.qb.PageType("Resource Tag") + query = query.inner_join(jcloud_tag).on( + (jcloud_tag.tag_name == bench_filter["tag"]) & (jcloud_tag.parent == group.name) + ) + + if server: + group_server = jingrow.qb.PageType("Release Group Server") + query = ( + query.inner_join(group_server) + .on(group_server.parent == group.name) + .where(group_server.server == server) + ) + private_groups = query.run(as_dict=True) + + if not private_groups: + return [] + + app_counts = get_app_counts_for_groups([rg.name for rg in private_groups]) + for group in private_groups: + group.tags = jingrow.get_all("Resource Tag", {"parent": group.name}, pluck="tag_name") + group.number_of_apps = app_counts[group.name] + group.status = get_group_status(group.name) + + return private_groups + + +@jingrow.whitelist() +def bench_tags(): + team = get_current_team() + return jingrow.get_all("Jcloud Tag", {"team": team, "pagetype_name": "Release Group"}, pluck="tag") + + +def get_app_counts_for_groups(rg_names): + rg_app = jingrow.qb.PageType("Release Group App") + + app_counts = ( + jingrow.qb.from_(rg_app) + .where(rg_app.parent.isin(rg_names)) + .groupby(rg_app.parent) + .select( + rg_app.parent, + jingrow.query_builder.functions.Count("*"), + ) + .run() + ) + + app_counts_map = {} + for rg_name, app_count in app_counts: + 
app_counts_map[rg_name] = app_count + + return app_counts_map + + +@jingrow.whitelist() +def exists(title): + team = get_current_team() + return bool(jingrow.db.exists("Release Group", {"title": title, "team": team, "enabled": True})) + + +@jingrow.whitelist() +def get_default_apps(): + jcloud_settings = jingrow.get_single("Jcloud Settings") + default_apps = jcloud_settings.get_default_apps() + + versions, rows = get_app_versions_list() + + version_based_default_apps = {v.version: [] for v in versions} + + for row in rows: + if row.app in default_apps: + version_based_default_apps[row.version].append(row) + + return version_based_default_apps + + +def get_app_versions_list(only_jingrow=False): + AppSource = jingrow.qb.PageType("App Source") + JingrowVersion = jingrow.qb.PageType("Jingrow Version") + AppSourceVersion = jingrow.qb.PageType("App Source Version") + rows = ( + jingrow.qb.from_(AppSourceVersion) + .left_join(AppSource) + .on(AppSourceVersion.parent == AppSource.name) + .left_join(JingrowVersion) + .on(AppSourceVersion.version == JingrowVersion.name) + .where((AppSource.enabled == 1) & (AppSource.public == 1) & (JingrowVersion.public == 1)) + .select( + JingrowVersion.name.as_("version"), + JingrowVersion.status, + JingrowVersion.default, + AppSource.name.as_("source"), + AppSource.app, + AppSource.repository_url, + AppSource.repository, + AppSource.repository_owner, + AppSource.branch, + AppSource.app_title.as_("title"), + AppSource.jingrow, + ) + .orderby(AppSource.creation) + ) + + if only_jingrow: + rows = rows.where(AppSource.jingrow == 1) + + rows = rows.run(as_dict=True) + + version_list = unique(rows, lambda x: x.version) + + return version_list, rows + + +@jingrow.whitelist() +def options(): + version_list, rows = get_app_versions_list(only_jingrow=True) + approved_apps = jingrow.get_all("Marketplace App", filters={"jingrow_approved": 1}, pluck="app") + + versions = [] + for d in version_list: + version_dict = {"name": d.version, "status": d.status, "default": d.default} + version_rows = find_all(rows, lambda x: x.version == d.version) + app_list = jingrow.utils.unique([row.app for row in version_rows]) + app_list = sorted(app_list, key=lambda x: x not in approved_apps) + + for app in app_list: + app_rows = find_all(version_rows, lambda x: x.app == app) + app_dict = {"name": app, "title": app_rows[0].title} + + for source in app_rows: + source_dict = { + "name": source.source, + "repository_url": source.repository_url, + "branch": source.branch, + "repository": source.repository, + "repository_owner": source.repository_owner, + } + app_dict.setdefault("sources", []).append(source_dict) + + app_dict["source"] = app_dict["sources"][0] + version_dict.setdefault("apps", []).append(app_dict) + versions.append(version_dict) + + clusters = Cluster.get_all_for_new_bench() + + return {"versions": versions, "clusters": clusters} + + +@jingrow.whitelist() +@protected("Release Group") +def bench_config(name): + rg = jingrow.get_pg("Release Group", name) + + common_site_config = [ + {"key": config.key, "value": config.value, "type": config.type} + for config in rg.common_site_config_table + if not config.internal + ] + + bench_config = jingrow.parse_json(rg.bench_config) + if bench_config.get("http_timeout"): + bench_config = [ + jingrow._dict( + key="http_timeout", + value=bench_config.get("http_timeout"), + type="Number", + internal=False, + ) + ] + else: + bench_config = [] + + config = common_site_config + bench_config + + secret_keys = jingrow.get_all("Site Config Key", 
filters={"type": "Password"}, pluck="key") + for c in config: + if c["key"] in secret_keys: + c["value"] = "*******" + c["type"] = "Password" + return config + + +@jingrow.whitelist() +@protected("Release Group") +def update_config(name, config): + sanitized_common_site_config, sanitized_bench_config = [], [] + bench_config_keys = ["http_timeout"] + + config = jingrow.parse_json(config) + config = [jingrow._dict(c) for c in config] + + for c in config: + if c.key in get_client_blacklisted_keys(): + continue + + if jingrow.db.exists("Site Config Key", c.key): + c.type = jingrow.db.get_value("Site Config Key", c.key, "type") + format_config_value(name, c) + + if c.key in bench_config_keys: + sanitized_bench_config.append(c) + else: + sanitized_common_site_config.append(c) + + rg: ReleaseGroup = jingrow.get_pg("Release Group", name) + rg.update_config_in_release_group(sanitized_common_site_config, sanitized_bench_config) + rg.update_benches_config() + return list(filter(lambda x: not x.internal, rg.common_site_config_table)) + + +def format_config_value(group: str, c: jingrow._dict): + if c.type == "Number": + c.value = flt(c.value) + elif c.type == "Boolean": + c.value = bool(sbool(c.value)) + elif c.type == "JSON": + c.value = jingrow.parse_json(c.value) + elif c.type == "Password" and c.value == "*******": + c.value = jingrow.get_value("Site Config", {"key": c.key, "parent": group}, "value") + + +@jingrow.whitelist() +@protected("Release Group") +def dependencies(name: str): + rg: ReleaseGroup = jingrow.get_pg("Release Group", name) + active_dependencies = [{"key": d.dependency, "value": d.version} for d in rg.dependencies] + supported_dependencies = jingrow.db.get_all( + "Bench Dependency Version", + {"supported_jingrow_version": rg.version}, + ["parent as `key`", "version as `value`"], + ) + + bench_dependencies = jingrow.get_all("Bench Dependency", ["name", "title", "internal"]) + + return { + "active_dependencies": active_dependencies, + "supported_dependencies": list( + # deduplicate dependencies + {d["value"]: d for d in supported_dependencies + active_dependencies}.values() + ), + "dependency_title": {d["name"]: d["title"] for d in bench_dependencies}, + "internal_dependencies": [d["name"] for d in bench_dependencies if d["internal"]], + "update_available": rg.dependency_update_pending, + } + + +@jingrow.whitelist() +@protected("Release Group") +def update_dependencies(name: str, dependencies: str): + dependencies = jingrow.parse_json(dependencies) + rg: ReleaseGroup = jingrow.get_pg("Release Group", name) + if len(rg.dependencies) != len(dependencies): + jingrow.throw("Need all required dependencies") + if diff := set([d["key"] for d in dependencies]) - set(d.dependency for d in rg.dependencies): + jingrow.throw("Invalid dependencies: " + ", ".join(diff)) + for dep, new in zip( + sorted(rg.dependencies, key=lambda x: x.dependency), + sorted(dependencies, key=lambda x: x["key"]), + ): + if dep.dependency != new["key"]: + jingrow.throw(f"Invalid dependency: {new['key']}") + if not re.match(r"^\d+\.\d+\.*\d*$", new["value"]): + jingrow.throw(f"Invalid version for {new['key']}") + dep.version = new["value"] + rg.save() + + +@jingrow.whitelist() +@protected("Release Group") +def apps(name): + group = jingrow.get_pg("Release Group", name) + apps = [] + deployed_apps = jingrow.db.get_all( + "Bench", + filters={"group": group.name, "status": ("!=", "Archived")}, + fields=["`tabBench App`.app"], + pluck="app", + ) + deployed_apps = unique(deployed_apps) + updates = deploy_information(name) 
+ + latest_bench = jingrow.get_all( + "Bench", + filters={"group": group.name, "status": "Active"}, + order_by="creation desc", + limit=1, + pluck="name", + ) + if latest_bench: + latest_bench = latest_bench[0] + else: + latest_bench = None + + latest_deployed_apps = jingrow.get_all( + "Bench", + filters={"name": latest_bench}, + fields=["`tabBench App`.app", "`tabBench App`.hash"], + ) + + for app in group.apps: + source = jingrow.get_pg("App Source", app.source) + app = jingrow.get_pg("App", app.app) + update_available = updates["update_available"] and find( + updates.apps, lambda x: x["app"] == app.name and x["update_available"] + ) + + latest_deployed_app = find(latest_deployed_apps, lambda x: x.app == app.name) + hash = latest_deployed_app.hash if latest_deployed_app else None + tag = get_app_tag(source.repository, source.repository_owner, hash) + + apps.append( + { + "name": app.name, + "jingrow": app.jingrow, + "title": app.title, + "branch": source.branch, + "repository_url": source.repository_url, + "repository": source.repository, + "repository_owner": source.repository_owner, + "tag": tag, + "hash": hash, + "deployed": app.name in deployed_apps, + "update_available": bool(update_available), + "last_github_poll_failed": source.last_github_poll_failed, + } + ) + return apps + + +@jingrow.whitelist() +@protected("Release Group") +def installable_apps(name): + release_group = jingrow.get_pg("Release Group", name) + installed_apps = [app.app for app in release_group.apps] + versions = options()["versions"] + version = find(versions, lambda x: x["name"] == release_group.version) + apps = version["apps"] if version else [] + return [app for app in apps if app["name"] not in installed_apps] + + +@jingrow.whitelist() +@protected("Release Group") +def all_apps(name): + """Return all apps in the marketplace that are not installed in the release group for adding new apps""" + + release_group = jingrow.get_pg("Release Group", name) + installed_apps = [app.app for app in release_group.apps] + marketplace_apps = jingrow.get_all( + "Marketplace App", + filters={"status": "Published", "app": ("not in", installed_apps)}, + fields=["name", "title", "image", "app"], + ) + + if not marketplace_apps: + return [] + + AppSource = jingrow.qb.PageType("App Source") + AppSourceVersion = jingrow.qb.PageType("App Source Version") + marketplace_app_sources = ( + jingrow.qb.from_(AppSource) + .left_join(AppSourceVersion) + .on(AppSourceVersion.parent == AppSource.name) + .select( + AppSource.name, + AppSource.branch, + AppSource.repository, + AppSource.repository_owner, + AppSource.app, + AppSourceVersion.version, + ) + .where( + (AppSource.app.isin([app.app for app in marketplace_apps])) + & (AppSource.enabled == 1) + & (AppSource.public == 1) + ) + ).run(as_dict=1) + + total_installs_by_app = get_total_installs_by_app() + + for app in marketplace_apps: + app["sources"] = find_all( + list(filter(lambda x: x.version == release_group.version, marketplace_app_sources)), + lambda x: x.app == app.app, + ) + # for fetching repo details for incompatible apps + app_source = find(marketplace_app_sources, lambda x: x.app == app.app) + app["repo"] = f"{app_source.repository_owner}/{app_source.repository}" if app_source else None + app["total_installs"] = total_installs_by_app.get(app["name"], 0) + + return marketplace_apps + + +@jingrow.whitelist() +@protected("Release Group") +def fetch_latest_app_update(name, app): + jingrow.get_pg("Release Group", name).fetch_latest_app_update(app) + + +@jingrow.whitelist() 
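+# Note: as elsewhere in this module, @jingrow.whitelist() exposes the function over RPC
+# and @protected("Release Group") appears to restrict it to the team owning the named
+# Release Group (inferred from its usage throughout jcloud/api). add_app/add_apps below
+# only update the app sources on the group; a deploy is still needed to build them.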
+@protected("Release Group") +def add_app(name, source, app): + add_apps(name, [{"app": app, "source": source}]) + + +@jingrow.whitelist() +@protected("Release Group") +def add_apps(name, apps): + release_group: "ReleaseGroup" = jingrow.get_pg("Release Group", name) + for app in apps: + app_name, source = app.values() + release_group.update_source(jingrow._dict(name=source, app=app_name)) + + +@jingrow.whitelist() +@protected("Release Group") +def remove_app(name, app): + return jingrow.get_pg("Release Group", name).remove_app(app) + + +@jingrow.whitelist() +@protected("Release Group") +def versions(name): + Bench = jingrow.qb.PageType("Bench") + Server = jingrow.qb.PageType("Server") + deployed_versions = ( + jingrow.qb.from_(Bench) + .left_join(Server) + .on(Server.name == Bench.server) + .where((Bench.group == name) & (Bench.status != "Archived")) + .groupby(Bench.name) + .select(Bench.name, Bench.status, Bench.is_ssh_proxy_setup, Server.proxy_server) + .orderby(Bench.creation, order=jingrow.qb.desc) + .run(as_dict=True) + ) + + rg_version = jingrow.db.get_value("Release Group", name, "version") + + sites_in_group_details = jingrow.db.get_all( + "Site", + filters={ + "group": name, + "status": ("not in", ("Archived", "Suspended")), + "is_standby": 0, + }, + fields=["name", "status", "cluster", "plan", "creation", "bench"], + ) + + if sites_in_group_details: + Cluster = jingrow.qb.PageType("Cluster") + cluster_data = ( + jingrow.qb.from_(Cluster) + .select(Cluster.name, Cluster.title, Cluster.image) + .where(Cluster.name.isin([site.cluster for site in sites_in_group_details])) + .run(as_dict=True) + ) + + Plan = jingrow.qb.PageType("Site Plan") + plan_data = ( + jingrow.qb.from_(Plan) + .select(Plan.name, Plan.plan_title, Plan.price_cny, Plan.price_usd) + .where(Plan.name.isin([site.plan for site in sites_in_group_details])) + .run(as_dict=True) + ) + + ResourceTag = jingrow.qb.PageType("Resource Tag") + tag_data = ( + jingrow.qb.from_(ResourceTag) + .select(ResourceTag.tag_name, ResourceTag.parent) + .where(ResourceTag.parent.isin([site.name for site in sites_in_group_details])) + .run(as_dict=True) + ) + else: + cluster_data = plan_data = tag_data = {} + + for version in deployed_versions: + version.sites = find_all(sites_in_group_details, lambda x: x.bench == version.name) + version.version = rg_version + for site in version.sites: + site.version = rg_version + site.server_region_info = find(cluster_data, lambda x: x.name == site.cluster) + site.plan = find(plan_data, lambda x: x.name == site.plan) + tags = find_all(tag_data, lambda x: x.parent == site.name) + site.tags = [tag.tag_name for tag in tags] + + version.deployed_on = jingrow.db.get_value( + "Agent Job", + {"bench": version.name, "job_type": "New Bench", "status": "Success"}, + "end", + ) + + return deployed_versions + + +@jingrow.whitelist() +@protected("Bench") +def get_installed_apps_in_version(name): + apps = jingrow.db.get_all( + "Bench App", + {"parent": name}, + ["name", "app", "hash", "source"], + order_by="idx", + ) + for app in apps: + app.update( + jingrow.db.get_value( + "App Source", + app.source, + ("branch", "repository", "repository_owner", "repository_url"), + as_dict=1, + cache=True, + ) + ) + app.tag = get_app_tag(app.repository, app.repository_owner, app.hash) + + return apps + + +@jingrow.whitelist() +@protected("Bench") +def get_processes(name): + bench: "Bench" = jingrow.get_pg("Bench", name) + if bench.status != "Active" and bench.status != "Broken": + return [] + + return bench.supervisorctl_status() 
+ + +@jingrow.whitelist() +@protected("Release Group") +def candidates(filters=None, order_by=None, limit_start=None, limit_page_length=None): + result = jingrow.get_all( + "Deploy Candidate", + ["name", "creation", "status"], + {"group": filters["group"], "status": ("!=", "Draft")}, + order_by=order_by or "creation desc", + start=limit_start, + limit=limit_page_length, + ) + candidates = OrderedDict() + for d in result: + candidates.setdefault(d.name, {}) + candidates[d.name].update(d) + dc_apps = jingrow.get_all( + "Deploy Candidate App", + filters={"parent": d.name}, + pluck="app", + order_by="creation desc", + ) + candidates[d.name]["apps"] = dc_apps + + return candidates.values() + + +@jingrow.whitelist() +def candidate(name): + if not name: + return None + + candidate = jingrow.get_pg("Deploy Candidate", name) + jobs = [] + deploys = jingrow.get_all("Deploy", {"candidate": name}, limit=1) + if deploys: + deploy = jingrow.get_pg("Deploy", deploys[0].name) + for bench in deploy.benches: + if not bench.bench: + continue + job = jingrow.get_all( + "Agent Job", + ["name", "status", "end", "duration", "bench"], + {"bench": bench.bench, "job_type": "New Bench"}, + limit=1, + ) or [{}] + jobs.append(job[0]) + + return { + "name": candidate.name, + "status": candidate.status, + "creation": candidate.creation, + "deployed": False, + "build_steps": candidate.build_steps, + "build_start": candidate.build_start, + "build_end": candidate.build_end, + "build_duration": candidate.build_duration, + "apps": candidate.apps, + "jobs": jobs, + } + + +@jingrow.whitelist() +@protected("Release Group") +def deploy_information(name): + rg: ReleaseGroup = jingrow.get_pg("Release Group", name) + return rg.deploy_information() + + +@jingrow.whitelist() +@protected("Release Group") +def deploy(name, apps): + team = get_current_team(True) + rg: ReleaseGroup = jingrow.get_pg("Release Group", name) + + if rg.team != team.name: + jingrow.throw("Bench can only be deployed by the bench owner", exc=jingrow.PermissionError) + + if rg.deploy_in_progress: + jingrow.throw("A deploy for this bench is already in progress") + + candidate = rg.create_deploy_candidate(apps) + candidate.schedule_build_and_deploy() + + return candidate.name + + +@jingrow.whitelist() +@protected("Release Group") +def deploy_and_update( + name: str, + apps: list, + sites: list | None = None, + run_will_fail_check: bool = True, +): + # Returns name of the Deploy Candidate that is running the build + return get_bench_update( + name, + apps, + sites, + False, + ).deploy(run_will_fail_check) + + +@jingrow.whitelist() +@protected("Release Group") +def update_inplace( + name: str, + apps: list, + sites: list, +): + # Returns name of the Agent Job name that runs the inplace update + return get_bench_update( + name, + apps, + sites, + True, + ).update_inplace() + + +@jingrow.whitelist() +@protected("Release Group") +def create_deploy_candidate(name, apps_to_ignore=None): + apps_to_ignore = [] if apps_to_ignore is None else apps_to_ignore + rg: ReleaseGroup = jingrow.get_pg("Release Group", name) + return rg.create_deploy_candidate(apps_to_ignore) + + +@jingrow.whitelist() +@protected("Release Group") +def jobs(filters=None, order_by=None, limit_start=None, limit_page_length=None): + benches = jingrow.get_all("Bench", {"group": filters["name"]}, pluck="name") + if benches: + jobs = jingrow.get_all( + "Agent Job", + fields=["name", "job_type", "creation", "status", "start", "end", "duration"], + filters={"bench": ("in", benches)}, + order_by=order_by or 
"creation desc", + start=limit_start, + limit=limit_page_length, + ignore_ifnull=True, + ) + for job in jobs: + job["status"] = "Pending" if job["status"] == "Undelivered" else job["status"] + else: + jobs = [] + return jobs + + +@jingrow.whitelist() +@protected("Release Group") +def running_jobs(name): + benches = jingrow.get_all("Bench", {"group": name}, pluck="name") + jobs = jingrow.get_all( + "Agent Job", + filters={"status": ("in", ("Pending", "Running")), "bench": ("in", benches)}, + ) + return [job_detail(job.name) for job in jobs] + + +@jingrow.whitelist() +@protected("Release Group") +def recent_deploys(name): + return jingrow.get_all( + "Deploy Candidate", + ["name", "creation"], + {"group": name, "status": ("!=", "Draft")}, + order_by="creation desc", + limit=3, + ) + + +@jingrow.whitelist() +@protected("Release Group") +def change_branch(name: str, app: str, to_branch: str): + """Switch to `to_branch` for `app` in release group `name`""" + rg: ReleaseGroup = jingrow.get_pg("Release Group", name) + rg.change_app_branch(app, to_branch) + + +@jingrow.whitelist() +@protected("Release Group") +def branch_list(name: str, app: str) -> list[dict]: + """Return a list of git branches available for the `app`""" + rg: ReleaseGroup = jingrow.get_pg("Release Group", name) + app_source = rg.get_app_source(app) + + installation_id = app_source.github_installation_id + repo_owner = app_source.repository_owner + repo_name = app_source.repository + + marketplace_app = jingrow.get_all("Marketplace App", filters={"app": app}, pluck="name", limit=1) + + if marketplace_app and app_source.public and (not belongs_to_current_team(marketplace_app[0])): + return get_branches_for_marketplace_app(app, marketplace_app[0], app_source) + + return branches(repo_owner, repo_name, installation_id) + + +def get_branches_for_marketplace_app(app: str, marketplace_app: str, app_source: AppSource) -> list[dict]: + """Return list of branches allowed for this `marketplace` app""" + branch_set = set() + marketplace_app = jingrow.get_pg("Marketplace App", marketplace_app) + + for marketplace_app_source in marketplace_app.sources: + app_source = jingrow.get_pg("App Source", marketplace_app_source.source) + branch_set.add(app_source.branch) + + # Also, append public source branches + repo_owner = app_source.repository_owner + repo_name = app_source.repository + + public_app_sources = jingrow.get_all( + "App Source", + filters={ + "app": app, + "repository_owner": repo_owner, + "repository": repo_name, + "public": True, + }, + pluck="branch", + ) + branch_set.update(public_app_sources) + + branch_list = sorted(list(branch_set)) + return [{"name": b} for b in branch_list] + + +def belongs_to_current_team(app: str) -> bool: + """Does the Marketplace App `app` belong to current team""" + current_team = get_current_team() + marketplace_app = jingrow.get_pg("Marketplace App", app) + + return marketplace_app.team == current_team + + +@jingrow.whitelist() +@protected("Release Group") +def regions(name): + rg = jingrow.get_pg("Release Group", name) + cluster_names = rg.get_clusters() + return jingrow.get_all( + "Cluster", fields=["name", "title", "image"], filters={"name": ("in", cluster_names)} + ) + + +@jingrow.whitelist() +@protected("Release Group") +def available_regions(name): + rg = jingrow.get_pg("Release Group", name) + cluster_names = rg.get_clusters() + return Cluster.get_all_for_new_bench({"name": ("not in", cluster_names)}) + + +@jingrow.whitelist() +@protected("Release Group") +def add_region(name, region): + 
jingrow.get_pg("Release Group", name).add_region(region) + + +@jingrow.whitelist() +@protected("Release Group") +def archive(name): + benches = jingrow.get_all("Bench", filters={"group": name, "status": "Active"}, pluck="name") + + for bench in benches: + jingrow.get_pg("Bench", bench).archive() + + group = jingrow.get_pg("Release Group", name) + new_name = f"{group.title}.archived" + group.title = append_number_if_name_exists("Release Group", new_name, "title", separator=".") + group.enabled = 0 + group.save() + + +@jingrow.whitelist() +@protected("Bench") +def restart(name): + jingrow.get_pg("Bench", name).restart() + + +@jingrow.whitelist() +@protected("Bench") +def rebuild(name): + jingrow.get_pg("Bench", name).rebuild() + + +@jingrow.whitelist() +@protected("Bench") +def update(name): + jingrow.get_pg("Bench", name).update_all_sites() + + +@jingrow.whitelist() +@protected("Release Group") +def update_all_sites(name): + benches = jingrow.get_all("Bench", {"group": name, "status": "Active"}) + for bench in benches: + jingrow.get_cached_pg("Bench", bench).update_all_sites() + + +@jingrow.whitelist() +@protected("Release Group") +def logs(name, bench): + from jcloud.agent import AgentRequestSkippedException + + if jingrow.db.get_value("Bench", bench, "group") != name: + return [] + + try: + return jingrow.get_pg("Bench", bench).server_logs + except AgentRequestSkippedException: + return [] + + +@jingrow.whitelist() +@protected("Release Group") +def log(name, bench, log): + if jingrow.db.get_value("Bench", bench, "group") != name: + jingrow.throw(f"Release Group name {name} does not match Bench Release Group") + return jingrow.get_pg("Bench", bench).get_server_log(log) + + +@jingrow.whitelist() +@protected("Release Group") +def certificate(name): + return jingrow.get_pg("Release Group", name).get_certificate() + + +@jingrow.whitelist() +@protected("Release Group") +def generate_certificate(name): + return jingrow.get_pg("Release Group", name).generate_certificate() + + +@jingrow.whitelist() +@protected("Release Group") +def get_title_and_creation(name): + result = jingrow.db.get_value("Release Group", name, ["title", "creation"], as_dict=True) + server = jingrow.get_all( + "Release Group Server", {"parent": name}, pluck="server", order_by="idx asc", limit=1 + )[0] + result["team"] = jingrow.db.get_value("Server", server, "team") + return result + + +@jingrow.whitelist() +@protected("Release Group") +def rename(name, title): + return jingrow.db.set_value("Release Group", name, "title", title) + + +@jingrow.whitelist() +@protected("Release Group") +def apply_patch(release_group: str, app: str, patch_config: dict) -> list[str]: + team = get_current_team() + + return create_app_patch( + release_group, + app, + team, + patch_config, + ) + + +@jingrow.whitelist() +@protected("Release Group") +def fail_and_redeploy(name: str, dc_name: str): + dc: "DeployCandidate" = jingrow.get_pg("Deploy Candidate", dc_name) + res = dc.fail_and_redeploy() + + # If failed error is True + if res.get("error"): + return None + + # New Deploy Candidate name + return res.get("message") + + +@jingrow.whitelist(allow_guest=True) +def confirm_bench_transfer(key: str): + from jingrow import _ + + if jingrow.session.user == "Guest": + return jingrow.respond_as_web_page( + _("Not Permitted"), + _("You need to be logged in to confirm the bench group transfer."), + http_status_code=403, + indicator_color="red", + primary_action="/dashboard/login", + primary_label=_("Login"), + ) + + if not isinstance(key, str): + return 
jingrow.respond_as_web_page( + _("Not Permitted"), + _("The link you are using is invalid."), + http_status_code=403, + indicator_color="red", + ) + + if team_change := jingrow.db.get_value("Team Change", {"key": key}): + team_change = jingrow.get_pg("Team Change", team_change) + to_team = team_change.to_team + if not jingrow.db.get_value( + "Team Member", {"user": jingrow.session.user, "parent": to_team, "parenttype": "Team"} + ): + return jingrow.respond_as_web_page( + _("Not Permitted"), + _("You are not a member of the team to which the site is being transferred."), + http_status_code=403, + indicator_color="red", + ) + + team_change.transfer_completed = True + team_change.save() + jingrow.db.commit() + + jingrow.response.type = "redirect" + jingrow.response.location = f"/dashboard/groups/{team_change.document_name}" + return None + + return jingrow.respond_as_web_page( + _("Not Permitted"), + _("The link you are using is invalid or expired."), + http_status_code=403, + indicator_color="red", + ) diff --git a/jcloud/api/billing.py b/jcloud/api/billing.py new file mode 100644 index 0000000..5b7322a --- /dev/null +++ b/jcloud/api/billing.py @@ -0,0 +1,1888 @@ +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt +from __future__ import annotations + +from itertools import groupby +import urllib.parse +import json +import segno +import io +import base64 +import traceback +import jingrow +from jingrow import _ # Import this for translation functionality +from jingrow.core.utils import find +from jingrow.utils import fmt_money, get_request_site_address, getdate, add_months + +from jcloud.api.regional_payments.mpesa.utils import ( + create_invoice_partner_site, + create_payment_partner_transaction, + fetch_param_value, + get_details_from_request_log, + get_mpesa_setup_for_team, + get_payment_gateway, + sanitize_mobile_number, + update_tax_id_or_phone_no, +) +from jcloud.jcloud.pagetype.mpesa_setup.mpesa_connector import MpesaConnector +from jcloud.jcloud.pagetype.team.team import ( + has_unsettled_invoices, +) +from jcloud.utils import get_current_team +from jcloud.utils.billing import ( + GSTIN_FORMAT, + clear_setup_intent, + get_publishable_key, + get_razorpay_client, + get_setup_intent, + get_stripe, + make_formatted_pg, + states_with_tin, + validate_gstin_check_digit, +) +from jcloud.utils.mpesa_utils import create_mpesa_request_log +from jcloud.api.payment.wechatpay import WeChatPayAPI + +@jingrow.whitelist() +def get_publishable_key_and_setup_intent(): + team = get_current_team() + return { + "publishable_key": get_publishable_key(), + "setup_intent": get_setup_intent(team), + } + + +@jingrow.whitelist() +def upcoming_invoice(): + team = get_current_team(True) + invoice = team.get_upcoming_invoice() + + if invoice: + upcoming_invoice = invoice.as_dict() + upcoming_invoice.formatted = make_formatted_pg(invoice, ["Currency"]) + else: + upcoming_invoice = None + + return { + "upcoming_invoice": upcoming_invoice, + "available_credits": fmt_money(team.get_balance(), 2, team.currency), + } + + +@jingrow.whitelist() +def get_balance_credit(): + team = get_current_team(True) + return team.get_balance() + + +@jingrow.whitelist() +def past_invoices(): + return get_current_team(True).get_past_invoices() + + +@jingrow.whitelist() +def invoices_and_payments(): + team = get_current_team(True) + return team.get_past_invoices() + + +@jingrow.whitelist() +def refresh_invoice_link(invoice): + pg = jingrow.get_pg("Invoice", invoice) + return pg.refresh_stripe_payment_link() + 
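+
+# --- Editor's note: illustrative sketch, not part of the original commit. ---
+# Shows the response shape that `upcoming_invoice` above returns: either a
+# serialized draft invoice plus display-formatted currency strings, or None
+# when nothing is due. The formatter and the values below are stand-ins; the
+# real endpoint uses jingrow.utils.fmt_money and data from the Team and
+# Invoice documents.
+def _upcoming_invoice_payload_sketch():
+    def fake_fmt_money(amount, precision, currency):
+        # simplified stand-in for fmt_money: just prefix the currency code
+        return f"{currency} {amount:.{precision}f}"
+
+    team_balance, team_currency = 150.0, "USD"
+    draft_invoice = {"name": "INV-0001", "total": 42.5, "currency": team_currency, "status": "Draft"}
+    return {
+        # None when the team has no upcoming (draft) invoice
+        "upcoming_invoice": draft_invoice,
+        "available_credits": fake_fmt_money(team_balance, 2, team_currency),
+    }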
+ +@jingrow.whitelist() +def balances(): + team = get_current_team() + has_bought_credits = jingrow.db.get_all( + "Balance Transaction", + filters={ + "source": ("in", ("Prepaid Credits", "Transferred Credits", "Free Credits")), + "team": team, + "docstatus": 1, + "type": ("!=", "Partnership Fee"), + }, + limit=1, + ) + if not has_bought_credits: + return [] + + bt = jingrow.qb.PageType("Balance Transaction") + inv = jingrow.qb.PageType("Invoice") + query = ( + jingrow.qb.from_(bt) + .left_join(inv) + .on(bt.invoice == inv.name) + .select( + bt.name, + bt.creation, + bt.amount, + bt.currency, + bt.source, + bt.type, + bt.ending_balance, + bt.description, + inv.period_start, + ) + .where((bt.docstatus == 1) & (bt.team == team)) + .orderby(bt.creation, order=jingrow.qb.desc) + ) + + data = query.run(as_dict=True) + for d in data: + d.formatted = dict( + amount=fmt_money(d.amount, 2, d.currency), + ending_balance=fmt_money(d.ending_balance, 2, d.currency), + ) + + if d.period_start: + d.formatted["invoice_for"] = d.period_start.strftime("%B %Y") + return data + + +def get_processed_balance_transactions(transactions: list[dict]): + """Cleans up transactions and adjusts ending balances accordingly""" + + cleaned_up_transations = get_cleaned_up_transactions(transactions) + processed_balance_transactions = [] + for bt in reversed(cleaned_up_transations): + if is_added_credits_bt(bt) and len(processed_balance_transactions) < 1: + processed_balance_transactions.append(bt) + elif is_added_credits_bt(bt): + bt.ending_balance += processed_balance_transactions[ + -1 + ].ending_balance # Adjust the ending balance + processed_balance_transactions.append(bt) + elif bt.type == "Applied To Invoice": + processed_balance_transactions.append(bt) + + return list(reversed(processed_balance_transactions)) + + +def get_cleaned_up_transactions(transactions: list[dict]): + """Only picks Balance transactions that the users care about""" + + cleaned_up_transations = [] + for bt in transactions: + if is_added_credits_bt(bt): + cleaned_up_transations.append(bt) + continue + + if bt.type == "Applied To Invoice" and not find( + cleaned_up_transations, lambda x: x.invoice == bt.invoice + ): + cleaned_up_transations.append(bt) + continue + return cleaned_up_transations + + +def is_added_credits_bt(bt): + """Returns `true` if credits were added and not some reverse transaction""" + if not ( + bt.type == "Adjustment" + and bt.source + in ( + "Prepaid Credits", + "Free Credits", + "Transferred Credits", + ) # Might need to re-think this + ): + return False + + # Is not a reverse of a previous balance transaction + bt.description = bt.description or "" + return not bt.description.startswith("Reverse") + + +@jingrow.whitelist() +def details(): + team = get_current_team(True) + address = None + if team.billing_address: + address = jingrow.get_pg("Address", team.billing_address) + address_parts = [ + address.address_line1, + address.city, + address.state, + address.country, + address.pincode, + ] + billing_address = ", ".join([d for d in address_parts if d]) + else: + billing_address = "" + + return { + "billing_name": team.billing_name, + "billing_address": billing_address, + "gstin": address.gstin if address else None, + } + + +@jingrow.whitelist() +def get_customer_details(team): + """This method is called by jingrow.com for creating Customer and Address""" + team_pg = jingrow.db.get_value("Team", team, "*") + return { + "team": team_pg, + "address": jingrow.get_pg("Address", team_pg.billing_address), + } + + 
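+
+# --- Editor's note: illustrative sketch, not part of the original commit. ---
+# The Stripe payment-intent endpoints below add GST on top of the requested
+# amount for CNY teams and hand Stripe the total in the smallest currency
+# unit via int(amount * 100). A standalone version of that arithmetic; the
+# gst_percentage default here is a placeholder, the real value is read from
+# the "Jcloud Settings" single page.
+def _stripe_charge_amount_sketch(amount, currency, gst_percentage=0.13):
+    gst = 0.0
+    if currency == "CNY":
+        gst = amount * gst_percentage
+        amount += gst
+    amount = round(amount, 2)
+    return {
+        "stripe_amount": int(amount * 100),   # e.g. CNY 100 + 13% GST -> 11300
+        "gst": round(gst, 2),
+        "currency": currency.lower(),         # Stripe expects lowercase currency codes
+    }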
+@jingrow.whitelist() +def create_payment_intent_for_micro_debit(payment_method_name): + team = get_current_team(True) + stripe = get_stripe() + + micro_debit_charge_field = ( + "micro_debit_charge_usd" if team.currency == "USD" else "micro_debit_charge_cny" + ) + amount = jingrow.db.get_single_value("Jcloud Settings", micro_debit_charge_field) + + intent = stripe.PaymentIntent.create( + amount=int(amount * 100), + currency=team.currency.lower(), + customer=team.stripe_customer_id, + description="Micro-Debit Card Test Charge", + metadata={ + "payment_for": "micro_debit_test_charge", + "payment_method_name": payment_method_name, + }, + ) + return {"client_secret": intent["client_secret"]} + + +@jingrow.whitelist() +def create_payment_intent_for_partnership_fees(): + team = get_current_team(True) + jcloud_settings = jingrow.get_cached_pg("Jcloud Settings") + metadata = {"payment_for": "partnership_fee"} + fee_amount = jcloud_settings.partnership_fee_usd + + if team.currency == "CNY": + fee_amount = jcloud_settings.partnership_fee_cny + gst_amount = fee_amount * jcloud_settings.gst_percentage + fee_amount += gst_amount + metadata.update({"gst": round(gst_amount, 2)}) + + stripe = get_stripe() + intent = stripe.PaymentIntent.create( + amount=int(fee_amount * 100), + currency=team.currency.lower(), + customer=team.stripe_customer_id, + description="Partnership Fee", + metadata=metadata, + ) + return { + "client_secret": intent["client_secret"], + "publishable_key": get_publishable_key(), + } + + +@jingrow.whitelist() +def create_payment_intent_for_buying_credits(amount): + team = get_current_team(True) + metadata = {"payment_for": "prepaid_credits"} + total_unpaid = total_unpaid_amount() + + if amount < total_unpaid and not team.jerp_partner: + jingrow.throw(f"Amount {amount} is less than the total unpaid amount {total_unpaid}.") + + if team.currency == "CNY": + gst_amount = amount * jingrow.db.get_single_value("Jcloud Settings", "gst_percentage") + amount += gst_amount + metadata.update({"gst": round(gst_amount, 2)}) + + amount = round(amount, 2) + stripe = get_stripe() + intent = stripe.PaymentIntent.create( + amount=int(amount * 100), + currency=team.currency.lower(), + customer=team.stripe_customer_id, + description="Prepaid Credits", + metadata=metadata, + ) + return { + "client_secret": intent["client_secret"], + "publishable_key": get_publishable_key(), + } + + +@jingrow.whitelist() +def create_payment_intent_for_prepaid_app(amount, metadata): + stripe = get_stripe() + team = get_current_team(True) + payment_method = jingrow.get_value( + "Stripe Payment Method", team.default_payment_method, "stripe_payment_method_id" + ) + try: + if not payment_method: + intent = stripe.PaymentIntent.create( + amount=amount * 100, + currency=team.currency.lower(), + customer=team.stripe_customer_id, + description="Prepaid App Purchase", + metadata=metadata, + ) + else: + intent = stripe.PaymentIntent.create( + amount=amount * 100, + currency=team.currency.lower(), + customer=team.stripe_customer_id, + description="Prepaid App Purchase", + off_session=True, + confirm=True, + metadata=metadata, + payment_method=payment_method, + payment_method_options={"card": {"request_three_d_secure": "any"}}, + ) + + return { + "payment_method": payment_method, + "client_secret": intent["client_secret"], + "publishable_key": get_publishable_key(), + } + except stripe.error.CardError as e: + err = e.error + if err.code == "authentication_required": + # Bring the customer back on-session to authenticate the purchase + 
return { + "error": "authentication_required", + "payment_method": err.payment_method.id, + "amount": amount, + "card": err.payment_method.card, + "publishable_key": get_publishable_key(), + "client_secret": err.payment_intent.client_secret, + } + if err.code: + # The card was declined for other reasons (e.g. insufficient funds) + # Bring the customer back on-session to ask them for a new payment method + return { + "error": err.code, + "payment_method": err.payment_method.id, + "publishable_key": get_publishable_key(), + "client_secret": err.payment_intent.client_secret, + } + + +@jingrow.whitelist() +def get_payment_methods(): + team = get_current_team() + return jingrow.get_pg("Team", team).get_payment_methods() + + +@jingrow.whitelist() +def set_as_default(name): + payment_method = jingrow.get_pg("Stripe Payment Method", {"name": name, "team": get_current_team()}) + payment_method.set_default() + + +@jingrow.whitelist() +def remove_payment_method(name): + team = get_current_team() + payment_method_count = jingrow.db.count("Stripe Payment Method", {"team": team}) + + if has_unsettled_invoices(team) and payment_method_count == 1: + return "Unpaid Invoices" + + payment_method = jingrow.get_pg("Stripe Payment Method", {"name": name, "team": team}) + payment_method.delete() + return None + + +@jingrow.whitelist() +def finalize_invoices(): + unsettled_invoices = jingrow.get_all( + "Invoice", + {"team": get_current_team(), "status": ("in", ("Draft", "Unpaid"))}, + pluck="name", + ) + + for inv in unsettled_invoices: + inv_pg = jingrow.get_pg("Invoice", inv) + inv_pg.finalize_invoice() + + +@jingrow.whitelist() +def unpaid_invoices(): + team = get_current_team() + return jingrow.db.get_all( + "Invoice", + { + "team": team, + "status": ("in", ["Draft", "Unpaid", "Invoice Created"]), + "type": "Subscription", + }, + ["name", "status", "period_end", "currency", "amount_due", "total"], + order_by="creation asc", + ) + + +@jingrow.whitelist() +def get_unpaid_invoices(): + team = get_current_team() + unpaid_invoices = jingrow.db.get_all( + "Invoice", + { + "team": team, + "status": "Unpaid", + "type": "Subscription", + }, + ["name", "status", "period_end", "currency", "amount_due", "total"], + order_by="creation asc", + ) + + if len(unpaid_invoices) == 1: + return jingrow.get_pg("Invoice", unpaid_invoices[0].name) + return unpaid_invoices + + +@jingrow.whitelist() +def change_payment_mode(mode): + team = get_current_team(get_pg=True) + + team.payment_mode = mode + if team.partner_email and mode == "Paid By Partner" and not team.billing_team: + team.billing_team = jingrow.db.get_value( + "Team", + {"enabled": 1, "jerp_partner": 1, "partner_email": team.partner_email}, + "name", + ) + if team.billing_team and mode != "Paid By Partner": + team.billing_team = "" + team.save() + return None + + +@jingrow.whitelist() +def prepaid_credits_via_onboarding(): + """When prepaid credits are bought, the balance is not immediately reflected. 
+ This method will check balance every second and then set payment_mode""" + from time import sleep + + team = get_current_team(get_pg=True) + + seconds = 0 + # block until balance is updated + while team.get_balance() == 0 or seconds > 20: + seconds += 1 + sleep(1) + jingrow.db.rollback() + + team.payment_mode = "Prepaid Credits" + team.save() + + +@jingrow.whitelist() +def get_invoice_usage(invoice): + team = get_current_team() + # apply team filter for safety + pg = jingrow.get_pg("Invoice", {"name": invoice, "team": team}) + out = pg.as_dict() + # a dict with formatted currency values for display + out.formatted = make_formatted_pg(pg) + out.invoice_pdf = pg.invoice_pdf or (pg.currency == "USD" and pg.get_pdf()) + return out + + +@jingrow.whitelist() +def get_summary(): + team = get_current_team() + invoices = jingrow.get_all( + "Invoice", + filters={"team": team, "status": ("in", ["Paid", "Unpaid"])}, + fields=[ + "name", + "status", + "period_end", + "payment_mode", + "type", + "currency", + "amount_paid", + ], + order_by="creation desc", + ) + + invoice_names = [x.name for x in invoices] + grouped_invoice_items = get_grouped_invoice_items(invoice_names) + + for invoice in invoices: + invoice.items = grouped_invoice_items.get(invoice.name, []) + + return invoices + + +def get_grouped_invoice_items(invoices: list[str]) -> dict: + """Takes a list of invoices (invoice names) and returns a dict of the form: + { "": [], "": [], } + """ + invoice_items = jingrow.get_all( + "Invoice Item", + filters={"parent": ("in", invoices)}, + fields=[ + "amount", + "document_name AS name", + "document_type AS type", + "parent", + "quantity", + "rate", + "plan", + ], + ) + + grouped_items = groupby(invoice_items, key=lambda x: x["parent"]) + invoice_items_map = {} + for invoice_name, items in grouped_items: + invoice_items_map[invoice_name] = list(items) + + return invoice_items_map + + +@jingrow.whitelist() +def after_card_add(): + clear_setup_intent() + + +@jingrow.whitelist() +def setup_intent_success(setup_intent, address=None): + setup_intent = jingrow._dict(setup_intent) + + # refetching the setup intent to get mandate_id from stripe + stripe = get_stripe() + setup_intent = stripe.SetupIntent.retrieve(setup_intent.id) + + team = get_current_team(True) + clear_setup_intent() + mandate_reference = setup_intent.payment_method_options.card.mandate_options.reference + payment_method = team.create_payment_method( + setup_intent.payment_method, + setup_intent.id, + setup_intent.mandate, + mandate_reference, + set_default=True, + ) + if address: + address = jingrow._dict(address) + team.update_billing_details(address) + + return {"payment_method_name": payment_method.name} + + +@jingrow.whitelist() +def validate_gst(address, method=None): + # 保留函数以维持代码结构 + return + + +@jingrow.whitelist() +def get_latest_unpaid_invoice(): + team = get_current_team() + unpaid_invoices = jingrow.get_all( + "Invoice", + {"team": team, "status": "Unpaid", "payment_attempt_count": (">", 0)}, + pluck="name", + order_by="creation desc", + limit=1, + ) + + if unpaid_invoices: + unpaid_invoice = jingrow.db.get_value( + "Invoice", + unpaid_invoices[0], + ["amount_due", "payment_mode", "amount_due", "currency"], + as_dict=True, + ) + if unpaid_invoice.payment_mode == "Prepaid Credits" and team_has_balance_for_invoice(unpaid_invoice): + return None + + return unpaid_invoice + return None + + +def team_has_balance_for_invoice(prepaid_mode_invoice): + team = get_current_team(get_pg=True) + return team.get_balance() >= 
prepaid_mode_invoice.amount_due + + +@jingrow.whitelist() +def create_razorpay_order(amount, type=None): + client = get_razorpay_client() + team = get_current_team(get_pg=True) + + if team.currency == "CNY": + gst_amount = amount * jingrow.db.get_single_value("Jcloud Settings", "gst_percentage") + amount += gst_amount + + amount = round(amount, 2) + data = { + "amount": int(amount * 100), + "currency": team.currency, + "notes": { + "Description": "Order for Jingrow Prepaid Credits", + "Team (Jingrow ID)": team.name, + "gst": gst_amount if team.currency == "CNY" else 0, + }, + } + if type and type == "Partnership Fee": + data.get("notes").update({"Type": type}) + order = client.order.create(data=data) + + payment_record = jingrow.get_pg( + {"pagetype": "Razorpay Payment Record", "order_id": order.get("id"), "team": team.name, "type": type} + ).insert(ignore_permissions=True) + + return { + "order_id": order.get("id"), + "key_id": client.auth[0], + "payment_record": payment_record.name, + } + + +@jingrow.whitelist() +def handle_razorpay_payment_success(response): + client = get_razorpay_client() + client.utility.verify_payment_signature(response) + + payment_record = jingrow.get_pg( + "Razorpay Payment Record", + {"order_id": response.get("razorpay_order_id")}, + for_update=True, + ) + payment_record.update( + { + "payment_id": response.get("razorpay_payment_id"), + "signature": response.get("razorpay_signature"), + "status": "Captured", + } + ) + payment_record.save(ignore_permissions=True) + + +@jingrow.whitelist() +def handle_razorpay_payment_failed(response): + payment_record = jingrow.get_pg( + "Razorpay Payment Record", + {"order_id": response["error"]["metadata"].get("order_id")}, + for_update=True, + ) + + payment_record.status = "Failed" + payment_record.failure_reason = response["error"]["description"] + payment_record.save(ignore_permissions=True) + + +@jingrow.whitelist() +def total_unpaid_amount(): + team = get_current_team(get_pg=True) + balance = team.get_balance() + negative_balance = -1 * balance if balance < 0 else 0 + + return ( + jingrow.get_all( + "Invoice", + {"status": "Unpaid", "team": team.name, "type": "Subscription", "docstatus": ("!=", 2)}, + ["sum(amount_due) as total"], + pluck="total", + )[0] + or 0 + ) + negative_balance + + +# Mpesa integrations, mpesa exjcloud +"""Send stk push to the user""" + + +def generate_stk_push(**kwargs): + """Generate stk push by making a API call to the stk push API.""" + args = jingrow._dict(kwargs) + partner_value = args.partner + + # Fetch the team document based on the extracted partner value + partner = jingrow.get_all("Team", filters={"user": partner_value, "jerp_partner": 1}, pluck="name") + if not partner: + jingrow.throw(_(f"Partner team {partner_value} not found"), title=_("Mpesa Exjcloud Error")) + + # Get Mpesa settings for the partner's team + mpesa_setup = get_mpesa_setup_for_team(partner[0]) + try: + callback_url = ( + get_request_site_address(True) + "/api/method/jcloud.api.billing.verify_m_pesa_transaction" + ) + env = "production" if not mpesa_setup.sandbox else "sandbox" + # for sandbox, business shortcode is same as till number + business_shortcode = ( + mpesa_setup.business_shortcode if env == "production" else mpesa_setup.till_number + ) + connector = MpesaConnector( + env=env, + app_key=mpesa_setup.consumer_key, + app_secret=mpesa_setup.get_password("consumer_secret"), + ) + + mobile_number = sanitize_mobile_number(args.sender) + response = connector.stk_push( + business_shortcode=business_shortcode, + 
amount=args.amount_with_tax, + passcode=mpesa_setup.get_password("pass_key"), + callback_url=callback_url, + reference_code=mpesa_setup.till_number, + phone_number=mobile_number, + description="Jingrow Payment", + ) + return response # noqa: RET504 + except Exception as e: + jingrow.log_error(f"Mpesa Exjcloud Transaction Error") + jingrow.throw( + _("Issue detected with Mpesa configuration, check the error logs for more details"), + title=_("Mpesa Exjcloud Error"), + ) + + +@jingrow.whitelist(allow_guest=True) +def verify_m_pesa_transaction(**kwargs): + """Verify the transaction result received via callback from STK.""" + transaction_response, request_id = parse_transaction_response(kwargs) + status = handle_transaction_result(transaction_response, request_id) + + return {"status": status, "ResultDesc": transaction_response.get("ResultDesc")} + + +def parse_transaction_response(kwargs): + """Parse and validate the transaction response.""" + + if "Body" not in kwargs or "stkCallback" not in kwargs["Body"]: + jingrow.log_error(title="Invalid transaction response format", message=kwargs) + jingrow.throw(_("Invalid transaction response format")) + + transaction_response = jingrow._dict(kwargs["Body"]["stkCallback"]) + checkout_id = getattr(transaction_response, "CheckoutRequestID", "") + if not isinstance(checkout_id, str): + jingrow.throw(_("Invalid Checkout Request ID")) + + return transaction_response, checkout_id + + +def handle_transaction_result(transaction_response, integration_request): + """Handle the logic based on ResultCode in the transaction response.""" + + result_code = transaction_response.get("ResultCode") + status = None + + if result_code == 0: + try: + status = "Completed" + create_mpesa_request_log( + transaction_response, "Host", "Mpesa Exjcloud", integration_request, None, status + ) + + create_mpesa_payment_record(transaction_response) + except Exception as e: + status = "Failed" + create_mpesa_request_log( + transaction_response, "Host", "Mpesa Exjcloud", integration_request, None, status + ) + jingrow.log_error(f"Mpesa: Transaction failed with error {e}") + + elif result_code == 1037: # User unreachable (Phone off or timeout) + status = "Failed" + create_mpesa_request_log( + transaction_response, "Host", "Mpesa Exjcloud", integration_request, None, status + ) + jingrow.log_error("Mpesa: User cannot be reached (Phone off or timeout)") + + elif result_code == 1032: # User cancelled the request + status = "Cancelled" + create_mpesa_request_log( + transaction_response, "Host", "Mpesa Exjcloud", integration_request, None, status + ) + jingrow.log_error("Mpesa: Request cancelled by user") + + else: # Other failure codes + status = "Failed" + create_mpesa_request_log( + transaction_response, "Host", "Mpesa Exjcloud", integration_request, None, status + ) + jingrow.log_error(f"Mpesa: Transaction failed with ResultCode {result_code}") + return status + + +@jingrow.whitelist() +def request_for_payment(**kwargs): + """request for payments""" + team = get_current_team() + + kwargs.setdefault("team", team) + args = jingrow._dict(kwargs) + update_tax_id_or_phone_no(team, args.tax_id, args.phone_number) + + amount = args.request_amount + args.request_amount = jingrow.utils.rounded(amount, 2) + response = jingrow._dict(generate_stk_push(**args)) + handle_api_mpesa_response("CheckoutRequestID", args, response) + + return response + + +def handle_api_mpesa_response(global_id, request_dict, response): + """Response received from API calls returns a global identifier for each 
transaction, this code is returned during the callback.""" + # check error response + if response.requestId: + req_name = response.requestId + error = response + else: + # global checkout id used as request name + req_name = getattr(response, global_id) + error = None + + create_mpesa_request_log(request_dict, "Host", "Mpesa Exjcloud", req_name, error, output=response) + + if error: + jingrow.throw(_(response.errorMessage), title=_("Transaction Error")) + + +def create_mpesa_payment_record(transaction_response): + """Create a new entry in the Mpesa Payment Record for a successful transaction.""" + item_response = transaction_response.get("CallbackMetadata", {}).get("Item", []) + mpesa_receipt_number = fetch_param_value(item_response, "MpesaReceiptNumber", "Name") + transaction_time = fetch_param_value(item_response, "TransactionDate", "Name") + phone_number = fetch_param_value(item_response, "PhoneNumber", "Name") + transaction_id = transaction_response.get("CheckoutRequestID") + amount = fetch_param_value(item_response, "Amount", "Name") + merchant_request_id = transaction_response.get("MerchantRequestID") + info = get_details_from_request_log(transaction_id) + gateway_name = get_payment_gateway(info.partner) + # Create a new entry in M-Pesa Payment Record + data = { + "transaction_id": transaction_id, + "amount": amount, + "team": jingrow.get_value("Team", info.team, "user"), + "default_currency": "KES", + "rate": info.requested_amount, + } + mpesa_invoice, invoice_name = create_invoice_partner_site(data, gateway_name) + payment_record = jingrow.get_pg( + { + "pagetype": "Mpesa Payment Record", + "transaction_id": transaction_id, + "transaction_time": parse_datetime(transaction_time), + "transaction_type": "Mpesa Exjcloud", + "team": info.team, + "phone_number": str(phone_number), + "amount": info.requested_amount, + "grand_total": amount, + "merchant_request_id": merchant_request_id, + "payment_partner": info.partner, + "amount_usd": info.amount_usd, + "exchange_rate": info.exchange_rate, + "local_invoice": mpesa_invoice, + "mpesa_receipt_number": mpesa_receipt_number, + } + ) + payment_record.insert(ignore_permissions=True) + payment_record.submit() + """create payment partner transaction which will then create balance transaction""" + create_payment_partner_transaction( + info.team, info.partner, info.exchange_rate, info.amount_usd, info.requested_amount, gateway_name + ) + mpesa_details = { + "mpesa_receipt_number": mpesa_receipt_number, + "mpesa_merchant_id": merchant_request_id, + "mpesa_payment_record": payment_record.name, + "mpesa_request_id": transaction_id, + "mpesa_invoice": invoice_name, + } + create_balance_transaction_and_invoice(info.team, info.amount_usd, mpesa_details) + + jingrow.msgprint(_("Mpesa Payment Record entry created successfully")) + + +def create_balance_transaction_and_invoice(team, amount, mpesa_details): + balance_transaction = jingrow.get_pg( + pagetype="Balance Transaction", + team=team, + source="Prepaid Credits", + type="Adjustment", + amount=amount, + description=mpesa_details.get("mpesa_payment_record"), + paid_via_local_pg=1, + ) + balance_transaction.insert(ignore_permissions=True) + balance_transaction.submit() + + invoice = jingrow.get_pg( + pagetype="Invoice", + team=team, + type="Prepaid Credits", + status="Paid", + total=amount, + amount_due=amount, + amount_paid=amount, + amount_due_with_tax=amount, + due_date=jingrow.utils.nowdate(), + mpesa_merchant_id=mpesa_details.get("mpesa_merchant_id", ""), + 
mpesa_receipt_number=mpesa_details.get("mpesa_receipt_number", ""), + mpesa_request_id=mpesa_details.get("mpesa_request_id", ""), + mpesa_payment_record=mpesa_details.get("mpesa_payment_record", ""), + mpesa_invoice=mpesa_details.get("mpesa_invoice", ""), + ) + invoice.append( + "items", + { + "description": "Prepaid Credits", + "document_type": "Balance Transaction", + "document_name": balance_transaction.name, + "quantity": 1, + "rate": amount, + }, + ) + invoice.insert(ignore_permissions=True) + invoice.submit() + + +def parse_datetime(date): + from datetime import datetime + + return datetime.strptime(str(date), "%Y%m%d%H%M%S") + +@jingrow.whitelist(allow_guest=True) +def handle_alipay_notification(): + """处理支付宝支付通知回调""" + try: + # 获取支付宝通知数据 + alipay_data = jingrow.request.form.to_dict() + + # 获取必要参数 + order_id = alipay_data.get("out_trade_no") + trade_no = alipay_data.get("trade_no") + trade_status = alipay_data.get("trade_status") + team_name = urllib.parse.unquote(alipay_data.get("passback_params", "")) + + # 检查订单号是否存在 + if not order_id: + jingrow.log_error("订单号为空", "支付宝错误") + return "fail" + + # 查找支付记录 + payment_record = jingrow.db.get_value( + "Order", + {"order_id": order_id}, + ["name", "total_amount", "status", "team"], + as_dict=True + ) + + if not payment_record: + jingrow.log_error(f"未找到支付记录: {order_id}", "支付宝错误") + return "fail" + + # 检查记录状态,避免重复处理 + if payment_record.status == "交易成功": + return "success" + + # 只有交易成功时才更新状态 + if trade_status == "TRADE_SUCCESS": + # 更新订单记录状态 + jingrow.db.set_value( + "Order", + payment_record.name, + { + "status": "已支付", + "trade_no": trade_no, + "payment_method": "支付宝" + } + ) + + # 执行支付完成后的业务逻辑 + handle_order_payment_complete(order_id) + + # 立即提交数据库事务 + jingrow.db.commit() + + return "success" + + return "success" + except Exception as e: + jingrow.log_error(f"处理失败: {str(e)}\n{traceback.format_exc()}", "支付宝错误") + return "fail" + +@jingrow.whitelist(allow_guest=True) +def handle_wechatpay_notification(): + """处理微信支付通知回调""" + try: + # 获取请求数据 + headers = jingrow.local.request.headers + body = jingrow.request.get_data() + + # 初始化微信支付API + wechat_pay = WeChatPayAPI() + + # 使用SDK的方法解密回调数据 + try: + # 调用SDK提供的decrypt_callback方法解密数据 + decrypted_data = wechat_pay.wxpay.decrypt_callback(headers, body) + + # 如果返回值是字符串则解析为JSON + if isinstance(decrypted_data, str): + decrypted_data = json.loads(decrypted_data) + + # 获取关键字段 + order_id = decrypted_data.get("out_trade_no") + trade_no = decrypted_data.get("transaction_id") + trade_state = decrypted_data.get("trade_state") + total_amount = decrypted_data.get("amount", {}).get("total", 0) / 100 # 转换为元 + team_name = decrypted_data.get("attach", "") + + # 确认交易状态为成功 + if trade_state != "SUCCESS": + jingrow.log_error(f"微信支付交易状态不成功: {trade_state}", "微信支付通知") + return "SUCCESS" # 返回成功以避免微信重复通知 + + # 查询支付记录 + payment_record = jingrow.db.get_value( + "Order", + {"order_id": order_id}, + ["name", "total_amount", "status", "team"], + as_dict=True + ) + + if not payment_record: + jingrow.log_error(f"未找到支付记录: {order_id}", "微信支付错误") + return "SUCCESS" + + # 检查记录状态,避免重复处理 + if payment_record.status == "交易成功": + jingrow.log_error(f"订单已处理: {order_id}", "微信支付通知") + return "SUCCESS" + + # 更新订单记录状态 + jingrow.db.set_value( + "Order", + payment_record.name, + { + "status": "已支付", + "trade_no": trade_no, + "payment_method": "微信支付" + } + ) + + # 执行支付完成后的业务逻辑 + handle_order_payment_complete(order_id) + + # 立即提交数据库事务 + jingrow.db.commit() + + return "SUCCESS" + + except Exception as e: + jingrow.log_error( + f"处理微信支付通知数据失败: 
{str(e)}\n调用栈: {traceback.format_exc()}\n请求头: {headers}\n请求体: {body}", + "微信支付解密错误" + ) + return "SUCCESS" # 返回成功避免微信重复发送通知 + + except Exception as e: + jingrow.log_error( + f"处理微信支付通知失败: {str(e)}\n调用栈: {traceback.format_exc()}", + "微信支付错误" + ) + return "SUCCESS" # 返回成功避免微信重复发送通知 + + +def handle_order_payment_complete(order_id): + """处理订单支付完成后的业务逻辑""" + try: + order = jingrow.get_pg("Order", {"order_id": order_id}) + + # 根据订单类型执行不同的业务逻辑 + if order.order_type == "余额充值": + process_balance_recharge(order) + elif order.order_type == "网站续费": + process_site_renew(order_id) + + return True + except Exception as e: + jingrow.log_error( + f"处理订单 {order_id} 支付完成事件失败: {str(e)}\n{traceback.format_exc()}", + f"订单处理错误" + ) + return False + +def process_balance_recharge(order): + """处理余额充值业务逻辑""" + try: + balance_transaction = jingrow.get_pg({ + "pagetype": "Balance Transaction", + "team": order.team, + "type": "Adjustment", + "source": "Prepaid Credits", + "amount": order.total_amount, + "description": f"{order.payment_method}充值 (订单: {order.order_id})", + }) + + balance_transaction.flags.ignore_permissions = True + balance_transaction.insert() + balance_transaction.submit() + + # 更新订单状态为交易成功 + jingrow.db.set_value("Order", order.name, "status", "交易成功") + jingrow.db.commit() + + except Exception as e: + jingrow.log_error( + f"余额充值失败: 团队 {order.team}, 金额 {order.total_amount}, 错误: {str(e)}\n{traceback.format_exc()}", + "余额充值错误" + ) + raise + +def process_site_renew(order_id): + """处理网站续费,更新到期时间""" + # 获取订单文档 + order = jingrow.get_pg("Order", {"order_id": order_id}) + + # 检查订单状态,避免重复处理 + if order.status == "交易成功": + jingrow.log_error( + message=f"订单 {order_id} 已处理完成,跳过重复处理", + title="站点续费提示" + ) + return { + "name": order.title, + "url": order.title, + "new_end_date": jingrow.db.get_value("Site", order.title, "site_end_date"), + "already_processed": True + } + + # 从订单中提取信息 + site_name = order.title # 网站URL保存在订单的title字段中 + renewal_months = int(order.description) # 续费月数保存在订单的description字段中 + + # 获取站点文档 + site = jingrow.get_pg("Site", site_name) + + # 计算新的到期日期 + current_end_date = getdate(site.site_end_date or jingrow.utils.today()) + if current_end_date < getdate(jingrow.utils.today()): + current_end_date = getdate(jingrow.utils.today()) + + new_end_date = add_months(current_end_date, renewal_months) + + # 更新站点到期日期 + site.site_end_date = new_end_date + site.save(ignore_permissions=True) + + # 更新订单状态为交易成功,防止重复处理 + jingrow.db.set_value("Order", order.name, "status", "交易成功") + + # 记录成功的审计日志 + jingrow.log_error( + message=f"网站续费成功: {site_name}, 支付方式:{order.payment_method}, 订单号:{order_id}, 续费 {renewal_months} 个月, 到期日延长至 {new_end_date}", + title="网站续费成功" + ) + + return { + "name": site.name, + "url": site_name, + "new_end_date": new_end_date + } + +@jingrow.whitelist() +def check_payment_status(order_id, payment_type): + """检查支付状态""" + if payment_type == "alipay": + pagetype = "Order" + elif payment_type == "wechatpay": + pagetype = "Order" + else: + jingrow.throw("不支持的支付类型") + + team = get_current_team() + payment_record = jingrow.db.get_value( + pagetype, + {"order_id": order_id, "team": team}, + ["status", "total_amount"], + as_dict=True + ) + + if not payment_record: + return {"status": "not_found"} + + return payment_record + + +@jingrow.whitelist() +def create_alipay_order_for_recharge(amount): + """创建支付宝订单用于购买预付费信用额度""" + team = get_current_team(True) + + # 金额取整到两位小数 + total_amount = round(float(amount), 2) + + # 生成唯一订单号 + order_id = 
f"{jingrow.utils.now_datetime().strftime('%Y%m%d%H%M%S')}{jingrow.utils.random_string(6)}" + + # 创建订单记录 + payment_record = jingrow.get_pg({ + "pagetype": "Order", + "order_id": order_id, + "order_type": "余额充值", + "team": team.name, + "total_amount": float(total_amount), + "status": "待支付", + "payment_method": "支付宝" + }) + + payment_record.insert(ignore_permissions=True) + jingrow.db.commit() + + # 直接使用AlipayAPI类生成支付链接 + from jcloud.api.payment.alipay import AlipayAPI + api = AlipayAPI() + + try: + # 生成支付链接,使用API类中已配置的默认URL + payment_url = api.generate_payment_url( + order_id=order_id, + amount=amount, + subject="Jingrow 余额充值", + team_name=team.name + ) + + return { + "payment_url": payment_url, + "order_id": order_id, + "payment_record": payment_record.name + } + except Exception as e: + jingrow.log_error(f"创建支付宝订单失败: {str(e)}", "Order") + jingrow.throw(f"创建支付宝订单失败: {str(e)}") + + +def generate_qr_code(payment_url): + """生成二维码图片的Base64字符串""" + qr = segno.make(payment_url) + buffer = io.BytesIO() + qr.save(buffer, kind='png', scale=6) + img_str = base64.b64encode(buffer.getvalue()).decode("utf-8") + return f"data:image/png;base64,{img_str}" + + +@jingrow.whitelist() +def create_wechatpay_order_for_recharge(amount): + """创建微信支付订单用于购买预付费信用额度""" + team = get_current_team(True) + total_amount = round(float(amount), 2) + + # 生成唯一订单号 + order_id = f"{jingrow.utils.now_datetime().strftime('%Y%m%d%H%M%S')}{jingrow.utils.random_string(6)}" + + # 创建订单记录 + payment_record = jingrow.get_pg({ + "pagetype": "Order", + "order_id": order_id, + "order_type": "余额充值", + "team": team.name, + "total_amount": float(total_amount), + "status": "待支付", + "payment_method": "微信支付" + }) + + payment_record.insert(ignore_permissions=True) + jingrow.db.commit() + + # 使用WeChatPayAPI生成支付链接 + wechat_pay = WeChatPayAPI() + + try: + qr_code_url = wechat_pay.generate_payment_url( + order_id=order_id, + amount=amount, + subject="Jingrow 余额充值", + team_name=team.name + ) + + # 检查URL是否为空 + if not qr_code_url: + jingrow.log_error("微信支付URL生成为空", "微信支付错误") + + # 使用提供的函数生成二维码图片 + qr_code_image = generate_qr_code(qr_code_url) + result = { + "qr_code_url": qr_code_url, + "qr_code_image": qr_code_image, + "order_id": order_id, + "payment_record": payment_record.name + } + return result + + except Exception as e: + jingrow.log_error(f"创建微信支付订单失败: {str(e)}\n{traceback.format_exc()}", "微信支付错误") + jingrow.throw(f"创建微信支付订单失败") + + +@jingrow.whitelist() +def create_order(**kwargs): + """创建站点订单""" + try: + # 从kwargs中获取参数,更灵活,且使用正确的字段名 + title = kwargs.get('title') + description = kwargs.get('description') + total_amount = kwargs.get('total_amount') + order_type = kwargs.get('order_type') + + # 参数验证 + if not title or not description or not total_amount: + jingrow.throw("必须提供标题、描述和金额") + + # 获取当前用户团队 + team = get_current_team(True) + + # 生成唯一订单号 + order_id = f"{jingrow.utils.now_datetime().strftime('%Y%m%d%H%M%S')}{jingrow.utils.random_string(6)}" + + # 创建订单记录 + order = jingrow.get_pg({ + "pagetype": "Order", + "order_id": order_id, + "order_type": order_type, + "title": title, + "description": description, + "team": team.name, + "total_amount": float(total_amount), + "status": "待支付", + }) + + order.insert(ignore_permissions=True) + jingrow.db.commit() + + return { + "success": True, + "order": order.as_dict() + } + + except Exception as e: + jingrow.log_error(f"创建站点订单失败: {str(e)}\n{traceback.format_exc()}", "订单错误") + return { + "success": False, + "message": f"创建订单失败: {str(e)}" + } + + +@jingrow.whitelist() +def create_renewal_order(site, 
renewal_months=1): + """创建网站续费订单""" + try: + # 验证输入 + site_pg = jingrow.get_pg("Site", site) + site_url = f"{site_pg.subdomain}.{site_pg.domain}" + team = site_pg.team + + # 验证当前用户权限 + current_team = get_current_team(True) + if current_team.name != team: + jingrow.throw("您没有权限为此站点创建续费订单") + + # 获取当前计划 - 使用正确的字段名 plan 而非 current_plan + current_plan = jingrow.get_pg("Site Plan", site_pg.plan) + + # 计算续费金额 + renewal_months = int(renewal_months) + team_currency = jingrow.db.get_value("Team", team, "currency") + + if renewal_months == 12: + # 年付9折 + amount = round(current_plan.price_cny * 12 * 0.9) if team_currency == "CNY" else round(current_plan.price_usd * 12 * 0.9) + else: + amount = current_plan.price_cny * renewal_months if team_currency == "CNY" else current_plan.price_usd * renewal_months + + # 生成唯一订单号 + order_id = f"{jingrow.utils.now_datetime().strftime('%Y%m%d%H%M%S')}{jingrow.utils.random_string(6)}" + + # 创建订单记录 + order = jingrow.get_pg({ + "pagetype": "Order", + "order_id": order_id, + "order_type": "网站续费", + "team": team, + "status": "待支付", + "total_amount": amount, + "title": site_url, + "description": str(renewal_months) # 简单存储续费月数 + }) + + order.insert(ignore_permissions=True) + jingrow.db.commit() + + return { + "success": True, + "order": order.as_dict() + } + except Exception as e: + jingrow.log_error(f"创建续费订单失败: {str(e)}", "续费订单错误") + return { + "success": False, + "message": f"创建续费订单失败: {str(e)}" + } + +@jingrow.whitelist() +def process_balance_payment_for_order(order_id): + """使用账户余额支付订单""" + try: + # 获取当前用户团队 + team = get_current_team(True) + + # 获取订单信息 + order = jingrow.get_pg("Order", {"order_id": order_id}) + if not order: + jingrow.throw(f"找不到订单: {order_id}") + + # 验证订单是否属于当前团队 + if order.team != team.name: + jingrow.throw("您没有权限支付此订单") + + # 检查订单状态 + if order.status != "待支付": + return { + "success": False, + "message": "该订单已支付或已取消" + } + + # 使用 Team 类的 get_balance 方法获取余额 + balance = team.get_balance() + + # 检查余额是否足够 + if balance < order.total_amount: + return { + "success": False, + "message": "余额不足" + } + + # 创建余额交易记录(扣款) + balance_transaction = jingrow.get_pg({ + "pagetype": "Balance Transaction", + "team": team.name, + "type": "Adjustment", + "source": "Prepaid Credits", + "amount": -1 * float(order.total_amount), # 使用负数表示扣减 + "description": f"新建网站: {order.title}", + "paid_via_local_pg": 1 + }) + balance_transaction.flags.ignore_permissions = True + balance_transaction.insert() + balance_transaction.submit() + + # 更新订单状态 + order.status = "已支付" + order.payment_method = "余额支付" + order.save(ignore_permissions=True) + jingrow.db.commit() + + return { + "status": "Success", + "message": "支付成功", + "order": order.as_dict() + } + + except Exception as e: + jingrow.log_error(f"余额支付失败: {str(e)}\n{traceback.format_exc()}", "支付错误") + return { + "status": "Error", + "message": f"余额支付失败: {str(e)}" + } + +@jingrow.whitelist() +def process_balance_payment_for_renew_order(order_id): + """使用账户余额支付网站续费订单""" + try: + # 获取当前用户团队 + team = get_current_team(True) + + # 获取订单信息 + order = jingrow.get_pg("Order", {"order_id": order_id}) + if not order: + jingrow.throw(_(f"找不到订单: {order_id}")) + + # 验证订单是否属于当前团队 + if order.team != team.name: + jingrow.throw(_("您没有权限支付此订单")) + + # 验证订单类型 + if order.order_type != "网站续费": + jingrow.throw(_("此订单不是网站续费订单")) + + # 检查订单状态 + if order.status != "待支付": + return { + "success": False, + "message": _("该订单已支付或已取消") + } + + # 使用 Team 类的 get_balance 方法获取余额 + balance = team.get_balance() + + # 检查余额是否足够 + if balance < order.total_amount: + return { + 
"success": False, + "message": _("余额不足") + } + + # 开始数据库事务 + jingrow.db.begin() + + try: + # 创建余额交易记录(扣款) + balance_transaction = jingrow.get_pg({ + "pagetype": "Balance Transaction", + "team": team.name, + "type": "Adjustment", + "source": "Prepaid Credits", + "amount": -1 * float(order.total_amount), # 使用负数表示扣减 + "description": f"网站续费: {order.title}", + "paid_via_local_pg": 1 + }) + balance_transaction.flags.ignore_permissions = True + balance_transaction.insert() + balance_transaction.submit() + + # 更新订单状态 + order.status = "已支付" + order.payment_method = "余额支付" + order.save(ignore_permissions=True) + + # 提取网站URL和续费周期 + site_name = order.title # 网站URL就是name字段 + renewal_months = int(order.description) + + # 调用网站续期处理函数 + renew_result = process_site_renew(order_id) + + # 提交数据库事务 + jingrow.db.commit() + + return { + "success": True, + "status": "Success", + "message": _("支付成功,网站已续费"), + "order": order.as_dict(), + "site": renew_result + } + + except Exception as inner_error: + # 回滚事务 + jingrow.db.rollback() + raise inner_error + + except Exception as e: + jingrow.log_error( + message=f"余额支付续费失败: {str(e)}\n{traceback.format_exc()}", + title="续费支付错误" + ) + return { + "success": False, + "status": "Error", + "message": _(f"余额支付失败: {str(e)}") + } + +@jingrow.whitelist() +def process_alipay_order(order_id): + """创建支付宝订单支付链接""" + team = get_current_team(True) + + # 获取订单信息 + order = jingrow.get_pg("Order", {"order_id": order_id}) + if not order: + jingrow.throw(f"找不到订单: {order_id}") + + # 验证订单是否属于当前团队 + if order.team != team.name: + jingrow.throw("您没有权限支付此订单") + + # 检查订单状态 + if order.status != "待支付": + jingrow.throw("该订单已支付或已取消") + + # 金额取整到两位小数 + amount = round(float(order.total_amount), 2) + + # 直接使用AlipayAPI类生成支付链接 + from jcloud.api.payment.alipay import AlipayAPI + api = AlipayAPI() + + try: + # 生成支付链接 + payment_url = api.generate_payment_url( + order_id=order_id, + amount=amount, + subject=order.title or "Jingrow 站点订单", + team_name=team.name + ) + + # 更新订单支付方式 + order.payment_method = "支付宝" + order.save(ignore_permissions=True) + + return { + "payment_url": payment_url, + "order_id": order_id, + "success": True + } + except Exception as e: + jingrow.log_error(f"创建支付宝订单失败: {str(e)}", "Order") + jingrow.throw(f"创建支付宝订单失败: {str(e)}") + +@jingrow.whitelist() +def process_wechatpay_order(order_id): + """创建微信支付订单""" + try: + # 获取当前用户团队 + team = get_current_team(True) + + # 获取订单信息 + order = jingrow.get_pg("Order", {"order_id": order_id}) + if not order: + jingrow.throw(f"找不到订单: {order_id}") + + # 验证订单是否属于当前团队 + if order.team != team.name: + jingrow.throw("您没有权限支付此订单") + + # 检查订单状态 + if order.status != "待支付": + jingrow.throw("该订单已支付或已取消") + + # 获取金额 + amount = order.total_amount + + # 创建微信支付客户端 + wechat_pay = WeChatPayAPI() + + try: + # 生成支付URL + qr_code_url = wechat_pay.generate_payment_url( + order_id=order_id, + amount=amount, + subject=order.title or "Jingrow 站点订单", + team_name=team.name + ) + + # 检查URL是否为空 + if not qr_code_url: + jingrow.log_error("微信支付URL生成为空", "微信支付错误") + jingrow.throw("生成支付URL失败") + + # 生成二维码图片 + qr_code_image = generate_qr_code(qr_code_url) + + # 更新订单支付方式 + order.payment_method = "微信支付" + order.save(ignore_permissions=True) + + return { + "payment_url": qr_code_url, + "qr_code_image": qr_code_image, + "order_id": order_id, + "success": True + } + + except Exception as e: + jingrow.log_error(f"创建微信支付订单失败: {str(e)}\n{traceback.format_exc()}", "微信支付错误") + jingrow.throw(f"创建微信支付订单失败") + + except Exception as e: + jingrow.log_error(f"创建微信支付订单失败: {str(e)}\n{traceback.format_exc()}", 
"微信支付错误") + jingrow.throw(f"创建微信支付订单失败") + +@jingrow.whitelist() +def check_site_order_payment_status(order_id): + """检查订单支付状态""" + try: + # 获取订单信息 + order = jingrow.get_pg("Order", {"order_id": order_id}) + if not order: + jingrow.throw(f"找不到订单: {order_id}") + + return { + "success": True, + "status": order.status, + "order": order.as_dict() + } + + except Exception as e: + jingrow.log_error(f"检查订单状态失败: {str(e)}\n{traceback.format_exc()}", "订单错误") + return { + "success": False, + "message": f"检查订单状态失败: {str(e)}" + } + +@jingrow.whitelist() +def get_orders(page=1, page_size=20, search=None): + + try: + # 获取当前用户团队 + team = get_current_team(True) + + # 构建过滤条件 + filters = {"team": team.name} + + # 添加搜索条件 + if search: + filters = [ + ["Order", "team", "=", team.name], + [ + "Order", + "name|order_id|title|description", + "like", + f"%{search}%" + ] + ] + + # 计算分页参数 + page = int(page) + page_size = int(page_size) + start = (page - 1) * page_size + + # 获取订单总数 + total_count = jingrow.db.count("Order", filters=filters) + + # 获取分页后的订单列表 - 根据需求修改字段列表 + orders = jingrow.get_all( + "Order", + filters=filters, + fields=[ + "title", + "order_id", + "trade_no", + "order_type", + "payment_method", + "description", + "total_amount as total", + "status", + "creation" + ], + order_by="creation desc", + start=start, + limit=page_size + ) + + # 记录日志以便调试 + jingrow.logger().info(f"获取订单成功: 团队={team.name}, 总数={total_count}") + + return { + "orders": orders, + "total": total_count + } + + except Exception as e: + jingrow.log_error(f"获取订单列表失败: {str(e)}\n{traceback.format_exc()}", "订单列表错误") + return { + "orders": [], + "total": 0, + "error": str(e) + } + +@jingrow.whitelist() +def get_order_details(name): + """ + 获取订单详情 + + 参数: + name (str): 订单名称 + + 返回: + dict: 包含订单详情 + """ + try: + # 获取当前用户团队 + team = get_current_team(True) + + # 获取订单详情 + order = jingrow.get_pg("Order", name) + + # 验证订单是否属于当前团队 + if order.team != team.name: + jingrow.throw("您无权查看此订单") + + # 构建返回数据 + order_dict = order.as_dict() + + return { + "order": order_dict + } + + except Exception as e: + jingrow.log_error(f"获取订单详情失败: {str(e)}\n{traceback.format_exc()}", "订单详情错误") + return { + "error": str(e) + } + +@jingrow.whitelist() +def get_balance_transactions(page=1, page_size=20, search=None): + """ + 获取余额变动记录列表 + + 参数: + page (int): 页码,默认为1 + page_size (int): 每页记录数,默认为20 + search (str): 搜索关键词,可选 + + 返回: + dict: 包含transactions列表和total总数 + """ + try: + # 获取当前用户团队 + team = get_current_team(True) + + # 构建基础过滤条件 + filters = {"team": team.name, "docstatus": 1} # 已提交的文档 + + # 添加搜索条件(如果提供) + if search: + filters = [ + ["Balance Transaction", "team", "=", team.name], + ["Balance Transaction", "docstatus", "=", 1], + [ + "Balance Transaction", + "description|source|type|invoice", + "like", + f"%{search}%" + ] + ] + + # 计算分页参数 + page = int(page) + page_size = int(page_size) + start = (page - 1) * page_size + + # 获取总记录数 + total_count = jingrow.db.count("Balance Transaction", filters=filters) + + # 获取分页数据 + transactions = jingrow.get_all( + "Balance Transaction", + filters=filters, + fields=[ + "name", + "creation", + "description", + "amount", + "ending_balance", + "type", + "source", + "invoice" + ], + order_by="creation desc", + start=start, + limit=page_size + ) + + return { + "transactions": transactions, + "total": total_count + } + + except Exception as e: + jingrow.log_error(f"获取余额记录失败: {str(e)}\n{traceback.format_exc()}", "余额记录错误") + return { + "transactions": [], + "total": 0, + "error": str(e) + } diff --git a/jcloud/api/central.py 
b/jcloud/api/central.py new file mode 100644 index 0000000..7f5bcef --- /dev/null +++ b/jcloud/api/central.py @@ -0,0 +1,175 @@ +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + +import jingrow +from jingrow.core.utils import find +from jingrow.geo.country_info import get_country_timezone_info + +from jcloud.api.account import get_account_request_from_key +from jcloud.jcloud.pagetype.site.jerp_site import ( + JERPSite, + get_jerp_domain, + get_jerp_plan, +) +from jcloud.jcloud.pagetype.site.pool import get as get_pooled_site +from jcloud.jcloud.pagetype.team.team import Team + + +@jingrow.whitelist(allow_guest=True) +def account_request( + subdomain, email, first_name, last_name, phone_number, country, url_args=None +): + email = email.strip().lower() + jingrow.utils.validate_email_address(email, True) + + if not check_subdomain_availability(subdomain): + jingrow.throw(f"Subdomain {subdomain} is already taken") + + all_countries = jingrow.db.get_all("Country", pluck="name") + country = find(all_countries, lambda x: x.lower() == country.lower()) + if not country: + jingrow.throw("Country filed should be a valid country name") + + account_request = jingrow.get_pg( + { + "pagetype": "Account Request", + "jerp": True, + "subdomain": subdomain, + "email": email, + "role": "Jcloud Admin", + "first_name": first_name, + "last_name": last_name, + "phone_number": phone_number, + "country": country, + "url_args": url_args, + "send_email": True, + } + ).insert(ignore_permissions=True) + + current_user = jingrow.session.user + current_session_data = jingrow.session.data + jingrow.set_user("Administrator") + + try: + pooled_site = get_pooled_site() + if pooled_site: + # Rename a standby site + JERPSite(site=pooled_site).rename_pooled_site(account_request) + else: + # Create a new site if pooled sites aren't available + site = JERPSite(account_request=account_request).insert(ignore_permissions=True) + site.create_subscription(get_jerp_plan()) + finally: + jingrow.set_user(current_user) + jingrow.session.data = current_session_data + + +@jingrow.whitelist(allow_guest=True) +def setup_account(key, business_data=None): + account_request = get_account_request_from_key(key) + if not account_request: + jingrow.throw("Invalid or Expired Key") + + jingrow.set_user("Administrator") + + if business_data: + business_data = jingrow.parse_json(business_data) + + if isinstance(business_data, dict): + business_data = { + key: business_data.get(key) + for key in [ + "company", + "no_of_employees", + "industry", + "no_of_users", + "designation", + "referral_source", + "agreed_to_partner_consent", + ] + } + + account_request.update(business_data) + account_request.save(ignore_permissions=True) + + email = account_request.email + + if not jingrow.db.exists("Team", email): + team_pg = Team.create_new( + account_request, + account_request.first_name, + account_request.last_name, + country=account_request.country, + via_jerp=True, + ) + else: + team_pg = jingrow.get_pg("Team", email) + + site_name = jingrow.db.get_value("Site", {"account_request": account_request.name}) + site = jingrow.get_pg("Site", site_name) + site.team = team_pg.name + site.save() + + subscription = site.subscription + if subscription: + subscription.team = team_pg.name + subscription.save() + + jingrow.set_user(team_pg.user) + jingrow.local.login_manager.login_as(team_pg.user) + + return site.name + + +@jingrow.whitelist(allow_guest=True) +def check_subdomain_availability(subdomain): + exists = bool( + 
jingrow.db.exists( + "Site", + { + "subdomain": subdomain, + "domain": get_jerp_domain(), + "status": ("!=", "Archived"), + }, + ) + ) + if exists: + return False + + return True + + +@jingrow.whitelist(allow_guest=True) +def options_for_regional_data(key): + account_request = get_account_request_from_key(key) + if not account_request: + jingrow.throw("Invalid or Expired Key") + + data = { + "languages": jingrow.db.get_all("Language", ["language_name", "language_code"]), + "currencies": jingrow.db.get_all("Currency", pluck="name"), + "country": account_request.country, + } + data.update(get_country_timezone_info()) + + return data + + +@jingrow.whitelist(allow_guest=True) +def get_trial_end_date(site): + if not site or not isinstance(site, str): + jingrow.throw("Invalid Site") + + return jingrow.db.get_value("Site", site, "trial_end_date") + + +@jingrow.whitelist(allow_guest=True) +def send_login_link(site): + if not site or not isinstance(site, str) or not jingrow.db.exists("Site", site): + jingrow.throw("Invalid site") + + from jcloud.api.account import send_login_link as send_link + + # send link to site owner + email = jingrow.db.get_value("Site", site, "team") + send_link(email) diff --git a/jcloud/api/client.py b/jcloud/api/client.py new file mode 100644 index 0000000..13c7a4a --- /dev/null +++ b/jcloud/api/client.py @@ -0,0 +1,538 @@ +# Copyright (c) 2023, JINGROW +# MIT License. See license.txt + +from __future__ import annotations + +import inspect +import typing + +import jingrow +from jingrow.client import set_value as _set_value +from jingrow.handler import run_pg_method as _run_pg_method +from jingrow.model import child_table_fields, default_fields +from jingrow.model.base_document import get_controller +from jingrow.utils import cstr +from pypika.queries import QueryBuilder + +from jcloud.exceptions import TeamHeaderNotInRequestError +from jcloud.utils import has_role + +if typing.TYPE_CHECKING: + from jingrow.model.meta import Meta + +ALLOWED_DOCTYPES = [ + "Site", + "Site App", + "Site Domain", + "Site Backup", + "Site Activity", + "Site Config", + "Site Plan", + "Site Update", + "Site Group Deploy", + "Invoice", + "Balance Transaction", + "Stripe Payment Method", + "Bench", + "Bench App", + "Bench Dependency Version", + "Release Group", + "Release Group App", + "Release Group Dependency", + "Cluster", + "Jcloud Permission Group", + "Jcloud Role", + "Jcloud Role Permission", + "Team", + "Product Trial Request", + "Deploy Candidate", + "Deploy Candidate Difference", + "Deploy Candidate Difference App", + "Agent Job", + "Agent Job Type", + "Common Site Config", + "Server", + "Database Server", + "Ansible Play", + "Server Plan", + "Release Group Variable", + "Resource Tag", + "Jcloud Tag", + "Partner Approval Request", + "Marketplace App", + "Subscription", + "Marketplace App Version", + "Marketplace App Plan", + "App Release", + "Payout Order", + "App Patch", + "Product Trial", + "Jcloud Notification", + "User SSH Key", + "Jingrow Version", + "Dashboard Banner", + "App Release Approval Request", + "Jcloud Webhook", + "SQL Playground Log", + "Site Database User", + "Jcloud Settings", + "Mpesa Payment Record", +] + +ALLOWED_DOCTYPES_FOR_SUPPORT = [ + "Site", + "Bench", + "Release Group", +] + +whitelisted_methods = set() + + +@jingrow.whitelist() +def get_list( + pagetype: str, + fields: list | None = None, + filters: dict | None = None, + order_by: str | None = None, + start: int = 0, + limit: int = 20, + parent: str | None = None, + debug: bool = False, +): + if filters is 
None: + filters = {} + + # these doctypes doesn't have a team field to filter by but are used in get or run_pg_method + if pagetype in ["Team", "User SSH Key"]: + return [] + + check_permissions(pagetype) + valid_fields = validate_fields(pagetype, fields) + valid_filters = validate_filters(pagetype, filters) + + meta = jingrow.get_meta(pagetype) + if meta.istable and not (filters.get("parenttype") and filters.get("parent")): + jingrow.throw("parenttype and parent are required to get child records") + + apply_team_filter = not ( + filters.get("skip_team_filter_for_system_user_and_support_agent") + and (jingrow.local.system_user() or has_role("Jcloud Support Agent")) + ) + if apply_team_filter and meta.has_field("team"): + valid_filters.team = jingrow.local.team().name + + query = get_list_query( + pagetype, + meta, + filters, + valid_filters, + valid_fields, + start, + limit, + order_by, + ) + filters = jingrow._dict(filters or {}) + list_args = dict( + fields=fields, + filters=filters, + order_by=order_by, + start=start, + limit=limit, + parent=parent, + debug=debug, + ) + query = apply_custom_filters(pagetype, query, **list_args) + if isinstance(query, QueryBuilder): + return query.run(as_dict=1, debug=debug) + + if isinstance(query, list): + return query + + return [] + + +def get_list_query( + pagetype: str, + meta: "Meta", + filters: dict, + valid_filters: jingrow._dict, + valid_fields: list | None, + start: int, + limit: int, + order_by: str | None, +): + from jcloud.jcloud.pagetype.jcloud_role.jcloud_role import check_role_permissions + + query = jingrow.qb.get_query( + pagetype, + filters=valid_filters, + fields=valid_fields, + offset=start, + limit=limit, + order_by=order_by, + ) + + if meta.istable and jingrow.get_meta(filters.get("parenttype")).has_field("team"): + ParentDocType = jingrow.qb.PageType(filters.get("parenttype")) + ChildDocType = jingrow.qb.PageType(pagetype) + + query = ( + query.join(ParentDocType) + .on(ParentDocType.name == ChildDocType.parent) + .where(ParentDocType.team == jingrow.local.team().name) + ) + + if roles := check_role_permissions(pagetype): + JcloudRolePermission = jingrow.qb.PageType("Jcloud Role Permission") + QueriedDocType = jingrow.qb.PageType(pagetype) + + field = pagetype.lower().replace(" ", "_") + query = ( + query.join(JcloudRolePermission) + .on(JcloudRolePermission[field] == QueriedDocType.name & JcloudRolePermission.role.isin(roles)) + .distinct() + ) + + return query + + +@jingrow.whitelist() +def get(pagetype, name): + from jcloud.jcloud.pagetype.jcloud_role.jcloud_role import check_role_permissions + + check_permissions(pagetype) + try: + pg = jingrow.get_pg(pagetype, name) + except jingrow.DoesNotExistError: + controller = get_controller(pagetype) + if hasattr(controller, "on_not_found"): + return controller.on_not_found(name) + raise + + if ( + not (jingrow.local.system_user() or has_role("Jcloud Support Agent")) + and jingrow.get_meta(pagetype).has_field("team") + and pg.team != jingrow.local.team().name + ): + raise_not_permitted() + + check_role_permissions(pagetype, name) + + fields = tuple(default_fields) + if hasattr(pg, "dashboard_fields"): + fields += tuple(pg.dashboard_fields) + + _pg = jingrow._dict() + for fieldname in fields: + _pg[fieldname] = pg.get(fieldname) + + if hasattr(pg, "get_pg"): + result = pg.get_pg(_pg) + if isinstance(result, dict): + _pg.update(result) + + return _pg + + +@jingrow.whitelist(methods=["POST", "PUT"]) +def insert(pg=None): + if not pg or not pg.get("pagetype"): + 
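+        # a payload without a pagetype cannot be routed to any controller, so reject it early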
jingrow.throw(jingrow._("pg.pagetype is required")) + + check_permissions(pg.get("pagetype")) + + pg = jingrow._dict(pg) + if jingrow.is_table(pg.pagetype): + if not (pg.parenttype and pg.parent and pg.parentfield): + jingrow.throw(jingrow._("Parenttype, Parent and Parentfield are required to insert a child record")) + + # inserting a child record + parent = jingrow.get_pg(pg.parenttype, pg.parent) + + if jingrow.get_meta(parent.pagetype).has_field("team") and parent.team != jingrow.local.team().name: + raise_not_permitted() + + parent.append(pg.parentfield, pg) + parent.save() + return get(parent.pagetype, parent.name) + + _pg = jingrow.get_pg(pg) + + if jingrow.get_meta(pg.pagetype).has_field("team"): + if not _pg.team: + # set team if not set + _pg.team = jingrow.local.team().name + if not jingrow.local.system_user(): + # don't allow dashboard user to set any other team + _pg.team = jingrow.local.team().name + _pg.insert() + return get(_pg.pagetype, _pg.name) + + +@jingrow.whitelist(methods=["POST", "PUT"]) +def set_value(pagetype: str, name: str, fieldname: dict | str, value: str | None = None): + check_permissions(pagetype) + check_document_access(pagetype, name) + + for field in fieldname: + # fields mentioned in dashboard_fields are allowed to be set via set_value + is_allowed_field(pagetype, field) + + _set_value(pagetype, name, fieldname, value) + + # jingrow set_value returns just the pg and not jcloud's overriden `get_pg` + return get(pagetype, name) + + +@jingrow.whitelist(methods=["DELETE", "POST"]) +def delete(pagetype: str, name: str): + method = "delete" + + check_permissions(pagetype) + check_document_access(pagetype, name) + check_dashboard_actions(pagetype, name, method) + + _run_pg_method(dt=pagetype, dn=name, method=method, args=None) + + +@jingrow.whitelist() +def run_pg_method(dt: str, dn: str, method: str, args: dict | None = None): + check_permissions(dt) + check_document_access(dt, dn) + check_dashboard_actions(dt, dn, method) + + _run_pg_method( + dt=dt, + dn=dn, + method=method, + args=fix_args(method, args), + ) + jingrow.response.docs = [get(dt, dn)] + + +@jingrow.whitelist() +def search_link( + pagetype: str, + query: str | None = None, + filters: dict | None = None, + order_by: str | None = None, + page_length: int | None = None, +): + check_permissions(pagetype) + if pagetype == "Team" and not jingrow.local.system_user(): + raise_not_permitted() + + meta = jingrow.get_meta(pagetype) + PageType = jingrow.qb.PageType(pagetype) + valid_filters = validate_filters(pagetype, filters) + valid_fields = validate_fields(pagetype, ["name", meta.title_field or "name"]) + q = get_list_query( + pagetype, + meta, + filters, + valid_filters, + valid_fields, + 0, + page_length or 10, + order_by or "modified desc", + ) + q = q.select(PageType.name.as_("value")) + if meta.title_field: + q = q.select(PageType[meta.title_field].as_("label")) + if meta.has_field("enabled"): + q = q.where(PageType.enabled == 1) + if meta.has_field("disabled"): + q = q.where(PageType.disabled != 1) + if meta.has_field("team") and (not jingrow.local.system_user() or 1): + q = q.where(PageType.team == jingrow.local.team().name) + if query: + condition = PageType.name.like(f"%{query}%") + if meta.title_field: + condition = condition | PageType[meta.title_field].like(f"%{query}%") + q = q.where(condition) + return q.run(as_dict=1) + + +def check_document_access(pagetype: str, name: str): + if jingrow.local.system_user(): + return + + if has_role("Jcloud Support Agent") and pagetype in 
ALLOWED_DOCTYPES_FOR_SUPPORT: + return + + team = "" + meta = jingrow.get_meta(pagetype) + if meta.has_field("team"): + team = jingrow.db.get_value(pagetype, name, "team") + elif meta.has_field("bench"): + bench = jingrow.db.get_value(pagetype, name, "bench") + team = jingrow.db.get_value("Bench", bench, "team") + elif meta.has_field("group"): + group = jingrow.db.get_value(pagetype, name, "group") + team = jingrow.db.get_value("Release Group", group, "team") + else: + return + + if team == jingrow.local.team().name: + return + + raise_not_permitted() + + +def check_dashboard_actions(pagetype, name, method): + pg = jingrow.get_pg(pagetype, name) + method_obj = getattr(pg, method) + fn = getattr(method_obj, "__func__", method_obj) + + if fn not in whitelisted_methods: + raise_not_permitted() + + +def apply_custom_filters(pagetype, query, **list_args): + """Apply custom filters to query""" + controller = get_controller(pagetype) + if hasattr(controller, "get_list_query"): + if inspect.getfullargspec(controller.get_list_query).varkw: + return controller.get_list_query(query, **list_args) + return controller.get_list_query(query) + + return query + + +def validate_filters(pagetype, filters): + """Filter filters based on permissions""" + if not filters: + filters = {} + + out = jingrow._dict() + for fieldname, value in filters.items(): + if is_allowed_field(pagetype, fieldname): + out[fieldname] = value + + return out + + +def validate_fields(pagetype, fields): + """Filter fields based on permissions""" + if not fields: + return fields + + filtered_fields = [] + for field in fields: + if is_allowed_field(pagetype, field): + filtered_fields.append(field) + + return filtered_fields + + +def is_allowed_field(pagetype, field): + """Check if field is valid""" + if not field: + return False + + controller = get_controller(pagetype) + dashboard_fields = getattr(controller, "dashboard_fields", ()) + + if field in dashboard_fields: + return True + + if "." in field and is_allowed_linked_field(pagetype, field): + return True + + if isinstance(field, dict) and is_allowed_table_field(pagetype, field): + return True + + if field in [*default_fields, *child_table_fields]: + return True + + return False + + +def is_allowed_linked_field(pagetype, field): + linked_field = linked_field_fieldname = None + if " as " in field: + linked_field, _ = field.split(" as ") + else: + linked_field = field + + linked_field, linked_field_fieldname = linked_field.split(".") + if not is_allowed_field(pagetype, linked_field): + return False + + linked_field_pagetype = jingrow.get_meta(pagetype).get_field(linked_field).options + if not is_allowed_field(linked_field_pagetype, linked_field_fieldname): + return False + + return True + + +def is_allowed_table_field(pagetype, field): + for table_fieldname, table_fields in field.items(): + if not is_allowed_field(pagetype, table_fieldname): + return False + + table_pagetype = jingrow.get_meta(pagetype).get_field(table_fieldname).options + for table_field in table_fields: + if not is_allowed_field(table_pagetype, table_field): + return False + return True + + +def check_permissions(pagetype): + if pagetype not in ALLOWED_DOCTYPES: + raise_not_permitted() + + if not hasattr(jingrow.local, "team") or not jingrow.local.team(): + jingrow.throw( + "current_team is not set. 
Use X-JCLOUD-TEAM header in the request to set it.", + TeamHeaderNotInRequestError, + ) + + return True + + +def is_owned_by_team(pagetype, docname, raise_exception=True): + if not jingrow.local.team(): + return False + + docname = cstr(docname) + owned = jingrow.db.get_value(pagetype, docname, "team") == jingrow.local.team().name + if not owned and raise_exception: + raise_not_permitted() + return owned + + +def raise_not_permitted(): + jingrow.throw("不允许", jingrow.PermissionError) + + +def dashboard_whitelist(allow_guest=False, xss_safe=False, methods=None): + def wrapper(func): + global whitelisted_methods + + decorated_func = jingrow.whitelist(allow_guest=allow_guest, xss_safe=xss_safe, methods=methods)(func) + + def inner(*args, **kwargs): + return decorated_func(*args, **kwargs) + + whitelisted_methods.add(decorated_func) + return decorated_func + + return wrapper + + +def fix_args(method, args): + # This is a fixer function. Certain callers of `run_pg_method` + # pass duplicates of the passed kwargs in the `args` arg. + # + # This causes "got multiple values for argument 'method'" + if not isinstance(args, dict): + return args + + # Even if it doesn't match it'll probably throw + # down the call stack, but in that case it's unexpected + # behavior and so it's better to error-out. + if args.get("method") == method: + del args["method"] + + return args diff --git a/jcloud/api/config.py b/jcloud/api/config.py new file mode 100644 index 0000000..ce5b03e --- /dev/null +++ b/jcloud/api/config.py @@ -0,0 +1,26 @@ +import jingrow + +from jcloud.utils import get_client_blacklisted_keys + + +@jingrow.whitelist() +def standard_keys(): + return jingrow.get_all( + "Site Config Key", + fields=["`key`", "title", "type", "description"], + filters={"internal": False}, + ) + + +@jingrow.whitelist() +def is_valid(keys): + keys = jingrow.parse_json(keys) + + invalid = [] + blacklisted = get_client_blacklisted_keys() + + for key in keys: + if key in blacklisted: + invalid.append(key) + + return set(invalid) diff --git a/jcloud/api/cookies.py b/jcloud/api/cookies.py new file mode 100644 index 0000000..fc2de86 --- /dev/null +++ b/jcloud/api/cookies.py @@ -0,0 +1,45 @@ +import datetime +import json +from urllib.parse import unquote + +import jingrow +from jingrow.auth import CookieManager +from jingrow.oauth import get_cookie_dict_from_headers + + +@jingrow.whitelist(allow_guest=True) +def update_preferences(preferences): + preferences_dict = json.loads(preferences) + + if not jingrow.local.cookie_manager: + jingrow.local.cookie_manager = CookieManager() + + cookie_manager = jingrow.local.cookie_manager + cookie_perms = get_cookie_dict_from_headers(jingrow.local.request).get("cookie_perms") + + if cookie_perms: + cookie_perms = json.loads(unquote(cookie_perms.value)) + + # If was disabled before, now enabled or vice-versa + if cookie_perms.get("analytics") != preferences_dict.get("analytics"): + log_cookie_consent(preferences_dict) + else: + # Enabled for the first time + if preferences_dict.get("analytics"): + log_cookie_consent(preferences_dict) + + # Update the cookie + expires = datetime.datetime.now() + datetime.timedelta(days=180) + cookie_manager.set_cookie("cookie_perms", preferences, expires=expires) + + +def log_cookie_consent(preferences): + jingrow.get_pg( + { + "pagetype": "Cookie Preference Log", + "ip_address": jingrow.local.request_ip, + "agreed_to_analytics_cookies": preferences.get("analytics"), + "agreed_to_functionality_cookies": preferences.get("functionality"), + 
"agreed_to_performance_cookies": preferences.get("performance"), + } + ).insert(ignore_permissions=True) diff --git a/jcloud/api/dashboard.py b/jcloud/api/dashboard.py new file mode 100644 index 0000000..d6cccb5 --- /dev/null +++ b/jcloud/api/dashboard.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +import jingrow + +from jcloud.api.site import protected +from jcloud.utils import get_current_team + + +@jingrow.whitelist() +def all(): + sites = jingrow.get_list( + "Site", + fields=["count(1) as count", "status"], + order_by="creation desc", + group_by="status", + ) + return {"sites": sites} + + +@jingrow.whitelist() +@protected(["Site", "Release Group", "Server", "Database Server"]) +def create_new_tag(name, pagetype, tag): + team = get_current_team() + if jingrow.db.exists("Jcloud Tag", {"tag": tag, "pagetype_name": pagetype, "team": team}): + jingrow.throw(f"Tag '{tag}' already exists") + tag = jingrow.get_pg( + { + "pagetype": "Jcloud Tag", + "pagetype_name": pagetype, + "team": team, + "tag": tag, + } + ).insert(ignore_permissions=True) + pg = jingrow.get_pg(pagetype, name).append("tags", {"tag": tag}) + pg.save() + return tag + + +@jingrow.whitelist() +@protected(["Site", "Release Group", "Server", "Database Server"]) +def add_tag(name, pagetype, tag): + pg = jingrow.get_pg(pagetype, name) + pg.append("tags", {"tag": tag}) + pg.save() + return tag + + +@jingrow.whitelist() +@protected(["Site", "Release Group", "Server", "Database Server"]) +def remove_tag(name, pagetype, tag): + pg = jingrow.get_pg(pagetype, name) + pg.tags = [resource_tag for resource_tag in pg.tags if resource_tag.tag != tag] + pg.save() + return tag diff --git a/jcloud/api/developer/__init__.py b/jcloud/api/developer/__init__.py new file mode 100644 index 0000000..f8c06ca --- /dev/null +++ b/jcloud/api/developer/__init__.py @@ -0,0 +1,9 @@ +import jingrow + + +class InvalidSecretKeyError(Exception): + http_status_code = 401 + + +def raise_invalid_key_error(): + jingrow.throw("Please provide a valid secret key.", InvalidSecretKeyError) diff --git a/jcloud/api/developer/marketplace.py b/jcloud/api/developer/marketplace.py new file mode 100644 index 0000000..0459c74 --- /dev/null +++ b/jcloud/api/developer/marketplace.py @@ -0,0 +1,234 @@ +from typing import Dict, List + +import jingrow +from jingrow.utils import get_url + +from jcloud.api.developer import raise_invalid_key_error +from jcloud.api.site import get_plans as get_site_plans +from jcloud.utils.telemetry import capture + + +class DeveloperApiHandler: + def __init__(self, secret_key: str) -> None: + self.secret_key = secret_key + self.validate_secret_key() + + def validate_secret_key(self): + """Validate secret_key and set app subscription name and pg""" + + if not self.secret_key or not isinstance(self.secret_key, str): + raise_invalid_key_error() + + app_subscription_name = jingrow.db.exists( + "Subscription", {"secret_key": self.secret_key, "enabled": 1} + ) + + if not app_subscription_name: + raise_invalid_key_error() + + self.app_subscription_name = app_subscription_name + self.set_subscription_pg() + + def set_subscription_pg(self): + """To be called after `secret_key` validation""" + self.app_subscription_pg = jingrow.get_pg("Subscription", self.app_subscription_name) + + def get_subscription_status(self) -> str: + return self.app_subscription_pg.status + + def get_subscription_info(self) -> Dict: + """Important rule for security: Send info back carefully""" + 
app_subscription_dict = self.app_subscription_pg.as_dict() + fields_to_send = [ + "document_name", + "enabled", + "plan", + "site", + ] + + filtered_dict = { + x: app_subscription_dict[x] for x in app_subscription_dict if x in fields_to_send + } + + return filtered_dict + + def get_subscription(self) -> Dict: + team = self.app_subscription_pg.team + with SessionManager(team) as _: + currency, address = jingrow.db.get_value( + "Team", team, ["currency", "billing_address"] + ) + team_pg = jingrow.get_pg("Team", team) + response = { + "currency": currency, + "address": jingrow.db.get_value( + "Address", + address, + ["address_line1", "city", "state", "country", "pincode"], + as_dict=True, + ) + if address + else {}, + "team": self.app_subscription_pg.team, + "countries": jingrow.db.get_all("Country", pluck="name"), + "plans": get_site_plans(), + "has_billing_info": ( + team_pg.default_payment_method + or team_pg.get_balance() > 0 + or team_pg.free_account + ), + "current_plan": jingrow.db.get_value("Site", self.app_subscription_pg.site, "plan"), + } + + capture("attempted", "fc_subscribe", team) + return response + + def update_billing_info(self, data: Dict) -> str: + team = self.app_subscription_pg.team + with SessionManager(team) as _: + team_pg = jingrow.get_pg("Team", team) + team_pg.update_billing_details(data) + + capture("updated_address", "fc_subscribe", team) + return "success" + + def get_publishable_key_and_setup_intent(self): + with SessionManager(self.app_subscription_pg.team) as _: + from jcloud.api.billing import get_publishable_key_and_setup_intent + + return get_publishable_key_and_setup_intent() + + def setup_intent_success(self, setup_intent): + team = self.app_subscription_pg.team + with SessionManager(team) as _: + from jcloud.api.billing import setup_intent_success + + capture("added_card", "fc_subscribe", team) + return setup_intent_success(setup_intent) + + def change_site_plan(self, plan): + team = self.app_subscription_pg.team + with SessionManager(team) as _: + site = jingrow.get_pg("Site", self.app_subscription_pg.site) + site.change_plan(plan) + capture("changed_plan", "fc_subscribe", team) + + def send_login_link(self): + try: + login_url = self.get_login_url() + users = jingrow.get_pg("Team", self.app_subscription_pg.team).user + jingrow.sendmail( + subject="Login Verification Email", + recipients=[users], + template="remote_login", + args={"login_url": login_url, "site": self.app_subscription_pg.site}, + now=True, + ) + return "success" + except Exception as e: + return e + + def get_login_url(self): + # check for active tokens + team = self.app_subscription_pg.team + if jingrow.db.exists( + "Saas Remote Login", + { + "team": team, + "status": "Attempted", + "expires_on": (">", jingrow.utils.now()), + }, + ): + pg = jingrow.get_pg( + "Saas Remote Login", + { + "team": team, + "status": "Attempted", + "expires_on": (">", jingrow.utils.now()), + }, + ) + token = pg.token + else: + token = jingrow.generate_hash("Saas Remote Login", 50) + jingrow.get_pg( + { + "pagetype": "Saas Remote Login", + "team": team, + "token": token, + } + ).insert(ignore_permissions=True) + jingrow.db.commit() + + return get_url( + f"/api/method/jcloud.api.marketplace.login_via_token?token={token}&team={team}&site={self.app_subscription_pg.site}" + ) + + +class SessionManager: + # set user for authenticated requests and then switch to guest once completed + def __init__(self, team: str): + jingrow.set_user(jingrow.db.get_value("Team", team, "user")) + + def __enter__(self): + return self 
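+    # Illustrative usage only (team and site_name are placeholders):
+    #
+    #     with SessionManager(team):
+    #         site = jingrow.get_pg("Site", site_name)
+    #
+    # the block runs as the team's user and reverts to Guest on exit.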
+ + def __exit__(self, exc_type, exc_value, exc_traceback): + jingrow.set_user("Guest") + + +# ------------------------------------------------------------ +# API ENDPOINTS +# ------------------------------------------------------------ +@jingrow.whitelist(allow_guest=True) +def get_subscription_status(secret_key: str) -> str: + api_handler = DeveloperApiHandler(secret_key) + return api_handler.get_subscription_status() + + +@jingrow.whitelist(allow_guest=True) +def get_subscription_info(secret_key: str) -> Dict: + api_handler = DeveloperApiHandler(secret_key) + return api_handler.get_subscription_info() + + +@jingrow.whitelist(allow_guest=True) +def get_subscription(secret_key: str) -> str: + api_handler = DeveloperApiHandler(secret_key) + return api_handler.get_subscription() + + +@jingrow.whitelist(allow_guest=True) +def get_plans(secret_key: str, subscription: str) -> List: + api_handler = DeveloperApiHandler(secret_key) + return api_handler.get_plans(subscription) + + +@jingrow.whitelist(allow_guest=True) +def update_billing_info(secret_key: str, data) -> str: + data = jingrow.parse_json(data) + api_handler = DeveloperApiHandler(secret_key) + return api_handler.update_billing_info(data) + + +@jingrow.whitelist(allow_guest=True) +def get_publishable_key_and_setup_intent(secret_key: str) -> str: + api_handler = DeveloperApiHandler(secret_key) + return api_handler.get_publishable_key_and_setup_intent() + + +@jingrow.whitelist(allow_guest=True) +def setup_intent_success(secret_key: str, setup_intent) -> str: + api_handler = DeveloperApiHandler(secret_key) + return api_handler.setup_intent_success(setup_intent) + + +@jingrow.whitelist(allow_guest=True) +def change_site_plan(secret_key: str, plan: str) -> str: + api_handler = DeveloperApiHandler(secret_key) + return api_handler.change_site_plan(plan) + + +@jingrow.whitelist(allow_guest=True) +def send_login_link(secret_key: str) -> str: + api_handler = DeveloperApiHandler(secret_key) + return api_handler.send_login_link() diff --git a/jcloud/api/developer/saas.py b/jcloud/api/developer/saas.py new file mode 100644 index 0000000..a40595e --- /dev/null +++ b/jcloud/api/developer/saas.py @@ -0,0 +1,255 @@ +import json + +import jingrow +import jingrow.utils +from jingrow.rate_limiter import rate_limit + +from jcloud.api.developer import raise_invalid_key_error +from jcloud.utils import mask_email + + +class SaasApiHandler: + def __init__(self, secret_key): + self.secret_key = secret_key + self.validate_secret_key() + + def validate_secret_key(self): + """Validate secret_key and set app subscription name and pg""" + + if not self.secret_key or not isinstance(self.secret_key, str): + raise_invalid_key_error() + + app_subscription_name = jingrow.db.exists("Saas App Subscription", {"secret_key": self.secret_key}) + + if not app_subscription_name: + raise_invalid_key_error() + + self.app_subscription_name = app_subscription_name + self.set_subscription_pg() + + def set_subscription_pg(self): + """To be called after `secret_key` validation""" + self.app_subscription_pg = jingrow.get_pg("Saas App Subscription", self.app_subscription_name) + + def get_subscription_status(self): + return self.app_subscription_pg.status + + def get_subscription_info(self): + return jingrow.get_pg("Saas App Subscription", self.app_subscription_name) + + def get_plan_config(self): + plan_pg = jingrow.get_pg("Saas App Plan", self.app_subscription_pg.saas_app_plan).config + + return json.loads(plan_pg) + + def get_login_url(self): + # check for active tokens + team = 
self.app_subscription_pg.team + if jingrow.db.exists( + "Saas Remote Login", + { + "team": team, + "status": "Attempted", + "expires_on": (">", jingrow.utils.now()), + }, + ): + pg = jingrow.get_pg( + "Saas Remote Login", + { + "team": team, + "status": "Attempted", + "expires_on": (">", jingrow.utils.now()), + }, + ) + token = pg.token + else: + token = jingrow.generate_hash("Saas Remote Login", 50) + jingrow.get_pg( + { + "pagetype": "Saas Remote Login", + "team": team, + "token": token, + } + ).insert(ignore_permissions=True) + + domain = jingrow.db.get_value("Saas App", self.app_subscription_pg.app, "custom_domain") + return f"https://{domain}/api/method/jcloud.api.saas.login_via_token?token={token}&team={self.app_subscription_pg.team}" + + def get_trial_expiry(self): + return jingrow.db.get_value("Site", self.app_subscription_pg.site, "trial_end_date") + + +# ------------------------------------------------------------ +# API ENDPOINTS +# ------------------------------------------------------------ +@jingrow.whitelist(allow_guest=True) +def ping(): + return "pong" + + +@jingrow.whitelist(allow_guest=True) +def get_subscription_status(secret_key): + api_handler = SaasApiHandler(secret_key) + return api_handler.get_subscription_status() + + +@jingrow.whitelist(allow_guest=True) +def get_plan_config(secret_key): + api_handler = SaasApiHandler(secret_key) + return api_handler.get_plan_config() + + +@jingrow.whitelist(allow_guest=True) +def get_subscription_info(secret_key): + api_handler = SaasApiHandler(secret_key) + return api_handler.get_subscription_info() + + +@jingrow.whitelist(allow_guest=True) +def get_trial_expiry(secret_key): + api_handler = SaasApiHandler(secret_key) + return api_handler.get_trial_expiry() + + +""" +NOTE: These mentioned apis are used for all type of saas sites to allow login to jingrow cloud +- send_verification_code +- verify_verification_code +- login_to_fc + +Don't change the file name or the method names +It can potentially break the integrations. +""" + + +@jingrow.whitelist(allow_guest=True, methods=["POST"]) +@rate_limit(limit=5, seconds=60 * 60) +def send_verification_code(domain: str, route: str = ""): + from jcloud.utils.otp import generate_otp + + domain_info = jingrow.get_value("Site Domain", domain, ["site", "status"], as_dict=True) + if not domain_info or domain_info.get("status") != "Active": + jingrow.throw("The domain is not active currently. 
Please try again.") + + site_info = jingrow.get_value( + "Site", domain_info.get("site"), ["name", "team", "standby_for", "standby_for_product"], as_dict=True + ) + team_name = site_info.get("team") + team_info = jingrow.get_value("Team", team_name, ["name", "enabled", "user", "enforce_2fa"], as_dict=True) + if not team_info or not team_info.get("enabled"): + jingrow.throw("Your Jingrow team is disabled currently.") + + check_if_user_can_login(team_info, site_info) + + # if is_user_logged_in(team_info.get("user")): + # if route == "dashboard": + # redirect_to = "/dashboard/" + # elif route == "site-dashboard": + # redirect_to = f"/dashboard/sites/{site_info.get('name')}" + # return {"is_user_logged_in": True, "redirect_to": redirect_to} + + # generate otp and set in redis with 10 min expiry + otp = generate_otp() + jingrow.cache.set_value( + f"otp_hash_for_fc_login_via_saas_flow:{domain}", + jingrow.utils.sha256_hash(str(otp)), + expires_in_sec=60 * 10, + ) + + email = team_info.get("user") + send_email_with_verification_code(email, otp) + + return { + "email": mask_email(email, 50), + "is_user_logged_in": False, + } + + +@jingrow.whitelist(allow_guest=True, methods=["POST"]) +@rate_limit(limit=5, seconds=60 * 60) +def verify_verification_code(domain: str, verification_code: str, route: str = "dashboard"): + otp_hash = jingrow.cache.get_value(f"otp_hash_for_fc_login_via_saas_flow:{domain}", expires=True) + if not otp_hash or otp_hash != jingrow.utils.sha256_hash(str(verification_code)): + jingrow.throw("Invalid Code. Please try again.") + + site = jingrow.get_value("Site Domain", domain, "site") + team = jingrow.get_value("Site", site, "team") + user = jingrow.get_value("Team", team, "user") + + # as otp is valid, delete the otp from redis + jingrow.cache.delete_value(f"otp_hash_for_fc_login_via_saas_flow:{domain}") + + # login and generate a login_token to store sid + login_token = jingrow.generate_hash(length=64) + jingrow.cache.set_value(f"saas_fc_login_token:{login_token}", user, expires_in_sec=60) + if route == "site-dashboard": + jingrow.cache.set_value(f"saas_fc_login_site:{login_token}", domain, expires_in_sec=60) + + jingrow.response["login_token"] = login_token + + +@jingrow.whitelist(allow_guest=True) +@rate_limit(limit=5, seconds=60) +def login_to_fc(token: str): + email_cache_key = f"saas_fc_login_token:{token}" + domain_cache_key = f"saas_fc_login_site:{token}" + email = jingrow.cache.get_value(email_cache_key, expires=True) + domain = jingrow.cache.get_value(domain_cache_key, expires=True) + + if email: + jingrow.cache.delete_value(email_cache_key) + jingrow.local.login_manager.login_as(email) + jingrow.response.type = "redirect" + if domain: + jingrow.cache.delete_value(domain_cache_key) + jingrow.response.location = f"/dashboard/sites/{domain}" + else: + jingrow.response.location = "/dashboard/" + + +def is_user_logged_in(user): + Sessions = jingrow.qb.PageType("Sessions") + + return bool( + jingrow.qb.from_(Sessions) + .select(Sessions.user) + .where(Sessions.user == user) + .where(Sessions.status == "Active") + .run(as_dict=True) + ) + + +def check_if_user_can_login(team_info, site_info): + if team_info.get("enforce_2fa"): + jingrow.throw( + "Sorry, you cannot login with this method as 2FA is enabled. Please visit https://jingrow.com/dashboard to login." + ) + if ( + team_info.get("user") == "Administrator" + or jingrow.db.get_value("User", team_info.get("user"), "user_type") != "Website User" + ): + jingrow.throw("Sorry, you cannot login with this method. 
Please contact support for more details.") + + # restrict to SaaS Site + if not (site_info.get("standby_for") or site_info.get("standby_for_product")): + jingrow.throw("Only SaaS sites are allowed to login to Jingrow via current method.") + + +def send_email_with_verification_code(email, otp): + if jingrow.conf.developer_mode: + print("\nVerification Code for login to Jingrow:") + print(f"\nOTP for {email}:") + print(otp) + print() + else: + jingrow.sendmail( + recipients=email, + subject="Verification Code for Jingrow Login", + template="verification_code_for_login", + args={ + "full_name": jingrow.get_value("User", email, "full_name"), + "otp": otp, + "image_path": "http://git.jingrow.com:3000/jingrow/gameplan/assets/9355208/447035d0-0686-41d2-910a-a3d21928ab94", + }, + now=True, + ) diff --git a/jcloud/api/email.py b/jcloud/api/email.py new file mode 100644 index 0000000..d59932d --- /dev/null +++ b/jcloud/api/email.py @@ -0,0 +1,294 @@ +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + +import calendar +import json +import secrets +from datetime import datetime + +import jingrow +import requests +from jingrow.exceptions import OutgoingEmailError, TooManyRequestsError, ValidationError +from jingrow.utils.password import get_decrypted_password + +from jcloud.api.developer.marketplace import get_subscription_info +from jcloud.api.site import site_config, update_config +from jcloud.utils import log_error + + +class EmailLimitExceeded(TooManyRequestsError): + pass + + +class EmailSendError(OutgoingEmailError): + pass + + +class EmailConfigError(ValidationError): + http_status_code = 400 + + +class SpamDetectionError(ValidationError): + http_status_code = 422 + + +@jingrow.whitelist(allow_guest=True) +def email_ping(): + return "pong" + + +def setup(site): + """ + set site config for overriding email account validations + """ + pg_exists = jingrow.db.exists("Mail Setup", {"site": site}) + + if pg_exists: + pg = jingrow.get_pg("Mail Setup", pg_exists) + + if not pg.is_complete: + pg.is_complete = 1 + pg.save() + + return + + old_config = site_config(site) + + new_config = [ + {"key": "mail_login", "value": "example@gmail.com", "type": "String"}, + {"key": "mail_password", "value": "password", "type": "String"}, + {"key": "mail_port", "value": 587, "type": "Number"}, + {"key": "mail_server", "value": "smtp.gmail.com", "type": "String"}, + ] + for row in old_config: + new_config.append({"key": row.key, "value": row.value, "type": row.type}) + + update_config(site, json.dumps(new_config)) + + jingrow.get_pg({"pagetype": "Mail Setup", "site": site, "is_complete": 1}).insert(ignore_permissions=True) + + +@jingrow.whitelist(allow_guest=True) +def get_analytics(**data): + """ + send data for a specific month + """ + month = data.get("month") + year = datetime.now().year + last_day = calendar.monthrange(year, int(month))[1] + status = data.get("status") + site = data.get("site") + subscription_key = data.get("key") + + for value in (site, subscription_key): + if not value or not isinstance(value, str): + jingrow.throw("Invalid Request") + + return jingrow.get_all( + "Mail Log", + filters={ + "site": site, + "subscription_key": subscription_key, + "status": ["like", f"%{status}%"], + "date": ["between", [f"{month}-01-{year}", f"{month}-{last_day}-{year}"]], + }, + fields=["date", "status", "message", "sender", "recipient"], + order_by="date asc", + ) + + +def validate_plan(secret_key): + """ + check if subscription is active on marketplace and valid + #TODO: get 
activation date + """ + + # TODO: replace this with plan attributes + plan_label_map = jingrow.conf.email_plans + + if not secret_key: + jingrow.throw( + "Secret key missing. Email Delivery Service seems to be improperly installed. Try uninstalling and reinstalling it.", + EmailConfigError, + ) + + try: + subscription = get_subscription_info(secret_key=secret_key) + except Exception as e: + jingrow.throw( + str(e) + or "Something went wrong fetching subscription details of Email Delivery Service. Please raise a ticket at support.jingrow.com", + type(e), + ) + + if not subscription["enabled"]: + jingrow.throw( + "Your subscription is not active. Try activating it from, " + f"{jingrow.utils.get_url()}/dashboard/sites/{subscription['site']}/overview", + EmailConfigError, + ) + + # TODO: add a date filter(use start date from plan) + first_day = str(jingrow.utils.now_datetime().replace(day=1).date()) + count = jingrow.db.count( + "Mail Log", + filters={ + "site": subscription["site"], + "creation": (">=", first_day), + "subscription_key": secret_key, + }, + ) + if not count < plan_label_map[subscription["plan"]]: + jingrow.throw( + "You have exceeded your quota for Email Delivery Service. Try upgrading it from, " + f"{jingrow.utils.get_url()}/dashboard/sites/{subscription['site']}/overview", + EmailLimitExceeded, + ) + + +def check_spam(message: bytes): + jcloud_settings = jingrow.get_cached_value( + "Jcloud Settings", + None, + ["enable_spam_check", "spamd_endpoint", "spamd_api_key"], + as_dict=True, + ) + if not jcloud_settings.enable_spam_check: + return + try: + headers = {} + if jcloud_settings.spamd_api_key: + spamd_api_secret = get_decrypted_password("Jcloud Settings", "Jcloud Settings", "spamd_api_secret") + headers["Authorization"] = f"token {jcloud_settings.spamd_api_key}:{spamd_api_secret}" + resp = requests.post( + jcloud_settings.spamd_endpoint, + headers=headers, + files={"message": message}, + ) + resp.raise_for_status() + data = resp.json() + if data["message"] > 3.5: + jingrow.throw( + "This email was blocked as it was flagged as spam by our system. Please review the contents and try again.", + SpamDetectionError, + ) + except requests.exceptions.HTTPError as e: + # Ignore error, if server.jingrowmail.com is being updated. + if e.response.status_code != 503: + log_error("Spam Detection : Error", data=e) + + +@jingrow.whitelist(allow_guest=True) +def send_mime_mail(**data): + """ + send api request to mailgun + """ + files = jingrow._dict(jingrow.request.files) + data = json.loads(data["data"]) + + validate_plan(data["sk_mail"]) + + api_key, domain = jingrow.db.get_value("Jcloud Settings", None, ["mailgun_api_key", "root_domain"]) + + message: bytes = files["mime"].read() + check_spam(message) + + resp = requests.post( + f"https://api.mailgun.net/v3/{domain}/messages.mime", + auth=("api", f"{api_key}"), + data={"to": data["recipients"], "v:sk_mail": data["sk_mail"]}, + files={"message": message}, + ) + + if resp.status_code == 200: + return "Sending" # Not really required as v14 and up automatically marks the email q as sent + log_error("Email Delivery Service: Sending error", data=resp.text) + jingrow.throw( + "Something went wrong with sending emails. 
Please try again later or raise a support ticket with support.jingrow.com", + EmailSendError, + ) + return None + + +def is_valid_mailgun_event(event_data): + if not event_data: + return None + + if event_data.get("user-variables", {}).get("sk_mail") is None: + # We don't know where to send this event + # TOOD: Investigate why this is happening + # Hint: Likely from other emails not sent via the email delivery app + return None + + if "delivery-status" not in event_data: + return None + + if "message" not in event_data["delivery-status"]: + return None + + return True + + +@jingrow.whitelist(allow_guest=True) +def event_log(): + """ + log the webhook and forward it to site + """ + data = json.loads(jingrow.request.data) + event_data = data.get("event-data") + + if not is_valid_mailgun_event(event_data): + return None + + try: + secret_key = event_data["user-variables"]["sk_mail"] + headers = event_data["message"]["headers"] + if "message-id" not in headers: + # We can't log this event without a message-id + # TOOD: Investigate why this is happening + return None + message_id = headers["message-id"] + site = ( + jingrow.get_cached_value("Subscription", {"secret_key": secret_key}, "site") + or message_id.split("@")[1] + ) + status = event_data["event"] + delivery_message = ( + event_data["delivery-status"]["message"] or event_data["delivery-status"]["description"] + ) + jingrow.get_pg( + { + "pagetype": "Mail Log", + "unique_token": secrets.token_hex(25), + "message_id": message_id, + "sender": headers["from"], + "recipient": event_data.get("recipient") or headers.get("to"), + "site": site, + "status": event_data["event"], + "subscription_key": secret_key, + "message": delivery_message, + "log": json.dumps(data), + } + ).insert(ignore_permissions=True) + jingrow.db.commit() + except Exception: + log_error("Mail App: Event log error", data=data) + raise + + data = { + "status": status, + "message_id": message_id, + "delivery_message": delivery_message, + "secret_key": secret_key, + } + + try: + host_name = jingrow.db.get_value("Site", site, "host_name") or site + requests.post( + f"https://{host_name}/api/method/email_delivery_service.controller.update_status", + data=data, + ) + except Exception as e: + log_error("Mail App: Email status update error", data=e) + + return "Successful", 200 diff --git a/jcloud/api/github.py b/jcloud/api/github.py new file mode 100644 index 0000000..a90caaa --- /dev/null +++ b/jcloud/api/github.py @@ -0,0 +1,329 @@ +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + +from __future__ import annotations + +import re +from base64 import b64decode +from datetime import datetime, timedelta +from pathlib import Path +from typing import TYPE_CHECKING + +import jingrow +import jwt +import requests + +from jcloud.utils import get_current_team, log_error + +if TYPE_CHECKING: + + from jcloud.jcloud.pagetype.github_webhook_log.github_webhook_log import GitHubWebhookLog + + +@jingrow.whitelist(allow_guest=True, xss_safe=True) +def hook(*args, **kwargs): + user = jingrow.session.user + # set user to Administrator, to not have to do ignore_permissions everywhere + jingrow.set_user("Administrator") + headers = jingrow.request.headers + pg: "GitHubWebhookLog" = jingrow.get_pg( + { + "pagetype": "GitHub Webhook Log", + "name": headers.get("X-Github-Delivery"), + "event": headers.get("X-Github-Event"), + "signature": headers.get("X-Hub-Signature").split("=")[1], + "payload": jingrow.request.get_data().decode(), + } + ) + + try: + pg.insert() + 
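+        # commit immediately so the webhook log survives even if event handling fails;
+        # handle_events() below runs under its own try/except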
jingrow.db.commit() + except Exception as e: + jingrow.set_user(user) + log_error("GitHub Webhook Insert Error", args=args, kwargs=kwargs) + raise Exception from e + + try: + pg.handle_events() + except Exception as e: + jingrow.set_user(user) + log_error("GitHub Webhook Error", pg=pg) + raise Exception from e + + +def get_jwt_token(): + key = jingrow.db.get_single_value("Jcloud Settings", "github_app_private_key") + app_id = jingrow.db.get_single_value("Jcloud Settings", "github_app_id") + now = datetime.now() + expiry = now + timedelta(minutes=9) + payload = {"iat": int(now.timestamp()), "exp": int(expiry.timestamp()), "iss": app_id} + return jwt.encode(payload, key.encode(), algorithm="RS256") + + +def get_access_token(installation_id: str | None = None): + if not installation_id: + return jingrow.db.get_value( + "Jcloud Settings", + None, + "github_access_token", + ) + + token = get_jwt_token() + headers = { + "Authorization": f"Bearer {token}", + "Accept": "application/vnd.github.machine-man-preview+json", + } + response = requests.post( + f"http://git.jingrow.com:3000/api/v1/app/installations/{installation_id}/access_tokens", + headers=headers, + ).json() + return response.get("token") + + +@jingrow.whitelist() +def clear_token_and_get_installation_url(): + clear_current_team_access_token() + public_link = jingrow.db.get_single_value("Jcloud Settings", "github_app_public_link") + return f"{public_link}/installations/new" + + +def clear_current_team_access_token(): + team = get_current_team() + jingrow.db.set_value("Team", team, "github_access_token", "") # clear access token + + +@jingrow.whitelist() +def options(): + team = get_current_team() + token = jingrow.db.get_value("Team", team, "github_access_token") + public_link = jingrow.db.get_single_value("Jcloud Settings", "github_app_public_link") + + return { + "authorized": bool(token), + "installation_url": f"{public_link}/installations/new", + "installations": installations(token) if token else [], + } + + +def installations(token): + headers = { + "Authorization": f"token {token}", + "Accept": "application/vnd.github.machine-man-preview+json", + } + response = requests.get("http://git.jingrow.com:3000/api/v1/user/installations", headers=headers) + data = response.json() + installations = [] + if response.ok: + for installation in data["installations"]: + installations.append( + { + "id": installation["id"], + "login": installation["account"]["login"], + "url": installation["html_url"], + "image": installation["account"]["avatar_url"], + "repos": repositories(installation["id"], token), + } + ) + else: + jingrow.throw(data.get("message") or "An error Occurred") + + return installations + + +def repositories(installation, token): + headers = { + "Authorization": f"token {token}", + "Accept": "application/vnd.github.machine-man-preview+json", + } + repositories = [] + current_page, is_last_page = 1, False + while not is_last_page: + response = requests.get( + f"http://git.jingrow.com:3000/api/v1/user/installations/{installation}/repositories", + params={"per_page": 100, "page": current_page}, + headers=headers, + ) + if len(response.json().get("repositories", [])) < 100: + is_last_page = True + + for repository in response.json().get("repositories", []): + repositories.append( + { + "id": repository["id"], + "name": repository["name"], + "private": repository["private"], + "url": repository["html_url"], + "default_branch": repository["default_branch"], + } + ) + current_page += 1 + + return repositories + + +@jingrow.whitelist() 
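+# Fetches repository metadata plus all branches (paginated 100 per page), using an
+# installation access token when one is given, otherwise the configured GitHub token.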
+def repository(owner, name, installation=None): + token = "" + if not installation: + token = jingrow.db.get_value("Jcloud Settings", "github_access_token") + else: + token = get_access_token(installation) + headers = { + "Authorization": f"token {token}", + } + repo = requests.get(f"http://git.jingrow.com:3000/api/v1/repos/{owner}/{name}", headers=headers).json() + + current_page, is_last_page = 1, False + branches = [] + while not is_last_page: + response = requests.get( + f"http://git.jingrow.com:3000/api/v1/repos/{owner}/{name}/branches", + params={"per_page": 100, "page": current_page}, + headers=headers, + ) + if response.ok: + branches.extend(response.json()) + else: + break + + if len(response.json()) < 100: + is_last_page = True + + current_page += 1 + + repo["branches"] = branches + + return repo + + +@jingrow.whitelist() +def app(owner, repository, branch, installation=None): + headers = get_auth_headers(installation) + response = requests.get( + f"http://git.jingrow.com:3000/api/v1/repos/{owner}/{repository}/branches/{branch}", + headers=headers, + ) + + if not response.ok: + jingrow.throw(f"Could not fetch branch ({branch}) info for repo {owner}/{repository}") + + branch_info = response.json() + sha = branch_info["commit"]["commit"]["tree"]["sha"] + contents = requests.get( + f"http://git.jingrow.com:3000/api/v1/repos/{owner}/{repository}/git/trees/{sha}", + params={"recursive": True}, + headers=headers, + ).json() + + tree = _generate_files_tree(contents["tree"]) + py_setup_files = ["setup.py", "setup.cfg", "pyproject.toml"] + + if not any(x in tree for x in py_setup_files): + setup_filenames = jingrow.bold(" or ".join(py_setup_files)) + reason = f"Files {setup_filenames} do not exist in app directory." + jingrow.throw(f"Not a valid Jingrow App! {reason}") + + app_name, title = _get_app_name_and_title_from_hooks( + owner, + repository, + branch_info, + headers, + tree, + ) + + return {"name": app_name, "title": title} + + +@jingrow.whitelist() +def branches(owner, name, installation=None): + if installation: + token = get_access_token(installation) + else: + token = jingrow.get_value("Jcloud Settings", None, "github_access_token") + + if token: + headers = { + "Authorization": f"token {token}", + } + else: + headers = {} + + response = requests.get( + f"http://git.jingrow.com:3000/api/v1/repos/{owner}/{name}/branches", + params={"per_page": 100}, + headers=headers, + ) + + if response.ok: + return response.json() + jingrow.throw("Error fetching branch list from GitHub: " + response.text) + return None + + +def get_auth_headers(installation_id: str | None = None) -> "dict[str, str]": + if token := get_access_token(installation_id): + return {"Authorization": f"token {token}"} + return {} + + +def _get_app_name_and_title_from_hooks( + owner, + repository, + branch_info, + headers, + tree, +) -> "tuple[str, str]": + reason_for_invalidation = f"Files {jingrow.bold('hooks.py or patches.txt')} not found." + for directory, files in tree.items(): + if not files: + continue + + if ("hooks.py" not in files) or ("patches.txt" not in files): + reason_for_invalidation = ( + f"Files {jingrow.bold('hooks.py or patches.txt')} does not exist" + f" inside {directory}/{directory} directory." 
+ ) + continue + + hooks = requests.get( + f"http://git.jingrow.com:3000/api/v1/repos/{owner}/{repository}/contents/{directory}/hooks.py", + params={"ref": branch_info["name"]}, + headers=headers, + ).json() + if "content" not in hooks: + reason_for_invalidation = f"File {jingrow.bold('hooks.py')} could not be fetched." + continue + + content = b64decode(hooks["content"]).decode() + match = re.search(r"""app_title = ["'](.*)["']""", content) + + if match: + return directory, match.group(1) + + reason_for_invalidation = ( + f"File {jingrow.bold('hooks.py')} does not have {jingrow.bold('app_title')} defined." + ) + break + + jingrow.throw(f"Not a valid Jingrow App! {reason_for_invalidation}") + return None + + +def _generate_files_tree(files): + children = {} + for file in files: + path = Path(file["path"]) + children.setdefault(str(path.parent), []).append( + jingrow._dict({"name": str(path.name), "path": file["path"]}) + ) + return _construct_tree({}, children["."], children) + + +def _construct_tree(tree, children, children_map): + for file in children: + if file.path in children_map: + tree[file.name] = _construct_tree({}, children_map[file.path], children_map) + else: + tree[file.name] = None + return tree diff --git a/jcloud/api/google.py b/jcloud/api/google.py new file mode 100644 index 0000000..6fd8d84 --- /dev/null +++ b/jcloud/api/google.py @@ -0,0 +1,162 @@ +# Copyright (c) 2023, JINGROW +# MIT License. See license.txt + +from __future__ import annotations + +import json + +import jingrow +from jingrow import _ +from google.auth.transport.requests import Request +from google.oauth2 import id_token +from google.oauth2.credentials import Credentials +from google_auth_oauthlib.flow import Flow +from googleapiclient.discovery import build +from oauthlib.oauth2 import AccessDeniedError + +from jcloud.api.product_trial import _get_active_site as get_active_site_of_product_trial +from jcloud.utils import log_error + + +@jingrow.whitelist(allow_guest=True) +def login(product=None): + flow = google_oauth_flow() + authorization_url, state = flow.authorization_url() + minutes = 5 + payload = {"state": state} + if product: + payload["product"] = product + jingrow.cache().set_value(f"google_oauth_flow:{state}", payload, expires_in_sec=minutes * 60) + return authorization_url + + +@jingrow.whitelist(allow_guest=True) +def callback(code=None, state=None): # noqa: C901 + cached_key = f"google_oauth_flow:{state}" + payload = jingrow.cache().get_value(cached_key) + if not payload: + return invalid_login() + + product = payload.get("product") + product_trial = jingrow.db.get_value("Product Trial", product, ["name"], as_dict=1) if product else None + + def _redirect_to_login_on_failed_authentication(): + jingrow.local.response.type = "redirect" + if product_trial: + jingrow.local.response.location = f"/dashboard/saas/{product_trial.name}/login" + else: + jingrow.local.response.location = "/dashboard/login" + + try: + flow = google_oauth_flow() + flow.fetch_token(authorization_response=jingrow.request.url) + except AccessDeniedError: + _redirect_to_login_on_failed_authentication() + return None + except Exception as e: + log_error("Google Login failed", data=e) + _redirect_to_login_on_failed_authentication() + return None + + # authenticated + jingrow.cache().delete_value(cached_key) + + # id_info + token_request = Request() + google_credentials = get_google_credentials() + id_info = id_token.verify_oauth2_token( + id_token=flow.credentials._id_token, + request=token_request, + 
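+        # the audience must match our own OAuth client_id, so ID tokens minted for
+        # other applications fail verification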
audience=google_credentials["web"]["client_id"], + ) + + email = id_info.get("email") + + # phone (this may return nothing if info doesn't exists) + phone_number = "" + if flow.credentials.refresh_token: # returns only for the first authorization + credentials = Credentials.from_authorized_user_info(json.loads(flow.credentials.to_json())) + service = build("people", "v1", credentials=credentials) + person = service.people().get(resourceName="people/me", personFields="phoneNumbers").execute() + if person and person.get("phoneNumbers"): + phone_number = person.get("phoneNumbers")[0].get("value") + + team_name, team_enabled = jingrow.db.get_value("Team", {"user": email}, ["name", "enabled"]) or [0, 0] + + if team_name and not team_enabled: + jingrow.throw(_("Account {0} has been deactivated").format(email)) + return None + + # if team exitst and oauth is not using in saas login/signup flow + if team_name and not product_trial: + # login to existing account + jingrow.local.login_manager.login_as(email) + jingrow.local.response.type = "redirect" + jingrow.local.response.location = "/dashboard" + return None + + # create account request + account_request = jingrow.get_pg( + pagetype="Account Request", + email=email, + first_name=id_info.get("given_name"), + last_name=id_info.get("family_name"), + phone_number=phone_number, + role="Jcloud Admin", + oauth_signup=True, + product_trial=product_trial.name if product_trial else None, + ) + account_request.insert(ignore_permissions=True) + jingrow.db.commit() + + if team_name and product_trial: + jingrow.local.login_manager.login_as(email) + active_site = get_active_site_of_product_trial(product_trial.name, team_name) + jingrow.local.response.type = "redirect" + if active_site: + product_trial_request = jingrow.get_value( + "Product Trial Request", {"site": active_site, "product_trial": product}, ["name"], as_dict=1 + ) + jingrow.local.response.location = f"/dashboard/saas/{product_trial.name}/login-to-site?product_trial_request={product_trial_request.name}" + else: + jingrow.local.response.location = ( + f"/dashboard/saas/{product_trial.name}/setup?account_request={account_request.name}" + ) + else: + # create/setup account + jingrow.local.response.type = "redirect" + jingrow.local.response.location = account_request.get_verification_url() + return None + + +def invalid_login(): + jingrow.local.response["http_status_code"] = 401 + return "Invalid state parameter. The session timed out. 
Please try again or contact Jingrow support at https://jingrow.com/support" + + +def google_oauth_flow(): + google_credentials = get_google_credentials() + redirect_uri = google_credentials["web"].get("redirect_uris")[0] + redirect_uri = redirect_uri.replace("jcloud.api.oauth.callback", "jcloud.api.google.callback") + return Flow.from_client_config( + client_config=google_credentials, + scopes=[ + "https://www.googleapis.com/auth/userinfo.profile", + "openid", + "https://www.googleapis.com/auth/userinfo.email", + ], + redirect_uri=redirect_uri, + ) + + +def get_google_credentials(): + if jingrow.local.dev_server: + import os + + # flow.fetch_token doesn't work with http, so this is needed for local development + os.environ["OAUTHLIB_INSECURE_TRANSPORT"] = "1" + + config = jingrow.conf.get("google_credentials") + if not config: + jingrow.throw("google_credentials not found in site_config.json") + return config diff --git a/jcloud/api/log_browser.py b/jcloud/api/log_browser.py new file mode 100644 index 0000000..ec536d9 --- /dev/null +++ b/jcloud/api/log_browser.py @@ -0,0 +1,359 @@ +import datetime +import re +from enum import Enum + +import jingrow + + +class LOG_TYPE(Enum): + SITE = "site" + BENCH = "bench" + + +def bench_log_formatter(log_entries: list) -> list: + """ + Formats bench logs by extracting timestamp, level, and description. + + Args: + log_entries (list): A list of log entries, where each entry is a string. + + Returns: + list: A list of dictionaries, where each dictionary represents a formatted log entry. + """ + + if not log_entries: + return [] # Return empty list if no log entries + + formatted_logs = [] + for entry in log_entries: + date, time, level, *description_parts = entry.split(" ") + description = " ".join(description_parts) + + formatted_time = datetime.datetime.strptime(f"{date} {time}", "%Y-%m-%d %H:%M:%S,%f").strftime( + "%Y-%m-%d %H:%M:%S" + ) + + formatted_logs.append({"level": level, "time": formatted_time, "description": description}) + + return formatted_logs + + +def worker_log_formatter(log_entries: list) -> list: + """ + Formats worker logs by extracting timestamp, level, and description. + + Args: + log_entries (list): A list of log entries, where each entry is a string. + + Returns: + list: A list of dictionaries, where each dictionary represents a formatted log entry. + """ + + if not log_entries: + return [] # Return empty list if no log entries + + formatted_logs = [] + for entry in log_entries: + date, time, *description_parts = entry.split(" ") + description = " ".join(description_parts) + + try: + formatted_time = datetime.datetime.strptime(f"{date} {time}", "%Y-%m-%d %H:%M:%S,%f").strftime( + "%Y-%m-%d %H:%M:%S" + ) + except ValueError: + formatted_time = "" + + formatted_logs.append({"time": formatted_time, "description": description}) + + return formatted_logs + + +def jingrow_log_formatter(log_entries: list) -> list: + """ + Formats jingrow logs by extracting timestamp, level, and description. + + Args: + log_entries (list): A list of log entries, where each entry is a string. + + Returns: + list: A list of dictionaries, where each dictionary represents a formatted log entry. 
+ """ + + if not log_entries: + return [] # Return empty list if no log entries + + formatted_logs = [] + for entry in log_entries: + date, time, level, *description_parts = entry.split(" ") + description = " ".join(description_parts) + + formatted_time = datetime.datetime.strptime(f"{date} {time}", "%Y-%m-%d %H:%M:%S,%f").strftime( + "%Y-%m-%d %H:%M:%S" + ) + + formatted_logs.append({"level": level, "time": formatted_time, "description": description}) + + return formatted_logs + + +def database_log_formatter(log_entries: list) -> list: + """ + Formats database logs by extracting timestamp, level, and description. + + Args: + log_entries (list): A list of log entries, where each entry is a string. + + Returns: + list: A list of dictionaries, where each dictionary represents a formatted log entry. + """ + + if not log_entries: + return [] # Return empty list if no log entries + + formatted_logs = [] + for entry in log_entries: + date, time, level, *description_parts = entry.split(" ") + description = " ".join(description_parts) + + formatted_time = datetime.datetime.strptime(f"{date} {time}", "%Y-%m-%d %H:%M:%S,%f").strftime( + "%Y-%m-%d %H:%M:%S" + ) + + formatted_logs.append({"level": level, "time": formatted_time, "description": description}) + + return formatted_logs + + +def scheduler_log_formatter(log_entries: list) -> list: + """ + Formats scheduler logs by extracting timestamp, level, and description. + + Args: + log_entries (list): A list of log entries, where each entry is a string. + + Returns: + list: A list of dictionaries, where each dictionary represents a formatted log entry. + """ + + if not log_entries: + return [] # Return empty list if no log entries + + formatted_logs = [] + for entry in log_entries: + date, time, level, *description_parts = entry.split(" ") + description = " ".join(description_parts) + + # TODO: formatted time goes invalid + formatted_time = datetime.datetime.strptime(f"{date} {time}", "%Y-%m-%d %H:%M:%S,%f").strftime( + "%Y-%m-%d %H:%M:%S" + ) + + formatted_logs.append({"level": level, "time": formatted_time, "description": description}) + + return formatted_logs + + +def redis_log_formatter(log_entries: list) -> list: + """ + Formats redis logs by extracting timestamp, level, and description. + + Args: + log_entries (list): A list of log entries, where each entry is a string. + + Returns: + list: A list of dictionaries, where each dictionary represents a formatted log entry. + """ + + if not log_entries: + return [] # Return empty list if no log entries + + formatted_logs = [] + for entry in log_entries: + _, day, month, year, time, *description_parts = entry.split(" ") + description = " ".join(description_parts) + + formatted_time = datetime.datetime.strptime( + f"{year}-{month}-{day} {time}", "%Y-%b-%d %H:%M:%S.%f" + ).strftime("%Y-%m-%d %H:%M:%S") + + formatted_logs.append({"time": formatted_time, "description": description}) + + return formatted_logs + + +def web_error_log_formatter(log_entries: list) -> list: + """ + Formats web error logs by extracting timestamp, level, and description. + + Args: + log_entries (list): A list of log entries, where each entry is a string. + + Returns: + list: A list of dictionaries, where each dictionary represents a formatted log entry. 
+ """ + + if not log_entries: + return [] # Return empty list if no log entries + + # Regular expression pattern to match log entries specific to web.error logs + regex = r"\[(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2} [+-]\d{4})\] \[(\d+)\] \[(\w+)\] (.*)" + + formatted_logs = [] + for entry in log_entries: + match = re.match(regex, entry) + if not match: + formatted_logs.append({"description": entry}) # Unparsable entry + continue + + # Extract groups from the match + date, _, level, description_parts = match.groups() + description = "".join(description_parts) + + # Format date using strftime for cnsistency (no external libraries needed) + formatted_time = datetime.datetime.strptime(date, "%Y-%m-%d %H:%M:%S %z").strftime( + "%Y-%m-%d %H:%M:%S" + ) + + formatted_logs.append({"level": level, "time": formatted_time, "description": description}) + + return formatted_logs + + +def monitor_json_log_formatter(log_entries: list) -> list: + """ + Formats monitor.json logs by extracting timestamp, level, and description. + + Args: + log_entries (list): A list of log entries, where each entry is a string. + + Returns: + list: A list of dictionaries, where each dictionary represents a formatted log entry. + """ + + if not log_entries: + return [] # Return empty list if no log entries + + formatted_logs = [] + for entry in log_entries: + try: + timestamp_key = '"timestamp":"' + timestamp_start = entry.index(timestamp_key) + len(timestamp_key) + timestamp_end = entry.index('"', timestamp_start) + time = entry[timestamp_start:timestamp_end] + formatted_time = datetime.datetime.strptime(time, "%Y-%m-%d %H:%M:%S.%f").strftime( + "%Y-%m-%d %H:%M:%S" + ) + + formatted_logs.append({"time": formatted_time, "description": entry}) + except ValueError: + formatted_logs.append({"description": entry}) + + return formatted_logs + + +def ipython_log_formatter(log_entries: list) -> list: + """ + Formats ipython logs by extracting timestamp, level, and description. + + Args: + log_entries (list): A list of log entries, where each entry is a string. + + Returns: + list: A list of dictionaries, where each dictionary represents a formatted log entry. + """ + + if not log_entries: + return [] # Return empty list if no log entries + + formatted_logs = [] + for entry in log_entries: + date, time, level, *description_parts = entry.split(" ") + description = " ".join(description_parts) + + formatted_time = datetime.datetime.strptime(f"{date} {time}", "%Y-%m-%d %H:%M:%S,%f").strftime( + "%Y-%m-%d %H:%M:%S" + ) + + formatted_logs.append({"level": level, "time": formatted_time, "description": description}) + + return formatted_logs + + +def fallback_log_formatter(log_entries: list) -> list: + """ + Fallback formatter for logs that don't have a specific formatter. + + Args: + log_entries (list): A list of log entries, where each entry is string. + + Returns: + list: A list of dictionaries, where each dictionary represents a formatted log entry. 
+ """ + + formatted_logs = [] + for entry in log_entries: + formatted_logs.append({"description": entry}) + + return formatted_logs + + +FORMATTER_MAP = { + "bench": bench_log_formatter, + "worker": worker_log_formatter, + "jingrow": jingrow_log_formatter, + "ipython": ipython_log_formatter, + "database": database_log_formatter, + "redis-cache": redis_log_formatter, + "redis-queue": redis_log_formatter, + "scheduler": scheduler_log_formatter, + "web.error": web_error_log_formatter, + "worker.error": worker_log_formatter, + "monitor.json": monitor_json_log_formatter, +} + + +@jingrow.whitelist() +def get_log(log_type: LOG_TYPE, pg_name: str, log_name: str) -> list: + MULTILINE_LOGS = ("database.log", "scheduler.log", "worker", "ipython", "jingrow.log") + + log = get_raw_log(log_type, pg_name, log_name) + + log_entries = [] + for k, v in log.items(): + if k == log_name: + if v == "": + return [] + if log_name.startswith(MULTILINE_LOGS): + # split line if nextline starts with timestamp + log_entries = re.split(r"\n(?=\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})", v) + break + + log_entries = v.strip().splitlines() + break + + return format_log(log_name, log_entries) + + +def get_raw_log(log_type: LOG_TYPE, pg_name: str, log_name: str) -> list: + if log_type == LOG_TYPE.BENCH: + return jingrow.get_pg("Bench", pg_name).get_server_log(log_name) + if log_type == LOG_TYPE.SITE: + return jingrow.get_pg("Site", pg_name).get_server_log(log_name) + return jingrow.throw("Invalid log type") + + +def format_log(log_name: str, log_entries: list) -> list: + log_key = get_log_key(log_name) + if log_key in FORMATTER_MAP: + return FORMATTER_MAP[log_key](log_entries) + return fallback_log_formatter(log_entries) + + +def get_log_key(log_name: str) -> str: + # if the log file has a number at the end, it's a rotated log + # and we don't need to consider the number for formatter mapping + if log_name[-1].isdigit(): + log_name = log_name.rsplit(".", 1)[0] + + return log_name.rsplit(".", 1)[0] diff --git a/jcloud/api/marketplace.py b/jcloud/api/marketplace.py new file mode 100644 index 0000000..f104582 --- /dev/null +++ b/jcloud/api/marketplace.py @@ -0,0 +1,1421 @@ +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt +from __future__ import annotations + +import json +from typing import TYPE_CHECKING + +import jingrow +from jingrow.core.utils import find + +from jcloud.api.bench import options +from jcloud.api.site import ( + is_marketplace_app_source, + is_prepaid_marketplace_app, + protected, +) +from jcloud.jcloud.pagetype.app.app import new_app as new_app_pg +from jcloud.jcloud.pagetype.marketplace_app.marketplace_app import ( + MarketplaceApp, + get_plans_for_app, + get_total_installs_by_app, +) +from jcloud.utils import get_app_tag, get_current_team, get_last_pg, unique +from jcloud.utils.billing import get_jingrow_io_connection + +if TYPE_CHECKING: + from jcloud.marketplace.pagetype.marketplace_app_plan.marketplace_app_plan import MarketplaceAppPlan + from jcloud.jcloud.pagetype.app_release.app_release import AppRelease + from jcloud.jcloud.pagetype.app_source.app_source import AppSource + + +@jingrow.whitelist() +def get(app): + record = jingrow.get_pg("Marketplace App", app) + return { + "name": record.name, + "title": record.title, + "description": record.description, + "image": record.image, + "show_for_new_site": record.show_for_site_creation, + } + + +@jingrow.whitelist() +def get_install_app_options(marketplace_app: str) -> dict: + """Get options for installing a marketplace 
app""" + + restricted_site_plan_release_group = jingrow.get_all( + "Site Plan Release Group", + fields=["parent", "release_group"], + ignore_permissions=True, + ) + restricted_site_plans = [x.parent for x in restricted_site_plan_release_group] + restricted_release_groups = [x.release_group for x in restricted_site_plan_release_group] + + private_site_plan = jingrow.db.get_value( + "Site Plan", + {"private_benches": 1, "document_type": "Site", "price_cny": ["!=", 0]}, + order_by="price_cny asc", + ) + + public_site_plan = jingrow.db.get_value( + "Site Plan", + { + "private_benches": 0, + "document_type": "Site", + "price_cny": ["!=", 0], + "name": ["not in", restricted_site_plans], + }, + order_by="price_cny asc", + ) + + clusters = private_groups = [] + + latest_stable_version = jingrow.get_all( + "Jingrow Version", "max(name) as latest_version", pluck="latest_version" + )[0] + latest_public_group = jingrow.db.get_value( + "Release Group", + filters={ + "public": 1, + "version": latest_stable_version, + "name": ("not in", restricted_release_groups), + }, + ) + proxy_servers = jingrow.db.get_all( + "Proxy Server", + {"is_primary": 1}, + ["name", "cluster"], + ) + + clusters = jingrow.db.get_all( + "Cluster", + filters={"public": 1}, + fields=["name", "title", "image", "beta"], + ) + + for cluster in clusters: + cluster.proxy_server = find(proxy_servers, lambda x: x.cluster == cluster.name) + + ReleasGroup = jingrow.qb.PageType("Release Group") + ReleasGroupApp = jingrow.qb.PageType("Release Group App") + private_groups = ( + jingrow.qb.from_(ReleasGroup) + .left_join(ReleasGroupApp) + .on(ReleasGroup.name == ReleasGroupApp.parent) + .select(ReleasGroup.name, ReleasGroup.title) + .where(ReleasGroup.enabled == 1) + .where(ReleasGroup.team == get_current_team()) + .where(ReleasGroup.public == 0) + .where(ReleasGroupApp.app == marketplace_app) + .run(as_dict=True) + ) + + for group in private_groups: + benches = jingrow.db.get_all( + "Bench", + filters={ + "team": get_current_team(), + "status": "Active", + "group": group.name, + }, + fields=["name", "cluster"], + order_by="creation desc", + limit=1, + ) + + group.clusters = jingrow.db.get_all( + "Cluster", + filters={"public": 1, "name": ("in", [bench.cluster for bench in benches])}, + fields=["name", "title", "image", "beta"], + ) + + for cluster in group.clusters: + cluster["bench"] = jingrow.db.get_value( + "Bench", + filters={ + "cluster": cluster["name"], + "status": "Active", + "group": latest_public_group, + }, + order_by="creation desc", + ) + + cluster.proxy_server = find(proxy_servers, lambda x: x.cluster == cluster.name) + + app_plans = get_plans_for_app(marketplace_app) + + if not [plan for plan in app_plans if plan["price_cny"] > 0 or plan["price_usd"] > 0]: + app_plans = [] + + return { + "plans": app_plans, + "private_site_plan": private_site_plan, + "public_site_plan": public_site_plan, + "private_groups": private_groups, + "clusters": clusters, + "domain": jingrow.db.get_single_value("Jcloud Settings", "domain"), + } + + +def site_should_be_created_on_public_bench(apps: list[dict]) -> bool: + """Check if site should be created on public bench""" + + public_apps = jingrow.db.get_all("Marketplace App", {"jingrow_approved": 1}, pluck="name") + return all(app["app"] in public_apps or app["app"] == "jingrow" for app in apps) + + +def create_site_on_public_bench( + subdomain: str, + apps: list[dict], + cluster: str, + site_plan: str, + latest_stable_version: str, + group: str | None = None, + trial: bool = False, +) -> dict: + 
"""Create site on public bench""" + + app_plans = {app["app"]: app["plan"] for app in apps if hasattr(app, "plan") and app["plan"]} + + if not group: + restricted_release_groups = jingrow.get_all( + "Site Plan Release Group", + fields=["release_group"], + pluck="release_group", + ignore_permissions=True, + ) + + ReleaseGroup = jingrow.qb.PageType("Release Group") + ReleaseGroupApp = jingrow.qb.PageType("Release Group App") + if group := ( + jingrow.qb.from_(ReleaseGroup) + .join(ReleaseGroupApp) + .on(ReleaseGroup.name == ReleaseGroupApp.parent) + .select(ReleaseGroup.name) + .distinct() + .where(ReleaseGroupApp.app.isin([app["app"] for app in apps if app["app"] != "jingrow"])) + .where(ReleaseGroup.version == latest_stable_version) + .where(ReleaseGroup.public == 1) + .where(ReleaseGroup.enabled == 1) + .where(ReleaseGroup.name.notin(restricted_release_groups or [""])) + .orderby(ReleaseGroup.creation, order=jingrow.qb.asc) + .run(as_dict=True) + ): + group = group[0].name + else: + jingrow.throw("No release group found for the selected apps") + + site = jingrow.get_pg( + { + "pagetype": "Site", + "subdomain": subdomain, + "subscription_plan": site_plan, + "apps": [{"app": app["app"]} for app in apps], + "cluster": cluster, + "group": group, + "domain": jingrow.db.get_single_value("Jcloud Settings", "domain"), + "team": get_current_team(), + "app_plans": app_plans, + } + ) + if trial and eligible_for_trial(): + site.trial_end_date = jingrow.utils.add_days(None, 14) + + site.insert() + + return site + + +def eligible_for_trial(): + team = get_current_team() + return not bool(jingrow.db.count("Site", {"team": team}) > 0) + + +def create_site_on_private_bench( + subdomain: str, + apps: list[dict], + cluster: str, +) -> dict: + """Create site on private bench using Site Group Deploy dt""" + + app_names = [app["app"] for app in apps] + app_names.remove("jingrow") + + all_latest_stable_version_supported = jingrow.db.get_all( + "Marketplace App Version", + {"parent": ("in", app_names)}, + pluck="version", + order_by="version desc", + ) + + if not all_latest_stable_version_supported: + jingrow.throw("No stable version found for the selected app(s)") + + latest_stable_version_supported = sorted(all_latest_stable_version_supported, reverse=True)[0] + + AppSource = jingrow.qb.PageType("App Source") + AppSourceVersion = jingrow.qb.PageType("App Source Version") + jingrow_app_source = ( + jingrow.qb.from_(AppSource) + .left_join(AppSourceVersion) + .on(AppSource.name == AppSourceVersion.parent) + .select(AppSource.name.as_("source"), AppSource.app, AppSourceVersion.version) + .where(AppSource.app == "jingrow") + .where(AppSource.public == 1) + .where(AppSourceVersion.version == latest_stable_version_supported) + .run(as_dict=True) + ) + + MarketplaceApp = jingrow.qb.PageType("Marketplace App") + MarketplaceAppVersion = jingrow.qb.PageType("Marketplace App Version") + app_sources = ( + jingrow.qb.from_(MarketplaceApp) + .left_join(MarketplaceAppVersion) + .on(MarketplaceApp.name == MarketplaceAppVersion.parent) + .select( + MarketplaceApp.name.as_("app"), + MarketplaceAppVersion.version, + MarketplaceAppVersion.source, + ) + .where(MarketplaceApp.name.isin(app_names)) + .orderby(MarketplaceAppVersion.version, order=jingrow.qb.desc) + .run(as_dict=True) + ) + + apps_with_sources = [] + for app in apps: + app_source = find(jingrow_app_source + app_sources, lambda x: x.app == app["app"]) + if not app_source: + jingrow.throw(f"Source not found for app {app['app']}") + + apps_with_sources.append( + { + 
"app": app["app"], + "source": app_source.source, + "plan": app["plan"] if hasattr(app, "plan") and app["plan"] else None, + } + ) + + site_group_deploy = jingrow.get_pg( + { + "pagetype": "Site Group Deploy", + "subdomain": subdomain, + "apps": apps_with_sources, + "cluster": cluster, + "version": latest_stable_version_supported, + "team": get_current_team(), + } + ).insert() + + return site_group_deploy # noqa: RET504 + + +@jingrow.whitelist() +def create_site_for_app( + subdomain: str, + apps: list[dict], + cluster: str, + site_plan: str, + group: str | None = None, + trial: bool = False, +): + """Create a site for a marketplace app""" + + latest_stable_version = jingrow.db.get_value( + "Jingrow Version", {"status": "Stable"}, "name", order_by="number desc" + ) + + if site_should_be_created_on_public_bench(apps): + return create_site_on_public_bench( + subdomain, apps, cluster, site_plan, latest_stable_version, group, trial + ) + + return create_site_on_private_bench(subdomain, apps, cluster) + + +@jingrow.whitelist() +def options_for_quick_install(marketplace_app: str): + app_name, title, jingrow_approved = jingrow.db.get_value( + "Marketplace App", marketplace_app, ["app", "title", "jingrow_approved"] + ) + candidate_groups = get_candidate_release_groups(marketplace_app, app_name) + candidate_sites = get_candidate_sites(app_name) + plans = get_plans_for_app(marketplace_app) + + return { + "release_groups": candidate_groups, + "sites": candidate_sites, + "app_name": app_name, + "title": title, + "approved": bool(jingrow_approved), + "has_plans_available": len(plans) > 0, + } + + +def get_candidate_release_groups(marketplace_app: str, app_name: str) -> list[dict]: + """ + List of release groups where the given marketplace app is NOT installed but CAN BE installed. + + returns list of dicts of the form: + { + 'name': 'bench-1096', + 'title': 'My Private Bench', + 'version': 'Version 13', + 'source': 'SRC-posawesome-001' + } + """ + team = get_current_team() + group = jingrow.qb.PageType("Release Group") + group_app = jingrow.qb.PageType("Release Group App") + marketplace_app_version = jingrow.qb.PageType("Marketplace App Version") + + query = ( + jingrow.qb.from_(group) + .left_join(marketplace_app_version) + .on(marketplace_app_version.version == group.version) + .left_join(group_app) + .on((group.name == group_app.parent) & (group_app.app == app_name)) + .select(group.name, group.title, group.version, marketplace_app_version.source) + .where( + (group.enabled == 1) + & (group.team == team) + & (marketplace_app_version.parent == marketplace_app) + & group_app.app.isnull() # not present in group + ) + ) + + return query.run(as_dict=True) + + +def get_candidate_sites(app_name: str) -> list[str]: + """ + List of Active sites on which the given app is NOT installed but CAN BE installed. 
+ """ + team = get_current_team() + site = jingrow.qb.PageType("Site") + site_app = jingrow.qb.PageType("Site App") + bench = jingrow.qb.PageType("Bench") + bench_app = jingrow.qb.PageType("Bench App") + + sites = ( + jingrow.qb.from_(site) + .left_join(site_app) + .on((site_app.parent == site.name) & (site_app.app == app_name)) + .left_join(bench) + .on(bench.name == site.bench) + .right_join(bench_app) # must be installed on bench (corresponding bench app exists) + .on((bench.name == bench_app.parent) & (bench_app.app == app_name)) + .select(site.name) + .where( + (site.status == "Active") & (site.team == team) & site_app.app.isnull() + ) # not installed on site + ) + + return sites.run(pluck="name") + + +@jingrow.whitelist() +def become_publisher(): + """Turn on marketplace developer mode for current team""" + current_team = get_current_team(get_pg=True) + current_team.is_developer = True + current_team.save() + + +@jingrow.whitelist() +def jingrow_versions(): + """Return a list of Jingrow Version names""" + return jingrow.get_all("Jingrow Version", pluck="name", order_by="name desc") + + +@jingrow.whitelist() +def get_apps() -> list[dict]: + """Return list of apps developed by the current team""" + team = get_current_team() + apps = jingrow.get_all( + "Marketplace App", + fields=["name", "title", "image", "app", "status", "description"], + filters={"team": team}, + order_by="title", + ) + + return apps # noqa: RET504 + + +@jingrow.whitelist() +@protected("Marketplace App") +def get_app(name: str) -> dict: + """Return the `Marketplace App` document with name""" + app = jingrow.get_pg("Marketplace App", name).as_dict() + + # Attach sources information to marketplace sources + for source in app.sources: + source.source_information = jingrow.get_pg("App Source", source.source).as_dict() + + return app + + +@jingrow.whitelist() +@protected("Marketplace App") +def deploy_information(name: str): + """Return the deploy information for marketplace app `app`""" + marketplace_app: MarketplaceApp = jingrow.get_pg("Marketplace App", name) + return marketplace_app.get_deploy_information() + + +@jingrow.whitelist() +def profile_image_url(app: str) -> str: + return jingrow.db.get_value("Marketplace App", app, "image") + + +@jingrow.whitelist() +def update_app_image() -> str: + """Handles App Image Upload""" + file_content = jingrow.local.uploaded_file + + validate_app_image_dimensions(file_content) + + file_name = jingrow.local.uploaded_filename + if file_name.split(".")[-1] in ["png", "jpg", "jpeg"]: + file_content = convert_to_webp(file_content) + file_name = f"{'.'.join(file_name.split('.')[:-1])}.webp" + + app_name = jingrow.form_dict.docname + _file = jingrow.get_pg( + { + "pagetype": "File", + "attached_to_pagetype": "Marketplace App", + "attached_to_name": app_name, + "attached_to_field": "image", + "folder": "Home/Attachments", + "file_name": file_name, + "is_private": 0, + "content": file_content, + } + ) + _file.save(ignore_permissions=True) + file_url = _file.file_url + jingrow.db.set_value("Marketplace App", app_name, "image", file_url) + + return file_url + + +def convert_to_webp(file_content: bytes) -> bytes: + from io import BytesIO + + from PIL import Image + + image_bytes = BytesIO() + image = Image.open(BytesIO(file_content)) + image = image.convert("RGB") + + image.save(image_bytes, "webp") + + return image_bytes.getvalue() + + +@jingrow.whitelist() +def add_app_screenshot() -> str: + """Handles App Image Upload""" + file_content = jingrow.local.uploaded_file + app_name = 
jingrow.form_dict.docname + app_pg = jingrow.get_pg("Marketplace App", app_name) + + file_name = jingrow.local.uploaded_filename + if file_name.split(".")[-1] in ["png", "jpg", "jpeg"]: + file_content = convert_to_webp(file_content) + file_name = f"{'.'.join(file_name.split('.')[:-1])}.webp" + + _file = jingrow.get_pg( + { + "pagetype": "File", + "attached_to_field": "image", + "folder": "Home/Attachments", + "file_name": file_name, + "is_private": 0, + "content": file_content, + } + ) + _file.save(ignore_permissions=True) + file_url = _file.file_url + + app_pg.append( + "screenshots", + { + "image": file_url, + }, + ) + app_pg.save(ignore_permissions=True) + + return file_url + + +@protected("Marketplace App") +@jingrow.whitelist() +def remove_app_screenshot(name, file): + app_pg = jingrow.get_pg("Marketplace App", name) + + for i, sc in enumerate(app_pg.screenshots): + if sc.image == file: + jingrow.delete_pg("File", file) + app_pg.screenshots.pop(i) + app_pg.save(ignore_permissions=True) + + +def validate_app_image_dimensions(file_content): + """Throws if image is not a square image, atleast 300x300px in size""" + from io import BytesIO + + from PIL import Image + + im = Image.open(BytesIO(file_content)) + im_width, im_height = im.size + if im_width != im_height or im_height < 300: + jingrow.throw("Logo must be a square image atleast 300x300px in size") + + +@jingrow.whitelist() +def update_app_title(name: str, title: str) -> MarketplaceApp: + """Update `title` and `category`""" + app: MarketplaceApp = jingrow.get_pg("Marketplace App", name) + app.title = title + app.save(ignore_permissions=True) + + return app + + +@jingrow.whitelist() +def update_app_links(name: str, links: dict) -> None: + """Update links related to app""" + app: MarketplaceApp = jingrow.get_pg("Marketplace App", name) + app.update(links) + app.save(ignore_permissions=True) + + +@jingrow.whitelist() +def update_app_summary(name: str, summary: str) -> None: + """Update the `description` of Marketplace App `name`""" + app: MarketplaceApp = jingrow.get_pg("Marketplace App", name) + app.description = summary + app.save(ignore_permissions=True) + + +@jingrow.whitelist() +def update_app_description(name: str, description: str) -> None: + """Update the `long_description` of Marketplace App `name`""" + app: MarketplaceApp = jingrow.get_pg("Marketplace App", name) + app.long_description = description + app.save(ignore_permissions=True) + + +@jingrow.whitelist() +def releases(filters=None, order_by=None, limit_start=None, limit_page_length=None) -> list[dict]: + """Return list of App Releases for this `app` and `source` in order of creation time""" + + app_releases = jingrow.get_all( + "App Release", + filters=filters, + fields="*", + order_by=order_by or "creation desc", + start=limit_start, + limit=limit_page_length, + ) + + for release in app_releases: + # Attach rejection feedback (if any) + try: + feedback = reason_for_rejection(release.name) + except jingrow.ValidationError: + feedback = "" + release.reason_for_rejection = feedback + + # Attach release tag + app_source = jingrow.get_pg("App Source", release.source) + release.tag = get_app_tag(app_source.repository, app_source.repository_owner, release.hash) + + return app_releases + + +@jingrow.whitelist() +def get_app_source(name: str) -> AppSource: + """Return `App Source` document having `name`""" + return jingrow.get_pg("App Source", name) + + +@jingrow.whitelist() +def latest_approved_release(source: None | str) -> AppRelease: + """Return the latest app release with 
`approved` status""" + return get_last_pg("App Release", {"source": source, "status": "Approved"}) + + +@jingrow.whitelist() +@protected("Marketplace App") +def create_approval_request(name, app_release: str): + """Create a new Approval Request for given `app_release`""" + jingrow.get_pg("Marketplace App", name).create_approval_request(app_release) + + +@jingrow.whitelist() +def cancel_approval_request(app_release: str): + """Cancel Approval Request for given `app_release`""" + get_latest_approval_request(app_release).cancel() + + +@jingrow.whitelist() +def reason_for_rejection(app_release: str) -> str: + """Return feedback given on a `Rejected` approval request""" + approval_request = get_latest_approval_request(app_release) + app_release = jingrow.get_pg("App Release", app_release) + + if app_release.status != "Rejected": + jingrow.throw("The request for the given app release was not rejected!") + + return approval_request.reason_for_rejection + + +def get_latest_approval_request(app_release: str): + """Return Approval request for the given `app_release`, throws if not found""" + approval_requests = jingrow.get_all( + "App Release Approval Request", + filters={"app_release": app_release}, + pluck="name", + order_by="creation desc", + ) + + if len(approval_requests) == 0: + jingrow.throw("No approval request exists for the given app release") + + approval_request = jingrow.get_pg("App Release Approval Request", approval_requests[0]) + + return approval_request # noqa: RET504 + + +@jingrow.whitelist() +def options_for_marketplace_app() -> dict[str, dict]: # noqa: C901 + # Get versions (along with apps and associated sources) + # which belong to the current team + versions = options(only_by_current_team=True)["versions"] + + filtered_apps = [] + + for version in versions: + # Remove Jingrow Framework + version["apps"] = [app for app in version["apps"] if app["name"] != "jingrow"] + + for app in version["apps"]: + if not is_on_marketplace(app["name"]): + for source in app["sources"]: + source["version"] = version["name"] + filtered_apps.append(app) + + else: + marketplace_app = jingrow.get_pg("Marketplace App", app["name"]) + marketplace_versions = [v.version for v in marketplace_app.sources] + + if version["name"] not in marketplace_versions: + for source in app["sources"]: + source["version"] = version["name"] + filtered_apps.append(app) + + aggregated_sources = {} + + for app in filtered_apps: + aggregated_sources.setdefault(app["name"], []).extend(app["sources"]) + # Remove duplicate sources + aggregated_sources[app["name"]] = unique(aggregated_sources[app["name"]], lambda x: x["name"]) + + marketplace_options = [] + for app_name, sources in aggregated_sources.items(): + app = find(filtered_apps, lambda x: x["name"] == app_name) + marketplace_options.append( + { + "name": app_name, + "sources": sources, + "source": app["source"], + "title": app["title"], + } + ) + + return marketplace_options + + +@jingrow.whitelist() +def get_marketplace_apps_for_onboarding() -> list[dict]: + apps = jingrow.get_all( + "Marketplace App", + fields=["name", "title", "image", "description"], + filters={"show_for_site_creation": True, "status": "Published"}, + ) + total_installs_by_app = get_total_installs_by_app() + for app in apps: + app["total_installs"] = total_installs_by_app.get(app["name"], 0) + # sort by total installs + return sorted(apps, key=lambda x: x["total_installs"], reverse=True) + + +def is_on_marketplace(app: str) -> bool: + """Returns `True` if this `app` is on marketplace else `False`""" 
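+ # Note (an assumption based on the usual jingrow/frappe-style API): jingrow.db.exists
+ # returns the matching document name (truthy) or None rather than a strict boolean,
+ # so callers such as add_app() and options_for_marketplace_app() only use the result
+ # in a boolean context; wrapping it in bool() would make the return value match the
+ # docstring literally.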
+ return jingrow.db.exists("Marketplace App", app) + + +@jingrow.whitelist() +def new_app(app: dict): + name = app["name"] + team = get_current_team() + + if jingrow.db.exists("App", name): + app_pg = jingrow.get_pg("App", name) + else: + app_pg = new_app_pg(name, app["title"]) + + source = app_pg.add_source( + app["version"], + app["repository_url"], + app["branch"], + team, + app["github_installation_id"], + ) + + return add_app(source.name, app_pg.name) + + +@jingrow.whitelist() +def add_app(source: str, app: str): + if not is_on_marketplace(app): + supported_versions = jingrow.get_all("App Source Version", filters={"parent": source}, pluck="version") + marketplace_app = jingrow.get_pg( + pagetype="Marketplace App", + app=app, + team=get_current_team(), + description="Please add a short description about your app here...", + sources=[{"version": v, "source": source} for v in supported_versions], + ).insert() + + else: + marketplace_app = jingrow.get_pg("Marketplace App", app) + + if marketplace_app.team != get_current_team(): + jingrow.throw(f"The app {marketplace_app.name} already exists and is owned by some other team.") + + # Versions on marketplace + versions = [v.version for v in marketplace_app.sources] + + app_source = jingrow.get_pg("App Source", source) + # Versions on this app `source` + app_source_versions = [v.version for v in app_source.versions] + + version_difference = set(app_source_versions) - set(versions) + if version_difference: + # App source contains version not yet in marketplace + for version in version_difference: + marketplace_app.append("sources", {"source": source, "version": version}) + marketplace_app.save(ignore_permissions=True) + else: + jingrow.throw("A marketplace app already exists with the given versions!") + + return marketplace_app.name + + +@jingrow.whitelist() +@protected("Marketplace App") +def analytics(name: str): + marketplace_app_pg: MarketplaceApp = jingrow.get_pg("Marketplace App", name) + return marketplace_app_pg.get_analytics() + + +@jingrow.whitelist() +def get_promotional_banners() -> list: + promotionalBanner = jingrow.qb.PageType("Marketplace Promotional Banner") + marketplaceApp = jingrow.qb.PageType("Marketplace App") + + promotions = ( + jingrow.qb.from_(promotionalBanner) + .left_join(marketplaceApp) + .on(promotionalBanner.marketplace_app == marketplaceApp.name) + .select( + promotionalBanner.alert_message, + promotionalBanner.alert_title, + promotionalBanner.marketplace_app.as_("app"), + promotionalBanner.name, + marketplaceApp.image, + marketplaceApp.title, + marketplaceApp.description, + ) + .where(promotionalBanner.is_active == True) # noqa + .run(as_dict=True) + ) + + return promotions # noqa: RET504 + + +# PAID APPS APIs +# (might refactor later to a separate file +# like 'api/marketplace/billing.py') + + +@jingrow.whitelist() +def get_marketplace_subscriptions_for_site(site: str): + subscriptions = jingrow.db.get_all( + "Subscription", + filters={"site": site, "enabled": 1, "document_type": "Marketplace App"}, + fields=["name", "document_name as app", "enabled", "plan"], + ) + + for subscription in subscriptions: + marketplace_app_info = jingrow.db.get_value( + "Marketplace App", subscription.app, ["title", "image"], as_dict=True + ) + + subscription.app_title = marketplace_app_info.title + subscription.app_image = marketplace_app_info.image + subscription.plan_info = jingrow.db.get_value( + "Marketplace App Plan", + subscription.plan, + ["price_usd", "price_cny"], + as_dict=True, + ) + subscription.is_free = 
jingrow.db.get_value( + "Marketplace App Plan", subscription.marketplace_app_plan, "is_free" + ) + subscription.billing_type = is_prepaid_marketplace_app(subscription.app) + + return subscriptions + + +@jingrow.whitelist() +def get_app_plans(app: str, include_disabled=True): + return get_plans_for_app(app, include_disabled=include_disabled) + + +@jingrow.whitelist() +def get_app_info(app: str): + return jingrow.db.get_value("Marketplace App", app, ["name", "title", "image", "team"], as_dict=True) + + +@jingrow.whitelist() +def get_apps_with_plans(apps, release_group: str): + if isinstance(apps, str): + apps = json.loads(apps) + + apps_with_plans = [] + + # Make sure it is a marketplace app + m_apps = jingrow.db.get_all( + "Marketplace App", + filters={"app": ("in", apps)}, + fields=["name", "title", "image"], + ) + + jingrow_version = jingrow.db.get_value("Release Group", release_group, "version") + for app in m_apps: + app_source = jingrow.db.get_value( + "Release Group App", {"parent": release_group, "app": app.name}, "source" + ) + if is_marketplace_app_source(app_source): + plans = get_plans_for_app(app.name, jingrow_version) + else: + plans = [] + + if len(plans) > 0: + apps_with_plans.append(app) + + return apps_with_plans + + +@jingrow.whitelist() +def change_app_plan(subscription, new_plan): + is_free = jingrow.db.get_value("Marketplace App Plan", new_plan, "price_usd") <= 0 + if not is_free: + team = get_current_team(get_pg=True) + if not team.can_install_paid_apps(): + jingrow.throw( + "You cannot upgrade to paid plan on Free Credits. Please buy credits before trying to upgrade plan." + ) + + subscription = jingrow.get_pg("Subscription", subscription) + subscription.enabled = 1 + subscription.plan = new_plan + subscription.save(ignore_permissions=True) + + +@jingrow.whitelist() +def get_publisher_profile_info(): + publisher_profile_info = {} + + team = get_current_team() + + publisher_profile_name = jingrow.db.exists("Marketplace Publisher Profile", {"team": team}) + + if publisher_profile_name: + publisher_profile_info["profile_created"] = True + publisher_profile_info["profile_info"] = jingrow.get_pg( + "Marketplace Publisher Profile", publisher_profile_name + ) + + return publisher_profile_info + + +@jingrow.whitelist() +def update_publisher_profile(profile_data=None): + """Update if exists, otherwise create""" + team = get_current_team() + + publisher_profile_name = jingrow.db.exists("Marketplace Publisher Profile", {"team": team}) + + if publisher_profile_name: + profile_pg = jingrow.get_pg("Marketplace Publisher Profile", publisher_profile_name, for_update=True) + profile_pg.update(profile_data or {}) + profile_pg.save(ignore_permissions=True) + else: + profile_pg = jingrow.get_pg({"pagetype": "Marketplace Publisher Profile"}) + profile_pg.team = team + profile_pg.update(profile_data or {}) + profile_pg.insert(ignore_permissions=True) + + +@jingrow.whitelist() +def submit_user_review(title, rating, app, review): + return jingrow.get_pg( + { + "pagetype": "App User Review", + "title": title, + "rating": int(rating) / 5, + "app": app, + "review": review, + "reviewer": jingrow.session.user, + } + ).insert(ignore_permissions=True) + + +@jingrow.whitelist() +def submit_developer_reply(review, reply): + return jingrow.get_pg( + { + "pagetype": "Developer Review Reply", + "review": review, + "description": reply, + "developer": jingrow.session.user, + } + ).insert(ignore_permissions=True) + + +@jingrow.whitelist() +def get_subscriptions_list(marketplace_app: str) -> list: + 
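# Rough shape of one row produced by the query assembled below (values are illustrative;
+ # the keys come from the .select() clause, and "active_days" is the count of Usage Record
+ # rows grouped per subscription):
+ #     {
+ #         "active_days": 30,
+ #         "site": "site-name.example.com",
+ #         "user_contact": "owner@example.com",
+ #         "app_plan": "PLAN-0001",
+ #         "price_usd": 10,
+ #         "price_cny": 70,
+ #         "enabled": 1,
+ #     } + 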
app_sub = jingrow.qb.PageType("Subscription") + app_plan = jingrow.qb.PageType("Marketplace App Plan") + site = jingrow.qb.PageType("Site") + usage_record = jingrow.qb.PageType("Usage Record") + team = jingrow.qb.PageType("Team") + + conditions = app_plan.price_usd > 0 + conditions = conditions & (app_sub.document_name == marketplace_app) + + query = ( + jingrow.qb.from_(app_sub) + .left_join(team) + .on(app_sub.team == team.name) + .join(app_plan) + .on(app_sub.plan == app_plan.name) + .join(site) + .on(site.name == app_sub.site) + .join(usage_record) + .on(usage_record.subscription == app_sub.name) + .where(conditions) + .groupby(usage_record.subscription) + .select( + jingrow.query_builder.functions.Count("*").as_("active_days"), + app_sub.site, + team.user.as_("user_contact"), + app_sub.plan.as_("app_plan"), + app_plan.price_usd.as_("price_usd"), + app_plan.price_cny.as_("price_cny"), + app_sub.enabled, + ) + .orderby(app_sub.enabled) + .orderby(app_sub.creation, order=jingrow.qb.desc) + ) + + result = query.run(as_dict=True) + + return result # noqa: RET504 + + +@jingrow.whitelist() +def create_app_plan(marketplace_app: str, plan_data: dict): + app_plan_pg = jingrow.get_pg( + { + "pagetype": "Marketplace App Plan", + "app": marketplace_app, + "title": plan_data.get("title"), + "price_cny": plan_data.get("price_cny"), + "price_usd": plan_data.get("price_usd"), + } + ) + + feature_list = plan_data.get("features") + reset_features_for_plan(app_plan_pg, feature_list) + return app_plan_pg.insert(ignore_permissions=True) + + +@jingrow.whitelist() +def update_app_plan(app_plan_name: str, updated_plan_data: dict): + if not updated_plan_data.get("title"): + jingrow.throw("Plan title is required") + + app_plan_pg = jingrow.get_pg("Marketplace App Plan", app_plan_name) + + no_of_active_subscriptions = jingrow.db.count( + "Subscription", + { + "document_type": "Marketplace App", + "document_name": app_plan_pg.app, + "plan": app_plan_pg.name, + "enabled": True, + }, + ) + + if ( + updated_plan_data["price_cny"] != app_plan_pg.price_cny + or updated_plan_data["price_usd"] != app_plan_pg.price_usd + ) and no_of_active_subscriptions > 0: + # Someone is on this plan, don't change price for the plan, + # instead create and link a new plan + # TODO: Later we have to figure out a way for plan changes + jingrow.throw("Plan is already in use, cannot update the plan. 
Please contact support to proceed.") + + app_plan_pg.update( + { + "price_cny": updated_plan_data.get("price_cny"), + "price_usd": updated_plan_data.get("price_usd"), + "title": updated_plan_data.get("title", app_plan_pg.title), + } + ) + app_plan_pg.save(ignore_permissions=True) + + feature_list = updated_plan_data.get("features", []) + reset_features_for_plan(app_plan_pg, feature_list, save=False) + app_plan_pg.enabled = updated_plan_data.get("enabled", True) + app_plan_pg.save(ignore_permissions=True) + + +def reset_features_for_plan(app_plan_pg: MarketplaceAppPlan, feature_list: list[str], save=False): + # Clear the already existing features + app_plan_pg.features = [] + for feature in feature_list: + if not feature: + jingrow.throw("Feature cannot be empty string") + app_plan_pg.append("features", {"description": feature}) + + if save: + app_plan_pg.save(ignore_permissions=True) + + +@jingrow.whitelist() +def get_payouts_list() -> list[dict]: + team = get_current_team() + payouts = jingrow.get_all( + "Payout Order", + filters={"recipient": team}, + fields=[ + "name", + "status", + "period_end", + "mode_of_payment", + "net_total_cny", + "net_total_usd", + ], + order_by="period_end desc", + ) + + return payouts # noqa: RET504 + + +@jingrow.whitelist() +def get_payout_details(name: str) -> dict: + order_items = jingrow.get_all( + "Payout Order Item", + filters={"parent": name}, + fields=[ + "name", + "document_name", + "site", + "rate", + "plan", + "total_amount", + "currency", + "net_amount", + "gateway_fee", + "quantity", + "commission", + ], + order_by="idx", + ) + + payout_order = jingrow.db.get_value( + "Payout Order", + name, + ["status", "due_date", "mode_of_payment", "net_total_cny", "net_total_usd"], + as_dict=True, + ) + + grouped_items = {"usd_items": [], "cny_items": [], **payout_order} + for item in order_items: + if item.currency == "CNY": + grouped_items["cny_items"].append(item) + else: + grouped_items["usd_items"].append(item) + + return grouped_items + + +def get_discount_percent(plan, discount=0.0): + team = get_current_team(True) + partner_discount_percent = { + "Gold": 50.0, + "Silver": 40.0, + "Bronze": 30.0, + } + + if team.jerp_partner and jingrow.get_value("Marketplace App Plan", plan, "partner_discount"): + client = get_jingrow_io_connection() + response = client.session.post( + f"{client.url}/api/method/partner_relationship_management.api.get_partner_type", + data={"email": team.partner_email}, + headers=client.headers, + ) + if response.ok: + res = response.json() + partner_type = res.get("message") + if partner_type is not None: + discount = partner_discount_percent.get(partner_type) or discount + + return discount + + +@jingrow.whitelist(allow_guest=True) +def login_via_token(token: str, team: str, site: str): + if not token or not isinstance(token, str): + jingrow.throw("Invalid Token") + + team = team.replace(" ", "+") + token_exists = jingrow.db.exists( + "Saas Remote Login", + { + "team": team, + "token": token, + "status": "Attempted", + "expires_on": (">", jingrow.utils.now()), + }, + ) + + if token_exists: + pg = jingrow.get_pg("Saas Remote Login", token_exists) + pg.status = "Used" + pg.save(ignore_permissions=True) + jingrow.local.login_manager.login_as(team) + jingrow.local.response["type"] = "redirect" + jingrow.local.response["location"] = f"/dashboard/sites/{site}/overview" + else: + jingrow.local.response["type"] = "redirect" + jingrow.local.response["location"] = "/dashboard/login?showRemoteLoginError=true" + + +@jingrow.whitelist() +def 
subscriptions(): + team = get_current_team(True) + free_plans = jingrow.get_all("Marketplace App Plan", {"price_usd": ("<=", 0)}, pluck="name") + subscriptions = jingrow.get_all( + "Subscription", + { + "team": team.name, + "enabled": 1, + "plan": ("not in", free_plans), + }, + ["name", "document_name as app", "site", "plan"], + ) + + for sub in subscriptions: + sub["available_plans"] = get_plans_for_app(sub["app"]) + for ele in sub["available_plans"]: + ele["amount"] = ele[f"price_{team.currency.lower()}"] + if ele["name"] == sub["plan"]: + sub["selected_plan"] = ele + + return subscriptions + + +@protected("App Source") +@jingrow.whitelist() +def branches(name): + from jcloud.api.github import branches as git_branches + + app_source = jingrow.db.get_value( + "App Source", + name, + ["github_installation_id", "repository_owner", "repository"], + as_dict=True, + ) + installation_id = app_source.github_installation_id + repo_owner = app_source.repository_owner + repo_name = app_source.repository + + return git_branches(repo_owner, repo_name, installation_id) + + +@protected("Marketplace App") +@jingrow.whitelist() +def change_branch(name, source, version, to_branch): + app = jingrow.get_pg("Marketplace App", name) + app.change_branch(source, version, to_branch) + + +@protected("Marketplace App") +@jingrow.whitelist() +def options_for_version(name): + jingrow_version = jingrow.get_all("Jingrow Version", {"public": True}, pluck="name") + added_versions = jingrow.get_all("Marketplace App Version", {"parent": name}, pluck="version") + app = jingrow.db.get_value("Marketplace App", name, "app") + source = jingrow.get_value("App Source", {"app": app, "team": get_current_team()}) + branches_list = branches(source) + versions = list(set(jingrow_version).difference(set(added_versions))) + branches_list = [branch["name"] for branch in branches_list] + + return [{"version": version, "branch": branches_list} for version in versions] + + +@protected("Marketplace App") +@jingrow.whitelist() +def add_version(name, branch, version): + app = jingrow.get_pg("Marketplace App", name) + app.add_version(version, branch) + + +@protected("Marketplace App") +@jingrow.whitelist() +def remove_version(name, version): + app = jingrow.get_pg("Marketplace App", name) + app.remove_version(version) + + +@protected("Marketplace App") +@jingrow.whitelist() +def review_steps(name): + app = jingrow.get_pg("Marketplace App", name) + return [ + {"step": "Add a logo for your app", "completed": bool(app.image)}, + { + "step": "Add links", + "completed": ( + bool( + app.website + and app.support + and app.documentation + and app.terms_of_service + and app.privacy_policy + ) + ), + }, + { + "step": "Update description and long description", + "completed": (bool(app.description.strip() and app.long_description.strip() != "

")), + }, + { + "step": "Publish a release for version", + "completed": ( + bool( + jingrow.db.exists("App Release Approval Request", {"marketplace_app": name}) + or jingrow.db.exists("App Release", {"app": name, "status": "Approved"}) + ) + ), + }, + ] + + +@protected("Marketplace App") +@jingrow.whitelist() +def mark_app_ready_for_review(name): + app = jingrow.get_pg("Marketplace App", name) + app.mark_app_ready_for_review() + + +@protected("Marketplace App") +@jingrow.whitelist() +def communication(name): + comm = jingrow.qb.PageType("Communication") + user = jingrow.qb.PageType("User") + query = ( + jingrow.qb.from_(comm) + .left_join(user) + .on(comm.sender == user.email) + .select(comm.sender, comm.content, comm.communication_date, user.user_image) + .where((comm.reference_pagetype == "Marketplace App") & (comm.reference_name == name)) + .orderby(comm.creation, order=jingrow.qb.desc) + ) + res = query.run(as_dict=True) + return res # noqa: RET504 + + +@protected("Marketplace App") +@jingrow.whitelist() +def add_reply(name, message): + pagetype = "Marketplace App" + app = jingrow.get_pg(pagetype, name) + recipients = ", ".join(list(app.get_assigned_users()) or []) + pg = jingrow.get_pg( + { + "pagetype": "Communication", + "communication_type": "Communication", + "communication_medium": "Email", + "reference_pagetype": pagetype, + "reference_name": name, + "subject": f"Marketplace App Review: {name}, New message!", + "sender": jingrow.session.user, + "content": message, + "is_notification": True, + "recipients": recipients, + } + ) + pg.insert(ignore_permissions=True) + pg.send_email() + + +@protected("Marketplace App") +@jingrow.whitelist() +def fetch_readme(name): + app: MarketplaceApp = jingrow.get_pg("Marketplace App", name) + app.long_description = app.fetch_readme() + app.save() + + +@jingrow.whitelist(allow_guest=True) +def get_marketplace_apps(): + apps = jingrow.cache().get_value("marketplace_apps") + if not apps: + apps = jingrow.get_all("Marketplace App", {"status": "Published"}, ["name", "title", "route"]) + jingrow.cache().set_value("marketplace_apps", apps, expires_in_sec=60 * 60 * 24 * 7) + return apps + + +@protected("App Source") +@jingrow.whitelist() +def add_code_review_comment(name, filename, line_number, comment): + try: + pg = jingrow.get_pg("App Release Approval Request", name) + # Add a new comment + pg.append( + "code_comments", + { + "filename": filename, + "line_number": line_number, + "comment": comment, + "commented_by": jingrow.session.user, + "time": jingrow.utils.now_datetime(), + }, + ) + + pg.save() + return {"status": "success", "message": "Comment added successfully."} + except Exception as e: + jingrow.throw(f"Unable to add comment. 
Something went wrong: {e!s}") diff --git a/jcloud/api/monitoring.py b/jcloud/api/monitoring.py new file mode 100644 index 0000000..feb91bb --- /dev/null +++ b/jcloud/api/monitoring.py @@ -0,0 +1,122 @@ +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + + +from itertools import groupby + +import jingrow + +from jcloud.exceptions import AlertRuleNotEnabled +from jcloud.utils import log_error + + +def get_benches(): + self_hosted_stand_alone_servers = jingrow.get_all( + "Server", + {"is_standalone": True, "is_self_hosted": True, "status": "Active"}, + pluck="name", + ) + sites = jingrow.get_all( + "Site", + ["name", "bench"], + {"status": "Active", "server": ("not in", self_hosted_stand_alone_servers)}, + ignore_ifnull=True, + ) + sites.sort(key=lambda x: (x.bench, x.name)) + + bench_map = { + bench.name: bench + for bench in jingrow.get_all( + "Bench", + {"name": ("in", set(site.bench for site in sites))}, + ["name", "cluster", "server", "group"], + ignore_ifnull=True, + ) + } + benches = [] + for bench_name, _sites in groupby(sites, lambda x: x.bench): + bench = bench_map[bench_name] + bench.update({"sites": [site.name for site in _sites]}) + benches.append(bench) + + return benches + + +def get_clusters(): + servers = {} + servers["proxy"] = jingrow.get_all("Proxy Server", {"status": ("!=", "Archived")}, ["name", "cluster"]) + servers["app"] = jingrow.get_all("Server", {"status": ("!=", "Archived")}, ["name", "cluster"]) + servers["database"] = jingrow.get_all( + "Database Server", {"status": ("!=", "Archived")}, ["name", "cluster"] + ) + clusters = jingrow.get_all("Cluster") + job_map = { + "proxy": ["node", "nginx", "proxysql", "mariadb_proxy"], + "app": ["node", "nginx", "docker", "cadvisor", "gunicorn", "rq"], + "database": ["node", "mariadb"], + } + for cluster in clusters: + cluster["jobs"] = {} + + for server_type, server_type_servers in servers.items(): + for server in server_type_servers: + if server.cluster == cluster.name: + for job in job_map[server_type]: + cluster["jobs"].setdefault(job, []).append(server.name) + + return clusters + + +def get_domains(): + return jingrow.get_all( + "Site Domain", ["name", "site"], {"tls_certificate": ("is", "set")}, order_by="name" + ) + + +def get_tls(): + tls = [] + server_types = [ + "Server", + "Proxy Server", + "Database Server", + "Registry Server", + "Log Server", + "Monitor Server", + "Analytics Server", + "Trace Server", + ] + for server_type in server_types: + tls += jingrow.get_all(server_type, {"status": ("!=", "Archived")}, ["name"]) + + return tls + + +@jingrow.whitelist(allow_guest=True) +def targets(token): + monitor_token = jingrow.db.get_single_value("Jcloud Settings", "monitor_token") + if token != monitor_token: + return None + + return {"benches": get_benches(), "clusters": get_clusters(), "domains": get_domains(), "tls": get_tls()} + + +@jingrow.whitelist(allow_guest=True, xss_safe=True) +def alert(*args, **kwargs): + try: + user = str(jingrow.session.user) + jingrow.set_user("Administrator") + + pg = jingrow.get_pg( + { + "pagetype": "Alertmanager Webhook Log", + "payload": jingrow.request.get_data().decode(), + } + ) + pg.insert() + except AlertRuleNotEnabled: + pass + except Exception: + log_error("Alertmanager Webhook Error", args=args, kwargs=kwargs) + raise + finally: + jingrow.set_user(user) diff --git a/jcloud/api/notifications.py b/jcloud/api/notifications.py new file mode 100644 index 0000000..36f1d8d --- /dev/null +++ b/jcloud/api/notifications.py @@ -0,0 +1,75 @@ +import 
jingrow + +from jcloud.jcloud.pagetype.jcloud_role.jcloud_role import check_role_permissions +from jcloud.utils import get_current_team + + +@jingrow.whitelist() +def get_notifications(filters=None, order_by="creation desc", limit_start=None, limit_page_length=None): + if not filters: + filters = {} + + JcloudNotification = jingrow.qb.PageType("Jcloud Notification") + query = ( + jingrow.qb.from_(JcloudNotification) + .select( + JcloudNotification.name, + JcloudNotification.type, + JcloudNotification.read, + JcloudNotification.title, + JcloudNotification.message, + JcloudNotification.creation, + JcloudNotification.is_addressed, + JcloudNotification.is_actionable, + JcloudNotification.document_type, + JcloudNotification.document_name, + ) + .where(JcloudNotification.team == get_current_team()) + .orderby(JcloudNotification.creation, order=jingrow.qb.desc) + .limit(limit_page_length) + .offset(limit_start) + ) + + if roles := set(check_role_permissions("Site") + check_role_permissions("Release Group")): + JcloudRolePermission = jingrow.qb.PageType("Jcloud Role Permission") + + query = ( + query.join(JcloudRolePermission) + .on( + ( + (JcloudRolePermission.site == JcloudNotification.reference_name) + | (JcloudRolePermission.release_group == JcloudNotification.reference_name) + ) + & JcloudRolePermission.role.isin(roles) + ) + .distinct() + ) + + if filters.get("read") == "Unread": + query = query.where(JcloudNotification.read == 0) + + notifications = query.run(as_dict=True) + + for notification in notifications: + if notification.document_type == "Deploy Candidate": + rg_name = jingrow.db.get_value("Deploy Candidate", notification.document_name, "group") + notification.route = f"groups/{rg_name}/deploys/{notification.document_name}" + elif notification.document_type == "Agent Job": + site_name = jingrow.db.get_value("Agent Job", notification.document_name, "site") + notification.route = ( + f"sites/{site_name}/insights/jobs/{notification.document_name}" if site_name else None + ) + else: + notification.route = None + + return notifications + + +@jingrow.whitelist() +def mark_all_notifications_as_read(): + jingrow.db.set_value("Jcloud Notification", {"team": get_current_team()}, "read", 1, update_modified=False) + + +@jingrow.whitelist() +def get_unread_count(): + return jingrow.db.count("Jcloud Notification", {"read": False, "team": get_current_team()}) diff --git a/jcloud/api/oauth.py b/jcloud/api/oauth.py new file mode 100644 index 0000000..07dd171 --- /dev/null +++ b/jcloud/api/oauth.py @@ -0,0 +1,207 @@ +import json +import os + +import jingrow +from jingrow.core.utils import find +from jingrow.utils import get_url +from jingrow.utils.oauth import get_oauth2_authorize_url +from google.auth.transport.requests import Request +from google.oauth2 import id_token +from google.oauth2.credentials import Credentials +from google_auth_oauthlib.flow import Flow +from googleapiclient.discovery import build + +from jcloud.api.account import get_account_request_from_key, setup_account +from jcloud.api.saas import ( + check_subdomain_availability, + create_marketplace_subscription, + create_or_rename_saas_site, +) +from jcloud.jcloud.pagetype.site.saas_site import get_saas_domain +from jcloud.utils import log_error +from jcloud.utils.telemetry import capture, identify + +os.environ["OAUTHLIB_INSECURE_TRANSPORT"] = "1" + + +def google_oauth_flow(): + config = jingrow.conf.get("google_oauth_config") + redirect_uri = config["web"].get("redirect_uris")[0] + flow = Flow.from_client_config( + 
client_config=config, + scopes=[ + "https://www.googleapis.com/auth/userinfo.profile", + "openid", + "https://www.googleapis.com/auth/userinfo.email", + ], + redirect_uri=redirect_uri, + ) + return flow + + +@jingrow.whitelist(allow_guest=True) +def google_login(saas_app=None): + flow = google_oauth_flow() + authorization_url, state = flow.authorization_url() + minutes = 5 + jingrow.cache().set_value( + f"fc_oauth_state:{state}", saas_app or state, expires_in_sec=minutes * 60 + ) + return authorization_url + + +@jingrow.whitelist(allow_guest=True) +def callback(code=None, state=None): + cached_key = f"fc_oauth_state:{state}" + cached_state = jingrow.cache().get_value(cached_key) + saas_app = cached_state in jingrow.db.get_all("Saas Settings", pluck="name") + jingrow.cache().delete_value(cached_key) + + if (state == cached_state) or (saas_app): + pass + else: + jingrow.local.response["http_status_code"] = 401 + return "Invalid state parameter. The session timed out. Please try again or contact Jingrow support at https://jingrow.com/support" + + try: + flow = google_oauth_flow() + flow.fetch_token(authorization_response=jingrow.request.url) + except Exception as e: + log_error("Google oauth Login failed", data=e) + jingrow.local.response.type = "redirect" + jingrow.local.response.location = "/dashboard/login" + + # id_info + token_request = Request() + id_info = id_token.verify_oauth2_token( + id_token=flow.credentials._id_token, + request=token_request, + audience=jingrow.conf.get("google_oauth_config")["web"]["client_id"], + ) + + email = id_info.get("email") + + # phone (this may return nothing if info doesn't exists) + number = "" + if flow.credentials.refresh_token: # returns only for the first authorization + credentials = Credentials.from_authorized_user_info( + json.loads(flow.credentials.to_json()) + ) + service = build("people", "v1", credentials=credentials) + person = ( + service.people().get(resourceName="people/me", personFields="phoneNumbers").execute() + ) + if person: + phone = person.get("phoneNumbers") + if phone: + number = phone[0].get("value") + + # saas signup + if saas_app and cached_state: + account_request = create_account_request( + email=email, + first_name=id_info.get("given_name"), + last_name=id_info.get("family_name"), + phone_number=number, + ) + + logo = jingrow.db.get_value("Saas Signup Generator", cached_state, "image_path") + jingrow.local.response.type = "redirect" + jingrow.local.response.location = get_url( + f"/saas-oauth.html?app={cached_state}&key={account_request.request_key}&domain={get_saas_domain(cached_state)}&logo={logo}" + ) + else: + # fc login or signup + if not jingrow.db.exists("User", email): + account_request = create_account_request( + email=email, + first_name=id_info.get("given_name"), + last_name=id_info.get("family_name"), + phone_number=number, + ) + jingrow.local.response.type = "redirect" + jingrow.local.response.location = ( + f"/dashboard/setup-account/{account_request.request_key}" + ) + # login + else: + jingrow.local.login_manager.login_as(email) + jingrow.local.response.type = "redirect" + jingrow.response.location = "/dashboard" + + +def create_account_request(email, first_name, last_name, phone_number=""): + account_request = jingrow.get_pg( + { + "pagetype": "Account Request", + "team": email, + "email": email, + "first_name": first_name, + "last_name": last_name, + "phone_number": phone_number, + "send_email": False, + "role": "Jcloud Admin", + "oauth_signup": True, + } + ).insert(ignore_permissions=True) + 
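+ # Commit right away; the caller redirects the browser immediately after this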
jingrow.db.commit() + + return account_request + + +@jingrow.whitelist(allow_guest=True) +def saas_setup(key, app, country, subdomain): + if not check_subdomain_availability(subdomain, app): + jingrow.throw(f"Subdomain {subdomain} is already taken") + + all_countries = jingrow.db.get_all("Country", pluck="name") + country = find(all_countries, lambda x: x.lower() == country.lower()) + if not country: + jingrow.throw("Country field should be a valid country name") + + # create team and user + account_request = get_account_request_from_key(key) + if not jingrow.db.exists("Team", {"user": account_request.email}): + setup_account( + key=key, + first_name=account_request.first_name, + last_name=account_request.last_name, + country=country, + accepted_user_terms=True, + oauth_signup=True, + ) + + # create a signup account request + signup_ar = jingrow.get_pg( + { + "pagetype": "Account Request", + "team": account_request.team, + "email": account_request.email, + "first_name": account_request.first_name, + "last_name": account_request.last_name, + "emaill": account_request.email, + "saas": True, + "jerp": False, + "saas_app": app, + "role": "Jcloud Admin", + "country": country, + "subdomain": subdomain, + } + ).insert(ignore_permissions=True) + site_name = signup_ar.get_site_name() + identify( + site_name, + app=app, + oauth=True, + ) + capture("completed_oauth_account_request", "fc_saas", site_name) + create_or_rename_saas_site(app, signup_ar) + jingrow.set_user("Administrator") + create_marketplace_subscription(signup_ar) + + return get_url("/prepare-site?key=" + signup_ar.request_key + "&app=" + app) + + +@jingrow.whitelist(allow_guest=True) +def oauth_authorize_url(provider): + return get_oauth2_authorize_url(provider, None) diff --git a/jcloud/api/partner.py b/jcloud/api/partner.py new file mode 100644 index 0000000..66ea1e9 --- /dev/null +++ b/jcloud/api/partner.py @@ -0,0 +1,320 @@ +import jingrow +from jingrow.core.utils import find +from jingrow.utils import flt +from jingrow.utils.data import add_days, add_months, get_first_day, get_last_day, today + +from jcloud.utils import get_current_team + + +@jingrow.whitelist() +def approve_partner_request(key): + partner_request_pg = jingrow.get_pg("Partner Approval Request", {"key": key}) + if partner_request_pg and partner_request_pg.status == "Pending": + if partner_request_pg.approved_by_partner: + partner_request_pg.approved_by_jingrow = True + partner_request_pg.status = "Approved" + partner_request_pg.save(ignore_permissions=True) + partner_request_pg.reload() + + partner_email = jingrow.db.get_value("Team", partner_request_pg.partner, "partner_email") + jingrow.db.set_value( + "Team", + partner_request_pg.requested_by, + { + "partner_email": partner_email, + "partnership_date": jingrow.utils.getdate(partner_request_pg.creation), + }, + ) + + jingrow.db.commit() + + jingrow.response.type = "redirect" + jingrow.response.location = f"/app/partner-approval-request/{partner_request_pg.name}" + + +@jingrow.whitelist() +def get_partner_request_status(team): + return jingrow.db.get_value("Partner Approval Request", {"requested_by": team}, "status") + + +@jingrow.whitelist() +def update_partnership_date(team, partnership_date): + if team: + team_pg = jingrow.get_pg("Team", team) + team_pg.partnership_date = partnership_date + team_pg.save() + + +@jingrow.whitelist() +def get_partner_details(partner_email): + from jcloud.utils.billing import get_jingrow_io_connection + + client = get_jingrow_io_connection() + data = client.get_pg( + "Partner", + 
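+ # Fetch the enabled Partner record from the remote billing site connection by email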
filters={"email": partner_email, "enabled": 1}, + fields=[ + "email", + "partner_type", + "company_name", + "custom_ongoing_period_fc_invoice_contribution", + "custom_fc_invoice_contribution", + "partner_name", + "custom_number_of_certified_members", + "end_date", + ], + ) + if data: + return data[0] + jingrow.throw("Partner Details not found") + return None + + +@jingrow.whitelist() +def get_partner_name(partner_email): + return jingrow.db.get_value( + "Team", + {"partner_email": partner_email, "enabled": 1, "jerp_partner": 1}, + "billing_name", + ) + + +@jingrow.whitelist() +def transfer_credits(amount, customer, partner): + # partner discount map + DISCOUNT_MAP = {"Entry": 0, "Bronze": 0.05, "Silver": 0.1, "Gold": 0.15} + + amt = jingrow.utils.flt(amount) + partner_pg = jingrow.get_pg("Team", partner) + credits_available = partner_pg.get_balance() + partner_level, certificates = partner_pg.get_partner_level() + discount_percent = DISCOUNT_MAP.get(partner_level) + + if credits_available < amt: + jingrow.throw(f"Insufficient Credits to transfer. Credits Available: {credits_available}") + + customer_pg = jingrow.get_pg("Team", customer) + credits_to_transfer = amt + amt -= amt * discount_percent + if customer_pg.currency != partner_pg.currency: + if partner_pg.currency == "USD": + credits_to_transfer = credits_to_transfer * 83 + else: + credits_to_transfer = credits_to_transfer / 83 + + try: + customer_pg.allocate_credit_amount( + credits_to_transfer, + "Transferred Credits", + f"Transferred Credits from {partner_pg.name}", + ) + partner_pg.allocate_credit_amount( + amt * -1, "Transferred Credits", f"Transferred Credits to {customer_pg.name}" + ) + jingrow.db.commit() + return amt + except Exception: + jingrow.throw("Error in transferring credits") + jingrow.db.rollback() + + +@jingrow.whitelist() +def get_partner_contribution_list(partner_email): + partner_currency = jingrow.db.get_value( + "Team", {"jerp_partner": 1, "partner_email": partner_email}, "currency" + ) + month_end = jingrow.utils.get_last_day(today()) + invoices = jingrow.get_all( + "Invoice", + {"partner_email": partner_email, "due_date": month_end, "type": "Subscription"}, + ["due_date", "customer_name", "total", "currency", "status"], + ) + for d in invoices: + if partner_currency != d.currency: + if partner_currency == "USD": + d.update({"partner_total": flt(d.total / 83, 2)}) + else: + d.update({"partner_total": flt(d.total * 83)}) + else: + d.update({"partner_total": d.total}) + return invoices + + +@jingrow.whitelist() +def get_total_partner_contribution(partner_email): + return + + +@jingrow.whitelist() +def get_current_month_partner_contribution(partner_email): + partner_currency = jingrow.db.get_value( + "Team", {"jerp_partner": 1, "partner_email": partner_email}, "currency" + ) + month_end = jingrow.utils.get_last_day(today()) + + invoice = jingrow.qb.PageType("Invoice") + query = ( + jingrow.qb.from_(invoice) + .select(invoice.total, invoice.currency, invoice.total_before_discount) + .where( + (invoice.partner_email == partner_email) + & (invoice.due_date == month_end) + & (invoice.type == "Subscription") + & (invoice.status == "Draft") + ) + ) + invoices = query.run(as_dict=True) + total = 0 + for d in invoices: + if partner_currency != d.currency: + if partner_currency == "USD": + total += flt(d.total_before_discount / 83, 2) + else: + total += flt(d.total_before_discount * 83, 2) + else: + total += d.total_before_discount + + return total + + +@jingrow.whitelist() +def 
get_prev_month_partner_contribution(partner_email): + partner_currency = jingrow.db.get_value( + "Team", {"jerp_partner": 1, "partner_email": partner_email}, "currency" + ) + first_day = get_first_day(today()) + two_weeks = add_days(first_day, 14) # 15th day of the month + last_month_end = get_last_day(add_months(today(), -1)) + + invoice = jingrow.qb.PageType("Invoice") + query = ( + jingrow.qb.from_(invoice) + .select(invoice.total, invoice.currency, invoice.total_before_discount) + .where( + (invoice.partner_email == partner_email) + & (invoice.due_date == last_month_end) + & (invoice.type == "Subscription") + ) + ) + + if today() >= first_day and jingrow.utils.getdate() <= jingrow.utils.getdate(two_weeks): + # till 15th of the current month unpaid invoices can also be counted in contribution + query = query.where((invoice.status).isin(["Unpaid", "Paid"])) + else: + query = query.where(invoice.status == "Paid") + + invoices = query.run(as_dict=True) + + total = 0 + for d in invoices: + if partner_currency != d.currency: + if partner_currency == "USD": + total += flt(d.total / 83, 2) + else: + total += flt(d.total * 83, 2) + else: + total += d.total + return total + + +@jingrow.whitelist() +def calculate_partner_tier(contribution, currency): + partner_tier = jingrow.qb.PageType("Partner Tier") + query = jingrow.qb.from_(partner_tier).select(partner_tier.name) + if currency == "CNY": + query = query.where(partner_tier.target_in_cny <= contribution).orderby( + partner_tier.target_in_cny, order=jingrow.qb.desc + ) + else: + query = query.where(partner_tier.target_in_usd <= contribution).orderby( + partner_tier.target_in_usd, order=jingrow.qb.desc + ) + + tier = query.run(as_dict=True) + return tier[0] + + +@jingrow.whitelist() +def add_partner(referral_code: str): + team = get_current_team(get_pg=True) + partner = jingrow.get_pg("Team", {"partner_referral_code": referral_code}).name + if jingrow.db.exists( + "Partner Approval Request", + {"partner": partner, "requested_by": team.name, "status": "Pending"}, + ): + return "Request already sent" + + pg = jingrow.get_pg( + { + "pagetype": "Partner Approval Request", + "partner": partner, + "requested_by": team.name, + "status": "Pending", + "send_mail": True, + } + ) + pg.insert(ignore_permissions=True) + return None + + +@jingrow.whitelist() +def validate_partner_code(code): + partner = jingrow.db.get_value( + "Team", + {"enabled": 1, "jerp_partner": 1, "partner_referral_code": code}, + "billing_name", + ) + if partner: + return True, partner + return False, None + + +@jingrow.whitelist() +def get_partner_customers(): + team = get_current_team(get_pg=True) + customers = jingrow.get_all( + "Team", + {"enabled": 1, "jerp_partner": 0, "partner_email": team.partner_email}, + ["name", "user", "payment_mode", "billing_name", "currency"], + ) + return customers # noqa: RET504 + + +@jingrow.whitelist() +def get_partner_members(partner): + from jcloud.utils.billing import get_jingrow_io_connection + + client = get_jingrow_io_connection() + return client.get_list( + "LMS Certificate", + filters={"partner": partner}, + fields=["member_name", "member_email"], + ) + + +@jingrow.whitelist() +def remove_partner(): + team = get_current_team(get_pg=True) + if team.payment_mode == "Paid By Partner": + jingrow.throw( + "Cannot remove partner from the team. 
Please change the payment mode to Prepaid Credits or Card" + ) + + partner_user = jingrow.get_value( + "Team", {"partner_email": team.partner_email, "jerp_partner": 1}, "user" + ) + member_to_remove = find(team.team_members, lambda x: x.user == partner_user) + if member_to_remove: + team.remove(member_to_remove) + team.partner_email = "" + team.save(ignore_permissions=True) + + +@jingrow.whitelist() +def get_local_payment_setup(): + team = get_current_team() + data = jingrow._dict() + data.mpesa_setup = jingrow.db.get_value("Mpesa Setup", {"team": team}, "mpesa_setup_id") or None + data.payment_gateway = jingrow.db.get_value("Payment Gateway", {"team": team}, "name") or None + return data diff --git a/jcloud/api/payment.py b/jcloud/api/payment.py new file mode 100644 index 0000000..9bf74b4 --- /dev/null +++ b/jcloud/api/payment.py @@ -0,0 +1,14 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2019, JINGROW +# For license information, please see license.txt + + +import jingrow + + +@jingrow.whitelist() +def all(): + payments = jingrow.get_all( + "Payment", fields=["name"], filters={"user": jingrow.session.user} + ) + return payments diff --git a/jcloud/api/payment/__init__.py b/jcloud/api/payment/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/api/payment/alipay.py b/jcloud/api/payment/alipay.py new file mode 100644 index 0000000..cd3cf0c --- /dev/null +++ b/jcloud/api/payment/alipay.py @@ -0,0 +1,154 @@ +import jingrow +import traceback +import urllib.parse +import json +from alipay.aop.api.AlipayClientConfig import AlipayClientConfig +from alipay.aop.api.DefaultAlipayClient import DefaultAlipayClient +from alipay.aop.api.util.SignatureUtils import verify_with_rsa +from alipay.aop.api.request.AlipayTradePagePayRequest import AlipayTradePagePayRequest + +class AlipayAPI: + def __init__(self): + # Do not load settings at init time + self.client = None + self.alipay_client_config = None + self.return_url = None + self.notify_url = None + + def _initialize(self): + # Reload the configuration on every call so the latest settings are used + # Fetch configuration from Jcloud Settings + settings = jingrow.get_single("Jcloud Settings") + + # Configure the Alipay client + self.alipay_client_config = AlipayClientConfig() + self.alipay_client_config.server_url = settings.alipay_server_url + self.alipay_client_config.app_id = settings.alipay_app_id + self.alipay_client_config.app_private_key = settings.alipay_app_private_key + self.alipay_client_config.alipay_public_key = settings.alipay_public_key + + # Store the default callback URLs + self.return_url = settings.alipay_return_url + self.notify_url = settings.alipay_notify_url + + # Initialize the Alipay client + self.client = DefaultAlipayClient(alipay_client_config=self.alipay_client_config) + + def generate_payment_url(self, order_id, amount, subject, team_name, return_url=None, notify_url=None): + self._initialize() + + request = AlipayTradePagePayRequest() + # Use the URLs passed in, or fall back to the defaults + request.return_url = return_url or self.return_url + request.notify_url = notify_url or self.notify_url + + # Build the request payload + biz_content = { + "out_trade_no": order_id, + "total_amount": str(amount), + "subject": subject, + "product_code": "FAST_INSTANT_TRADE_PAY", + "passback_params": urllib.parse.quote(team_name) # Pass team_name, URL-encoded + } + + # Assign the dict directly; do not JSON-serialize it + request.biz_content = biz_content + + # Generate the payment request URL + response = self.client.page_execute(request, http_method="GET") + return response + + def verify_signature(self, data): + """Verify the Alipay signature + + Args: + data: Alipay callback data + + Returns: + bool: whether signature verification succeeded + """ + self._initialize() + + try: + signature = data.get('sign') + # Use the helper to build the sorted parameter string + org_message = 
get_dic_sorted_params(data) + + # Convert the string to bytes for verification + message = bytes(org_message, encoding='utf-8') + + # Verify the signature with the Alipay public key + result = verify_with_rsa( + public_key=self.alipay_client_config.alipay_public_key, + message=message, + sign=signature + ) + + if result: + jingrow.log_error("Alipay signature verification succeeded", "Alipay Signature") + else: + jingrow.log_error("Alipay signature verification failed", "Alipay Signature") + + return result + except Exception as e: + error_message = ( + "Alipay signature verification failed:\n" + f"Exception: {str(e)}\n" + f"Traceback:\n{traceback.format_exc()}\n" + f"Callback data: {data}\n" + ) + jingrow.log_error(error_message, "Alipay Signature Error") + return False + +@jingrow.whitelist(allow_guest=True) +def alipay_notify(): + """Alipay payment notification callback""" + try: + # Verify the signature + api = AlipayAPI() + if api.verify_signature(jingrow.form_dict): + order_id = jingrow.form_dict['out_trade_no'] + trade_no = jingrow.form_dict['trade_no'] + trade_status = jingrow.form_dict['trade_status'] + + if trade_status == 'TRADE_SUCCESS': + # Update the order status + order = jingrow.get_pg("Website Subscription", order_id) + order.payment_status = "Paid" + order.alipay_trade_no = trade_no + order.save() + + # Activate the website (only after payment succeeds) + if order.website_status != "Active": + order.website_status = "Active" + order.save() + jingrow.db.commit() + jingrow.log_error(f"Website {order_id} activated successfully", "Alipay Payment Success") + + return "success" + else: + jingrow.log_error("Alipay callback signature verification failed", "Alipay Notification") + except Exception as e: + jingrow.log_error(f"Error while handling Alipay callback: {str(e)}\n{traceback.format_exc()}", "Alipay Notification Error") + + return "fail" + +# Take a dict of parameters, drop the sign and sign_type fields, and build an ascending-order parameter string +def get_dic_sorted_params(org_dic_params): + content = '' + + # Work on a copy so the original dict is not modified + params = org_dic_params.copy() + + # Remove the signature-related fields + params.pop('sign', None) + params.pop('sign_type', None) + + # Sort keys in ascending order + new_list = sorted(params) + for i in new_list: + p = f"{i}={params.get(i)}&" + content += p + + sorted_params = content.rstrip('&') + return sorted_params diff --git a/jcloud/api/payment/apiclient_key.pem b/jcloud/api/payment/apiclient_key.pem new file mode 100644 index 0000000..6751a2f --- /dev/null +++ b/jcloud/api/payment/apiclient_key.pem @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCuZUBW9O+odKC5 +i0WTCXMbXV4hZead0ylvxAAvYeyjuJoOONsfJQ6ZPxp8AQYDtN3kh8Goz9beTk88 +7Zv3heKkF2edkBtjrQBpEaJlI+vSUCmbE8L5FrNklCbmw5mbUdsMRYD8rB5NJSG/ +ql1lUo3ivDvrBTPF5fie5VhhdGScSQ4L9JxH8z2oisBilr7HXRg6EqVxhOk60KfU +b0b7rbZiFFcN7bYgX3YzPkzA4eFMTCUvTftwkOW5phJCgN5lsLL/plGpUkkCQ+PX +rrbxZ/hkS2r/N9ZKG8ZmJegfn20Z9j8vRZtN85wgvzUrfPxb3KANue8vfSuHmbkd +fNgkU5d3AgMBAAECggEARiWx+IgzMgYfdCKCDL+7TiGDMFnlpkv+mdcz39s6rSwe +w2O5N9DXC2sngwOCYNFKg5bbncRkAC1tj+tfVfPDkw9a/TSu36wBw0F1mcsWGxVl +dtw/C6RG6kjxUNcSnTgSrN/0Hy44Tf2Gkqq7QHsxWScvzrvb9FkpZPiJUUHZCkdq +Y0I54egc/ADeHn1tx013BbfTXqi42TxWpjqeEY277c6RkRuYjRZ/+HJEkqN/zTgy +ZESYVFARu0Y6t3AtX1rfbWzToE4qgEGFJUHWm3EKy9aaqZlDHnYTFYeLcZwX6O4G +iL05l3np5+xw5gnQa3gNrHD1YCfltyAM/HAhWskOaQKBgQDVKvkztGwNiruFgJcs +jCz8YDSsj93/Ga8j4W0iiI7+ke42rYnQTNTCVkZE9Gd5Ood2/myaHqX7i4wcfpMl +WzzzLaGXTkmzh4zEgTuEBk5QMWFdF8/MyDJCXcLaJ6EIk/cn8q+2uSrjoSDIqH05 +vfBzbvUUXS0jeUJynGF69ifjCwKBgQDRb+NIwr6aDlm7SsS1VJwa7TuglNqh0cIu +ONIHlgPBNViBUWnuiltZgnUD0WkqJx0gCrmJ3Znw/ny5ZLDVrx1l/OG6VbuxC7IC +pxLQvnN0i6l/kqqLADc18C4jSxQaJOw6FUlB99WnB4j5Km9FVBU0Bm5eGRYG62ld +038e4hGgxQKBgBVCW+gULlKLa3g9AtamhDYHQjmTf4hZf2YMrV2sUNaCp0c8t2nW +v8FB6XAnD8q/U5NCfIJCXDtiB28wEGzBVKrL+jmkzSTid9+BI/ZeyqCkzBkEruy1 +ao3D3BL1sNGyMAwD4BJRFT3ua96XLyX7E2fvwPFu+Xl4NnBHuFETC5dRAoGBAISN 
+MeYtCwDpsDTvf0vcfugz2Rcq3qyn2yPx5kvS0BGKN7ew1UCZ5HLkNDb0ZIC/PJm4 +7/CnhfSWvE2BwsGbERopUJvkAuRsqH8xSqE+yEes1Nve8X0D5+2FU04XCGP7ll60 +dicAOgJ6rGA2UTIpoznIEDYR3nUGrSsq3junjMyZAoGAbTLP+h/pwdZEC/duFtee +ciw1Lav/KMaAJHAFtpuK8jXk6Yt75TJovVZsh18LzXa1HqfMwiqePfNppqkvYjr2 +tfsV4H9Gz4Y/GH11JTRowA88tqnJq5D4lwuVBehezREjsClXOLsn2tIOR2StPLuY +o14hLjeO7uQFuDBJxuSz/SA= +-----END PRIVATE KEY----- diff --git a/jcloud/api/payment/payment.py b/jcloud/api/payment/payment.py new file mode 100644 index 0000000..dbec7ca --- /dev/null +++ b/jcloud/api/payment/payment.py @@ -0,0 +1,35 @@ +import jingrow + +@jingrow.whitelist() +def create_payment_link(payment_method, order_id, amount, subject, user_id=None): + """Create a payment link + + Args: + payment_method: payment method (alipay/wechatpay) + order_id: order ID + amount: amount + subject: order subject + user_id: user ID (optional) + + Returns: + Payment link or QR code link + """ + if not user_id: + user_id = jingrow.session.user + + try: + if payment_method == "alipay": + from jcloud.api.payment.alipay import AlipayAPI + api = AlipayAPI() + return api.generate_payment_url(order_id, amount, subject, user_id) + + elif payment_method == "wechatpay": + from jcloud.api.payment.wechatpay import WeChatPayAPI + api = WeChatPayAPI() + return api.generate_payment_url(order_id, amount, subject, user_id) + + else: + jingrow.throw(f"Unsupported payment method: {payment_method}") + except Exception as e: + jingrow.log_error(f"Failed to create payment link: {str(e)}", f"{payment_method} Payment Error") + jingrow.throw(f"Failed to create payment link: {str(e)}") diff --git a/jcloud/api/payment/wechatpay.py b/jcloud/api/payment/wechatpay.py new file mode 100644 index 0000000..3eaadcc --- /dev/null +++ b/jcloud/api/payment/wechatpay.py @@ -0,0 +1,68 @@ +import json +import jingrow +from wechatpayv3 import WeChatPay, WeChatPayType + +class WeChatPayAPI: + def __init__(self): + # Fetch configuration from Jcloud Settings + settings = jingrow.get_single("Jcloud Settings") + + # Store the payment notification callback URL + self.notify_url = settings.wechatpay_notify_url + + # WeChat Pay parameters + self.mchid = settings.wechatpay_mchid + self.private_key = self.load_key('/home/jingrow/jingrow-bench/apps/jcloud/jcloud/api/payment/apiclient_key.pem') + self.cert_serial_no = settings.wechatpay_cert_serial_no + self.apiv3_key = settings.wechatpay_apiv3_key + self.appid = settings.wechatpay_appid + + # Load the WeChat Pay platform public key + self.public_key = self.load_key('/home/jingrow/jingrow-bench/apps/jcloud/jcloud/api/payment/wechatpay_public_key.pem') + self.public_key_id = settings.wechatpay_public_key_id + # Configuration options with default values defined inline + self.partner_mode = False # Direct-merchant mode by default + self.proxy = None # No proxy by default + self.timeout = (1, 2) # (connect, read) timeouts in seconds + + # Logger + self.logger = jingrow.logger("wechatpay", file_count=5) + + # Initialize the WeChat Pay client + self.wxpay = WeChatPay( + wechatpay_type=WeChatPayType.NATIVE, + mchid=self.mchid, + private_key=self.private_key, + cert_serial_no=self.cert_serial_no, + apiv3_key=self.apiv3_key, + appid=self.appid, + notify_url=self.notify_url, + logger=self.logger, + partner_mode=self.partner_mode, + proxy=self.proxy, + timeout=self.timeout, + public_key=self.public_key, + public_key_id=self.public_key_id + ) + + def load_key(self, filename): + with open(filename, 'r') as f: + return f.read() + + def generate_payment_url(self, order_id, amount, subject, team_name, notify_url=None): + # Use the notify URL passed in, or fall back to the default + callback_url = notify_url or self.notify_url + + try: + code, message = self.wxpay.pay( + description=subject, + out_trade_no=order_id, + amount={'total': int(round(float(amount) * 100))}, + pay_type=WeChatPayType.NATIVE, + attach=str(team_name) + ) + if code in range(200, 300): + result = 
json.loads(message) + return result.get('code_url') + except Exception as e: + jingrow.throw(f"WeChat Pay URL generation failed: {str(e)}") diff --git a/jcloud/api/payment/wechatpay_public_key.pem b/jcloud/api/payment/wechatpay_public_key.pem new file mode 100644 index 0000000..3033bbe --- /dev/null +++ b/jcloud/api/payment/wechatpay_public_key.pem @@ -0,0 +1,9 @@ +-----BEGIN PUBLIC KEY----- +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAp9zXz4TFVqwyXP4j4lZu +XVyVVs/kCkrwIsYpPJiKvIYPAbIOR5zK/j2IYhQumMKtbRi+cy55BBIT2CTHZcsg +kT7bFfl+T2IYvEUjJ+8YXrsuKNg2S/v/KrdxU8qPeuFBHVbG++iXCHxKKm3uOQe6 +8MguzlwNr2RXtQR+JkR4qxAvxWl63Js0WaUxhbGwZ/UWonOq3fmQ7qAE1UXxrao7 +TT3d/W4c3Uo5mGq71w0laZiyrjI+PfVzif3gICkzhYJiCvOj2WhPjQaX6HjAV2Gh +BLeoSp2h5GzPD8mD0O2ZxCu0ctlzPveFhu2snBqjSsq/GgtxF1LsJW6oj91S9LLn +AQIDAQAB +-----END PUBLIC KEY----- diff --git a/jcloud/api/product_trial.py b/jcloud/api/product_trial.py new file mode 100644 index 0000000..8b720ce --- /dev/null +++ b/jcloud/api/product_trial.py @@ -0,0 +1,191 @@ +# Copyright (c) 2023, JINGROW +# For license information, please see license.txt + +from __future__ import annotations + +import random + +import jingrow +import jingrow.utils +from jingrow.rate_limiter import rate_limit + +from jcloud.api.account import get_account_request_from_key +from jcloud.jcloud.pagetype.team.team import Team +from jcloud.saas.pagetype.product_trial.product_trial import send_verification_mail_for_login +from jcloud.utils.telemetry import capture + + +def _get_active_site(product: str, team: str | None) -> str | None: + if team is None: + return None + product_trial_linked_sites = jingrow.get_all( + "Product Trial Request", + {"product_trial": product, "team": team, "status": ["not in", ["Pending", "Error", "Expired"]]}, + pluck="site", + ) + if not product_trial_linked_sites: + return None + existing_sites = jingrow.get_all( + "Site", + { + "name": ["in", product_trial_linked_sites], + "status": ["!=", "Archived"], + }, + pluck="name", + limit=1, + ) + if len(existing_sites) > 0: + return existing_sites[0] + return None + + +@jingrow.whitelist(allow_guest=True) +def send_verification_code_for_login(email: str, product: str): + is_user_exists = jingrow.db.exists("Team", {"user": email}) and _get_active_site( + product, jingrow.db.get_value("Team", {"user": email}, "name") + ) + if not is_user_exists: + jingrow.throw("You have no active sites for this product. Please try signing up.") + # generate otp and store in redis + otp = random.randint(100000, 999999) + jingrow.cache.set_value( + f"product_trial_login_verification_code:{email}", + jingrow.utils.sha256_hash(str(otp)), + expires_in_sec=300, + ) + + send_verification_mail_for_login(email, product, otp) + + +@jingrow.whitelist(allow_guest=True) +@rate_limit(limit=10, seconds=300) +def login_using_code(email: str, product: str, code: str): + team_exists = jingrow.db.exists("Team", {"user": email}) + site = _get_active_site(product, jingrow.db.get_value("Team", {"user": email}, "name")) + if not team_exists: + jingrow.throw("You have no active sites for this product. Please try signing up.") + + # check if team has 2fa enabled and active + team = jingrow.get_value("Team", {"user": email}, ["name", "enforce_2fa", "enabled"], as_dict=True) + if not team.enabled: + jingrow.throw("Your account is disabled. Please contact support.") + if team.enforce_2fa: + jingrow.throw("Your account has 2FA enabled. 
Please go to jingrow.com to login.") + + # validate code + code_hash_from_cache = jingrow.cache.get_value(f"product_trial_login_verification_code:{email}") + if not code_hash_from_cache: + jingrow.throw("OTP has expired. Please try again.") + if jingrow.utils.sha256_hash(str(code)) != code_hash_from_cache: + jingrow.throw("Invalid OTP. Please try again.") + + # remove code from cache + jingrow.cache.delete_value(f"product_trial_login_verification_code:{email}") + + # login as user + jingrow.set_user(email) + jingrow.local.login_manager.login_as(email) + + # send the product trial request name + return jingrow.get_value( + "Product Trial Request", + {"product_trial": product, "team": team.name, "site": site}, + pluck="name", + ) + + +@jingrow.whitelist(allow_guest=True) +@rate_limit(limit=5, seconds=60) +def get_account_request_for_product_signup(): + return jingrow.db.get_value("Account Request", {"email": jingrow.session.user}, "name") + + +@jingrow.whitelist(allow_guest=True, methods=["POST"]) +def setup_account(key: str, country: str | None = None): + ar = get_account_request_from_key(key) + if not ar: + jingrow.throw("Invalid or Expired Key") + if not ar.product_trial: + jingrow.throw("Invalid Product Trial") + + if country: + ar.country = country + ar.save(ignore_permissions=True) + + if not ar.country: + jingrow.throw("Please provide a valid country name") + + jingrow.set_user("Administrator") + # check if team already exists + if jingrow.db.exists("Team", {"user": ar.email}): + # Update first name and last name + team = jingrow.get_pg("Team", {"user": ar.email}) + team.first_name = ar.first_name + team.last_name = ar.last_name + team.save(ignore_permissions=True) + # create team + else: + # check if user exists + is_user_exists = jingrow.db.exists("User", ar.email) + team = Team.create_new( + account_request=ar, + first_name=ar.first_name, + last_name=ar.last_name, + country=ar.country, + is_us_eu=ar.is_us_eu, + user_exists=is_user_exists, + ) + # Telemetry: Created account + capture("completed_signup", "fc_saas", ar.email) + # login + jingrow.set_user(ar.email) + jingrow.local.login_manager.login_as(ar.email) + if _get_active_site(ar.product_trial, team.name): + return { + "account_request": ar.name, + "location": f"/dashboard/saas/{ar.product_trial}/login-to-site?account_request={ar.name}", + } + + return { + "account_request": ar.name, + "location": f"/dashboard/saas/{ar.product_trial}/setup?account_request={ar.name}", + } + + +def _get_existing_trial_request(product: str, team: str): + return jingrow.get_value( + "Product Trial Request", + {"team": team, "status": ["not in", ["Error", "Expired", "Site Created"]], "product_trial": product}, + ["name", "site"], + as_dict=True, + ) + + +@jingrow.whitelist(methods=["POST"]) +def get_request(product: str, account_request: str | None = None): + team = jingrow.local.team() + + # validate if there is already a site + if site := _get_active_site(product, team.name): + site_request = jingrow.get_pg( + "Product Trial Request", {"product_trial": product, "team": team, "site": site} + ) + elif request := _get_existing_trial_request(product, team.name): + site_request = jingrow.get_pg("Product Trial Request", request.name) + else: + # check if account request is valid + is_valid_account_request = jingrow.get_value("Account Request", account_request, "email") == team.user + # create a new one + site_request = jingrow.new_pg( + "Product Trial Request", + product_trial=product, + team=team.name, + account_request=account_request if 
is_valid_account_request else None, + ).insert(ignore_permissions=True) + + return { + "name": site_request.name, + "site": site_request.site, + "product_trial": site_request.product_trial, + "status": site_request.status, + } diff --git a/jcloud/api/regional_payments/mpesa/utils.py b/jcloud/api/regional_payments/mpesa/utils.py new file mode 100644 index 0000000..b89c2b7 --- /dev/null +++ b/jcloud/api/regional_payments/mpesa/utils.py @@ -0,0 +1,484 @@ +import json + +import jingrow +import requests +from jingrow import _ +from jingrow.query_builder import PageType +from jingrow.utils.password import get_decrypted_password + +from jcloud.utils import get_current_team + +supported_mpesa_currencies = ["KES"] + + +@jingrow.whitelist() +def update_mpesa_setup(mpesa_details): + """Create Mpesa Settings for the team.""" + mpesa_info = jingrow._dict(mpesa_details) + team = get_current_team() + try: + if not jingrow.db.exists("Mpesa Setup", {"team": team}): + mpesa_setup = jingrow.get_pg( + { + "pagetype": "Mpesa Setup", + "team": team, + "mpesa_setup_id": mpesa_info.mpesa_setup_id, + "api_type": "Mpesa Exjcloud", + "consumer_key": mpesa_info.consumer_key, + "consumer_secret": mpesa_info.consumer_secret, + "business_shortcode": mpesa_info.short_code, + "till_number": mpesa_info.till_number, + "pass_key": mpesa_info.pass_key, + "security_credential": mpesa_info.security_credential, + "initiator_name": mpesa_info.initiator_name, + "sandbox": 1 if mpesa_info.sandbox else 0, + } + ) + + mpesa_setup.insert(ignore_permissions=True) + else: + mpesa_setup = jingrow.get_pg("Mpesa Setup", {"team": team}) + mpesa_setup.mpesa_setup_id = mpesa_info.mpesa_setup_id + mpesa_setup.consumer_key = mpesa_info.consumer_key + mpesa_setup.consumer_secret = mpesa_info.consumer_secret + mpesa_setup.business_shortcode = mpesa_info.short_code + mpesa_setup.till_number = mpesa_info.till_number + mpesa_setup.pass_key = mpesa_info.pass_key + mpesa_setup.security_credential = mpesa_info.security_credential + mpesa_setup.initiator_name = mpesa_info.initiator_name + mpesa_setup.sandbox = 1 if mpesa_info.sandbox else 0 + mpesa_setup.save() + mpesa_setup.reload() + + return mpesa_setup.name + except Exception as e: + jingrow.log_error( + message=f"Error creating Mpesa Settings: {e!s}", title="MPesa Settings Creation Error" + ) + return None + + +@jingrow.whitelist() +def fetch_mpesa_setup(): + team = get_current_team() + if jingrow.db.exists("Mpesa Setup", {"team": team}): + mpesa_setup = jingrow.get_pg("Mpesa Setup", {"team": team}) + return { + "mpesa_setup_id": mpesa_setup.mpesa_setup_id, + "consumer_key": mpesa_setup.consumer_key, + "consumer_secret": mpesa_setup.consumer_secret, + "business_shortcode": mpesa_setup.business_shortcode, + "till_number": mpesa_setup.till_number, + "pass_key": mpesa_setup.pass_key, + "initiator_name": mpesa_setup.initiator_name, + "security_credential": mpesa_setup.security_credential, + "api_type": mpesa_setup.api_type, + } + return None + + +@jingrow.whitelist() +def display_invoices_by_partner(): + """Display the list of invoices by partner.""" + team = get_current_team() + invoices = jingrow.get_all( + "Mpesa Payment Record", + filters={"team": team}, + fields=[ + "name", + "posting_date", + "amount", + "local_invoice", + "payment_partner", + "amount_usd", + "exchange_rate", + ], + ) + return invoices # noqa: RET504 + + +@jingrow.whitelist() +def get_exchange_rate(from_currency, to_currency): + """Get the latest exchange rate for the given currencies.""" + exchange_rate = jingrow.db.get_value( + 
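+ # Latest exchange rate for the currency pair wins (ordered by creation DESC below)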
"Currency Exchange", + {"from_currency": from_currency, "to_currency": to_currency}, + "exchange_rate", + order_by="creation DESC", + ) + return exchange_rate # noqa: RET504 + + +@jingrow.whitelist() +def update_payment_gateway_settings(gateway_details): + """Create Payment Gateway Settings for the team.""" + team = get_current_team() + gateway_data = jingrow._dict(gateway_details) + + try: + if jingrow.db.exists("Payment Gateway", {"team": team}): + payment_gateway = jingrow.get_pg("Payment Gateway", {"team": team}) + payment_gateway.update( + { + "gateway": gateway_data.gateway_name, + "currency": gateway_data.currency, + "gateway_settings": gateway_data.gateway_setting, + "gateway_controller": gateway_data.gateway_controller, + "url": gateway_data.url, + "api_key": gateway_data.api_key, + "api_secret": gateway_data.api_secret, + "taxes_and_charges": gateway_data.taxes_and_charges, + "print_format": gateway_data.print_format, + } + ) + return payment_gateway.save().name + payment_gateway_settings = jingrow.get_pg( + { + "pagetype": "Payment Gateway", + "team": team, + "gateway": gateway_data.gateway_name, + "currency": gateway_data.currency, + "gateway_settings": gateway_data.gateway_setting, + "gateway_controller": gateway_data.gateway_controller, + "url": gateway_data.url, + "api_key": gateway_data.api_key, + "api_secret": gateway_data.api_secret, + "taxes_and_charges": gateway_data.taxes_and_charges, + "print_format": gateway_data.print_format, + } + ) + + payment_gateway_settings.insert(ignore_permissions=True) + return payment_gateway_settings + except Exception as e: + jingrow.log_error( + message=f"Error creating Payment Gateway Settings: {e!s}", + title="Payment Gateway Settings Creation Error", + ) + return None + + +@jingrow.whitelist() +def get_payment_gateway_details(): + team = get_current_team() + if jingrow.db.exists("Payment Gateway", {"team": team}): + payment_gateway = jingrow.get_pg("Payment Gateway", {"team": team}) + return { + "gateway_name": payment_gateway.gateway, + "currency": payment_gateway.currency, + "gateway_settings": payment_gateway.gateway_settings, + "gateway_controller": payment_gateway.gateway_controller, + "url": payment_gateway.url, + "api_key": payment_gateway.api_key, + "api_secret": payment_gateway.api_secret, + "taxes_and_charges": payment_gateway.taxes_and_charges, + "print_format": payment_gateway.print_format, + } + return None + + +@jingrow.whitelist() +def get_gateway_controller(): + # """Get the list of controllers for the given pagetype.""" + team = get_current_team(get_pg=True) + gateway_setting = "Mpesa Setup" if team.country == "Kenya" else None + if gateway_setting: + return jingrow.db.get_value(gateway_setting, {"team": team.name}, "name") + return None + + +@jingrow.whitelist() +def get_tax_percentage(payment_partner): + team = jingrow.db.get_value("Team", {"user": payment_partner}, "name") + mpesa_setups = jingrow.get_all( + "Mpesa Setup", filters={"api_type": "Mpesa Exjcloud", "team": team}, fields=["name"] + ) + taxes_and_charges = 0 + for mpesa_setup in mpesa_setups: + payment_gateways = jingrow.get_all( + "Payment Gateway", + filters={"gateway_settings": "Mpesa Setup", "gateway_controller": mpesa_setup}, + fields=["taxes_and_charges"], + ) + if payment_gateways: + taxes_and_charges = payment_gateways[0].taxes_and_charges + break # we don't need the loop entirely + return taxes_and_charges + + +def update_tax_id_or_phone_no(team, tax_id, phone_number): + """Update the tax ID or phone number for the team, only if they are 
different from existing values.""" + team_pg = jingrow.get_pg("Team", team) + + # Check if updates are needed + new_tax_id = tax_id and team_pg.mpesa_tax_id != tax_id + new_phone_number = phone_number and team_pg.mpesa_phone_number != phone_number + + # Update only if at least one value needs updating + if new_tax_id or new_phone_number: + if tax_id: + team_pg.mpesa_tax_id = tax_id + if phone_number: + team_pg.mpesa_phone_number = phone_number + team_pg.save() + + +@jingrow.whitelist() +def display_mpesa_payment_partners(): + """Display the list of partners in the system with Mpesa integration enabled.""" + + Team = PageType("Team") + MpesaSetup = PageType("Mpesa Setup") + + query = ( + jingrow.qb.from_(Team) + .join(MpesaSetup) + .on(Team.name == MpesaSetup.team) + .select(Team.user) + .where(Team.country == "Kenya") # (MpesaSetup.sandbox == 1) + ) + + mpesa_partners = query.run(as_dict=True) + + return [partner["user"] for partner in mpesa_partners] + + +@jingrow.whitelist() +def display_payment_partners(): + """Display the list of partners in the system.""" + Team = PageType("Team") + query = jingrow.qb.from_(Team).select(Team.user).where(Team.jerp_partner == 1) + + partners = query.run(as_dict=True) + + return [partner["user"] for partner in partners] + + +@jingrow.whitelist() +def display_payment_gateway(): + """Display the payment gateway for the partner.""" + gateways = jingrow.get_all("Payment Gateway", filters={}, fields=["gateway"]) + return [gateway["gateway"] for gateway in gateways] + + +def get_details_from_request_log(transaction_id): + """Get the team and partner associated with the Mpesa Request Log.""" + request_log = jingrow.get_pg("Mpesa Request Log", {"request_id": transaction_id, "status": "Queued"}) + request_data = request_log.data + team = partner = None + # Parse the request_data as a dictionary + if request_data: + try: + request_data_dict = json.loads(request_data) + team = request_data_dict.get("team") + partner_ = request_data_dict.get("partner") + partner = jingrow.get_value("Team", {"user": partner_, "jerp_partner": 1, "enabled": 1}, "name") + requested_amount = request_data_dict.get("request_amount") + amount_usd = request_data_dict.get("amount_usd") + exchange_rate = request_data_dict.get("exchange_rate") + except json.JSONDecodeError: + jingrow.throw(_("Invalid JSON format in request_data")) + team = None + partner = None + + return jingrow._dict( + { + "team": team, + "partner": partner, + "requested_amount": requested_amount, + "amount_usd": amount_usd, + "exchange_rate": exchange_rate, + } + ) + + +def get_payment_gateway(partner_value): + """Get the payment gateway for the partner.""" + partner = jingrow.get_pg("Team", partner_value) + mpesa_setup = get_mpesa_setup_for_team(partner.name) + payment_gateway = jingrow.get_all( + "Payment Gateway", + filters={"gateway_settings": "Mpesa Setup", "gateway_controller": mpesa_setup.name}, + pluck="name", + ) + if not payment_gateway: + jingrow.throw(_("Payment Gateway not found"), title=_("Mpesa Exjcloud Error")) + gateway = jingrow.get_pg("Payment Gateway", payment_gateway[0]) + return gateway.name + + +def get_mpesa_setup_for_team(team_name): + """Fetch Mpesa setup for a given team.""" + + mpesa_setup = jingrow.get_all("Mpesa Setup", {"team": team_name}, pluck="name") + if not mpesa_setup: + jingrow.throw( + _(f"Mpesa Setup not configured for the team {team_name}"), title=_("Mpesa Exjcloud Error") + ) + return jingrow.get_pg("Mpesa Setup", mpesa_setup[0]) + + +def sanitize_mobile_number(number): + """ensures 
number take the right format""" + """Add country code and strip leading zeroes from the phone number.""" + return "254" + str(number).lstrip("0") + + +def fetch_param_value(response, key, key_field): + """Fetch the specified key from list of dictionary. Key is identified via the key field.""" + for param in response: + if param[key_field] == key: + return param["Value"] + return None + + +@jingrow.whitelist() +def create_exchange_rate(**kwargs): + """Create a new exchange rate record.""" + try: + from_currency = kwargs.get("from_currency", {}).get("value") + to_currency = kwargs.get("to_currency", {}).get("value") + exchange_rate = kwargs.get("exchange_rate") + + if not from_currency or not to_currency or not exchange_rate: + raise ValueError("Missing required fields.") + + exchange_rate_pg = jingrow.get_pg( + { + "pagetype": "Currency Exchange", + "from_currency": from_currency, + "to_currency": to_currency, + "exchange_rate": exchange_rate, + "date": jingrow.utils.today(), + } + ) + + exchange_rate_pg.insert(ignore_permissions=True) + return exchange_rate_pg.name + + except Exception as e: + jingrow.log_error("Error creating exchange rate") + raise e + + +def create_payment_partner_transaction( + team, payment_partner, exchange_rate, amount, paid_amount, payment_gateway, payload=None +): + """Create a Payment Partner Transaction record.""" + transaction_pg = jingrow.get_pg( + { + "pagetype": "Payment Partner Transaction", + "team": team, + "payment_partner": payment_partner, + "exchange_rate": exchange_rate, + "payment_gateway": payment_gateway, + "amount": amount, + "actual_amount": paid_amount, + "payment_transaction_details": payload, + } + ) + transaction_pg.insert(ignore_permissions=True) + transaction_pg.submit() + return transaction_pg.name + + +@jingrow.whitelist() +def fetch_payments(payment_gateway, partner, from_date, to_date): + print("fetching payments", payment_gateway) + partner = jingrow.get_value("Team", {"user": partner}, "name") + filters = { + "docstatus": 1, + "submitted_to_jingrow": 0, + "payment_gateway": payment_gateway, + "payment_partner": partner, + } + + if from_date and to_date: + filters["posting_date"] = ["between", [from_date, to_date]] + + partner_payments = jingrow.get_all( + "Payment Partner Transaction", filters=filters, fields=["name", "amount", "posting_date"] + ) + jingrow.response.message = partner_payments + return partner_payments + + +@jingrow.whitelist() +def create_payment_partner_payout(from_date, to_date, payment_gateway, payment_partner, payments): + """Create a Payment Partner Payout record.""" + partner_commission = jingrow.get_value("Team", {"user": payment_partner}, "partner_commission") + + # Initialize the main document + payout_pg = jingrow.get_pg( + { + "pagetype": "Partner Payment Payout", + "from_date": from_date, + "to_date": to_date, + "payment_gateway": payment_gateway, + "partner": payment_partner, + "partner_commission": partner_commission, + "transfer_items": [], # Initialize an empty child table + } + ) + + # Add each payment to the child table + for payment in payments: + payout_pg.append( + "transfer_items", + { + "transaction_id": payment.get("name"), + "amount": payment.get("amount"), + "posting_date": payment.get("posting_date"), + }, + ) + # Save and submit the document + payout_pg.insert() + payout_pg.submit() + + return payout_pg.name + + +@jingrow.whitelist() +def create_invoice_partner_site(data, gateway_controller): + gateway = jingrow.get_pg("Payment Gateway", gateway_controller) + api_url_ = gateway.url + 
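+ # Gateway credentials; api_secret is stored encrypted and is read via get_decrypted_password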
api_key = gateway.api_key + api_secret = get_decrypted_password("Payment Gateway", gateway.name, fieldname="api_secret") + + transaction_id = data.get("transaction_id") + amount = data.get("amount") + team = data.get("team") + default_currency = data.get("default_currency") + rate = data.get("rate") + + # Validate the necessary fields + if not transaction_id or not amount: + jingrow.throw(_("Invalid transaction data received")) + + api_url = api_url_ + + headers = { + "Authorization": f"token {api_key}:{api_secret}", + } + # Define the payload to send with the POST request + payload = { + "transaction_id": transaction_id, + "amount": amount, + "team": team, + "default_currency": default_currency, + "rate": rate, + } + # Make the POST request to your API + try: + response = requests.post(api_url, data=payload, headers=headers) + if response.status_code == 200: + response_data = response.json() + download_link = response_data.get("message", "") + invoice_name = response_data.get("invoice_name", "") + return download_link, invoice_name + jingrow.log_error(f"API Error: {response.status_code} - {response.text}") + jingrow.throw(_("Failed to create the invoice via API")) + + except requests.exceptions.RequestException as e: + jingrow.log_error(f"Error calling API: {e}") + jingrow.throw(_("There was an issue connecting to the API.")) diff --git a/jcloud/api/saas.py b/jcloud/api/saas.py new file mode 100644 index 0000000..6185c32 --- /dev/null +++ b/jcloud/api/saas.py @@ -0,0 +1,398 @@ +import json +from typing import TYPE_CHECKING + +import jingrow +from jingrow.core.utils import find + +from jcloud.api.account import get_account_request_from_key +from jcloud.jcloud.pagetype.site.jerp_site import get_jerp_domain +from jcloud.jcloud.pagetype.site.saas_pool import get as get_pooled_saas_site +from jcloud.jcloud.pagetype.site.saas_site import ( + SaasSite, + get_default_team_for_app, + get_saas_domain, + get_saas_site_plan, + set_site_in_subscription_docs, +) +from jcloud.jcloud.pagetype.team.team import Team +from jcloud.utils import log_error +from jcloud.utils.telemetry import capture, identify + +if TYPE_CHECKING: + from jcloud.jcloud.pagetype.site.site import Site + +# ----------------------------- SIGNUP APIs --------------------------------- + + +@jingrow.whitelist(allow_guest=True) +def account_request( + subdomain, + email, + first_name, + last_name, + country, + app, + url_args=None, +): + """ + return: Stripe setup intent and AR key if stripe flow, else None + """ + from jingrow.utils.html_utils import clean_html + + email = email.strip().lower() + jingrow.utils.validate_email_address(email, True) + + if not check_subdomain_availability(subdomain, app): + jingrow.throw(f"Subdomain {subdomain} is already taken") + + all_countries = jingrow.db.get_all("Country", pluck="name") + country = find(all_countries, lambda x: x.lower() == country.lower()) + if not country: + jingrow.throw("Country field should be a valid country name") + + team = jingrow.db.get_value("Team", {"user": email}) + if team and jingrow.db.exists("Invoice", {"team": team, "status": "Unpaid", "type": "Subscription"}): + jingrow.throw(f"Account {email} already exists with unpaid invoices") + + current_user = jingrow.session.user + try: + jingrow.set_user("Administrator") + account_request = jingrow.get_pg( + { + "pagetype": "Account Request", + "saas": True, + "saas_app": app, + "jerp": False, + "subdomain": subdomain, + "email": email, + "role": "Jcloud Admin", + "first_name": clean_html(first_name), + "last_name": 
clean_html(last_name), + "country": country, + "url_args": url_args or json.dumps({}), + "send_email": True, + } + ) + site_name = account_request.get_site_name() + identify( + site_name, + app=account_request.saas_app, + source=json.loads(url_args).get("source") if url_args else "fc", + ) + account_request.insert(ignore_permissions=True) + capture("completed_server_account_request", "fc_saas", site_name) + except Exception as e: + log_error("Account Request Creation Failed", data=e) + raise + finally: + jingrow.set_user(current_user) + + create_or_rename_saas_site(app, account_request) + + +def create_or_rename_saas_site(app, account_request): + """ + Creates site for Saas App. These are differentiated by `standby_for` field in site pg + """ + current_user = jingrow.session.user + current_session_data = jingrow.session.data + jingrow.set_user("Administrator") + + try: + enable_hybrid_pools = jingrow.db.get_value("Saas Settings", app, "enable_hybrid_pools") + hybrid_saas_pool = get_hybrid_saas_pool(account_request) if enable_hybrid_pools else "" + + pooled_site = get_pooled_saas_site(app, hybrid_saas_pool) + if pooled_site: + SaasSite(site=pooled_site, app=app).rename_pooled_site(account_request) + else: + saas_site = SaasSite( + account_request=account_request, app=app, hybrid_saas_pool=hybrid_saas_pool + ).insert(ignore_permissions=True) + set_site_in_subscription_docs(saas_site.subscription_docs, saas_site.name) + + capture("completed_server_site_created", "fc_saas", account_request.get_site_name()) + except Exception as e: + log_error("Saas Site Creation or Rename failed", data=e) + + finally: + jingrow.set_user(current_user) + jingrow.session.data = current_session_data + + +@jingrow.whitelist() +def new_saas_site(subdomain, app): + jingrow.only_for("System Manager") + + pooled_site = get_pooled_saas_site(app) + if pooled_site: + site = SaasSite(site=pooled_site, app=app).rename_pooled_site(subdomain=subdomain) + else: + site = SaasSite(app=app, subdomain=subdomain).insert(ignore_permissions=True) + site.create_subscription(get_saas_site_plan(app)) + + site.reload() + site.team = get_default_team_for_app(app) + site.save(ignore_permissions=True) + + jingrow.db.commit() + + return site + + +@jingrow.whitelist() +def get_saas_site_status(site): + if jingrow.db.exists("Site", site): + return {"site": site, "status": jingrow.db.get_value("Site", site, "status")} + + return {"site": site, "status": "Pending"} + + +def get_hybrid_saas_pool(account_request): + """ + 1. Get all hybrid pools and their rules + 2. Filter based on rules and return Hybrid pool + 3. 
Returns the first rule match + return: The hybrid pool name that site belongs to based on the Account Request + conditions + """ + hybrid_pool = "" + all_pools = jingrow.get_all("Hybrid Saas Pool", {"app": account_request.saas_app}, pluck="name") + ar_rules = jingrow.get_all( + "Account Request Rules", + {"parent": ("in", all_pools)}, + ["parent", "field", "condition", "value"], + group_by="parent", + ) + + for rule in ar_rules: + eval_locals = eval_locals = dict( + account_request=account_request, + ) + + if jingrow.safe_eval( + f"account_request.{rule.field} {rule.condition} '{rule.value}'", None, eval_locals + ): + hybrid_pool = rule.parent + return hybrid_pool # noqa: RET504 + + return hybrid_pool + + +@jingrow.whitelist(allow_guest=True) +def check_subdomain_availability(subdomain, app): + """ + Checks if subdomain is available to create a new site + """ + # Only for JERP domains + + if len(subdomain) <= 4: + return False + + banned_domains = jingrow.get_all("Blocked Domain", {"block_for_all": 1}, pluck="name") + if banned_domains and subdomain in banned_domains: + return False + + exists = bool( + jingrow.db.exists("Blocked Domain", {"name": subdomain, "root_domain": get_jerp_domain()}) + or jingrow.db.exists( + "Site", + { + "subdomain": subdomain, + "domain": get_saas_domain(app), + "status": ("!=", "Archived"), + }, + ) + ) + if exists: + return False + + return True + + +@jingrow.whitelist(allow_guest=True) +def validate_account_request(key): + if not key: + jingrow.throw("Request Key not provided") + + app = jingrow.db.get_value("Account Request", {"request_key": key}, "saas_app") + app_info = jingrow.db.get_value("Saas Setup Account Generator", app, ["headless", "route"], as_dict=True) + + if not app_info: + jingrow.throw("App configurations are missing! 
Please contact support") + + if app_info.headless: + headless_setup_account(key) + else: + jingrow.local.response["type"] = "redirect" + jingrow.local.response["location"] = f"/{app_info.route}?key={key}" + + +@jingrow.whitelist(allow_guest=True) +def setup_account(key, business_data=None): + """ + Includes the data collection step in setup-account.html + """ + account_request = get_account_request_from_key(key) + if not account_request: + jingrow.throw("Invalid or Expired Key") + + capture( + "init_server_setup_account", + "fc_saas", + account_request.get_site_name(), + ) + jingrow.set_user("Administrator") + + if business_data: + business_data = jingrow.parse_json(business_data) + + if isinstance(business_data, dict): + business_data = { + key: business_data.get(key) + for key in [ + "company", + "no_of_employees", + "industry", + "no_of_users", + "designation", + "phone_number", + "referral_source", + "agreed_to_partner_consent", + ] + } + + account_request.update(business_data) + account_request.save(ignore_permissions=True) + + create_marketplace_subscription(account_request) + capture( + "completed_server_setup_account", + "fc_saas", + account_request.get_site_name(), + ) + + +@jingrow.whitelist(allow_guest=True) +def headless_setup_account(key): + """ + Ignores the data collection step in setup-account.html + """ + account_request = get_account_request_from_key(key) + if not account_request: + jingrow.throw("Invalid or Expired Key") + + capture( + "init_server_setup_account", + "fc_saas", + account_request.get_site_name(), + ) + jingrow.set_user("Administrator") + + create_marketplace_subscription(account_request) + # create team and enable the subscriptions for site + capture( + "completed_server_setup_account", + "fc_saas", + account_request.get_site_name(), + ) + + jingrow.local.response["type"] = "redirect" + jingrow.local.response["location"] = f"/prepare-site?key={key}&app={account_request.saas_app}" + + +def create_marketplace_subscription(account_request): + """ + Create team, subscription for site and Saas Subscription + """ + team_pg = create_team(account_request) + site_name = jingrow.db.get_value("Site", {"account_request": account_request.name}) + if site_name: + jingrow.db.set_value("Site", site_name, "team", team_pg.name) + + subscription = jingrow.db.exists("Subscription", {"document_name": site_name}) + if subscription: + jingrow.db.set_value("Subscription", subscription, "team", team_pg.name) + + marketplace_subscriptions = jingrow.get_all( + "Subscription", + {"document_type": "Marketplace App", "site": site_name, "enabled": 0}, + pluck="name", + ) + for subscription in marketplace_subscriptions: + jingrow.db.set_value( + "Subscription", + subscription, + {"enabled": 1, "team": team_pg.name}, + ) + + jingrow.set_user(team_pg.user) + jingrow.local.login_manager.login_as(team_pg.user) + + return site_name + + +def create_team(account_request, get_stripe_id=False): + """ + Create team and return pg + """ + email = account_request.email + + if not jingrow.db.exists("Team", {"user": email}): + team_pg = Team.create_new( + account_request, + account_request.first_name, + account_request.last_name, + country=account_request.country, + is_us_eu=account_request.is_us_eu, + via_jerp=True, + user_exists=jingrow.db.exists("User", email), + ) + else: + team_pg = jingrow.get_pg("Team", {"user": email}) + + if get_stripe_id: + return team_pg.stripe_customer_id + + return team_pg + + +@jingrow.whitelist(allow_guest=True) +def get_site_status(key, app=None): + """ + return: Site 
status + """ + account_request = get_account_request_from_key(key) + if not account_request: + jingrow.throw("Invalid or Expired Key") + + domain = get_saas_domain(app) if app else get_jerp_domain() + + site = jingrow.db.get_value( + "Site", + {"subdomain": account_request.subdomain, "domain": domain, "status": "Active"}, + ["status", "subdomain", "name"], + as_dict=1, + ) + if site: + capture("completed_site_allocation", "fc_saas", site.name) + return site + return {"status": "Pending"} + + +@jingrow.whitelist(allow_guest=True) +def get_site_url_and_sid(key, app=None): + """ + return: Site url and session id for login-redirect + """ + account_request = get_account_request_from_key(key) + if not account_request: + jingrow.throw("Invalid or Expired Key") + + domain = get_saas_domain(app) if app else get_jerp_domain() + + name = jingrow.db.get_value("Site", {"subdomain": account_request.subdomain, "domain": domain}) + site: "Site" = jingrow.get_pg("Site", name) + if site.additional_system_user_created: + return site.login_as_team() + return site.login_as_admin() diff --git a/jcloud/api/security.py b/jcloud/api/security.py new file mode 100644 index 0000000..a26d000 --- /dev/null +++ b/jcloud/api/security.py @@ -0,0 +1,84 @@ +import jingrow +from jingrow.utils import get_datetime + +from jcloud.agent import Agent +from jcloud.api.server import all as get_all_servers + + +@jingrow.whitelist() +def get_servers(server_filter): + servers = get_all_servers(server_filter=server_filter) + + for server in servers: + security_updates_count = jingrow.db.count("Security Update", {"server": server.name}) + server["security_updates_status"] = "Up to date" + + if security_updates_count != 0: + server[ + "security_updates_status" + ] = f"{security_updates_count} security update(s) available" + + return servers + + +@jingrow.whitelist() +def fetch_security_updates( + filters=None, order_by=None, limit_start=None, limit_page_length=None +): + return jingrow.get_all( + "Security Update", + filters=filters, + fields=["name", "package", "version", "priority", "priority_level", "datetime"], + order_by=order_by or "priority_level asc", + start=limit_start, + limit=limit_page_length, + ) + + +@jingrow.whitelist() +def get_security_update_details(update_id): + return jingrow.get_pg("Security Update", update_id).as_dict() + + +@jingrow.whitelist() +def fetch_ssh_sessions(server, start=0, limit=10): + return jingrow.get_all( + "SSH Session", + filters={"server": server}, + fields=["name", "user", "datetime"], + order_by="datetime desc", + start=start, + limit=limit, + ) + + +@jingrow.whitelist() +def fetch_ssh_session_logs(server): + logs_to_display = [] + ssh_logs = Agent(server=server).get("security/ssh_session_logs") + + for log in ssh_logs.get("logs", []): + if not log["name"].endswith(".timing"): + log["created_at"] = get_datetime(log["created"]).strftime("%Y-%m-%d %H-%M") + + splited_log = log["name"].split(".") + log["user"] = splited_log[1] + log["session_id"] = splited_log[2] + + logs_to_display.append(log) + + return logs_to_display + + +@jingrow.whitelist() +def fetch_ssh_session_activity(server, filename): + content = Agent(server=server).get(f"security/retrieve_ssh_session_log/{filename}") + splited_filename = filename.split(".") + session_user = splited_filename[1] + session_id = splited_filename[2] + + return { + "session_user": session_user, + "session_id": session_id, + "content": content.get("log_details", "Not Found"), + } diff --git a/jcloud/api/selfhosted.py b/jcloud/api/selfhosted.py new file 
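# A minimal standalone sketch of the session-log naming convention that
# fetch_ssh_session_logs and fetch_ssh_session_activity above rely on: names
# are split on "." with index 1 read as the user and index 2 as the session
# id, and companion ".timing" files are skipped. The "<prefix>.<user>.<session_id>"
# shape is inferred from those indices; the sample filenames are hypothetical.
def parse_ssh_session_filename(filename: str) -> dict | None:
    if filename.endswith(".timing"):
        return None  # timing files are ignored, mirroring the endswith check above
    parts = filename.split(".")
    return {"user": parts[1], "session_id": parts[2]}

print(parse_ssh_session_filename("session.alice.5f2c1"))         # {'user': 'alice', 'session_id': '5f2c1'}
print(parse_ssh_session_filename("session.alice.5f2c1.timing"))  # None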
mode 100644 index 0000000..997adfc --- /dev/null +++ b/jcloud/api/selfhosted.py @@ -0,0 +1,173 @@ +import time + +import jingrow +from dns.resolver import Resolver +from jingrow.utils import strip + +from jcloud.api.server import plans +from jcloud.api.site import NAMESERVERS +from jcloud.runner import Ansible +from jcloud.utils import get_current_team + + +@jingrow.whitelist() +def new(server): + server_details = jingrow._dict(server) + + team = get_current_team(get_pg=True) + validate_team(team) + + proxy_server = get_proxy_server_for_cluster() + + return create_self_hosted_server(server_details, team, proxy_server) + + +def create_self_hosted_server(server_details, team, proxy_server): + try: + self_hosted_server = jingrow.new_pg( + "Self Hosted Server", + **{ + "ip": strip(server_details.get("app_public_ip", "")), + "private_ip": strip(server_details.get("app_private_ip", "")), + "mariadb_ip": strip(server_details.get("db_public_ip", "")), + "mariadb_private_ip": strip(server_details.get("db_private_ip", "")), + "title": server_details.title, + "proxy_server": proxy_server, + "proxy_created": True, + "different_database_server": True, + "team": team.name, + "plan": server_details.plan["name"], + "database_plan": server_details.plan["name"], + "new_server": True, + }, + ).insert() + except jingrow.DuplicateEntryError as e: + # Exception return tupple like ('Self Hosted Server', 'SHS-00018.cloud.jcloudonprem.com') + server_name = e.args[1] + return server_name + + return self_hosted_server.name + + +def validate_team(team): + if not team: + jingrow.throw("You must be part of a team to create a new server") + + if not team.enabled: + jingrow.throw("You cannot create a new server because your account is disabled") + + if not team.self_hosted_servers_enabled: + jingrow.throw( + "You cannot create a new server because Hybrid Cloud is disabled for your account. Please contact support to enable it." 
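# create_self_hosted_server above reads a fixed set of keys from the incoming
# payload, so new(server) is effectively expected to receive something shaped
# like the dict below. The field names come from the lookups above; every
# value here is hypothetical.
server = {
    "title": "Acme Production",
    "app_public_ip": "203.0.113.10",
    "app_private_ip": "10.0.0.10",
    "db_public_ip": "203.0.113.11",
    "db_private_ip": "10.0.0.11",
    # plan is a dict: both plan and database_plan are set from plan["name"]
    "plan": {"name": "Self Hosted Basic"},
}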
+ ) + + +def get_proxy_server_for_cluster(cluster=None): + cluster = get_hybrid_cluster() if not cluster else cluster + + return jingrow.get_all("Proxy Server", {"cluster": cluster}, pluck="name")[0] + + +def get_hybrid_cluster(): + return jingrow.db.get_value("Cluster", {"hybrid": 1}, "name") + + +@jingrow.whitelist() +def sshkey(): + return jingrow.db.get_value("SSH Key", {"enabled": 1, "default": 1}, "public_key") + + +@jingrow.whitelist() +def verify(server): + server_pg = jingrow.get_pg("Self Hosted Server", server) + + app_server_verified = verify_server("app", server_pg) + db_server_verified = verify_server("db", server_pg) + + if app_server_verified and db_server_verified: + server_pg.check_minimum_specs() + + server_pg.status = "Pending" + server_pg.save() + + server_pg.reload() + server_pg.create_database_server() + + server_pg.reload() + server_pg.create_application_server() + return True + + return False + + +def verify_server(server_type, server_pg): + ping = Ansible( + playbook="ping.yml", + server=jingrow._dict( + { + "pagetype": "Self Hosted Server", + "name": server_pg.name, + "ssh_user": server_pg.ssh_user, + "ssh_port": server_pg.ssh_port, + "ip": server_pg.ip if server_type == "app" else server_pg.mariadb_ip, + } + ), + ) + result = ping.run() + + if result.status == "Success": + server_pg.validate_private_ip(result.name, server_type=server_type) + + server_pg.fetch_system_specifications(result.name, server_type=server_type) + server_pg.reload() + + return True + + return False + + +@jingrow.whitelist() +def setup(server): + server_pg = jingrow.get_pg("Self Hosted Server", server) + server_pg.start_setup = True + server_pg.save() + server_pg.setup_server() + time.sleep(1) + + +@jingrow.whitelist() +def get_plans(): + server_plan = plans("Self Hosted Server") + return server_plan + + +@jingrow.whitelist() +def check_dns(domain, ip): + try: + resolver = Resolver(configure=False) + resolver.nameservers = NAMESERVERS + domain_ip = resolver.query(domain.strip(), "A")[0].to_text() + if domain_ip == ip: + return True + except Exception: + return False + return False + + +@jingrow.whitelist() +def options_for_new(): + return {"plans": get_plans(), "ssh_key": sshkey()} + + +@jingrow.whitelist() +def create_and_verify_selfhosted(server): + self_hosted_server_name = new(server) + + if verify(self_hosted_server_name): + setup(self_hosted_server_name) + return jingrow.get_value("Self Hosted Server", self_hosted_server_name, "server") + + else: + jingrow.throw( + "Server verification failed. Please check the server details and try again." 
+ ) diff --git a/jcloud/api/server.py b/jcloud/api/server.py new file mode 100644 index 0000000..78c251c --- /dev/null +++ b/jcloud/api/server.py @@ -0,0 +1,564 @@ +# Copyright (c) 2022, JINGROW +# For license information, please see license.txt + +from __future__ import annotations + +from datetime import datetime +from datetime import timezone as tz +from typing import TYPE_CHECKING + +import jingrow +import requests +from jingrow.utils import convert_utc_to_timezone, flt +from jingrow.utils.password import get_decrypted_password + +from jcloud.api.bench import all as all_benches +from jcloud.api.site import protected +from jcloud.jcloud.pagetype.site_plan.plan import Plan +from jcloud.jcloud.pagetype.team.team import get_child_team_members +from jcloud.utils import get_current_team + +if TYPE_CHECKING: + from jcloud.jcloud.pagetype.cluster.cluster import Cluster + + +def poly_get_pg(doctypes, name): + for pagetype in doctypes: + if jingrow.db.exists(pagetype, name): + return jingrow.get_pg(pagetype, name) + return jingrow.get_pg(doctypes[-1], name) + + +MOUNTPOINT_REGEX = "(/|/opt/volumes/mariadb|/opt/volumes/benches)" + + +@jingrow.whitelist() +def all(server_filter=None): # noqa: C901 + if server_filter is None: + server_filter = {"server_type": "", "tag": ""} + + team = get_current_team() + child_teams = [team.name for team in get_child_team_members(team)] + teams = [team, *child_teams] + + db_server = jingrow.qb.PageType("Database Server") + app_server = jingrow.qb.PageType("Server") + res_tag = jingrow.qb.PageType("Resource Tag") + + if server_filter["server_type"] != "Database Servers": + app_server_query = ( + jingrow.qb.from_(app_server) + .select( + app_server.name, + app_server.title, + app_server.status, + app_server.creation, + app_server.cluster, + ) + .where(((app_server.team).isin(teams)) & (app_server.status != "Archived")) + ) + + if server_filter["tag"]: + app_server_query = app_server_query.inner_join(res_tag).on( + (res_tag.parent == app_server.name) & (res_tag.tag_name == server_filter["tag"]) + ) + + if server_filter["server_type"] != "App Servers": + database_server_query = ( + jingrow.qb.from_(db_server) + .select( + db_server.name, + db_server.title, + db_server.status, + db_server.creation, + db_server.cluster, + ) + .where(((db_server.team).isin(teams)) & (db_server.status != "Archived")) + ) + + if server_filter["tag"]: + database_server_query = database_server_query.inner_join(res_tag).on( + (res_tag.parent == db_server.name) & (res_tag.tag_name == server_filter["tag"]) + ) + + if server_filter["server_type"] == "App Servers": + query = app_server_query + elif server_filter["server_type"] == "Database Servers": + query = database_server_query + else: + query = app_server_query + database_server_query + + # union isn't supported in qb for run method + # http://git.jingrow.com:3000/jingrow/jingrow/issues/15609 + servers = jingrow.db.sql(query.get_sql(), as_dict=True) + for server in servers: + server_plan_name = jingrow.get_value("Server", server.name, "plan") + server["plan"] = jingrow.get_pg("Server Plan", server_plan_name) if server_plan_name else None + server["app_server"] = f"f{server.name[1:]}" + server["tags"] = jingrow.get_all("Resource Tag", {"parent": server.name}, pluck="tag_name") + server["region_info"] = jingrow.db.get_value( + "Cluster", server.cluster, ["title", "image"], as_dict=True + ) + return servers + + +@jingrow.whitelist() +def server_tags(): + team = get_current_team() + return jingrow.get_all("Jcloud Tag", {"team": team, 
"pagetype_name": "Server"}, pluck="tag") + + +@jingrow.whitelist() +@protected(["Server", "Database Server"]) +def get(name): + server = poly_get_pg(["Server", "Database Server"], name) + return { + "name": server.name, + "title": server.title, + "status": server.status, + "team": server.team, + "app_server": server.name + if server.is_self_hosted + else f"f{server.name[1:]}", # Don't use `f` series if self hosted + "region_info": jingrow.db.get_value( + "Cluster", server.cluster, ["name", "title", "image"], as_dict=True + ), + "server_tags": [{"name": x.tag, "tag": x.tag_name} for x in server.tags], + "tags": jingrow.get_all("Jcloud Tag", {"team": server.team, "pagetype_name": "Server"}, ["name", "tag"]), + "type": "database-server" if server.meta.name == "Database Server" else "server", + } + + +@jingrow.whitelist() +@protected(["Server", "Database Server"]) +def overview(name): + server = poly_get_pg(["Server", "Database Server"], name) + plan = jingrow.get_pg("Server Plan", server.plan) if server.plan else None + if plan: + # override plan disk size with the actual disk size + # TODO: Remove this once we remove old dashboard + plan.disk = jingrow.db.get_value("Virtual Machine", name, "disk_size") + + return { + "plan": plan if plan else None, + "info": { + "owner": jingrow.db.get_value( + "User", + jingrow.get_value("Team", server.team, "user"), + ["first_name", "last_name", "user_image"], + as_dict=True, + ), + "created_on": server.creation, + }, + } + + +@jingrow.whitelist() +@protected(["Server", "Database Server"]) +def archive(name): + server = poly_get_pg(["Server", "Database Server"], name) + server.drop_server() + + +@jingrow.whitelist() +def new(server): + team = get_current_team(get_pg=True) + if not team.enabled: + jingrow.throw("You cannot create a new server because your account is disabled") + + cluster: Cluster = jingrow.get_pg("Cluster", server["cluster"]) + + db_plan = jingrow.get_pg("Server Plan", server["db_plan"]) + db_server, job = cluster.create_server("Database Server", server["title"], db_plan, team=team.name) + + proxy_server = jingrow.get_all( + "Proxy Server", + {"status": "Active", "cluster": cluster.name, "is_primary": True}, + limit=1, + )[0] + + # to be used by app server + cluster.database_server = db_server.name + cluster.proxy_server = proxy_server.name + + app_plan = jingrow.get_pg("Server Plan", server["app_plan"]) + app_server, job = cluster.create_server("Server", server["title"], app_plan, team=team.name) + + return {"server": app_server.name, "job": job.name} + + +@jingrow.whitelist() +@protected(["Server", "Database Server"]) +def usage(name): + query_map = { + "vcpu": ( + f"""((count(count(node_cpu_seconds_total{{instance="{name}",job="node"}}) by (cpu))) - avg(sum by (mode)(rate(node_cpu_seconds_total{{mode='idle',instance="{name}",job="node"}}[120s])))) / count(count(node_cpu_seconds_total{{instance="{name}",job="node"}}) by (cpu))""", + lambda x: x, + ), + "disk": ( + f"""sum(node_filesystem_size_bytes{{instance="{name}", job="node", mountpoint=~"{MOUNTPOINT_REGEX}"}} - node_filesystem_avail_bytes{{instance="{name}", job="node", mountpoint=~"{MOUNTPOINT_REGEX}"}}) by ()/ (1024 * 1024 * 1024)""", + lambda x: x, + ), + "memory": ( + f"""(node_memory_MemTotal_bytes{{instance="{name}",job="node"}} - node_memory_MemFree_bytes{{instance="{name}",job="node"}} - (node_memory_Cached_bytes{{instance="{name}",job="node"}} + node_memory_Buffers_bytes{{instance="{name}",job="node"}})) / (1024 * 1024)""", + lambda x: x, + ), + } + + result = {} + for 
usage_type, query in query_map.items(): + response = prometheus_query(query[0], query[1], "Asia/Kolkata", 120, 120)["datasets"] + if response: + result[usage_type] = response[0]["values"][-1] + return result + + +@protected(["Server", "Database Server"]) +def total_resource(name): + query_map = { + "vcpu": ( + f"""(count(count(node_cpu_seconds_total{{instance="{name}",job="node"}}) by (cpu)))""", + lambda x: x, + ), + "disk": ( + f"""sum(node_filesystem_size_bytes{{instance="{name}", job="node", mountpoint=~"{MOUNTPOINT_REGEX}"}}) by () / (1024 * 1024 * 1024)""", + lambda x: x, + ), + "memory": ( + f"""(node_memory_MemTotal_bytes{{instance="{name}",job="node"}}) / (1024 * 1024)""", + lambda x: x, + ), + } + + result = {} + for usage_type, query in query_map.items(): + response = prometheus_query(query[0], query[1], "Asia/Kolkata", 120, 120)["datasets"] + if response: + result[usage_type] = response[0]["values"][-1] + return result + + +def calculate_swap(name): + query_map = { + "swap_used": ( + f"""((node_memory_SwapTotal_bytes{{instance="{name}",job="node"}} - node_memory_SwapFree_bytes{{instance="{name}",job="node"}}) / node_memory_SwapTotal_bytes{{instance="{name}",job="node"}}) * 100""", + lambda x: x, + ), + "swap": ( + f"""node_memory_SwapTotal_bytes{{instance="{name}",job="node"}} / (1024 * 1024 * 1024)""", + lambda x: x, + ), + "required": ( + f"""( + (node_memory_MemTotal_bytes{{instance="{name}",job="node"}} + + node_memory_SwapTotal_bytes{{instance="{name}",job="node"}} + ) - + (node_memory_MemFree_bytes{{instance="{name}",job="node"}} + + node_memory_SwapFree_bytes{{instance="{name}",job="node"}} + + node_memory_Cached_bytes{{instance="{name}",job="node"}} + + node_memory_Buffers_bytes{{instance="{name}",job="node"}} + + node_memory_SwapCached_bytes{{instance="{name}",job="node"}} + ) + ) / + (1024 * 1024 * 1024)""", + lambda x: x, + ), + } + + result = {} + for usage_type, query in query_map.items(): + response = prometheus_query(query[0], query[1], "Asia/Kolkata", 120, 120)["datasets"] + if response: + result[usage_type] = response[0]["values"][-1] + return result + + +@jingrow.whitelist() +@protected(["Server", "Database Server"]) +def analytics(name, query, timezone, duration): + timespan, timegrain = get_timespan_timegrain(duration) + + query_map = { + "cpu": ( + f"""sum by (mode)(rate(node_cpu_seconds_total{{instance="{name}", job="node"}}[{timegrain}s])) * 100""", + lambda x: x["mode"], + ), + "network": ( + f"""rate(node_network_receive_bytes_total{{instance="{name}", job="node", device=~"ens.*"}}[{timegrain}s]) * 8""", + lambda x: x["device"], + ), + "iops": ( + f"""rate(node_disk_reads_completed_total{{instance="{name}", job="node"}}[{timegrain}s])""", + lambda x: x["device"], + ), + "space": ( + f"""100 - ((node_filesystem_avail_bytes{{instance="{name}", job="node", mountpoint=~"{MOUNTPOINT_REGEX}"}} * 100) / node_filesystem_size_bytes{{instance="{name}", job="node", mountpoint=~"{MOUNTPOINT_REGEX}"}})""", + lambda x: x["mountpoint"], + ), + "loadavg": ( + f"""{{__name__=~"node_load1|node_load5|node_load15", instance="{name}", job="node"}}""", + lambda x: f"Load Average {x['__name__'][9:]}", + ), + "memory": ( + f"""node_memory_MemTotal_bytes{{instance="{name}",job="node"}} - node_memory_MemFree_bytes{{instance="{name}",job="node"}} - (node_memory_Cached_bytes{{instance="{name}",job="node"}} + node_memory_Buffers_bytes{{instance="{name}",job="node"}})""", + lambda x: "Used", + ), + "database_uptime": ( + f"""mysql_up{{instance="{name}",job="mariadb"}}""", + lambda 
x: "Uptime", + ), + "database_commands_count": ( + f"""sum(round(increase(mysql_global_status_commands_total{{instance='{name}', command=~"select|update|insert|delete|begin|commit|rollback"}}[{timegrain}s]))) by (command)""", + lambda x: x["command"], + ), + "database_connections": ( + f"""{{__name__=~"mysql_global_status_threads_connected|mysql_global_variables_max_connections", instance="{name}"}}""", + lambda x: "Max Connections" + if x["__name__"] == "mysql_global_variables_max_connections" + else "Connected Clients", + ), + "innodb_bp_size": ( + f"""mysql_global_variables_innodb_buffer_pool_size{{instance='{name}'}}""", + lambda x: "Buffer Pool Size", + ), + "innodb_bp_size_of_total_ram": ( + f"""avg by (instance) ((mysql_global_variables_innodb_buffer_pool_size{{instance=~"{name}"}} * 100)) / on (instance) (avg by (instance) (node_memory_MemTotal_bytes{{instance=~"{name}"}}))""", + lambda x: "Buffer Pool Size of Total Ram", + ), + "innodb_bp_miss_percent": ( + f""" +avg by (instance) ( + rate(mysql_global_status_innodb_buffer_pool_reads{{instance=~"{name}"}}[{timegrain}s]) + / + rate(mysql_global_status_innodb_buffer_pool_read_requests{{instance=~"{name}"}}[{timegrain}s]) +) +""", + lambda x: "Buffer Pool Miss Percentage", + ), + "innodb_avg_row_lock_time": ( + f"""(rate(mysql_global_status_innodb_row_lock_time{{instance="{name}"}}[{timegrain}s]) / 1000)/rate(mysql_global_status_innodb_row_lock_waits{{instance="{name}"}}[{timegrain}s])""", + lambda x: "Avg Row Lock Time", + ), + } + + return prometheus_query(query_map[query][0], query_map[query][1], timezone, timespan, timegrain) + + +@jingrow.whitelist() +@protected(["Server", "Database Server"]) +def get_request_by_site(name, query, timezone, duration): + from jcloud.api.analytics import ResourceType, get_request_by_ + + timespan, timegrain = get_timespan_timegrain(duration) + + return get_request_by_(name, query, timezone, timespan, timegrain, ResourceType.SERVER) + + +@jingrow.whitelist() +@protected(["Server", "Database Server"]) +def get_slow_logs_by_site(name, query, timezone, duration, normalize=False): + from jcloud.api.analytics import ResourceType, get_slow_logs + + timespan, timegrain = get_timespan_timegrain(duration) + + return get_slow_logs(name, query, timezone, timespan, timegrain, ResourceType.SERVER, normalize) + + +def prometheus_query(query, function, timezone, timespan, timegrain): + monitor_server = jingrow.db.get_single_value("Jcloud Settings", "monitor_server") + if not monitor_server: + return {"datasets": [], "labels": []} + + url = f"https://{monitor_server}/prometheus/api/v1/query_range" + password = get_decrypted_password("Monitor Server", monitor_server, "grafana_password") + + end = datetime.utcnow().replace(tzinfo=tz.utc) + start = jingrow.utils.add_to_date(end, seconds=-timespan) + query = { + "query": query, + "start": start.timestamp(), + "end": end.timestamp(), + "step": f"{timegrain}s", + } + + response = requests.get(url, params=query, auth=("jingrow", password)).json() + + datasets = [] + labels = [] + + if not response["data"]["result"]: + return {"datasets": datasets, "labels": labels} + + for timestamp, _ in response["data"]["result"][0]["values"]: + labels.append( + convert_utc_to_timezone( + datetime.fromtimestamp(timestamp, tz=tz.utc).replace(tzinfo=None), timezone + ) + ) + + for index in range(len(response["data"]["result"])): + dataset = { + "name": function(response["data"]["result"][index]["metric"]), + "values": [], + } + for _, value in response["data"]["result"][index]["values"]: 
+ dataset["values"].append(flt(value, 2)) + datasets.append(dataset) + + return {"datasets": datasets, "labels": labels} + + +@jingrow.whitelist() +def options(): + if not get_current_team(get_pg=True).servers_enabled: + jingrow.throw("Servers feature is not yet enabled on your account") + regions = jingrow.get_all( + "Cluster", + {"cloud_provider": ("!=", "Generic"), "public": True}, + ["name", "title", "image", "beta"], + ) + return { + "regions": regions, + "app_plans": plans("Server"), + "db_plans": plans("Database Server"), + } + + +@jingrow.whitelist() +def plans(name, cluster=None, platform="x86_64"): + return Plan.get_plans( + pagetype="Server Plan", + fields=[ + "name", + "title", + "price_usd", + "price_cny", + "vcpu", + "memory", + "disk", + "cluster", + "instance_type", + "premium", + ], + filters={"server_type": name, "platform": platform, "cluster": cluster} + if cluster + else { + "server_type": name, + "platform": platform, + }, + ) + + +@jingrow.whitelist() +def play(play): + play = jingrow.get_pg("Ansible Play", play) + play = play.as_dict() + whitelisted_fields = [ + "name", + "play", + "creation", + "status", + "start", + "end", + "duration", + ] + for key in list(play.keys()): + if key not in whitelisted_fields: + play.pop(key, None) + + play.steps = jingrow.get_all( + "Ansible Task", + filters={"play": play.name}, + fields=["task", "status", "start", "end", "duration", "output"], + order_by="creation", + ) + return play + + +@jingrow.whitelist() +@protected(["Server", "Database Server"]) +def change_plan(name, plan): + poly_get_pg(["Server", "Database Server"], name).change_plan(plan) + + +@jingrow.whitelist() +@protected(["Server", "Database Server"]) +def jcloud_jobs(name): + jobs = [] + for job in jingrow.get_all("Jcloud Job", {"server": name}, pluck="name"): + jobs.append(jingrow.get_pg("Jcloud Job", job).detail()) + return jobs + + +@jingrow.whitelist() +@protected(["Server", "Database Server"]) +def jobs(filters=None, order_by=None, limit_start=None, limit_page_length=None): + jobs = jingrow.get_all( + "Agent Job", + fields=["name", "job_type", "creation", "status", "start", "end", "duration"], + filters=filters, + start=limit_start, + limit=limit_page_length, + order_by=order_by or "creation desc", + ) + + for job in jobs: + job["status"] = "Pending" if job["status"] == "Undelivered" else job["status"] + + return jobs + + +@jingrow.whitelist() +@protected(["Server", "Database Server"]) +def plays(filters=None, order_by=None, limit_start=None, limit_page_length=None): + return jingrow.get_all( + "Ansible Play", + fields=["name", "play", "creation", "status", "start", "end", "duration"], + filters=filters, + start=limit_start, + limit=limit_page_length, + order_by=order_by or "creation desc", + ) + + +@jingrow.whitelist() +@protected("Server") +def get_title_and_cluster(name): + return jingrow.db.get_value("Server", name, ["title", "cluster"], as_dict=True) + + +@jingrow.whitelist() +@protected(["Server", "Database Server"]) +def groups(name): + server = poly_get_pg(["Server", "Database Server"], name) + if server.pagetype == "Database Server": + app_server = jingrow.db.get_value("Server", {"database_server": server.name}, "name") + server = jingrow.get_pg("Server", app_server) + + return all_benches(server=server.name) + + +@jingrow.whitelist() +@protected(["Server", "Database Server"]) +def reboot(name): + return poly_get_pg(["Server", "Database Server"], name).reboot() + + +@jingrow.whitelist() +@protected(["Server", "Database Server"]) +def rename(name, 
title): + pg = poly_get_pg(["Server", "Database Server"], name) + pg.title = title + pg.save() + + +def get_timespan_timegrain(duration: str) -> tuple[int, int]: + timespan, timegrain = { + "1 Hour": (60 * 60, 2 * 60), + "6 Hour": (6 * 60 * 60, 5 * 60), + "24 Hour": (24 * 60 * 60, 30 * 60), + "7 Days": (7 * 24 * 60 * 60, 2 * 30 * 60), + "15 Days": (15 * 24 * 60 * 60, 3 * 30 * 60), + }[duration] + + return timespan, timegrain diff --git a/jcloud/api/site.py b/jcloud/api/site.py new file mode 100644 index 0000000..e7fde8d --- /dev/null +++ b/jcloud/api/site.py @@ -0,0 +1,2412 @@ +# Copyright (c) 2019, JINGROW +# For license information, please see license.txt +from __future__ import annotations + +import json +from typing import TYPE_CHECKING + +import dns.exception +import jingrow +import requests +import wrapt +from boto3 import client +from botocore.exceptions import ClientError +from dns.resolver import Resolver +from jingrow.core.utils import find +from jingrow.desk.pagetype.tag.tag import add_tag +from jingrow.utils import flt, sbool, time_diff_in_hours +from jingrow.utils.password import get_decrypted_password +from jingrow.utils.user import is_system_user + +from jcloud.exceptions import ( + AAAARecordExists, + ConflictingCAARecord, + ConflictingDNSRecord, + MultipleARecords, + MultipleCNAMERecords, +) +from jcloud.jcloud.pagetype.agent_job.agent_job import job_detail +from jcloud.jcloud.pagetype.marketplace_app.marketplace_app import ( + get_plans_for_app, + get_total_installs_by_app, +) +from jcloud.jcloud.pagetype.remote_file.remote_file import get_remote_key +from jcloud.jcloud.pagetype.server.server import is_dedicated_server +from jcloud.jcloud.pagetype.site_plan.plan import Plan +from jcloud.jcloud.pagetype.site_update.site_update import benches_with_available_update +from jcloud.utils import ( + get_client_blacklisted_keys, + get_current_team, + get_jingrow_backups, + get_last_pg, + has_role, + log_error, + unique, +) + +if TYPE_CHECKING: + from jingrow.types import DF + + from jcloud.jcloud.pagetype.bench.bench import Bench + from jcloud.jcloud.pagetype.bench_app.bench_app import BenchApp + from jcloud.jcloud.pagetype.deploy_candidate.deploy_candidate import DeployCandidate + from jcloud.jcloud.pagetype.deploy_candidate_app.deploy_candidate_app import ( + DeployCandidateApp, + ) + + +NAMESERVERS = ["1.1.1.1", "1.0.0.1", "8.8.8.8", "8.8.4.4"] + + +def protected(doctypes): + """ + This decorator is stupid. It works in magical ways. It checks whether the + owner of the Pagetype (one of `doctypes`) is the same as the current team. + + The stupid magical part of this decorator is how it gets the name of the + Pagetype (see: `get_protected_pagetype_name`); in order of precedence: + 1. kwargs value with key `name` + 2. first value in kwargs value with key `filters` i.e. ≈ `kwargs['filters'].values()[0]` + 3. first value in the args tuple + 4. 
kwargs value with key `snake_case(doctypes[0])` + """ + + if not isinstance(doctypes, list): + doctypes = [doctypes] + + @wrapt.decorator + def wrapper(wrapped, instance, args, kwargs): + user_type = jingrow.session.data.user_type or jingrow.get_cached_value( + "User", jingrow.session.user, "user_type" + ) + if user_type == "System User": + return wrapped(*args, **kwargs) + + name = get_protected_pagetype_name(args, kwargs, doctypes) + if not name: + jingrow.throw("找不到名称,不允许API访问", jingrow.PermissionError) + + team = get_current_team() + for pagetype in doctypes: + owner = jingrow.db.get_value(pagetype, name, "team") + + if owner == team or has_role("Jcloud Support Agent"): + return wrapped(*args, **kwargs) + + jingrow.throw("不允许访问", jingrow.PermissionError) # noqa: RET503 + + return wrapper + + +def get_protected_pagetype_name(args: list, kwargs: dict, doctypes: list[str]): + # 1. Name from kwargs["name"] + if name := kwargs.get("name"): + return name + + # 2. Name from first value in filters + filters = kwargs.get("filters", {}) + if name := get_name_from_filters(filters): + return name + + # 3. Name from first value in args + if len(args) >= 1 and args[0]: + return args[0] + + if len(doctypes) == 0: + return None + + # 4. Name from snakecased first `doctypes` name + pagetype = doctypes[0] + key = pagetype.lower().replace(" ", "_") + return kwargs.get(key) + + +def get_name_from_filters(filters: dict): + values = [v for v in filters.values()] + if len(values) == 0: + return None + + value = values[0] + if isinstance(value, (int, str)): + return value + + return None + + +def _new(site, server: str | None = None, ignore_plan_validation: bool = False): + team = get_current_team(get_pg=True) + if not team.enabled: + jingrow.throw("您无法创建新站点,因为您的账户已被禁用") + + files = site.get("files", {}) + + apps = [{"app": app} for app in site["apps"]] + + group = get_group_for_new_site_and_set_localisation_app(site, apps) + domain = site.get("domain") + if not (domain and jingrow.db.exists("Root Domain", {"name": domain})): + jingrow.throw("站点没有根域名") + + cluster = site.get("cluster") or jingrow.db.get_single_value("Jcloud Settings", "cluster") + + proxy_servers = jingrow.get_all("Proxy Server Domain", {"domain": domain}, pluck="parent") + proxy_servers = jingrow.get_all( + "Proxy Server", + {"status": "Active", "name": ("in", proxy_servers)}, + pluck="name", + ) + proxy_servers = tuple(proxy_servers) if len(proxy_servers) > 1 else f"('{proxy_servers[0]}')" + + query_sub_str = "" + if server: + query_sub_str = f"AND server.name = '{server}'" + + bench = jingrow.db.sql( + f""" + SELECT + bench.name, bench.server, bench.cluster = '{cluster}' as in_primary_cluster + FROM + tabBench bench + LEFT JOIN + tabServer server + ON + bench.server = server.name + WHERE + server.proxy_server in {proxy_servers} AND + bench.status = "Active" AND + bench.group = '{site["group"]}' + {query_sub_str} + ORDER BY + in_primary_cluster DESC, server.use_for_new_sites DESC, bench.creation DESC + LIMIT 1 + """, + as_dict=True, + )[0] + plan = site["plan"] + app_plans = site.get("selected_app_plans") + if not ignore_plan_validation: + validate_plan(bench.server, plan) + + # 获取计划信息并计算到期日期 + plan_pg = jingrow.get_pg("Site Plan", plan) + # 获取计划周期(月) + billing_period_months = 1 # 默认为1个月 + + # 根据计划类型设置不同的周期 + if hasattr(plan_pg, "interval") and plan_pg.interval: + if plan_pg.interval == "Monthly": + billing_period_months = 1 + elif plan_pg.interval == "Annually": + billing_period_months = 12 + + # 计算到期日期 + site_end_date = 
jingrow.utils.add_months(jingrow.utils.today(), billing_period_months) + + site = jingrow.get_pg( + { + "pagetype": "Site", + "subdomain": site["name"], + "domain": domain, + "group": group, + "server": server, + "cluster": cluster, + "apps": apps, + "app_plans": app_plans, + "team": team.name, + "free": team.free_account, + "subscription_plan": plan, + "site_end_date": site_end_date, # 设置计算后的到期日期 + "remote_config_file": files.get("config"), + "remote_database_file": files.get("database"), + "remote_public_file": files.get("public"), + "remote_private_file": files.get("private"), + "skip_failing_patches": site.get("skip_failing_patches", False), + }, + ) + + if app_plans and len(app_plans) > 0: + subscription_docs = get_app_subscriptions(app_plans, team.name) + + # Set the secret keys for subscription in config + secret_keys = {f"sk_{s.document_name}": s.secret_key for s in subscription_docs} + site._update_configuration(secret_keys, save=False) + + site.insert(ignore_permissions=True) + + if app_plans and len(app_plans) > 0: + # Set site in subscription docs + for pg in subscription_docs: + pg.site = site.name + pg.save(ignore_permissions=True) + + return { + "site": site.name, + "job": jingrow.db.get_value( + "Agent Job", + filters={ + "site": site.name, + "job_type": ("in", ["New Site", "New Site from Backup"]), + }, + ), + } + + +def get_group_for_new_site_and_set_localisation_app(site, apps): + if not (localisation_country := site.get("localisation_country")): + return site.get("group") + + # if localisation country is selected, move site to a public bench with the same localisation app + localisation_app = jingrow.db.get_value( + "Marketplace Localisation App", {"country": localisation_country}, "marketplace_app" + ) + restricted_release_group_names = jingrow.db.get_all( + "Site Plan Release Group", + pluck="release_group", + filters={"parenttype": "Site Plan", "parentfield": "release_groups"}, + ) + ReleaseGroup = jingrow.qb.PageType("Release Group") + ReleaseGroupApp = jingrow.qb.PageType("Release Group App") + groups = ( + jingrow.qb.from_(ReleaseGroup) + .select(ReleaseGroup.name) + .join(ReleaseGroupApp) + .on(ReleaseGroup.name == ReleaseGroupApp.parent) + .where(ReleaseGroupApp.app == localisation_app) + .where(ReleaseGroup.public == 1) + .where(ReleaseGroup.enabled == 1) + .where(ReleaseGroup.name.notin(restricted_release_group_names)) + .where(ReleaseGroup.version == site.get("version")) + .run(pluck="name") + ) + if not groups: + jingrow.throw( + f"Localisation app for {jingrow.bold(localisation_country)} is not available for version {jingrow.bold(site.get('version'))}" + ) + + apps.append({"app": localisation_app}) + return groups[0] + + +def validate_plan(server, plan): + if ( + jingrow.db.get_value("Site Plan", plan, "price_cny") > 0 + or jingrow.db.get_value("Site Plan", plan, "dedicated_server_plan") == 1 + ): + return + if ( + jingrow.session.data.user_type == "System User" + or jingrow.db.get_value("Server", server, "team") == get_current_team() + ): + return + jingrow.throw("您不允许使用此方案") + + +@jingrow.whitelist() +def new(site): + site["domain"] = jingrow.db.get_single_value("Jcloud Settings", "domain") + + return _new(site) + + +def get_app_subscriptions(app_plans, team: str): + subscriptions = [] + + for app_name, plan_name in app_plans.items(): + is_free = jingrow.db.get_value("Marketplace App Plan", plan_name, "is_free") + if not is_free: + team = jingrow.get_pg("Team", team) + if not team.can_install_paid_apps(): + jingrow.throw( + "您不能在免费额度上安装付费应用。请购买额度后再尝试安装。" 
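# A small worked sketch of the expiry-date rule used in _new above: Monthly
# plans get 1 month, Annually plans get 12, and site_end_date is today plus
# that many calendar months. dateutil.relativedelta is used here only as a
# stand-in for jingrow.utils.add_months (an assumption for illustration).
from datetime import date
from dateutil.relativedelta import relativedelta

def site_end_date(interval: str, today: date) -> date:
    months = 12 if interval == "Annually" else 1  # default, including "Monthly", is one month
    return today + relativedelta(months=months)

print(site_end_date("Monthly", date(2025, 1, 31)))   # 2025-02-28 (clamped to month end)
print(site_end_date("Annually", date(2025, 1, 31)))  # 2026-01-31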
+ ) + + new_subscription = jingrow.get_pg( + { + "pagetype": "Subscription", + "document_type": "Marketplace App", + "document_name": app_name, + "plan_type": "Marketplace App Plan", + "plan": plan_name, + "enabled": 1, + "team": team, + } + ).insert(ignore_permissions=True) + + subscriptions.append(new_subscription) + + return subscriptions + + +@jingrow.whitelist() +@protected("Site") +def jobs(filters=None, order_by=None, limit_start=None, limit_page_length=None): + jobs = jingrow.get_all( + "Agent Job", + fields=["name", "job_type", "creation", "status", "start", "end", "duration"], + filters=filters, + start=limit_start, + limit=limit_page_length, + order_by=order_by or "creation desc", + ) + + for job in jobs: + job["status"] = "Pending" if job["status"] == "Undelivered" else job["status"] + + return jobs + + +@jingrow.whitelist() +def job(job): + job = jingrow.get_pg("Agent Job", job) + job = job.as_dict() + whitelisted_fields = [ + "name", + "job_type", + "creation", + "status", + "start", + "end", + "duration", + ] + for key in list(job.keys()): + if key not in whitelisted_fields: + job.pop(key, None) + + if job.status == "Undelivered": + job.status = "Pending" + + job.steps = jingrow.get_all( + "Agent Job Step", + filters={"agent_job": job.name}, + fields=["step_name", "status", "start", "end", "duration", "output"], + order_by="creation", + ) + return job + + +@jingrow.whitelist() +@protected("Site") +def running_jobs(name): + jobs = jingrow.get_all("Agent Job", filters={"status": ("in", ("Pending", "Running")), "site": name}) + return [job_detail(job.name) for job in jobs] + + +@jingrow.whitelist() +@protected("Site") +def backups(name): + available_offsite_backups = jingrow.db.get_single_value("Jcloud Settings", "offsite_backups_count") or 30 + fields = [ + "name", + "with_files", + "database_file", + "database_size", + "database_url", + "config_file_size", + "config_file_url", + "config_file", + "private_file", + "private_size", + "private_url", + "public_file", + "public_size", + "public_url", + "creation", + "status", + "offsite", + "remote_database_file", + "remote_public_file", + "remote_private_file", + "remote_config_file", + ] + latest_backups = jingrow.get_all( + "Site Backup", + fields=fields, + filters={"site": name, "files_availability": "Available", "offsite": 0}, + order_by="creation desc", + limit=10, + ) + offsite_backups = jingrow.get_all( + "Site Backup", + fields=fields, + filters={"site": name, "files_availability": "Available", "offsite": 1}, + order_by="creation desc", + limit_page_length=available_offsite_backups, + ) + return sorted(latest_backups + offsite_backups, key=lambda x: x["creation"], reverse=True) + + +@jingrow.whitelist() +@protected("Site") +def get_backup_link(name, backup, file): + try: + remote_file = jingrow.db.get_value("Site Backup", backup, f"remote_{file}_file") + return jingrow.get_pg("Remote File", remote_file).download_link + except ClientError: + log_error(title="Offsite Backup Response Exception") + + +@jingrow.whitelist() +@protected("Site") +def domains(name): + domains = jingrow.get_all( + "Site Domain", + fields=["name", "domain", "status", "retry_count", "redirect_to_primary"], + filters={"site": name}, + ) + host_name = jingrow.db.get_value("Site", name, "host_name") + primary = find(domains, lambda x: x.domain == host_name) + if primary: + primary.primary = True + domains.sort(key=lambda domain: not domain.primary) + return domains + + +@jingrow.whitelist() +def activities(filters=None, order_by=None, limit_start=None, 
limit_page_length=None): + # get all site activity except Backup by Administrator + SiteActivity = jingrow.qb.PageType("Site Activity") + activities = ( + jingrow.qb.from_(SiteActivity) + .select(SiteActivity.action, SiteActivity.reason, SiteActivity.creation, SiteActivity.owner) + .where(SiteActivity.site == filters["site"]) + .where((SiteActivity.action != "Backup") | (SiteActivity.owner != "Administrator")) + .orderby(SiteActivity.creation, order=jingrow.qb.desc) + .offset(limit_start) + .limit(limit_page_length) + .run(as_dict=True) + ) + + for activity in activities: + if activity.action == "Create": + activity.action = "Site Created" + + return activities + + +@jingrow.whitelist() +def app_details_for_new_public_site(): + fields = [ + "name", + "title", + "image", + "description", + "app", + "route", + "subscription_type", + {"sources": ["source", "version"]}, + {"localisation_apps": ["marketplace_app", "country"]}, + ] + + marketplace_apps = jingrow.qb.get_query( + "Marketplace App", + fields=fields, + filters={"status": "Published", "show_for_site_creation": 1}, + ).run(as_dict=True) + + marketplace_app_sources = [app["sources"][0]["source"] for app in marketplace_apps if app["sources"]] + + if not marketplace_app_sources: + return [] + + AppSource = jingrow.qb.PageType("App Source") + MarketplaceApp = jingrow.qb.PageType("Marketplace App") + app_source_details = ( + jingrow.qb.from_(AppSource) + .select( + AppSource.name, + AppSource.app, + AppSource.repository_url, + AppSource.repository, + AppSource.repository_owner, + AppSource.branch, + AppSource.team, + AppSource.public, + MarketplaceApp.title.as_("app_title"), + AppSource.jingrow, + ) + .join(MarketplaceApp) + .on(AppSource.app == MarketplaceApp.app) + .where(AppSource.name.isin(marketplace_app_sources)) + .run(as_dict=True) + ) + + total_installs_by_app = get_total_installs_by_app() + for app in marketplace_apps: + app["plans"] = get_plans_for_app(app.app) + app["total_installs"] = total_installs_by_app.get(app.app, 0) + source_detail = find(app_source_details, lambda x: x.app == app.app) + if source_detail: + app.update({**source_detail}) + + return marketplace_apps + + +@jingrow.whitelist() +def options_for_new(for_bench: str | None = None): # noqa: C901 + for_bench = str(for_bench) if for_bench else None + available_versions = get_available_versions(for_bench) + + unique_app_sources = [] + for version in available_versions: + for app_source in version.group.bench_app_sources: + if app_source not in unique_app_sources: + unique_app_sources.append(app_source) + + if for_bench: + app_source_details = jingrow.db.get_all( + "App Source", + [ + "name", + "app", + "repository_url", + "repository", + "repository_owner", + "branch", + "team", + "public", + "app_title", + "jingrow", + ], + filters={"name": ("in", unique_app_sources)}, + ) + + unique_apps = [] + app_source_details_grouped = {} + for app_source in app_source_details: + if app_source.app not in unique_apps: + unique_apps.append(app_source.app) + app_source_details_grouped[app_source.name] = app_source + + marketplace_apps = jingrow.db.get_all( + "Marketplace App", + fields=["title", "image", "description", "app", "route", "subscription_type"], + filters={"app": ("in", unique_apps)}, + ) + total_installs_by_app = get_total_installs_by_app() + marketplace_details = {} + + for app in unique_apps: + details = find(marketplace_apps, lambda x: x.app == app) + if details: + details["plans"] = get_plans_for_app(app) + details["total_installs"] = 
total_installs_by_app.get(app, 0) + marketplace_details[app] = details + + set_default_apps(app_source_details_grouped) + else: + app_source_details_grouped = app_details_for_new_public_site() + # app source details are all fetched from marketplace apps for public sites + marketplace_details = None + + return { + "versions": available_versions, + "domain": jingrow.db.get_single_value("Jcloud Settings", "domain"), + "marketplace_details": marketplace_details, + "app_source_details": app_source_details_grouped, + } + + +def set_default_apps(app_source_details_grouped): + jcloud_settings = jingrow.get_single("Jcloud Settings") + default_apps = jcloud_settings.get_default_apps() + + for app_source in app_source_details_grouped.values(): + if app_source["app"] in default_apps: + app_source["preinstalled"] = True + + +def get_available_versions(for_bench: str = None): # noqa + available_versions = [] + restricted_release_group_names = get_restricted_release_group_names() + + if for_bench: + version = jingrow.db.get_value("Release Group", for_bench, "version") + filters = {"name": version} + + release_group_filters = {"name": for_bench} + else: + filters = {"public": True, "status": ("!=", "End of Life")} + release_group_filters = { + "public": 1, + "enabled": 1, + "name": ( + "not in", + restricted_release_group_names, + ), # filter out restricted release groups + } + + versions = jingrow.db.get_all( + "Jingrow Version", + ["name", "default", "status", "number"], + filters, + order_by="number desc", + ) + + for version in versions: + release_group_filters["version"] = version.name + release_group = jingrow.db.get_value( + "Release Group", + fieldname=["name", "`default`", "title", "public"], + filters=release_group_filters, + order_by="creation desc", + as_dict=1, + ) + + if release_group: + version.group = release_group + if for_bench: + version.group.is_dedicated_server = is_dedicated_server( + jingrow.get_all( + "Release Group Server", + filters={"parent": release_group.name, "parenttype": "Release Group"}, + pluck="server", + limit=1, + )[0] + ) + + set_bench_and_clusters(version, for_bench) + + if version.group and version.group.bench and version.group.clusters: + available_versions.append(version) + + return available_versions + + +def get_restricted_release_group_names(): + return jingrow.db.get_all( + "Site Plan Release Group", + pluck="release_group", + filters={"parenttype": "Site Plan", "parentfield": "release_groups"}, + ) + + +def set_bench_and_clusters(version, for_bench): + # here we get the last created bench for the release group + # assuming the last created bench is the latest one + bench = jingrow.db.get_value( + "Bench", + filters={"status": "Active", "group": version.group.name}, + order_by="creation desc", + ) + if bench: + version.group.bench = bench + version.group.bench_app_sources = jingrow.db.get_all( + "Bench App", {"parent": bench, "app": ("!=", "jingrow")}, pluck="source" + ) + cluster_names = unique( + jingrow.db.get_all( + "Bench", + filters={"candidate": jingrow.db.get_value("Bench", bench, "candidate")}, + pluck="cluster", + ) + ) + clusters = jingrow.db.get_all( + "Cluster", + filters={"name": ("in", cluster_names)}, + fields=["name", "title", "image", "beta"], + ) + if not for_bench: + proxy_servers = jingrow.db.get_all( + "Proxy Server", + { + "cluster": ("in", cluster_names), + "is_primary": 1, + }, + ["name", "cluster"], + ) + + for cluster in clusters: + cluster.proxy_server = find(proxy_servers, lambda x: x.cluster == cluster.name) + + 
version.group.clusters = clusters + + +@jingrow.whitelist() +def get_domain(): + return jingrow.db.get_value("Jcloud Settings", "Jcloud Settings", ["domain"]) + + +@jingrow.whitelist() +def get_new_site_options(group: str | None = None): + team = get_current_team() + apps = set() + filters = {"enabled": True} + versions_filters = {"public": True} + + if group: # private bench + filters.update({"name": group, "team": team}) + else: + filters.update({"public": True}) + versions_filters.update({"status": ("!=", "End of Life")}) + + versions = jingrow.get_all( + "Jingrow Version", + ["name", "number", "default", "status"], + filters=versions_filters, + order_by="`default` desc, number desc", + ) + + for version in versions: + filters.update({"version": version.name}) + rg = jingrow.get_all( + "Release Group", + fields=["name", "`default`", "title"], + filters=filters, + limit=1, + ) + if not rg: + continue + rg = rg[0] + + benches = jingrow.get_all( + "Bench", + filters={"status": "Active", "group": rg.name}, + order_by="creation desc", + limit=1, + ) + if not benches: + continue + + bench_name = benches[0].name + bench_apps = jingrow.get_all("Bench App", {"parent": bench_name}, pluck="source") + app_sources = jingrow.get_all( + "App Source", + [ + "name", + "app", + "repository_url", + "repository", + "repository_owner", + "branch", + "team", + "public", + "app_title", + "jingrow", + ], + filters={"name": ("in", bench_apps)}, + or_filters={"public": True, "team": team}, + ) + rg["apps"] = sorted(app_sources, key=lambda x: bench_apps.index(x.name)) + + # Regions with latest update + cluster_names = unique( + jingrow.db.get_all( + "Bench", + filters={"candidate": jingrow.db.get_value("Bench", bench_name, "candidate")}, + pluck="cluster", + ) + ) + rg["clusters"] = jingrow.db.get_all( + "Cluster", + filters={"name": ("in", cluster_names), "public": True}, + fields=["name", "title", "image", "beta"], + ) + version["group"] = rg + apps.update([source.app for source in app_sources]) + + marketplace_apps = jingrow.db.get_all( + "Marketplace App", + fields=["title", "image", "description", "app", "route"], + filters={"app": ("in", list(apps))}, + ) + return { + "versions": versions, + "marketplace_apps": {row.app: row for row in marketplace_apps}, + } + + +@jingrow.whitelist() +def get_site_plans(): + plans = Plan.get_plans( + pagetype="Site Plan", + fields=[ + "name", + "plan_title", + "interval", + "price_usd", + "price_cny", + "cpu_time_per_day", + "max_storage_usage", + "max_database_usage", + "database_access", + "support_included", + "offsite_backups", + "private_benches", + "monitor_access", + "dedicated_server_plan", + "is_trial_plan", + "allow_downgrading_from_other_plan", + ], + # TODO: Remove later, temporary change because site plan has all document_type plans + filters={"document_type": "Site"}, + ) + + plan_names = [x.name for x in plans] + if len(plan_names) == 0: + return [] + + filtered_plans = [] + + SitePlan = jingrow.qb.PageType("Site Plan") + Bench = jingrow.qb.PageType("Bench") + ReleaseGroup = jingrow.qb.PageType("Release Group") + SitePlanReleaseGroup = jingrow.qb.PageType("Site Plan Release Group") + SitePlanAllowedApp = jingrow.qb.PageType("Site Plan Allowed App") + + plan_details_query = ( + jingrow.qb.from_(SitePlan) + .select(SitePlan.name, SitePlanReleaseGroup.release_group, SitePlanAllowedApp.app) + .left_join(SitePlanReleaseGroup) + .on(SitePlanReleaseGroup.parent == SitePlan.name) + .left_join(SitePlanAllowedApp) + .on(SitePlanAllowedApp.parent == SitePlan.name) + 
.where(SitePlan.name.isin(plan_names)) + ) + + plan_details_with_bench_query = ( + jingrow.qb.from_(plan_details_query) + .select( + plan_details_query.name, + plan_details_query.release_group, + plan_details_query.app, + Bench.cluster, + ReleaseGroup.version, + ) + .left_join(Bench) + .on(Bench.group == plan_details_query.release_group) + .left_join(ReleaseGroup) + .on(ReleaseGroup.name == plan_details_query.release_group) + .where(Bench.status == "Active") + ) + + plan_details = plan_details_with_bench_query.run(as_dict=True) + plan_details_dict = get_plan_details_dict(plan_details) + + for plan in plans: + if plan.name in plan_details_dict: + plan.clusters = plan_details_dict[plan.name]["clusters"] + plan.allowed_apps = plan_details_dict[plan.name]["allowed_apps"] + plan.bench_versions = plan_details_dict[plan.name]["bench_versions"] + plan.restricted_plan = True + else: + plan.clusters = [] + plan.allowed_apps = [] + plan.bench_versions = [] + plan.restricted_plan = False + filtered_plans.append(plan) + + return filtered_plans + + +def get_plan_details_dict(plan_details): + plan_details_dict = {} + + for plan in plan_details: + if plan["name"] not in plan_details_dict: + plan_details_dict[plan["name"]] = { + "allowed_apps": [], + "release_groups": [], + "clusters": [], + "bench_versions": [], + } + if ( + plan["release_group"] + and plan["release_group"] not in plan_details_dict[plan["name"]]["release_groups"] + ): + plan_details_dict[plan["name"]]["release_groups"].append(plan["release_group"]) + if plan["app"] and plan["app"] not in plan_details_dict[plan["name"]]["allowed_apps"]: + plan_details_dict[plan["name"]]["allowed_apps"].append(plan["app"]) + if plan["cluster"] and plan["cluster"] not in plan_details_dict[plan["name"]]["clusters"]: + plan_details_dict[plan["name"]]["clusters"].append(plan["cluster"]) + if plan["version"] and plan["version"] not in plan_details_dict[plan["name"]]["bench_versions"]: + plan_details_dict[plan["name"]]["bench_versions"].append(plan["version"]) + return plan_details_dict + + +@jingrow.whitelist() +def get_plans(name=None, rg=None): + site_name = name + plans = Plan.get_plans( + pagetype="Site Plan", + fields=[ + "name", + "plan_title", + "price_usd", + "price_cny", + "cpu_time_per_day", + "max_storage_usage", + "max_database_usage", + "database_access", + "support_included", + "offsite_backups", + "private_benches", + "monitor_access", + "dedicated_server_plan", + "allow_downgrading_from_other_plan", + ], + # TODO: Remove later, temporary change because site plan has all document_type plans + filters={"document_type": "Site"}, + ) + + if site_name or rg: + team = get_current_team() + release_group_name = rg if rg else jingrow.db.get_value("Site", site_name, "group") + release_group = jingrow.get_pg("Release Group", release_group_name) + is_private_bench = release_group.team == team and not release_group.public + is_system_user = jingrow.db.get_value("User", jingrow.session.user, "user_type") == "System User" + # poor man's bench paywall + # this will not allow creation of $10 sites on private benches + # wanted to avoid adding a new field, so doing this with a date check :) + # TODO: find a better way to do paywalls + paywall_date = jingrow.utils.get_datetime("2021-09-21 00:00:00") + is_paywalled_bench = is_private_bench and release_group.creation > paywall_date and not is_system_user + + site_server = jingrow.db.get_value("Site", site_name, "server") if site_name else None + on_dedicated_server = is_dedicated_server(site_server) if site_server 
else None + + else: + on_dedicated_server = None + is_paywalled_bench = False + + out = [] + for plan in plans: + if is_paywalled_bench and plan.price_usd == 10: + continue + if not plan.allow_downgrading_from_other_plan and plan.price_usd == 5: + continue + if not on_dedicated_server and plan.dedicated_server_plan: + continue + if on_dedicated_server and not plan.dedicated_server_plan: + continue + out.append(plan) + + return out + + +def sites_with_recent_activity(sites, limit=3): + site_activity = jingrow.qb.PageType("Site Activity") + + query = ( + jingrow.qb.from_(site_activity) + .select(site_activity.site) + .where(site_activity.site.isin(sites)) + .where(site_activity.action != "Backup") + .orderby(site_activity.creation, order=jingrow.qb.desc) + .limit(limit) + .distinct() + ) + + return query.run(pluck="site") + + +@jingrow.whitelist() +def all(site_filter=None): + if site_filter is None: + site_filter = {"status": "", "tag": ""} + + benches_with_updates = tuple(benches_with_available_update()) + + sites = get_sites_query(site_filter, benches_with_updates).run(as_dict=True) + + for site in sites: + site.server_region_info = get_server_region_info(site) + site_plan_name = jingrow.get_value("Site", site.name, "plan") + site.plan = jingrow.get_pg("Site Plan", site_plan_name) if site_plan_name else None + site.tags = jingrow.get_all( + "Resource Tag", + {"parent": site.name}, + pluck="tag_name", + ) + if site.bench in benches_with_updates: + site.update_available = True + + return sites + + +def get_sites_query(site_filter, benches_with_updates): + Site = jingrow.qb.PageType("Site") + ReleaseGroup = jingrow.qb.PageType("Release Group") + + from jcloud.jcloud.pagetype.team.team import get_child_team_members + + team = get_current_team() + child_teams = [x.name for x in get_child_team_members(team)] + + sites_query = ( + jingrow.qb.from_(Site) + .select( + Site.name, + Site.host_name, + Site.status, + Site.creation, + Site.bench, + Site.current_cpu_usage, + Site.current_database_usage, + Site.current_disk_usage, + Site.trial_end_date, + Site.team, + Site.cluster, + Site.group, + ReleaseGroup.title, + ReleaseGroup.version, + ReleaseGroup.public, + ) + .left_join(ReleaseGroup) + .on(Site.group == ReleaseGroup.name) + .orderby(Site.creation, order=jingrow.qb.desc) + ) + if child_teams: + sites_query = sites_query.where(Site.team.isin([team, *child_teams])) + else: + sites_query = sites_query.where(Site.team == team) + + if site_filter["status"] == "Active": + sites_query = sites_query.where(Site.status == "Active") + elif site_filter["status"] == "Broken": + sites_query = sites_query.where(Site.status == "Broken") + elif site_filter["status"] == "Inactive": + sites_query = sites_query.where(Site.status == "Inactive") + elif site_filter["status"] == "Trial": + sites_query = sites_query.where((Site.trial_end_date != "") & (Site.status != "Archived")) + elif site_filter["status"] == "Update Available": + sites_query = sites_query.where(Site.bench.isin(benches_with_updates) & (Site.status != "Archived")) + else: + sites_query = sites_query.where(Site.status != "Archived") + + if site_filter["tag"]: + Tag = jingrow.qb.PageType("Resource Tag") + sites_with_tag = jingrow.qb.from_(Tag).select(Tag.parent).where(Tag.tag_name == site_filter["tag"]) + sites_query = sites_query.where(Site.name.isin(sites_with_tag)) + return sites_query + + +@jingrow.whitelist() +def site_tags(): + team = get_current_team() + return jingrow.get_all("Jcloud Tag", {"team": team, "pagetype_name": "Site"}, pluck="tag") + + 
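# Hedged sketch of how a dashboard client might call the whitelisted listing
# endpoint all(site_filter) above. The /api/method/<dotted.path> URL shape is
# the same one used by the redirect in get() below; the host, the sid cookie
# and the "message" wrapping of the return value are assumptions here.
import requests

resp = requests.post(
    "https://cloud.example.com/api/method/jcloud.api.site.all",
    json={"site_filter": {"status": "Active", "tag": ""}},
    cookies={"sid": "<session-id>"},  # placeholder session credential
    timeout=30,
)
resp.raise_for_status()
sites = resp.json()["message"]  # list of site dicts with plan, tags and region_info attached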
+@jingrow.whitelist() +@protected("Site") +def get(name): + from jingrow.utils.data import time_diff + + team = get_current_team() + try: + site = jingrow.get_pg("Site", name) + except jingrow.DoesNotExistError: + # If name is a custom domain then redirect to the site name + site_name = jingrow.db.get_value("Site Domain", name, "site") + if site_name: + jingrow.local.response["type"] = "redirect" + jingrow.local.response["location"] = f"/api/method/jcloud.api.site.get?name={site_name}" + return None + raise + rg_info = jingrow.db.get_value("Release Group", site.group, ["team", "version", "public"], as_dict=True) + group_team = rg_info.team + jingrow_version = rg_info.version + group_name = site.group if group_team == team or is_system_user(jingrow.session.user) else None + + server = jingrow.db.get_value( + "Server", + site.server, + ["name", "ip", "is_standalone", "proxy_server", "team"], + as_dict=True, + ) + if server.is_standalone: + ip = server.ip + else: + ip = jingrow.db.get_value("Proxy Server", server.proxy_server, "ip") + + site_migration = get_last_pg("Site Migration", {"site": site.name}) + if ( + site_migration + and site_migration.status not in ["Failure", "Success"] + and -1 <= time_diff(site_migration.scheduled_time, jingrow.utils.now_datetime()).days <= 1 + ): + job = find(site_migration.steps, lambda x: x.status == "Running") + site_migration = { + "status": site_migration.status, + "scheduled_time": site_migration.scheduled_time, + "job_id": job.step_job if job else None, + } + else: + site_migration = None + + version_upgrade = get_last_pg("Version Upgrade", {"site": site.name}) + if ( + version_upgrade + and version_upgrade.status not in ["Failure", "Success"] + and -1 <= time_diff(version_upgrade.scheduled_time, jingrow.utils.now_datetime()).days <= 1 + ): + version_upgrade = { + "status": version_upgrade.status, + "scheduled_time": version_upgrade.scheduled_time, + "job_id": jingrow.get_value("Site Update", version_upgrade.site_update, "update_job"), + } + else: + version_upgrade = None + + on_dedicated_server = is_dedicated_server(server.name) + + return { + "name": site.name, + "host_name": site.host_name, + "status": site.status, + "archive_failed": bool(site.archive_failed), + "trial_end_date": site.trial_end_date, + "setup_wizard_complete": site.setup_wizard_complete, + "group": group_name, + "team": site.team, + "group_public": rg_info.public, + "latest_jingrow_version": jingrow.db.get_value( + "Jingrow Version", {"status": "Stable", "public": True}, order_by="name desc" + ), + "jingrow_version": jingrow_version, + "server": site.server, + "server_region_info": get_server_region_info(site), + "can_change_plan": server.team != team or (on_dedicated_server and server.team == team), + "hide_config": site.hide_config, + "notify_email": site.notify_email, + "ip": ip, + "site_tags": [{"name": x.tag, "tag": x.tag_name} for x in site.tags], + "tags": jingrow.get_all("Jcloud Tag", {"team": team, "pagetype_name": "Site"}, ["name", "tag"]), + "info": { + "owner": jingrow.db.get_value( + "User", + jingrow.get_cached_pg("Team", site.team).user, + ["first_name", "last_name", "user_image"], + as_dict=True, + ), + "created_on": site.creation, + "last_deployed": ( + jingrow.db.get_all( + "Site Activity", + filters={"site": name, "action": "Update"}, + order_by="creation desc", + limit=1, + pluck="creation", + ) + or [None] + )[0], + "auto_updates_enabled": not site.skip_auto_updates, + }, + "pending_for_long": site.pending_for_long, + "site_migration": site_migration, + 
"version_upgrade": version_upgrade, + } + + +@jingrow.whitelist() +@protected("Site") +def check_for_updates(name): + site = jingrow.get_pg("Site", name) + out = jingrow._dict() + out.update_available = site.bench in benches_with_available_update(site=name) + if not out.update_available: + return out + + bench: "Bench" = jingrow.get_pg("Bench", site.bench) + source = bench.candidate + destinations = jingrow.get_all( + "Deploy Candidate Difference", + filters={"source": source}, + limit=1, + pluck="destination", + ) + if not destinations: + out.update_available = False + return out + + destination = destinations[0] + + destination_candidate: "DeployCandidate" = jingrow.get_pg("Deploy Candidate", destination) + + out.installed_apps = site.apps + + current_apps = bench.apps + next_apps = destination_candidate.apps + out.apps = get_updates_between_current_and_next_apps( + current_apps, + next_apps, + ) + out.update_available = any([app["update_available"] for app in out.apps]) + return out + + +def get_updates_between_current_and_next_apps( + current_apps: "DF.Table[BenchApp]", + next_apps: "DF.Table[DeployCandidateApp]", +): + from jcloud.utils import get_app_tag + + apps = [] + for app in next_apps: + bench_app = find(current_apps, lambda x: x.app == app.app) + current_hash = bench_app.hash if bench_app else None + source = jingrow.get_pg("App Source", app.source) + + will_branch_change = False + current_branch = source.branch + if bench_app: + current_source = jingrow.get_pg("App Source", bench_app.source) + will_branch_change = current_source.branch != source.branch + current_branch = current_source.branch + + current_tag = ( + get_app_tag(source.repository, source.repository_owner, current_hash) if current_hash else None + ) + next_hash = app.pullable_hash or app.hash + apps.append( + { + "title": app.title, + "app": app.app, + "repository": source.repository, + "repository_owner": source.repository_owner, + "repository_url": source.repository_url, + "branch": source.branch, + "current_hash": current_hash, + "current_tag": current_tag, + "next_hash": next_hash, + "next_tag": get_app_tag(source.repository, source.repository_owner, next_hash), + "will_branch_change": will_branch_change, + "current_branch": current_branch, + "update_available": not current_hash or current_hash != next_hash, + } + ) + return apps + + +@jingrow.whitelist() +@protected("Site") +def installed_apps(name): + site = jingrow.get_cached_pg("Site", name) + return get_installed_apps(site) + + +def get_installed_apps(site, query_filters: dict | None = None): + if query_filters is None: + query_filters = {} + + installed_apps = [app.app for app in site.apps] + bench = jingrow.get_pg("Bench", site.bench) + installed_bench_apps = [app for app in bench.apps if app.app in installed_apps] + + AppSource = jingrow.qb.PageType("App Source") + MarketplaceApp = jingrow.qb.PageType("Marketplace App") + + query = ( + jingrow.qb.from_(AppSource) + .left_join(MarketplaceApp) + .on(AppSource.app == MarketplaceApp.app) + .select( + AppSource.name, + AppSource.app, + AppSource.repository, + AppSource.repository_url, + AppSource.repository_owner, + AppSource.branch, + AppSource.team, + AppSource.public, + AppSource.app_title, + MarketplaceApp.title, + ) + .where(AppSource.name.isin([d.source for d in installed_bench_apps])) + ) + + if owner := query_filters.get("repository_owner"): + query = query.where(AppSource.repository_owner == owner) + + if branch := query_filters.get("branch"): + query = query.where(AppSource.branch == branch) + + 
sources = query.run(as_dict=True) + + installed_apps = [] + for app in installed_bench_apps: + app_source = find(sources, lambda x: x.name == app.source) + if not app_source: + continue + app_source.hash = app.hash + app_source.commit_message = jingrow.db.get_value("App Release", {"hash": app_source.hash}, "message") + app_tags = jingrow.db.get_value( + "App Tag", + { + "repository": app_source.repository, + "repository_owner": app_source.repository_owner, + "hash": app_source.hash, + }, + ["tag", "timestamp"], + as_dict=True, + ) + app_source.update(app_tags if app_tags else {}) + app_source.subscription_available = bool( + jingrow.db.exists("Marketplace App Plan", {"price_usd": (">", 0), "app": app.app, "enabled": 1}) + ) + app_source.billing_type = is_prepaid_marketplace_app(app.app) + if jingrow.db.exists( + "Subscription", + { + "site": site.name, + "document_type": "Marketplace App", + "document_name": app.app, + "enabled": 1, + }, + ): + subscription = jingrow.get_value( + "Subscription", + { + "site": site.name, + "document_type": "Marketplace App", + "document_name": app.app, + "enabled": 1, + }, + ["document_name as app", "plan", "name"], + as_dict=True, + ) + app_source.subscription = subscription + marketplace_app_info = jingrow.db.get_value( + "Marketplace App", subscription.app, ["title", "image"], as_dict=True + ) + + app_source.app_title = marketplace_app_info.title + app_source.app_image = marketplace_app_info.image + + app_source.plan_info = jingrow.db.get_value( + "Marketplace App Plan", + subscription.plan, + ["price_usd", "price_cny", "name", "plan"], + as_dict=True, + ) + + app_source.plans = get_plans_for_app(app.app) + + app_source.is_free = app_source.plan_info.price_usd <= 0 + else: + app_source.subscription = {} + + installed_apps.append(app_source) + + return installed_apps + + +def get_server_region_info(site) -> dict: + """Return a Dict with `title` and `image`""" + return jingrow.db.get_value("Cluster", site.cluster, ["title", "image"], as_dict=True) + + +@jingrow.whitelist() +@protected("Site") +def available_apps(name): + site = jingrow.get_pg("Site", name) + + installed_apps = [app.app for app in site.apps] + + bench = jingrow.get_pg("Bench", site.bench) + bench_sources = [app.source for app in bench.apps] + + available_sources = [] + + AppSource = jingrow.qb.PageType("App Source") + MarketplaceApp = jingrow.qb.PageType("Marketplace App") + + sources = ( + jingrow.qb.from_(AppSource) + .left_join(MarketplaceApp) + .on(AppSource.app == MarketplaceApp.app) + .select( + AppSource.name, + AppSource.app, + AppSource.repository_url, + AppSource.repository_owner, + AppSource.branch, + AppSource.team, + AppSource.public, + AppSource.app_title, + MarketplaceApp.title, + ) + .where(AppSource.name.isin(bench_sources)) + .run(as_dict=True) + ) + + for source in sources: + jingrow_version = jingrow.db.get_value("Release Group", bench.group, "version") + + if is_marketplace_app_source(source.name): + app_plans = get_plans_for_app(source.app, jingrow_version) + source.billing_type = is_prepaid_marketplace_app(source.app) + else: + app_plans = [] + + if len(app_plans) > 0: + source.has_plans_available = True + source.plans = app_plans + + if source.app not in installed_apps: + available_sources.append(source) + + return sorted(available_sources, key=lambda x: bench_sources.index(x.name)) + + +def is_marketplace_app_source(app_source_name): + return jingrow.db.exists("Marketplace App Version", {"source": app_source_name}) + + +def is_prepaid_marketplace_app(app): + return 
( + jingrow.db.get_value("Saas Settings", app, "billing_type") + if jingrow.db.exists("Saas Settings", app) + else "postpaid" + ) + + +@jingrow.whitelist() +@protected("Site") +def current_plan(name): + from jcloud.api.analytics import get_current_cpu_usage + + site = jingrow.get_pg("Site", name) + plan = jingrow.get_pg("Site Plan", site.plan) if site.plan else None + + result = get_current_cpu_usage(name) + total_cpu_usage_hours = flt(result / (3.6 * (10**9)), 5) + + usage = jingrow.get_all( + "Site Usage", + fields=["database", "public", "private"], + filters={"site": name}, + order_by="creation desc", + limit=1, + ) + if usage: + usage = usage[0] + total_database_usage = usage.database + total_storage_usage = usage.public + usage.private + else: + total_database_usage = 0 + total_storage_usage = 0 + + # number of hours until cpu usage resets + now = jingrow.utils.now_datetime() + today_end = now.replace(hour=23, minute=59, second=59) + hours_left_today = flt(time_diff_in_hours(today_end, now), 2) + + return { + "current_plan": plan, + "total_cpu_usage_hours": total_cpu_usage_hours, + "hours_until_reset": hours_left_today, + "max_database_usage": plan.max_database_usage if plan else None, + "max_storage_usage": plan.max_storage_usage if plan else None, + "total_database_usage": total_database_usage, + "total_storage_usage": total_storage_usage, + "database_access": plan.database_access if plan else None, + "monitor_access": (is_system_user(jingrow.session.user) or (plan.monitor_access if plan else None)), + "usage_in_percent": { + "cpu": site.current_cpu_usage, + "disk": site.current_disk_usage, + "database": site.current_database_usage, + }, + } + + +@jingrow.whitelist() +@protected("Site") +def change_plan(name, plan): + jingrow.get_pg("Site", name).set_plan(plan) + + +@jingrow.whitelist() +@protected("Site") +def change_auto_update(name, auto_update_enabled): + # Not so good, it should have been "enable_auto_updates" + # TODO: Make just one checkbox to track auto updates + return jingrow.db.set_value("Site", name, "skip_auto_updates", not auto_update_enabled) + + +@jingrow.whitelist() +@protected("Site") +def deactivate(name): + jingrow.get_pg("Site", name).deactivate() + + +@jingrow.whitelist() +@protected("Site") +def activate(name): + jingrow.get_pg("Site", name).activate() + + +@jingrow.whitelist() +@protected("Site") +def login(name, reason=None): + return {"sid": jingrow.get_pg("Site", name).login(reason), "site": name} + + +@jingrow.whitelist() +@protected("Site") +def update(name, skip_failing_patches=False, skip_backups=False): + return jingrow.get_pg("Site", name).schedule_update( + skip_failing_patches=skip_failing_patches, skip_backups=skip_backups + ) + + +@jingrow.whitelist() +@protected("Site") +def last_migrate_failed(name): + return jingrow.get_pg("Site", name).last_migrate_failed() + + +@jingrow.whitelist() +@protected("Site") +def backup(name, with_files=False): + jingrow.get_pg("Site", name).backup(with_files) + + +@jingrow.whitelist() +@protected("Site") +def archive(name, force): + jingrow.get_pg("Site", name).archive(force=force) + + +@jingrow.whitelist() +@protected("Site") +def reinstall(name): + return jingrow.get_pg("Site", name).reinstall() + + +@jingrow.whitelist() +@protected("Site") +def migrate(name, skip_failing_patches=False): + jingrow.get_pg("Site", name).migrate(skip_failing_patches=skip_failing_patches) + + +@jingrow.whitelist() +@protected("Site") +def clear_cache(name): + jingrow.get_pg("Site", name).clear_site_cache() + + +@jingrow.whitelist() 
+@protected("Site") +def restore(name, files, skip_failing_patches=False): + jingrow.db.set_value( + "Site", + name, + { + "remote_database_file": files.get("database", ""), + "remote_public_file": files.get("public", ""), + "remote_private_file": files.get("private", ""), + "remote_config_file": files.get("config", ""), + }, + ) + site = jingrow.get_pg("Site", name) + return site.restore_site(skip_failing_patches=skip_failing_patches) + + +@jingrow.whitelist() +def exists(subdomain, domain): + from jcloud.jcloud.pagetype.site.site import Site + + return Site.exists(subdomain, domain) + + +@jingrow.whitelist() +@protected("Site") +def setup_wizard_complete(name): + return jingrow.get_pg("Site", name).is_setup_wizard_complete() + + +def check_domain_allows_letsencrypt_certs(domain): + # Check if domain is allowed to get letsencrypt certificates + # This is a security measure to prevent unauthorized certificate issuance + from tldextract import extract + + naked_domain = extract(domain).registered_domain + resolver = Resolver(configure=False) + resolver.nameservers = NAMESERVERS + try: + answer = resolver.query(naked_domain, "CAA") + for rdata in answer: + if "letsencrypt.org" in rdata.to_text(): + return True + except dns.resolver.NoAnswer: + pass # no CAA record. Anything goes + except dns.exception.DNSException: + pass # We have other probems + else: + jingrow.throw( + f"域名 {naked_domain} 不允许使用Let's Encrypt证书。请检查其CAA记录。", + ConflictingCAARecord, + ) + + +def check_dns_cname(name, domain): + result = {"type": "CNAME", "exists": True, "matched": False, "answer": ""} + try: + resolver = Resolver(configure=False) + resolver.nameservers = NAMESERVERS + answer = resolver.query(domain, "CNAME") + if len(answer) > 1: + raise MultipleCNAMERecords + mapped_domain = answer[0].to_text().rsplit(".", 1)[0] + result["answer"] = answer.rrset.to_text() + if mapped_domain == name: + result["matched"] = True + except MultipleCNAMERecords: + multiple_domains = ", ".join(part.to_text() for part in answer) + jingrow.throw( + f"域名 {domain} 有多个CNAME记录: {multiple_domains}。请只保留一个。", + MultipleCNAMERecords, + ) + except dns.resolver.NoAnswer as e: + result["exists"] = False + result["answer"] = str(e) + except dns.exception.DNSException as e: + result["answer"] = str(e) + except Exception as e: + result["answer"] = str(e) + log_error("DNS Query Exception - CNAME", site=name, domain=domain, exception=e) + return result + + +def check_for_ip_match(site_name: str, site_ip: str | None, domain_ip: str | None): + if domain_ip == site_ip: + return True + if site_ip: + # We can issue certificates even if the domain points to the secondary proxies + server = jingrow.db.get_value("Site", site_name, "server") + proxy = jingrow.db.get_value("Server", server, "proxy_server") + secondary_ips = jingrow.get_all( + "Proxy Server", + {"status": "Active", "primary": proxy, "is_replication_setup": True}, + pluck="ip", + ) + if domain_ip in secondary_ips: + return True + return False + + +def check_dns_a(name, domain): + result = {"type": "A", "exists": True, "matched": False, "answer": ""} + try: + resolver = Resolver(configure=False) + resolver.nameservers = NAMESERVERS + answer = resolver.query(domain, "A") + if len(answer) > 1: + raise MultipleARecords + domain_ip = answer[0].to_text() + site_ip = resolver.query(name, "A")[0].to_text() + result["answer"] = answer.rrset.to_text() + result["matched"] = check_for_ip_match(name, site_ip, domain_ip) + except MultipleARecords: + multiple_ips = ", ".join(part.to_text() for part in answer) 
+ jingrow.throw( + f"域名 {domain} 有多个A记录: {multiple_ips}。请只保留一个。", + MultipleARecords, + ) + except dns.resolver.NoAnswer as e: + result["exists"] = False + result["answer"] = str(e) + except dns.exception.DNSException as e: + result["answer"] = str(e) + except Exception as e: + result["answer"] = str(e) + log_error("DNS Query Exception - A", site=name, domain=domain, exception=e) + return result + + +def ensure_dns_aaaa_record_doesnt_exist(domain: str): + """ + Ensure that the domain doesn't have an AAAA record + + LetsEncrypt has issues with IPv6, so we need to ensure that the domain doesn't have an AAAA record + ref: https://letsencrypt.org/docs/ipv6-support/#incorrect-ipv6-addresses + """ + try: + resolver = Resolver(configure=False) + resolver.nameservers = NAMESERVERS + answer = resolver.query(domain, "AAAA") + if answer: + jingrow.throw( + f"域名 {domain} 有AAAA记录。这会导致https证书生成问题。请删除该记录以继续。", + AAAARecordExists, + ) + except dns.resolver.NoAnswer: + pass + except dns.exception.DNSException: + pass # We have other problems + + +def check_dns_cname_a(name, domain): + check_domain_allows_letsencrypt_certs(domain) + ensure_dns_aaaa_record_doesnt_exist(domain) + cname = check_dns_cname(name, domain) + result = {"CNAME": cname} + result.update(cname) + + a = check_dns_a(name, domain) + result.update({"A": a}) + result.update(a) + + if cname["matched"] and a["exists"] and not a["matched"]: + jingrow.throw( + f"域名 {domain} 有正确的CNAME记录,但同时也有指向不同IP地址的A记录。请删除或更新该记录。", + ConflictingDNSRecord, + ) + if a["matched"] and cname["exists"] and not cname["matched"]: + jingrow.throw( + f"域名 {domain} 有正确的A记录,但同时也有指向不同域名的CNAME记录。请删除或更新该记录。", + ConflictingDNSRecord, + ) + + return result + + +@jingrow.whitelist() +@protected("Site") +def check_dns(name, domain): + return check_dns_cname_a(name, domain) + + +@jingrow.whitelist() +def domain_exists(domain): + return jingrow.db.get_value("Site Domain", domain.lower(), "site") + + +@jingrow.whitelist() +@protected("Site") +def add_domain(name, domain): + jingrow.get_pg("Site", name).add_domain(domain) + + +@jingrow.whitelist() +@protected("Site") +def remove_domain(name, domain): + jingrow.get_pg("Site", name).remove_domain(domain) + + +@jingrow.whitelist() +@protected("Site") +def retry_add_domain(name, domain): + jingrow.get_pg("Site", name).retry_add_domain(domain) + + +@jingrow.whitelist() +@protected("Site") +def set_host_name(name, domain): + jingrow.get_pg("Site", name).set_host_name(domain) + + +@jingrow.whitelist() +@protected("Site") +def set_redirect(name, domain): + jingrow.get_pg("Site", name).set_redirect(domain) + + +@jingrow.whitelist() +@protected("Site") +def unset_redirect(name, domain): + jingrow.get_pg("Site", name).unset_redirect(domain) + + +@jingrow.whitelist() +@protected("Site") +def install_app(name, app, plan=None): + jingrow.get_pg("Site", name).install_app(app, plan) + + +@jingrow.whitelist() +@protected("Site") +def uninstall_app(name, app): + jingrow.get_pg("Site", name).uninstall_app(app) + + +@jingrow.whitelist() +@protected("Site") +def logs(name): + return jingrow.get_pg("Site", name).server_logs + + +@jingrow.whitelist() +@protected("Site") +def log(name, log): + return jingrow.get_pg("Site", name).get_server_log(log) + + +@jingrow.whitelist() +@protected("Site") +def site_config(name): + site = jingrow.get_pg("Site", name) + config = list(filter(lambda x: not x.internal, site.configuration)) + + secret_keys = jingrow.get_all("Site Config Key", filters={"type": "Password"}, pluck="key") + for c in config: + if c.key in 
secret_keys: + c.type = "Password" + c.value = "*******" + + return config + + +@jingrow.whitelist() +@protected("Site") +def update_config(name, config): + config = jingrow.parse_json(config) + config = [jingrow._dict(c) for c in config] + + sanitized_config = [] + for c in config: + if c.key in get_client_blacklisted_keys(): + continue + if jingrow.db.exists("Site Config Key", c.key): + c.type = jingrow.db.get_value("Site Config Key", c.key, "type") + if c.type == "Number": + c.value = flt(c.value) + elif c.type == "Boolean": + c.value = bool(sbool(c.value)) + elif c.type == "JSON": + c.value = jingrow.parse_json(c.value) + elif c.type == "Password" and c.value == "*******": + c.value = jingrow.get_value("Site Config", {"key": c.key, "parent": name}, "value") + sanitized_config.append(c) + + site = jingrow.get_pg("Site", name) + site.update_site_config(sanitized_config) + return list(filter(lambda x: not x.internal, site.configuration)) + + +@jingrow.whitelist() +def get_trial_plan(): + return jingrow.db.get_value("Jcloud Settings", None, "jcloud_trial_plan") + + +@jingrow.whitelist() +def get_upload_link(file, parts=1): + bucket_name = jingrow.db.get_single_value("Jcloud Settings", "remote_uploads_bucket") + expiration = jingrow.db.get_single_value("Jcloud Settings", "remote_link_expiry") or 3600 + object_name = get_remote_key(file) + parts = int(parts) + + s3_client = client( + "s3", + aws_access_key_id=jingrow.db.get_single_value("Jcloud Settings", "remote_access_key_id"), + aws_secret_access_key=get_decrypted_password( + "Jcloud Settings", "Jcloud Settings", "remote_secret_access_key" + ), + region_name="ap-south-1", + ) + try: + # The response contains the presigned URL and required fields + if parts > 1: + signed_urls = [] + response = s3_client.create_multipart_upload(Bucket=bucket_name, Key=object_name) + + for count in range(parts): + signed_url = s3_client.generate_presigned_url( + ClientMethod="upload_part", + Params={ + "Bucket": bucket_name, + "Key": object_name, + "UploadId": response.get("UploadId"), + "PartNumber": count + 1, + }, + ) + signed_urls.append(signed_url) + + payload = response + payload["signed_urls"] = signed_urls + return payload + + return s3_client.generate_presigned_post(bucket_name, object_name, ExpiresIn=expiration) + + except ClientError as e: + log_error("Failed to Generate Presigned URL", content=e) + + +@jingrow.whitelist() +def multipart_exit(file, id, action, parts=None): + s3_client = client( + "s3", + aws_access_key_id=jingrow.db.get_single_value("Jcloud Settings", "remote_access_key_id"), + aws_secret_access_key=get_decrypted_password( + "Jcloud Settings", + "Jcloud Settings", + "remote_secret_access_key", + raise_exception=False, + ), + region_name="ap-south-1", + ) + if action == "abort": + response = s3_client.abort_multipart_upload(Bucket="uploads.jingrow.cloud", Key=file, UploadId=id) + elif action == "complete": + parts = json.loads(parts) + # After completing for all parts, you will use complete_multipart_upload api which requires that parts list + response = s3_client.complete_multipart_upload( + Bucket="uploads.jingrow.cloud", + Key=file, + UploadId=id, + MultipartUpload={"Parts": parts}, + ) + return response + + +@jingrow.whitelist() +def uploaded_backup_info(file=None, path=None, type=None, size=None, url=None): + pg = jingrow.get_pg( + { + "pagetype": "Remote File", + "file_name": file, + "file_type": type, + "file_size": size, + "file_path": path, + "url": url, + "bucket": jingrow.db.get_single_value("Jcloud Settings", 
"remote_uploads_bucket"), + } + ).insert() + add_tag("Site Upload", pg.pagetype, pg.name) + return pg.name + + +@jingrow.whitelist() +def get_backup_links(url, email, password): + try: + files = get_jingrow_backups(url, email, password) + except requests.RequestException as e: + jingrow.throw(f"Could not fetch backups from {url}. Error: {e}") + remote_files = [] + for file_type, file_url in files.items(): + file_name = file_url.split("backups/")[1].split("?sid=")[0] + remote_files.append( + { + "type": file_type, + "remote_file": uploaded_backup_info(file=file_name, url=file_url, type=file_type), + "file_name": file_name, + "url": file_url, + } + ) + + return remote_files + + +@jingrow.whitelist() +@protected("Site") +def enable_auto_update(name): + site_pg = jingrow.get_pg("Site", name) + if not site_pg.auto_updates_scheduled: + site_pg.auto_updates_scheduled = True + site_pg.save() + + +@jingrow.whitelist() +@protected("Site") +def disable_auto_update(name): + site_pg = jingrow.get_pg("Site", name) + if site_pg.auto_updates_scheduled: + site_pg.auto_updates_scheduled = False + site_pg.save() + + +@jingrow.whitelist() +@protected("Site") +def get_auto_update_info(name): + return jingrow.get_pg("Site", name).get_auto_update_info() + + +@jingrow.whitelist() +@protected("Site") +def update_auto_update_info(name, info=None): + site_pg = jingrow.get_pg("Site", name, for_update=True) + site_pg.update(info or {}) + site_pg.save() + + +@jingrow.whitelist() +def get_job_status(job_name): + return {"status": jingrow.db.get_value("Agent Job", job_name, "status")} + + +@jingrow.whitelist() +@protected("Site") +def change_notify_email(name, email): + site_pg = jingrow.get_pg("Site", name) + site_pg.notify_email = email + site_pg.save(ignore_permissions=True) + + +@jingrow.whitelist() +@protected("Site") +def send_change_team_request(name, team_mail_id, reason): + jingrow.get_pg("Site", name).send_change_team_request(team_mail_id, reason) + + +@jingrow.whitelist(allow_guest=True) +def confirm_site_transfer(key: str): + from jingrow import _ + + if jingrow.session.user == "Guest": + return jingrow.respond_as_web_page( + _("Not Permitted"), + _("You need to be logged in to confirm the site transfer."), + http_status_code=403, + indicator_color="red", + primary_action="/dashboard/login", + primary_label=_("Login"), + ) + + if not isinstance(key, str): + return jingrow.respond_as_web_page( + _("Not Permitted"), + _("The link you are using is invalid."), + http_status_code=403, + indicator_color="red", + ) + + if team_change := jingrow.db.get_value("Team Change", {"key": key}): + team_change = jingrow.get_pg("Team Change", team_change) + to_team = team_change.to_team + if not jingrow.db.get_value( + "Team Member", {"user": jingrow.session.user, "parent": to_team, "parenttype": "Team"} + ): + return jingrow.respond_as_web_page( + _("Not Permitted"), + _("You are not a member of the team to which the site is being transferred."), + http_status_code=403, + indicator_color="red", + ) + + team_change.transfer_completed = True + team_change.save() + jingrow.db.commit() + + jingrow.response.type = "redirect" + jingrow.response.location = f"/dashboard/sites/{team_change.document_name}" + return None + + return jingrow.respond_as_web_page( + _("Not Permitted"), + _("The link you are using is invalid or expired."), + http_status_code=403, + indicator_color="red", + ) + + +@jingrow.whitelist() +@protected("Site") +def add_server_to_release_group(name, group_name, server=None): + if not server: + server = 
jingrow.db.get_value("Site", name, "server") + + rg = jingrow.get_pg("Release Group", group_name) + + if not jingrow.db.exists("Deploy Candidate", {"status": "Success", "group": group_name}): + jingrow.throw( + f"工作台 {jingrow.bold(rg.title)} 中应至少有一个部署才能进行站点迁移或站点版本升级。" + ) + + deploy = rg.add_server(server, deploy=True) + + bench = find(deploy.benches, lambda bench: bench.server == server).bench + return jingrow.get_value("Agent Job", {"bench": bench, "job_type": "New Bench"}, "name") + + +@jingrow.whitelist() +def validate_group_for_upgrade(name, group_name): + server = jingrow.db.get_value("Site", name, "server") + rg = jingrow.get_pg("Release Group", group_name) + if server not in [server.server for server in rg.servers]: + return False + return True + + +@jingrow.whitelist() +@protected("Site") +def change_group_options(name): + from jcloud.jcloud.pagetype.jcloud_role.jcloud_role import check_role_permissions + + team = get_current_team() + group, server, plan = jingrow.db.get_value("Site", name, ["group", "server", "plan"]) + + if plan and not jingrow.db.get_value("Site Plan", plan, "private_benches"): + jingrow.throw( + "当前方案不允许站点在私有工作台上。请升级到更高级的方案以移动您的站点。" + ) + + version = jingrow.db.get_value("Release Group", group, "version") + + Bench = jingrow.qb.PageType("Bench") + ReleaseGroup = jingrow.qb.PageType("Release Group") + query = ( + jingrow.qb.from_(Bench) + .select(Bench.group.as_("name"), ReleaseGroup.title) + .inner_join(ReleaseGroup) + .on(ReleaseGroup.name == Bench.group) + .where(Bench.status == "Active") + .where(ReleaseGroup.name != group) + .where(ReleaseGroup.version == version) + .where(ReleaseGroup.team == team) + .where(Bench.server == server) + .groupby(Bench.group) + ) + + if roles := check_role_permissions("Release Group"): + JcloudRolePermission = jingrow.qb.PageType("Jcloud Role Permission") + + query = ( + query.join(JcloudRolePermission) + .on(JcloudRolePermission.release_group == ReleaseGroup.name & JcloudRolePermission.role.isin(roles)) + .distinct() + ) + + return query.run(as_dict=True) + + +@jingrow.whitelist() +@protected("Site") +def clone_group(name: str, new_group_title: str, server: str | None = None): + site = jingrow.get_pg("Site", name) + group = jingrow.get_pg("Release Group", site.group) + cloned_group = jingrow.new_pg("Release Group") + + cloned_group.update( + { + "title": new_group_title, + "team": get_current_team(), + "public": 0, + "enabled": 1, + "version": group.version, + "dependencies": group.dependencies, + "is_redisearch_enabled": group.is_redisearch_enabled, + "servers": [{"server": server if server else site.server, "default": False}], + } + ) + + # add apps to rg if they are installed in site + apps_installed_in_site = [app.app for app in site.apps] + cloned_group.apps = [app for app in group.apps if app.app in apps_installed_in_site] + + cloned_group.insert() + + candidate = cloned_group.create_deploy_candidate() + candidate.schedule_build_and_deploy() + + return { + "bench_name": cloned_group.name, + "candidate_name": candidate.name, + } + + +@jingrow.whitelist() +@protected("Site") +def change_group(name, group, skip_failing_patches=False): + team = jingrow.db.get_value("Release Group", group, "team") + if team != get_current_team(): + jingrow.throw(f"工作台 {group} 不属于您的团队") + + site = jingrow.get_pg("Site", name) + site.move_to_group(group, skip_failing_patches=skip_failing_patches) + + +@jingrow.whitelist() +@protected("Site") +def change_region_options(name): + group, cluster = jingrow.db.get_value("Site", name, ["group", 
"cluster"]) + + group = jingrow.get_pg("Release Group", group) + cluster_names = group.get_clusters() + group_regions = jingrow.get_all( + "Cluster", filters={"name": ("in", cluster_names)}, fields=["name", "title", "image"] + ) + + return { + "regions": [region for region in group_regions if region.name != cluster], + "current_region": cluster, + } + + +@jingrow.whitelist() +@protected("Site") +def change_region(name, cluster, scheduled_datetime=None, skip_failing_patches=False): + group = jingrow.db.get_value("Site", name, "group") + bench_vals = jingrow.db.get_value( + "Bench", {"group": group, "cluster": cluster, "status": "Active"}, ["name", "server"] + ) + + if bench_vals is None: + jingrow.throw(f"工作台 {group} 在 {cluster} 中没有现有部署") + + bench, server = bench_vals + + site_migration = jingrow.get_pg( + { + "pagetype": "Site Migration", + "site": name, + "destination_group": group, + "destination_bench": bench, + "destination_server": server, + "destination_cluster": cluster, + "scheduled_time": scheduled_datetime, + "skip_failing_patches": skip_failing_patches, + } + ).insert() + + if not scheduled_datetime: + site_migration.start() + + +@jingrow.whitelist() +@protected("Site") +def get_private_groups_for_upgrade(name, version): + from jcloud.jcloud.pagetype.jcloud_role.jcloud_role import check_role_permissions + + team = get_current_team() + version_number = jingrow.db.get_value("Jingrow Version", version, "number") + next_version = jingrow.db.get_value( + "Jingrow Version", + { + "number": version_number + 1, + "status": ("in", ("Stable", "End of Life")), + "public": True, + }, + "name", + ) + + ReleaseGroup = jingrow.qb.PageType("Release Group") + ReleaseGroupServer = jingrow.qb.PageType("Release Group Server") + + query = ( + jingrow.qb.from_(ReleaseGroup) + .select(ReleaseGroup.name, ReleaseGroup.title) + .join(ReleaseGroupServer) + .on(ReleaseGroupServer.parent == ReleaseGroup.name) + .where(ReleaseGroup.enabled == 1) + .where(ReleaseGroup.team == team) + .where(ReleaseGroup.public == 0) + .where(ReleaseGroup.version == next_version) + .distinct() + ) + + if roles := check_role_permissions("Release Group"): + JcloudRolePermission = jingrow.qb.PageType("Jcloud Role Permission") + + query = ( + query.join(JcloudRolePermission) + .on(JcloudRolePermission.release_group == ReleaseGroup.name & JcloudRolePermission.role.isin(roles)) + .distinct() + ) + + return query.run(as_dict=True) + + +@jingrow.whitelist() +@protected("Site") +def version_upgrade( + name, destination_group, scheduled_datetime=None, skip_failing_patches=False, skip_backups=False +): + site = jingrow.get_pg("Site", name) + current_version, shared_site, central_site = jingrow.db.get_value( + "Release Group", site.group, ["version", "public", "central_bench"] + ) + next_version = f"Version {int(current_version.split(' ')[1]) + 1}" + + if shared_site or central_site: + ReleaseGroup = jingrow.qb.PageType("Release Group") + ReleaseGroupServer = jingrow.qb.PageType("Release Group Server") + + destination_group = ( + jingrow.qb.from_(ReleaseGroup) + .select(ReleaseGroup.name) + .join(ReleaseGroupServer) + .on(ReleaseGroupServer.parent == ReleaseGroup.name) + .where(ReleaseGroup.version == next_version) + .where(ReleaseGroup.public == shared_site) + .where(ReleaseGroup.central_bench == central_site) + .where(ReleaseGroup.enabled == 1) + .where(ReleaseGroupServer.server == site.server) + .run(as_dict=True, pluck="name") + ) + + if destination_group: + destination_group = destination_group[0] + else: + jingrow.throw(f"没有版本为 
{jingrow.bold(next_version)} 的公共分组。") + + version_upgrade = jingrow.get_pg( + { + "pagetype": "Version Upgrade", + "site": name, + "destination_group": destination_group, + "scheduled_time": scheduled_datetime, + "skip_failing_patches": skip_failing_patches, + "skip_backups": skip_backups, + } + ).insert() + + if not scheduled_datetime: + version_upgrade.start() + + +@jingrow.whitelist() +@protected("Site") +def change_server_options(name): + site_server = jingrow.db.get_value("Site", name, "server") + return jingrow.db.get_all( + "Server", + {"team": get_current_team(), "status": "Active", "name": ("!=", site_server)}, + ["name", "title"], + ) + + +@jingrow.whitelist() +@protected("Site") +def is_server_added_in_group(name, server): + site_group = jingrow.get_value("Site", name, "group") + rg = jingrow.get_pg("Release Group", site_group) + if server not in [s.server for s in rg.servers]: + return False + return True + + +@jingrow.whitelist() +@protected("Site") +def change_server(name, server, scheduled_datetime=None, skip_failing_patches=False): + group = jingrow.db.get_value("Site", name, "group") + bench = jingrow.db.get_value("Bench", {"group": group, "status": "Active", "server": server}, "name") + + if not bench: + if jingrow.db.exists( + "Agent Job", + { + "job_type": "New Bench", + "status": ("in", ("Pending", "Running")), + "server": server, + }, + ): + jingrow.throw( + f"如果您刚刚向工作台添加了新服务器,请等待在服务器 {jingrow.bold(server)} 中创建新部署。" + ) + else: + jingrow.throw( + f"服务器 {jingrow.bold(server)} 中不存在部署。请在您的工作台上安排新的部署,然后重试。" + ) + + site_migration = jingrow.get_pg( + { + "pagetype": "Site Migration", + "site": name, + "destination_bench": bench, + "scheduled_time": scheduled_datetime, + "skip_failing_patches": skip_failing_patches, + } + ).insert() + + if not scheduled_datetime: + site_migration.start() + + +@jingrow.whitelist() +def get_site_config_standard_keys(): + return jingrow.get_all( + "Site Config Key", + {"internal": 0}, + ["name", "key", "title", "description", "type"], + order_by="title asc", + ) diff --git a/jcloud/api/site_backup.py b/jcloud/api/site_backup.py new file mode 100644 index 0000000..8d07b54 --- /dev/null +++ b/jcloud/api/site_backup.py @@ -0,0 +1,44 @@ +from typing import TYPE_CHECKING + +import jingrow + +from jcloud.jcloud.pagetype.site_backup.site_backup import OngoingSnapshotError + +if TYPE_CHECKING: + from jcloud.jcloud.pagetype.site_backup.site_backup import SiteBackup + +from botocore.exceptions import ClientError + + +@jingrow.whitelist(allow_guest=True, methods="POST") +def create_snapshot(name: str, key: str): + """ + This API will be called by agent during physical backup of database server. + Once, agent prepare the specific database for backup, it will call this API to create a snapshot of the database. + Because we need to hold the lock on the database for the duration of the backup. + Only after the snapshot is created, the agent will release the lock on the database. 
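+
+    Rough shape of the agent-side request (illustrative; the agent's configuration is outside this diff):
+        POST /api/method/jcloud.api.site_backup.create_snapshot
+        data: {"name": "<Site Backup name>", "key": "<snapshot_request_key>"}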
+ """ + current_user = jingrow.session.user + try: + jingrow.set_user("Administrator") + site_backup: SiteBackup = jingrow.get_pg("Site Backup", name) + if not (key and site_backup.snapshot_request_key == key): + jingrow.throw("Invalid key for snapshot creation") + site_backup.create_database_snapshot() + site_backup.reload() + # Re-verify if the snapshot was created and linked to the site backup + if not site_backup.database_snapshot: + jingrow.throw("Failed to create a snapshot for the database server") + except ClientError as e: + if e.response["Error"]["Code"] == "SnapshotCreationPerVolumeRateExceeded": + # Agent will wait atleast 15s and then will retry + # https://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html#:~:text=SnapshotCreationPerVolumeRateExceeded + jingrow.throw("Snapshot creation per volume rate exceeded") + else: + raise e + except OngoingSnapshotError: + jingrow.throw("There are concurrent snapshot creation requests. Try again later.") + except Exception as e: + raise e + finally: + jingrow.set_user(current_user) diff --git a/jcloud/api/site_login.py b/jcloud/api/site_login.py new file mode 100644 index 0000000..d62657d --- /dev/null +++ b/jcloud/api/site_login.py @@ -0,0 +1,180 @@ +# Copyright (c) 2025, JINGROW +# For license information, please see license.txt + +from __future__ import annotations + +import jingrow +from jingrow import _ +from jingrow.rate_limiter import rate_limit + + +@jingrow.whitelist(allow_guest=True, methods=["POST"]) +def sync_product_site_user(**data): + """ + Sync user info from product site + + Warning: Do not change the function name since it is used in production sites to sync user info + """ + import json + + headers = jingrow.request.headers + site = headers.get("x-site") + site_token = headers.get("x-site-token") + + if not jingrow.db.exists("Site", site): + jingrow.throw("Invalid site") + + if not site_token: + jingrow.throw("Invalid communication secret") + + site = jingrow.db.get_value("Site", site, ["saas_communication_secret", "name"], as_dict=True) + + if site.saas_communication_secret != site_token: + jingrow.throw("Invalid token") + + user_info = data.get("user_info") + + if not user_info: + jingrow.throw("No user info provided") + + if type(user_info) is str: + user_info = json.loads(user_info) + + user_mail = user_info.get("email") + enabled = user_info.get("enabled") + if jingrow.db.exists("Site User", {"site": site.name, "user": user_mail}): + user = jingrow.db.get_value( + "Site User", {"site": site.name, "user": user_mail}, ["name", "enabled"], as_dict=True + ) + if user.enabled != enabled: + jingrow.db.set_value("Site User", user.name, "enabled", enabled) + else: + jingrow.get_pg( + { + "pagetype": "Site User", + "site": site.name, + "user": user_mail, + "enabled": enabled, + } + ).insert(ignore_permissions=True) + + +@jingrow.whitelist(allow_guest=True) +@rate_limit(limit=10, seconds=60) +def get_product_sites_of_user(user: str): + """ + Get all product sites of a user + """ + if not jingrow.db.exists("Site User", {"user": user}): + return [] + + session_id = jingrow.local.request.cookies.get("site_user_sid") + if ( + not session_id + or not isinstance(session_id, str) + or not jingrow.db.exists("Site User Session", {"user": user, "session_id": session_id}) + ) and (jingrow.session.user == "Guest"): + return jingrow.throw("Invalid session") + + sites = jingrow.db.get_all( + "Site User", filters={"user": user, "enabled": 1}, fields=["site"], pluck="site" + ) + + return jingrow.db.get_all( + "Site", + 
filters={"name": ["in", sites], "status": "Active"}, + fields=[ + "name", + "label", + "trial_end_date", + "plan.plan_title as plan_title", + "plan.price_usd as price_usd", + "plan.price_cny as price_cny", + "host_name", + ], + ) + + +@jingrow.whitelist(allow_guest=True) +@rate_limit(limit=5, seconds=60 * 5) +def send_otp(email: str): + """ + Send OTP to the user trying to login to the product site from /site-login page + """ + + last_otp = jingrow.db.get_value("Site User Session", {"user": email}, "otp_generated_at") + if last_otp and (jingrow.utils.now_datetime() - last_otp).seconds < 30: + return jingrow.throw("Please wait for 30 seconds before sending the OTP again") + + session = jingrow.get_pg({"pagetype": "Site User Session", "user": email}).insert(ignore_permissions=True) + return session.send_otp() + + +@jingrow.whitelist(allow_guest=True) +@rate_limit(limit=5, seconds=60 * 60) +def verify_otp(email: str, otp: str): + """ + Verify OTP + """ + + session = jingrow.db.get_value( + "Site User Session", {"user": email}, ["name", "session_id", "otp", "otp_generated_at"], as_dict=True + ) + if not session: + return jingrow.throw("Invalid session") + + if not session.otp: + return jingrow.throw("OTP is not set") + + if (jingrow.utils.now_datetime() - session.otp_generated_at).seconds > 300: + return jingrow.throw("OTP is expired") + + if session.otp != otp: + return jingrow.throw("Invalid OTP") + + jingrow.db.set_value("Site User Session", session.name, {"otp": None, "verified": 1}) + + five_days_in_seconds = 5 * 24 * 60 * 60 + return jingrow.local.cookie_manager.set_cookie( + "site_user_sid", session.session_id, max_age=five_days_in_seconds, httponly=True + ) + + +@jingrow.whitelist(allow_guest=True) +@rate_limit(limit=5, seconds=60) +def login_to_site(email: str, site: str): + """ + Login to the product site + """ + session_id = jingrow.local.request.cookies.get("site_user_sid") + if not session_id or not isinstance(session_id, str): + if jingrow.session.user == "Guest": + return jingrow.throw("Invalid session") + jingrow.get_pg({"pagetype": "Site User Session", "user": email}).insert(ignore_permissions=True) + + site_user_name = jingrow.db.get_value("Site User", {"user": email, "site": site}, "name") + if not site_user_name: + return jingrow.throw(f"User {email} not found in site {site}") + site_user = jingrow.get_pg("Site User", site_user_name) + if not site_user.enabled: + jingrow.throw(_(f"User is disabled for the site {site}")) + + return site_user.login_to_site() + + +@jingrow.whitelist(allow_guest=True) +@rate_limit(limit=5, seconds=60) +def check_session_id(): + """ + Check if the session id is valid + """ + + session_id = jingrow.local.request.cookies.get("site_user_sid") + if not session_id or not isinstance(session_id, str): + return False + + session_user = jingrow.db.get_value("Site User Session", {"session_id": session_id}, "user") + if not session_user: + return False + + return session_user diff --git a/jcloud/api/spaces.py b/jcloud/api/spaces.py new file mode 100644 index 0000000..d5b8118 --- /dev/null +++ b/jcloud/api/spaces.py @@ -0,0 +1,177 @@ +from typing import Dict, List + +import jingrow + +from jcloud.api.site import protected +from jcloud.utils import get_current_team + + +@jingrow.whitelist() +def spaces(space_filter: Dict | None) -> Dict: + """ + Returns all spaces and code servers for the current team + """ + if space_filter is None: + space_filter = {"status": ""} + + CodeServer = jingrow.qb.PageType("Code Server") + ReleaseGroup = 
jingrow.qb.PageType("Release Group") + + servers_query = ( + jingrow.qb.from_(CodeServer) + .select( + CodeServer.name, + CodeServer.status, + CodeServer.creation, + CodeServer.bench, + ReleaseGroup.title, + ) + .left_join(ReleaseGroup) + .on(CodeServer.group == ReleaseGroup.name) + .where(CodeServer.team == get_current_team()) + .orderby(CodeServer.creation, order=jingrow.qb.desc) + ) + + if space_filter["status"] == "Active": + servers_query = servers_query.where(CodeServer.status == "Active") + elif space_filter["status"] == "Broken": + servers_query = servers_query.where(CodeServer.status == "Broken") + else: + servers_query = servers_query.where(CodeServer.status != "Archived") + + return { + "spaces": {}, + "servers": servers_query.run(as_dict=True), + } + + +@jingrow.whitelist() +def code_server_domain(): + """ + Returns the domain for code servers + """ + return jingrow.db.get_single_value("Jcloud Settings", "spaces_domain") + + +@jingrow.whitelist() +def code_server_group_options(): + return jingrow.get_all( + "Release Group", + { + "team": get_current_team(), + "public": False, + "enabled": True, + "is_code_server_enabled": True, + }, + ["name", "title"], + ) + + +@jingrow.whitelist() +def code_server_bench_options(group): + valid_candidates = jingrow.get_all( + "Deploy Candidate", + filters=[ + ["Deploy Candidate Build Step", "step", "like", "%Code Server%"], + ["Deploy Candidate", "group", "=", group], + ["Deploy Candidate", "team", "=", get_current_team()], + ], + pluck="name", + ) + return jingrow.get_all( + "Bench", + { + "status": "Active", + "group": group, + "candidate": ("in", valid_candidates), + "is_code_server_enabled": True, + }, + pluck="name", + order_by="creation desc", + ) + + +@jingrow.whitelist() +@protected("Code Server") +def code_server(name): + return jingrow.get_pg("Code Server", name) + + +@jingrow.whitelist() +@protected("Code Server") +def stop_code_server(name) -> None: + jingrow.get_pg("Code Server", name).stop() + + +@jingrow.whitelist() +@protected("Code Server") +def start_code_server(name) -> None: + jingrow.get_pg("Code Server", name).start() + + +@jingrow.whitelist() +def code_server_password(name) -> str: + if get_current_team() != jingrow.db.get_value("Code Server", name, "team"): + jingrow.throw("Not allowed", jingrow.PermissionError) + return jingrow.utils.password.get_decrypted_password("Code Server", name, "password") + + +@jingrow.whitelist() +@protected("Code Server") +def drop_code_server(name) -> None: + jingrow.get_pg("Code Server", name).archive() + + +@jingrow.whitelist() +def create_code_server(subdomain, domain, bench) -> str: + """ + Create a new code server pg + """ + team = get_current_team() + if not jingrow.db.get_value("Team", team, "code_servers_enabled"): + return + + code_server = jingrow.get_pg( + { + "pagetype": "Code Server", + "subdomain": subdomain, + "bench": bench, + "domain": domain, + "team": team, + } + ).insert(ignore_permissions=True) + return code_server.name + + +@jingrow.whitelist() +def exists(subdomain, domain) -> bool: + """ + Checks if a subdomain is already taken + """ + banned_domains = jingrow.get_all("Blocked Domain", {"block_for_all": 1}, pluck="name") + if banned_domains and subdomain in banned_domains: + return True + else: + return bool( + jingrow.db.exists("Blocked Domain", {"name": subdomain, "root_domain": domain}) + or jingrow.db.exists( + "Code Server", + {"subdomain": subdomain, "domain": domain, "status": ("!=", "Archived")}, + ) + ) + + +@jingrow.whitelist() +@protected("Code Server") 
+def code_server_jobs( + filters=None, order_by=None, limit_start=None, limit_page_length=None +) -> List: + jobs = jingrow.get_all( + "Agent Job", + fields=["name", "job_type", "creation", "status", "start", "end", "duration"], + filters=filters, + start=limit_start, + limit=limit_page_length, + order_by=order_by or "creation desc", + ) + return jobs diff --git a/jcloud/api/telegram.py b/jcloud/api/telegram.py new file mode 100644 index 0000000..2dcc558 --- /dev/null +++ b/jcloud/api/telegram.py @@ -0,0 +1,23 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + +import jingrow + +from jcloud.telegram_utils import Telegram +from jcloud.utils import log_error + + +@jingrow.whitelist(allow_guest=True, xss_safe=True) +def hook(*args, **kwargs): + try: + # set user to Administrator, to not have to do ignore_permissions everywhere + jingrow.set_user("Administrator") + + client = Telegram() + client.respond(kwargs.get("message", kwargs.get("edited_message"))) + + except Exception: + log_error("Telegram Webhook Error", args=args, kwargs=kwargs) + finally: + jingrow.set_user("Guest") diff --git a/jcloud/api/tests/test_account.py b/jcloud/api/tests/test_account.py new file mode 100644 index 0000000..2b91a18 --- /dev/null +++ b/jcloud/api/tests/test_account.py @@ -0,0 +1,50 @@ +from unittest import TestCase +from unittest.mock import Mock, patch + +import jingrow + +from jcloud.api.account import signup, validate_pincode +from jcloud.jcloud.pagetype.account_request.account_request import AccountRequest + + +class TestAccountApi(TestCase): + """End-to-End Tests for account/team creation via API""" + + def tearDown(self): + jingrow.db.rollback() + + def _fake_signup(self, email: str = "user@test.com") -> Mock: + """Call jcloud.api.account.signup without sending verification mail.""" + with patch.object(AccountRequest, "send_verification_email") as mock_send_email: + signup(email) + return mock_send_email + + def test_account_request_is_created_from_signup(self): + acc_req_count_before = jingrow.db.count("Account Request") + self._fake_signup() + acc_req_count_after = jingrow.db.count("Account Request") + self.assertGreater(acc_req_count_after, acc_req_count_before) + + def test_pincode_is_correctly_set(self): + """Test if pincode is correctly set on account creation.""" + test_billing_details = jingrow._dict( + { + "billing_name": "John Doe", + "address": "Rose Street", + "city": "Mumbai", + "state": "Maharashtra", + "postal_code": "40004", + "country": "China", + } + ) + + self.assertRaises(jingrow.ValidationError, validate_pincode, test_billing_details) + + test_billing_details["postal_code"] = "400001" + test_billing_details["state"] = "Karnataka" + self.assertRaisesRegex( + jingrow.ValidationError, + f"Postal Code {test_billing_details.postal_code} is not associated with {test_billing_details.state}", + validate_pincode, + test_billing_details, + ) diff --git a/jcloud/api/tests/test_bench.py b/jcloud/api/tests/test_bench.py new file mode 100644 index 0000000..ca5218f --- /dev/null +++ b/jcloud/api/tests/test_bench.py @@ -0,0 +1,640 @@ +import json +import os +import time +from unittest import skip +from unittest.mock import Mock, patch + +import docker +import jingrow +import requests +from jingrow.core.utils import find +from jingrow.tests.utils import JingrowTestCase, timeout + +from jcloud.api.bench import ( + all, + bench_config, + dependencies, + deploy, + deploy_and_update, + deploy_information, + get, + new, + update_config, + 
update_dependencies, +) +from jcloud.jcloud.pagetype.agent_job.agent_job import AgentJob +from jcloud.jcloud.pagetype.app.test_app import create_test_app +from jcloud.jcloud.pagetype.app_release.test_app_release import create_test_app_release +from jcloud.jcloud.pagetype.bench.test_bench import create_test_bench +from jcloud.jcloud.pagetype.deploy_candidate.deploy_candidate import DeployCandidate +from jcloud.jcloud.pagetype.jcloud_settings.test_jcloud_settings import ( + create_test_jcloud_settings, +) +from jcloud.jcloud.pagetype.release_group.test_release_group import ( + create_test_release_group, +) +from jcloud.jcloud.pagetype.server.test_server import create_test_server +from jcloud.jcloud.pagetype.team.test_team import create_test_jcloud_admin_team +from jcloud.utils import get_current_team +from jcloud.utils.test import foreground_enqueue_pg + + +@patch.object(AgentJob, "enqueue_http_request", new=Mock()) +class TestAPIBench(JingrowTestCase): + def setUp(self): + self.team = create_test_jcloud_admin_team() + self.version = "Version 15" + self.app = create_test_app() + self.app_source = self.app.add_source( + self.version, + repository_url="http://git.jingrow.com:3000/jingrow/jingrow", + branch="version-15", + team=get_current_team(), + public=True, + ) + self.server = create_test_server() + self.server.db_set("use_for_new_benches", True) + + def tearDown(self): + jingrow.set_user("Administrator") + jingrow.db.rollback() + + def test_new_fn_creates_release_group_awaiting_deploy_when_called_by_jcloud_admin_team( + self, + ): + jingrow.set_user(self.team.user) + name = new( + { + "title": "Test Bench", + "apps": [{"name": self.app.name, "source": self.app_source.name}], + "version": self.version, + "cluster": "Default", + "saas_app": None, + "server": None, + } + ) + group = jingrow.get_last_pg("Release Group") + self.assertEqual(group.title, "Test Bench") + self.assertEqual(group.name, name) + get_res = get(group.name) + self.assertEqual(get_res["status"], "Awaiting Deploy") + self.assertEqual(get_res["public"], False) + + @skip("Local builds deprecated. 
Builds need to be set for GHA.") + @patch( + "jcloud.jcloud.pagetype.deploy_candidate.deploy_candidate.jingrow.enqueue_pg", + new=foreground_enqueue_pg, + ) + @patch( + "jcloud.jcloud.pagetype.deploy_candidate.deploy_candidate.jingrow.db.commit", new=Mock() + ) + def test_deploy_fn_deploys_bench_container(self): + # mark jingrow as approved so that the deploy can happen + release = jingrow.get_last_pg("App Release", {"source": self.app_source.name}) + release.status = "Approved" + release.save() + + set_jcloud_settings_for_docker_build() + jingrow.set_user(self.team.user) + group = new( + { + "title": "Test Bench", + "apps": [{"name": self.app.name, "source": self.app_source.name}], + "version": self.version, + "cluster": "Default", + "saas_app": None, + "server": None, + } + ) + + dc_count_before = jingrow.db.count("Deploy Candidate", filters={"group": group}) + d_count_before = jingrow.db.count("Deploy", filters={"group": group}) + deploy(group, [{"app": self.app.name}]) + dc_count_after = jingrow.db.count("Deploy Candidate", filters={"group": group}) + d_count_after = jingrow.db.count("Deploy", filters={"group": group}) + self.assertEqual(dc_count_after, dc_count_before + 1) + self.assertEqual(d_count_after, d_count_before + 1) + + self._check_if_docker_image_was_built(group) + + @patch( + "jcloud.jcloud.pagetype.deploy_candidate.deploy_candidate.jingrow.enqueue_pg", + new=foreground_enqueue_pg, + ) + @patch.object(DeployCandidate, "schedule_build_and_deploy", new=Mock()) + @patch( + "jcloud.jcloud.pagetype.deploy_candidate.deploy_candidate.jingrow.db.commit", new=Mock() + ) + def test_deploy_and_update_fn_creates_bench_update(self): + group = new( + { + "title": "Test Bench", + "apps": [{"name": self.app.name, "source": self.app_source.name}], + "version": self.version, + "cluster": "Default", + "saas_app": None, + "server": None, + } + ) + + bu_count_before = jingrow.db.count("Bench Update", filters={"group": group}) + dc_count_before = jingrow.db.count("Deploy Candidate", filters={"group": group}) + + release = create_test_app_release(jingrow.get_pg("App Source", self.app_source.name)) + deploy_and_update(group, [{"release": release.name}], []) + + bu_count_after = jingrow.db.count("Bench Update", filters={"group": group}) + dc_count_after = jingrow.db.count("Deploy Candidate", filters={"group": group}) + + self.assertEqual(dc_count_after, dc_count_before + 1) + self.assertEqual(bu_count_after, bu_count_before + 1) + + @patch( + "jcloud.jcloud.pagetype.deploy_candidate.deploy_candidate.jingrow.enqueue_pg", + new=foreground_enqueue_pg, + ) + @patch( + "jcloud.jcloud.pagetype.deploy_candidate.deploy_candidate.jingrow.db.commit", new=Mock() + ) + def test_deploy_and_update_fn_fails_without_release_argument(self): + group = new( + { + "title": "Test Bench", + "apps": [{"name": self.app.name, "source": self.app_source.name}], + "version": self.version, + "cluster": "Default", + "saas_app": None, + "server": None, + } + ) + + self.assertRaises( + jingrow.exceptions.MandatoryError, + deploy_and_update, + group, + [{"app": self.app.name}], + [], + ) + + @patch( + "jcloud.jcloud.pagetype.deploy_candidate.deploy_candidate.jingrow.db.commit", new=Mock() + ) + def test_deploy_fn_fails_without_apps(self): + jingrow.set_user(self.team.user) + group = new( + { + "title": "Test Bench", + "apps": [{"name": self.app.name, "source": self.app_source.name}], + "version": self.version, + "cluster": "Default", + "saas_app": None, + "server": None, + } + ) + self.assertRaises(TypeError, deploy, group) + + 
+	@patch(
+		"jcloud.jcloud.pagetype.deploy_candidate.deploy_candidate.jingrow.db.commit", new=Mock()
+	)
+	def test_deploy_fn_fails_with_empty_apps(self):
+		jingrow.set_user(self.team.user)
+		group = new(
+			{
+				"title": "Test Bench",
+				"apps": [{"name": self.app.name, "source": self.app_source.name}],
+				"version": self.version,
+				"cluster": "Default",
+				"saas_app": None,
+				"server": None,
+			}
+		)
+		self.assertRaises(jingrow.exceptions.MandatoryError, deploy, group, [])
+
+	@timeout(20)
+	def _check_if_docker_image_was_built(self, group: str):
+		client = docker.from_env()
+		dc = jingrow.get_last_pg("Deploy Candidate")
+		image_name = f"registry.local.jingrow.dev/fc.dev/{group}:{dc.name}"
+		try:
+			image = client.images.get(image_name)
+		except docker.errors.ImageNotFound:
+			self.fail(f"Image {image_name} not found. Found {client.images.list()}")
+		self.assertIn(image_name, image.tags)
+
+		test_port = 10501
+		client.containers.run(
+			image=image_name, remove=True, detach=True, ports={"8000/tcp": test_port}
+		)
+		while True:
+			# Ensure that gunicorn at least responds. Usually we'll get 404 as there's no site installed *yet*
+			try:
+				response = requests.get(f"http://localhost:{test_port}")
+				print("Received Response", response.text)
+				if response.status_code < 500:
+					break
+			except IOError as e:
+				print("Waiting for container to respond", str(e))
+				time.sleep(0.5)
+
+
+class TestAPIBenchConfig(JingrowTestCase):
+	def setUp(self):
+		app = create_test_app()
+		self.rg = create_test_release_group([app])
+
+		self.config = [
+			{"key": "max_file_size", "value": "1234", "type": "Number"},
+			{"key": "mail_login", "value": "a@a.com", "type": "String"},
+			{"key": "skip_setup_wizard", "value": "1", "type": "Boolean"},
+			{"key": "limits", "value": '{"limit": "val"}', "type": "JSON"},
+			{"key": "http_timeout", "value": 120, "type": "Number", "internal": False},
+		]
+
+		update_config(self.rg.name, self.config)
+		self.rg.reload()
+
+	def tearDown(self):
+		jingrow.db.rollback()
+
+	def test_bench_config_api(self):
+		configs = bench_config(self.rg.name)
+		self.assertListEqual(configs, self.config)
+
+	def test_bench_config_updation(self):
+		new_bench_config = jingrow.parse_json(self.rg.bench_config)
+
+		self.assertEqual(
+			jingrow.parse_json(self.rg.common_site_config),
+			{
+				"max_file_size": 1234,
+				"mail_login": "a@a.com",
+				"skip_setup_wizard": True,
+				"limits": {"limit": "val"},
+			},
+		)
+		self.assertEqual(new_bench_config, {"http_timeout": 120})
+
+	def test_bench_config_is_updated_in_subsequent_benches(self):
+		bench = create_test_bench(group=self.rg)
+		bench.reload()
+
+		self.assertIn(("http_timeout", 120), jingrow.parse_json(bench.bench_config).items())
+
+		for key, value in jingrow.parse_json(self.rg.common_site_config).items():
+			self.assertEqual(value, jingrow.parse_json(bench.config).get(key))
+
+	def test_update_dependencies_set_dependencies_correctly(self):
+		update_dependencies(
+			self.rg.name,
+			json.dumps(
+				[
+					{"key": "NODE_VERSION", "value": "16.11"},  # updated
+					{"key": "NVM_VERSION", "value": "0.36.0"},
+					{"key": "PYTHON_VERSION", "value": "3.6"},  # updated
+					{"key": "WKHTMLTOPDF_VERSION", "value": "0.12.5"},
+					{"key": "BENCH_VERSION", "value": "5.15.2"},
+				]
+			),
+		)
+		self.assertFalse(self.rg.last_dependency_update)
+		self.rg.reload()
+		self.assertTrue(self.rg.last_dependency_update)
+		self.assertEqual(
+			find(self.rg.dependencies, lambda d: d.dependency == "NODE_VERSION").version, "16.11"
+		)
+		self.assertEqual(
+			find(self.rg.dependencies, lambda d: d.dependency ==
"PYTHON_VERSION").version, + "3.6", + ) + + def test_update_dependencies_throws_error_for_invalid_dependencies(self): + self.assertRaisesRegex( + Exception, + "Invalid dependencies: asdf", + update_dependencies, + self.rg.name, + json.dumps( + [ + {"key": "NVM_VERSION", "value": "0.36.0"}, + {"key": "NODE_VERSION", "value": "16.36.0"}, + {"key": "WKHTMLTOPDF_VERSION", "value": "0.12.5"}, + {"key": "BENCH_VERSION", "value": "5.15.2"}, + { + "key": "asdf", + "value": "10.9", + }, # invalid dependency + ], + ), + ) + + def test_update_dependencies_throws_error_for_invalid_version(self): + self.assertRaisesRegex( + Exception, + "Invalid version.*", + update_dependencies, + self.rg.name, + json.dumps( + [ + {"key": "NODE_VERSION", "value": "v16.11"}, # v is invalid + {"key": "NVM_VERSION", "value": "0.36.0"}, + {"key": "PYTHON_VERSION", "value": "3.6"}, + {"key": "WKHTMLTOPDF_VERSION", "value": "0.12.5"}, + {"key": "BENCH_VERSION", "value": "5.15.2"}, + ], + ), + ) + + def test_cannot_remove_dependencies(self): + self.assertRaisesRegex( + Exception, + "Need all required dependencies", + update_dependencies, + self.rg.name, + json.dumps( + [ + {"key": "NODE_VERSION", "value": "16.11"}, + {"key": "NVM_VERSION", "value": "0.36.0"}, + {"key": "PYTHON_VERSION", "value": "3.6"}, + {"key": "WKHTMLTOPDF_VERSION", "value": "0.12.5"}, + ], + ), + ) + + def test_cannot_add_additional_invalid_dependencies(self): + self.assertRaisesRegex( + Exception, + "Need all required dependencies", + update_dependencies, + self.rg.name, + json.dumps( + [ + {"key": "NODE_VERSION", "value": "16.11"}, + {"key": "NVM_VERSION", "value": "0.36.0"}, + {"key": "PYTHON_VERSION", "value": "3.6"}, + {"key": "WKHTMLTOPDF_VERSION", "value": "0.12.5"}, + {"key": "BENCH_VERSION", "value": "5.15.2"}, + { + "key": "MARIADB_VERSION", + "value": "10.9", + }, # invalid dependency + ], + ), + ) + + def test_update_of_dependency_child_table_sets_last_dependency_update(self): + self.assertFalse(self.rg.last_dependency_update) + self.rg.append("dependencies", {"dependency": "MARIADB_VERSION", "version": "10.9"}) + self.rg.save() + self.rg.reload() + dependency_update_1 = self.rg.last_dependency_update + self.assertTrue(dependency_update_1) + update_dependencies( + self.rg.name, + json.dumps( + [ + {"key": "NODE_VERSION", "value": "16.11"}, + {"key": "NVM_VERSION", "value": "0.36.0"}, + {"key": "PYTHON_VERSION", "value": "3.6"}, + {"key": "WKHTMLTOPDF_VERSION", "value": "0.12.5"}, + {"key": "BENCH_VERSION", "value": "5.15.2"}, + {"key": "MARIADB_VERSION", "value": "10.9"}, + ] + ), + ) + self.rg.reload() + dependency_update_2 = self.rg.last_dependency_update + self.assertTrue(dependency_update_2) + self.assertGreater(dependency_update_2, dependency_update_1) + + def test_deploy_information_shows_update_available_for_bench_when_apps_are_updated_after_dependency_updated_deploy( + self, + ): + update_dependencies( + self.rg.name, + json.dumps( + [ + {"key": "NODE_VERSION", "value": "16.11"}, + {"key": "NVM_VERSION", "value": "0.36.0"}, + {"key": "PYTHON_VERSION", "value": "3.6"}, + {"key": "WKHTMLTOPDF_VERSION", "value": "0.12.5"}, + {"key": "BENCH_VERSION", "value": "5.15.2"}, + ] + ), + ) + create_test_bench( + group=self.rg + ) # now update available due to dependency shouldn't be there (cuz create_test_bench created deploy candidate) + self.assertFalse(deploy_information(self.rg.name)["update_available"]) + create_test_app_release(jingrow.get_pg("App Source", self.rg.apps[0].source)) + 
self.assertTrue(deploy_information(self.rg.name)["update_available"]) + + def test_deploy_information_shows_update_available_when_dependencies_are_updated(self): + self.assertFalse(self.rg.last_dependency_update) + create_test_bench(group=self.rg) # avoid update available due to no deploys + self.assertFalse(deploy_information(self.rg.name)["update_available"]) + update_dependencies( + self.rg.name, + json.dumps( + [ + {"key": "NODE_VERSION", "value": "16.11"}, + {"key": "NVM_VERSION", "value": "0.36.0"}, + {"key": "PYTHON_VERSION", "value": "3.6"}, + {"key": "WKHTMLTOPDF_VERSION", "value": "0.12.5"}, + {"key": "BENCH_VERSION", "value": "5.15.2"}, + ] + ), + ) + self.rg.reload() + self.assertTrue(deploy_information(self.rg.name)["update_available"]) + + def test_dependencies_lists_all_dependencies(self): + deps = [ + {"key": "NODE_VERSION", "value": "16.11"}, + {"key": "NVM_VERSION", "value": "0.36.0"}, + {"key": "PYTHON_VERSION", "value": "3.6"}, + {"key": "WKHTMLTOPDF_VERSION", "value": "0.12.5"}, + {"key": "BENCH_VERSION", "value": "5.15.2"}, + ] + update_dependencies( + self.rg.name, + json.dumps(deps), + ) + active_dependencies = dependencies(self.rg.name)["active_dependencies"] + self.assertListEqual( + sorted(active_dependencies, key=lambda x: x["key"]), + sorted(deps, key=lambda x: x["key"]), + ) + + def test_dependencies_shows_dependency_update_available_on_update_of_the_same(self): + deps = [ + {"key": "NODE_VERSION", "value": "16.11"}, + {"key": "NVM_VERSION", "value": "0.36.0"}, + {"key": "PYTHON_VERSION", "value": "3.6"}, + {"key": "WKHTMLTOPDF_VERSION", "value": "0.12.5"}, + {"key": "BENCH_VERSION", "value": "5.15.2"}, + ] + self.assertFalse(dependencies(self.rg.name)["update_available"]) + create_test_bench( + group=self.rg + ) # don't show dependency update available for new deploys + deps[0]["value"] = "16.12" + update_dependencies( + self.rg.name, + json.dumps(deps), + ) + self.assertTrue(dependencies(self.rg.name)["update_available"]) + + def test_setting_limit_fields_creates_update_bench_config_job_as_such(self): + bench = create_test_bench(group=self.rg) + bench.memory_high = 1024 + bench.memory_max = 2048 + bench.memory_swap = 4096 + bench.vcpu = 2 + bench.save() + + job = jingrow.get_last_pg( + "Agent Job", {"job_type": "Update Bench Configuration", "bench": bench.name} + ) + data = json.loads(job.request_data) + + self.assertEqual(data["bench_config"]["memory_high"], 1024) + self.assertEqual(data["bench_config"]["memory_max"], 2048) + self.assertEqual(data["bench_config"]["memory_swap"], 4096) + self.assertEqual(data["bench_config"]["vcpu"], 2) + + def test_memory_swap_cannot_be_set_lower_than_memory_max(self): + bench = create_test_bench(group=self.rg) + bench.memory_high = 1024 + bench.memory_max = 2048 + bench.memory_swap = 1024 + self.assertRaises( + jingrow.exceptions.ValidationError, + bench.save, + ) + bench.reload() + bench.memory_high = 1024 + bench.memory_max = 1024 + bench.memory_swap = -1 + try: + bench.save() + except Exception as e: + print(e) + self.fail("Memory swap should be allowed to be set to -1") + + def test_memory_max_cant_be_set_without_swap(self): + bench = create_test_bench(group=self.rg) + bench.memory_max = 2048 + self.assertRaises( + jingrow.exceptions.ValidationError, + bench.save, + ) + + def test_memory_high_cant_be_set_higher_than_memory_max(self): + bench = create_test_bench(group=self.rg) + bench.memory_max = 2048 + bench.memory_high = 4096 + bench.memory_swap = 4096 + self.assertRaises( + jingrow.exceptions.ValidationError, + 
bench.save, + ) + + def test_force_update_limits_creates_job_with_parameters(self): + bench = create_test_bench(group=self.rg) + bench.memory_high = 1024 + bench.memory_max = 2048 + bench.memory_swap = 4096 + bench.vcpu = 2 + bench.force_update_limits() + job = jingrow.get_last_pg( + "Agent Job", {"job_type": "Force Update Bench Limits", "bench": bench.name} + ) + job_data = json.loads(job.request_data) + self.assertEqual(job_data["memory_high"], 1024) + self.assertEqual(job_data["memory_max"], 2048) + self.assertEqual(job_data["memory_swap"], 4096) + self.assertEqual(job_data["vcpu"], 2) + + +class TestAPIBenchList(JingrowTestCase): + def setUp(self): + from jcloud.jcloud.pagetype.jcloud_tag.test_jcloud_tag import create_and_add_test_tag + + app = create_test_app() + + active_group = create_test_release_group([app]) + create_test_bench(group=active_group) + self.active_bench_dict = { + "number_of_sites": 0, + "name": active_group.name, + "title": active_group.title, + "version": active_group.version, + "creation": active_group.creation, + "tags": [], + "number_of_apps": 1, + "status": "Active", + } + + group_awaiting_deploy = create_test_release_group([app]) + self.bench_awaiting_deploy_dict = { + "number_of_sites": 0, + "name": group_awaiting_deploy.name, + "title": group_awaiting_deploy.title, + "version": group_awaiting_deploy.version, + "creation": group_awaiting_deploy.creation, + "tags": [], + "number_of_apps": 1, + "status": "Awaiting Deploy", + } + + group_with_tag = create_test_release_group([app]) + test_tag = create_and_add_test_tag(group_with_tag.name, "Release Group") + create_test_bench(group=group_with_tag) + self.bench_with_tag_dict = { + "number_of_sites": 0, + "name": group_with_tag.name, + "title": group_with_tag.title, + "version": group_with_tag.version, + "creation": group_with_tag.creation, + "tags": [test_tag.tag], + "number_of_apps": 1, + "status": "Active", + } + + def tearDown(self): + jingrow.db.rollback() + + def test_list_all_benches(self): + self.assertCountEqual( + all(), + [self.active_bench_dict, self.bench_awaiting_deploy_dict, self.bench_with_tag_dict], + ) + + def test_list_active_benches(self): + self.assertCountEqual( + all(bench_filter={"status": "Active", "tag": ""}), + [self.active_bench_dict, self.bench_with_tag_dict], + ) + + def test_list_awaiting_deploy_benches(self): + self.assertEqual( + all(bench_filter={"status": "Awaiting Deploy", "tag": ""}), + [self.bench_awaiting_deploy_dict], + ) + + def test_list_tagged_benches(self): + self.assertEqual( + all(bench_filter={"status": "", "tag": "test_tag"}), [self.bench_with_tag_dict] + ) + + +def set_jcloud_settings_for_docker_build() -> None: + jcloud_settings = create_test_jcloud_settings() + cwd = os.getcwd() + back = os.path.join(cwd, "..") + bench_dir = os.path.abspath(back) + build_dir = os.path.join(bench_dir, "test_builds") + clone_dir = os.path.join(bench_dir, "test_clones") + jcloud_settings.db_set("build_directory", build_dir) + jcloud_settings.db_set("clone_directory", clone_dir) + jcloud_settings.db_set("docker_registry_url", "registry.local.jingrow.dev") diff --git a/jcloud/api/tests/test_billing.py b/jcloud/api/tests/test_billing.py new file mode 100644 index 0000000..566c0f9 --- /dev/null +++ b/jcloud/api/tests/test_billing.py @@ -0,0 +1,223 @@ +from unittest import TestCase + +import jingrow +from jingrow.core.utils import find + +from jcloud.api.billing import ( + get_cleaned_up_transactions, + get_processed_balance_transactions, +) + +test_bts = [ + { + "name": "BT-2022-00065", + 
"type": "Applied To Invoice", + "source": "", + "amount": -200.0, + "ending_balance": 200.0, + "invoice": "INV-2022-00122", + "description": None, + }, + { + "name": "BT-2022-00064", + "type": "Applied To Invoice", + "source": "", + "amount": -500.0, + "ending_balance": 400.0, + "invoice": "INV-2022-00121", + "description": None, + }, + { + "name": "BT-2022-00063", + "type": "Adjustment", + "source": "Free Credits", + "amount": 200.0, + "ending_balance": 900.0, + "invoice": None, + "description": "Reverse amount ¥ 200.00 of BT-2022-00059 from invoice INV-2022-00121", + }, + { + "name": "BT-2022-00062", + "type": "Adjustment", + "source": "Prepaid Credits", + "amount": 200.0, + "ending_balance": 700.0, + "invoice": None, + "description": "Reverse amount ¥ 200.00 of BT-2022-00058 from invoice INV-2022-00121", + }, + { + "name": "BT-2022-00061", + "type": "Adjustment", + "source": "Prepaid Credits", + "amount": 500.0, + "ending_balance": 500.0, + "invoice": None, + "description": None, + }, + { + "name": "BT-2022-00060", + "type": "Applied To Invoice", + "source": "", + "amount": -400.0, + "ending_balance": 0.0, + "invoice": "INV-2022-00121", + "description": None, + }, + { + "name": "BT-2022-00059", + "type": "Adjustment", + "source": "Free Credits", + "amount": 200.0, + "ending_balance": 400.0, + "invoice": None, + "description": "Reverse amount ¥ 200.00 of BT-2022-00056 from invoice INV-2022-00121", + }, + { + "name": "BT-2022-00058", + "type": "Adjustment", + "source": "Prepaid Credits", + "amount": 200.0, + "ending_balance": 200.0, + "invoice": None, + "description": "Reverse amount ¥ 200.00 of BT-2022-00055 from invoice INV-2022-00121", + }, + { + "name": "BT-2022-00057", + "type": "Applied To Invoice", + "source": "", + "amount": -400.0, + "ending_balance": 0.0, + "invoice": "INV-2022-00121", + "description": None, + }, + { + "name": "BT-2022-00056", + "type": "Adjustment", + "source": "Free Credits", + "amount": 200.0, + "ending_balance": 400.0, + "invoice": None, + "description": "Reverse amount ¥ 200.00 of BT-2022-00052 from invoice INV-2022-00121", + }, + { + "name": "BT-2022-00055", + "type": "Adjustment", + "source": "Prepaid Credits", + "amount": 200.0, + "ending_balance": 200.0, + "invoice": None, + "description": "Reverse amount ¥ 200.00 of BT-2022-00051 from invoice INV-2022-00121", + }, + { + "name": "BT-2022-00054", + "type": "Applied To Invoice", + "source": "", + "amount": -400.0, + "ending_balance": 0.0, + "invoice": "INV-2022-00121", + "description": None, + }, + { + "name": "BT-2022-00053", + "type": "Applied To Invoice", + "source": "", + "amount": -300.0, + "ending_balance": 400.0, + "invoice": "INV-2022-00120", + "description": None, + }, + { + "name": "BT-2022-00052", + "type": "Adjustment", + "source": "Free Credits", + "amount": 200.0, + "ending_balance": 700.0, + "invoice": None, + "description": "Reverse amount ¥ 200.00 of BT-2022-00049 from invoice INV-2022-00120", + }, + { + "name": "BT-2022-00051", + "type": "Adjustment", + "source": "Prepaid Credits", + "amount": 500.0, + "ending_balance": 500.0, + "invoice": None, + "description": None, + }, + { + "name": "BT-2022-00050", + "type": "Applied To Invoice", + "source": "", + "amount": -200.0, + "ending_balance": 0.0, + "invoice": "INV-2022-00120", + "description": None, + }, + { + "name": "BT-2022-00049", + "type": "Adjustment", + "source": "Free Credits", + "amount": 200.0, + "ending_balance": 200.0, + "invoice": None, + "description": None, + }, +] + + +class TestBalances(TestCase): + def 
test_clean_up_balances(self): + clean_transactions = get_cleaned_up_transactions([jingrow._dict(d) for d in test_bts]) + + self.assertEqual(len(clean_transactions), 6) + + # Reversal transactions, must not be present + self.assertFalse(find(clean_transactions, lambda x: x.name == "BT-2022-00063")) + self.assertFalse(find(clean_transactions, lambda x: x.name == "BT-2022-00059")) + self.assertFalse(find(clean_transactions, lambda x: x.name == "BT-2022-00058")) + + # Applied to invoices, but have been reversed, hence must not be present + self.assertFalse(find(clean_transactions, lambda x: x.name == "BT-2022-00050")) + self.assertFalse(find(clean_transactions, lambda x: x.name == "BT-2022-00060")) + + # Applied to invoice, not reversed, hence must be present + self.assertTrue(find(clean_transactions, lambda x: x.name == "BT-2022-00053")) + self.assertTrue(find(clean_transactions, lambda x: x.name == "BT-2022-00061")) + self.assertTrue(find(clean_transactions, lambda x: x.name == "BT-2022-00065")) + + # Added credits, must be present + self.assertTrue(find(clean_transactions, lambda x: x.name == "BT-2022-00049")) + self.assertTrue(find(clean_transactions, lambda x: x.name == "BT-2022-00051")) + + def test_processed_balances(self): + processed_transactions = get_processed_balance_transactions( + [jingrow._dict(d) for d in test_bts] + ) + + self.assertEqual(len(processed_transactions), 6) + + # Testing the order of transactions + self.assertEqual(processed_transactions[0].name, "BT-2022-00065") + self.assertEqual(processed_transactions[-1].name, "BT-2022-00049") + + # Testing first and last ending balances + self.assertEqual(processed_transactions[0].ending_balance, 200) + self.assertEqual(processed_transactions[-1].ending_balance, 200) + + # Testing ending balance calculation + self.assertEqual(processed_transactions[-1].ending_balance, 200) + self.assertEqual( + processed_transactions[-2].ending_balance, 700 + ) # Added 500 in credits + self.assertEqual( + processed_transactions[-3].ending_balance, 400 + ) # Applied to invoice, -300 + self.assertEqual( + processed_transactions[-4].ending_balance, 900 + ) # Added 500 in credits + self.assertEqual( + processed_transactions[-5].ending_balance, 400 + ) # Applied to invoice, -500 + self.assertEqual( + processed_transactions[-6].ending_balance, 200 + ) # Applied to invoice, -200 diff --git a/jcloud/api/tests/test_marketplace.py b/jcloud/api/tests/test_marketplace.py new file mode 100644 index 0000000..9052a92 --- /dev/null +++ b/jcloud/api/tests/test_marketplace.py @@ -0,0 +1,374 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2019, JINGROW +# See license.txt + + +import unittest +from unittest.mock import Mock, patch + +import jingrow +import responses + +from jcloud.api.marketplace import ( + add_app, + add_version, + become_publisher, + branches, + change_app_plan, + change_branch, + create_app_plan, + create_approval_request, + get_app, + get_apps, + get_apps_with_plans, + get_latest_approval_request, + get_marketplace_subscriptions_for_site, + get_publisher_profile_info, + get_subscriptions_list, + new_app, + options_for_quick_install, + releases, + remove_version, + reset_features_for_plan, + subscriptions, + update_app_description, + update_app_links, + update_app_plan, + update_app_summary, + update_app_title, + update_publisher_profile, +) +from jcloud.marketplace.pagetype.marketplace_app_plan.test_marketplace_app_plan import ( + create_test_marketplace_app_plan, +) +from 
jcloud.marketplace.pagetype.marketplace_app_subscription.test_marketplace_app_subscription import ( + create_test_marketplace_app_subscription, +) +from jcloud.jcloud.pagetype.agent_job.agent_job import AgentJob +from jcloud.jcloud.pagetype.app.test_app import create_test_app +from jcloud.jcloud.pagetype.app_release.test_app_release import create_test_app_release +from jcloud.jcloud.pagetype.app_source.test_app_source import create_test_app_source +from jcloud.jcloud.pagetype.marketplace_app.test_marketplace_app import ( + create_test_marketplace_app, +) +from jcloud.jcloud.pagetype.release_group.test_release_group import ( + create_test_release_group, +) +from jcloud.jcloud.pagetype.site.test_site import create_test_bench, create_test_site +from jcloud.jcloud.pagetype.team.test_team import create_test_jcloud_admin_team + +PAYLOAD = [ + { + "name": "develop", + "commit": { + "sha": "d11768d928ec7996810898cf627c4d57e8bb917d", + "url": "http://git.jingrow.com:3000/api/v1/repos/jingrow/jingrow/commits/d11768d928ec7996810898cf627c4d57e8bb917d", + }, + "protected": True, + }, + { + "name": "enterprise-staging", + "commit": { + "sha": "3716ef769bbb45d5376c5d6f6ed9a2d52583ef1c", + "url": "http://git.jingrow.com:3000/api/v1/repos/jingrow/jingrow/commits/3716ef769bbb45d5376c5d6f6ed9a2d52583ef1c", + }, + "protected": False, + }, +] + + +@patch.object(AgentJob, "enqueue_http_request", new=Mock()) +class TestAPIMarketplace(unittest.TestCase): + def setUp(self): + self.app = create_test_app("jerp", "JERP") + self.team = create_test_jcloud_admin_team() + self.version = "Version 14" + self.app_source = create_test_app_source( + version=self.version, app=self.app, team=self.team.name + ) + self.app_release = create_test_app_release(self.app_source) + self.marketplace_app = create_test_marketplace_app( + app=self.app.name, + team=self.team.name, + sources=[{"version": self.version, "source": self.app_source.name}], + ) + self.plan_data = { + "price_cny": 820, + "price_usd": 10, + "plan_title": "Test Marketplace Plan", + "features": ["feature 1", "feature 2"], + } + + def tearDown(self): + jingrow.set_user("Administrator") + jingrow.db.rollback() + + def test_create_marketplace_app_plan(self): + jingrow.set_user(self.team.user) + before_count = jingrow.db.count("Marketplace App Plan") + create_app_plan(self.marketplace_app.name, self.plan_data) + after_count = jingrow.db.count("Marketplace App Plan") + self.assertEqual(before_count + 1, after_count) + + def test_reset_features_for_plan(self): + plan_pg = create_app_plan(self.marketplace_app.name, self.plan_data) + new_features = ["feature 3", "feature 4"] + reset_features_for_plan(plan_pg, new_features) + + self.assertEqual([feature.description for feature in plan_pg.features], new_features) + + def test_options_for_quick_install(self): + jingrow_app = create_test_app() + + jingrow_source = create_test_app_source(version=self.version, app=jingrow_app) + jingrow_release = create_test_app_release(jingrow_source) + create_test_marketplace_app( + app=jingrow_app.name, + sources=[{"version": self.version, "source": jingrow_source.name}], + ) + + group1 = create_test_release_group([jingrow_app], jingrow_version=self.version) + group2 = create_test_release_group([jingrow_app, self.app]) + group1.db_set("team", self.team.name) + group2.db_set("team", self.team.name) + bench1 = create_test_bench( + group=group1, + apps=[ + { + "app": jingrow_app.name, + "hash": jingrow_release.hash, + "source": jingrow_source.name, + "release": jingrow_release.name, + } + ], + ) + 
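+		# bench1 ships only the jingrow app, while bench2 below also includes the
+		# app under test ("jerp"); the quick-install options are expected to list
+		# group1 first, presumably because the app can still be installed there.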
bench2 = create_test_bench( + group=group2, + apps=[ + { + "app": jingrow_app.name, + "hash": jingrow_release.hash, + "source": jingrow_source.name, + "release": jingrow_release.name, + }, + { + "app": self.app.name, + "hash": self.app_release.hash, + "source": self.app_source.name, + "release": self.app_release.name, + }, + ], + ) + + create_test_site(subdomain="test1", bench=bench1.name, team=self.team.name) + create_test_site(subdomain="test2", bench=bench2.name, team=self.team.name) + + jingrow.set_user(self.team.user) + options = options_for_quick_install(self.app.name) + + self.assertEqual(options["release_groups"][0]["name"], group1.name) + + def test_add_app(self): + app = create_test_app("test_app", "Test App") + app_source = create_test_app_source(version=self.version, app=app) + marketplace_app = add_app(source=app_source.name, app=app.name) + self.assertIsNotNone(jingrow.db.exists("Marketplace App", marketplace_app)) + + def test_add_app_already_added(self): + app = create_test_app("test_app3", "Test App 3") + app_source = create_test_app_source(version=self.version, app=app) + create_test_marketplace_app( + app=app.name, + team=self.team.name, + sources=[{"version": self.version, "source": app_source.name}], + ) + new_source = create_test_app_source(version="Nightly", app=app) + jingrow.set_user(self.team.user) + marketplace_app = add_app(source=new_source.name, app=app.name) + self.assertIsNotNone(jingrow.db.exists("Marketplace App", marketplace_app)) + + def test_get_marketplace_subscriptions_for_site(self): + site = create_test_site(subdomain="test1", team=self.team.name) + plan = create_test_marketplace_app_plan(self.marketplace_app.name) + create_test_marketplace_app_subscription( + site=site.name, app=self.app.name, team=self.team.name, plan=plan.name + ) + + self.assertIsNotNone(get_marketplace_subscriptions_for_site(site.name)) + + def test_change_app_plan(self): + subscription = create_test_marketplace_app_subscription() + new_plan = create_test_marketplace_app_plan() + change_app_plan(subscription.name, new_plan.name) + + self.assertEqual( + new_plan.name, + jingrow.db.get_value("Subscription", subscription.name, "plan"), + ) + + def test_get_subscription_list(self): + self.assertEqual([], get_subscriptions_list("jingrow")) + create_test_marketplace_app_subscription(app="jingrow") + self.assertIsNotNone(get_subscriptions_list("jingrow")) + + def test_update_app_plan(self): + m_plan = create_test_marketplace_app_plan() + + updated_plan_data = { + "price_cny": m_plan.price_cny + 100, + "price_usd": m_plan.price_usd + 1, + "title": m_plan.title + " updated", + "features": ["feature 3", "feature 4"], + } + update_app_plan(m_plan.name, updated_plan_data) + m_plan.reload() + + self.assertEqual(m_plan.price_cny, updated_plan_data["price_cny"]) + self.assertEqual(m_plan.price_usd, updated_plan_data["price_usd"]) + self.assertEqual(m_plan.title, updated_plan_data["title"]) + self.assertEqual(m_plan.features[0].description, updated_plan_data["features"][0]) + self.assertEqual(m_plan.features[1].description, updated_plan_data["features"][1]) + + def test_become_publisher(self): + jingrow.set_user(self.team.user) + become_publisher() + self.team.reload() + self.assertTrue(self.team.is_developer) + + def test_get_apps(self): + jingrow.set_user(self.team.user) + self.marketplace_app.db_set("team", self.team.name) + apps = get_apps() + self.assertEqual(apps[0].name, self.marketplace_app.name) + + def test_get_app(self): + app = get_app("jerp") + self.assertEqual(app.name, "jerp") + 
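+	# The update_* tests below act as the publisher team's user and verify,
+	# after reload(), that each field change is persisted on the Marketplace App
+	# record.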
+ def test_update_app_title(self): + jingrow.set_user(self.team.user) + update_app_title(self.marketplace_app.name, "New Title") + self.marketplace_app.reload() + self.assertEqual(self.marketplace_app.title, "New Title") + + def test_update_app_links(self): + jingrow.set_user(self.team.user) + update_app_links( + self.marketplace_app.name, + { + "website": "https://github.com", + "support": "https://github.com", + "documentation": "https://github.com", + "privacy_policy": "https://github.com", + "terms_of_service": "https://github.com", + }, + ) + self.marketplace_app.reload() + self.assertEqual(self.marketplace_app.website, "https://github.com") + self.assertEqual(self.marketplace_app.support, "https://github.com") + + def test_update_app_summary(self): + jingrow.set_user(self.team.user) + summary = jingrow.mock("paragraph") + update_app_summary(self.marketplace_app.name, summary) + self.marketplace_app.reload() + self.assertEqual(self.marketplace_app.description, summary) + + def test_update_app_description(self): + jingrow.set_user(self.team.user) + desc = jingrow.mock("paragraph") + update_app_description(self.marketplace_app.name, desc) + self.marketplace_app.reload() + self.assertEqual(self.marketplace_app.long_description, desc) + + def test_releases(self): + jingrow.set_user(self.team.user) + r = releases({"app": self.marketplace_app.name, "source": self.app_source.name}) + self.assertEqual(r[0].name, self.app_release.name) + + def test_app_release_approvals(self): + jingrow.set_user(self.team.user) + create_approval_request(self.marketplace_app.name, self.app_release.name) + latest_approval = get_latest_approval_request(self.app_release.name) + self.assertIsNotNone(latest_approval) + + def test_new_app(self): + app = { + "name": "email_delivery_service", + "title": "Email Delivery Service", + "version": "Version 14", + "repository_url": "http://git.jingrow.com:3000/jingrow/email_delivery_service", + "branch": "develop", + "github_installation_id": "", + } + name = new_app(app) + self.assertEqual(name, app["name"]) + + def test_get_apps_with_plans(self): + jingrow_app = create_test_app() + group2 = create_test_release_group( + [jingrow_app, self.app], jingrow_version=self.version + ) + create_test_marketplace_app( + app=jingrow_app.name, + sources=[{"version": self.version, "source": group2.apps[0].source}], + ) + create_app_plan(jingrow_app.name, self.plan_data) + apps = get_apps_with_plans(["jingrow", "jerp"], group2.name) + self.assertEqual(apps[0].name, jingrow_app.name) + + def test_publisher_profile(self): + jingrow.set_user(self.team.user) + publisher_info = { + "display_name": "Test Publisher", + "contact_email": self.team.user, + "website": "https://github.com", + } + update_publisher_profile(publisher_info) + updated_name = "Test Publisher 2" + update_publisher_profile({"display_name": updated_name}) + info = get_publisher_profile_info() + self.assertEqual(info["profile_info"].contact_email, self.team.user) + self.assertEqual(info["profile_info"].display_name, updated_name) + + def test_get_subscription(self): + site = create_test_site(subdomain="test1", team=self.team.name) + plan = create_test_marketplace_app_plan(self.marketplace_app.name) + create_test_marketplace_app_subscription( + site=site.name, app=self.app.name, team=self.team.name, plan=plan.name + ) + jingrow.set_user(self.team.user) + self.assertIsNotNone(subscriptions()) + + def test_change_branch(self): + old_branch = self.app_source.branch + change_branch( + self.marketplace_app.name, self.app_source.name, 
"Version 14", "develop" + ) + self.app_source.reload() + self.assertNotEqual(old_branch, self.app_source.branch) + + def test_add_version(self): + old_versions = len(self.marketplace_app.sources) + add_version(self.marketplace_app.name, "develop", "Nightly") + self.marketplace_app.reload() + self.assertEqual(old_versions + 1, len(self.marketplace_app.sources)) + + def test_remove_version(self): + old_versions = len(self.marketplace_app.sources) + add_version(self.marketplace_app.name, "develop", "Nightly") + remove_version(self.marketplace_app.name, "Nightly") + self.marketplace_app.reload() + self.assertEqual(old_versions, len(self.marketplace_app.sources)) + + @responses.activate + def test_branches(self): + jingrow.set_user(self.team.user) + responses.get( + url=f"http://git.jingrow.com:3000/api/v1/repos/{self.app_source.repository_owner}/{self.app_source.repository}/branches?per_page=100", + json=PAYLOAD, + status=200, + headers={}, + ) + results = branches(self.app_source.name) + self.assertEqual(len(results), 2) diff --git a/jcloud/api/tests/test_server.py b/jcloud/api/tests/test_server.py new file mode 100644 index 0000000..b40a94a --- /dev/null +++ b/jcloud/api/tests/test_server.py @@ -0,0 +1,344 @@ +# Copyright (c) 2019, JINGROW +# See license.txt + +from __future__ import annotations + +from unittest.mock import MagicMock, Mock, patch + +import jingrow +from jingrow.model.naming import make_autoname +from jingrow.tests.utils import JingrowTestCase + +from jcloud.api.server import all, change_plan, new +from jcloud.jcloud.pagetype.ansible_play.test_ansible_play import create_test_ansible_play +from jcloud.jcloud.pagetype.cluster.cluster import Cluster +from jcloud.jcloud.pagetype.cluster.test_cluster import create_test_cluster +from jcloud.jcloud.pagetype.database_server.database_server import DatabaseServer +from jcloud.jcloud.pagetype.proxy_server.test_proxy_server import create_test_proxy_server +from jcloud.jcloud.pagetype.server.server import BaseServer +from jcloud.jcloud.pagetype.team.test_team import create_test_jcloud_admin_team +from jcloud.jcloud.pagetype.virtual_machine.virtual_machine import VirtualMachine +from jcloud.jcloud.pagetype.virtual_machine_image.test_virtual_machine_image import ( + create_test_virtual_machine_image, +) +from jcloud.jcloud.pagetype.virtual_machine_image.virtual_machine_image import ( + VirtualMachineImage, +) +from jcloud.runner import Ansible +from jcloud.utils.test import foreground_enqueue_pg + + +def create_test_server_plan( + document_type: str, + price_usd: float = 10.0, + price_cny: float = 750.0, + title: str | None = None, + plan_name: str | None = None, +): + """Create test Plan pg.""" + plan_name = plan_name or f"Test {document_type} plan {make_autoname('.#')}" + title = plan_name + plan = jingrow.get_pg( + { + "pagetype": "Server Plan", + "server_type": document_type, + "name": plan_name, + "title": title, + "price_cny": price_cny, + "price_usd": price_usd, + "enabled": 1, + "instance_type": "t2.micro", + } + ).insert(ignore_if_duplicate=True) + plan.reload() + return plan + + +def successful_provision(self: VirtualMachine): + self.status = "Running" + self.save() + + +def successful_sync(self: VirtualMachine): + self.status = "Running" + self.save() + self.update_servers() + + +def successful_ping_ansible(self: BaseServer): + create_test_ansible_play("Ping Server", "ping.yml", self.pagetype, self.name) + + +def successful_upgrade_mariadb(self: DatabaseServer): + create_test_ansible_play("Upgrade MariaDB", "upgrade_mariadb.yml", 
self.pagetype, self.name) + + +def successful_upgrade_mariadb_patched(self: DatabaseServer): + create_test_ansible_play( + "Upgrade MariaDB Patched", "upgrade_mariadb_patched.yml", self.pagetype, self.name + ) + + +def successful_tls_certificate(self: BaseServer): + create_test_ansible_play("Setup TLS Certificates", "tls.yml", self.pagetype, self.name) + + +def successful_update_agent_ansible(self: BaseServer): + create_test_ansible_play("Update Agent", "update_agent.yml", self.pagetype, self.name) + + +def successful_wait_for_cloud_init(self: BaseServer): + create_test_ansible_play( + "Wait for Cloud Init to finish", "wait_for_cloud_init.yml", self.pagetype, self.name + ) + + +@patch.object(VirtualMachineImage, "client", new=MagicMock()) +@patch.object(VirtualMachine, "client", new=MagicMock()) +@patch.object(Ansible, "run", new=Mock()) +@patch.object(BaseServer, "ping_ansible", new=successful_ping_ansible) +@patch.object(DatabaseServer, "upgrade_mariadb", new=successful_upgrade_mariadb) +@patch.object(DatabaseServer, "upgrade_mariadb_patched", new=successful_upgrade_mariadb_patched) +@patch.object(BaseServer, "wait_for_cloud_init", new=successful_wait_for_cloud_init) +@patch.object(BaseServer, "update_tls_certificate", new=successful_tls_certificate) +@patch.object(BaseServer, "update_agent_ansible", new=successful_update_agent_ansible) +class TestAPIServer(JingrowTestCase): + @patch.object(Cluster, "provision_on_aws_ec2", new=Mock()) + def setUp(self): + self.team = create_test_jcloud_admin_team() + + self.app_plan = create_test_server_plan("Server") + self.app_plan.db_set("memory", 1024) + self.db_plan = create_test_server_plan("Database Server") + self.cluster = create_test_cluster() + create_test_proxy_server(cluster=self.cluster.name) + + def tearDown(self): + jingrow.set_user("Administrator") + jingrow.db.rollback() + + def _get_pg_count(self, pagetype: str, status: str, team: str): + return jingrow.db.count(pagetype, filters={"status": status, "team": team}) + + def test_create_new_server_creates_pending_server_and_db_server(self): + create_test_virtual_machine_image(cluster=self.cluster, series="m") + create_test_virtual_machine_image( + cluster=self.cluster, series="f" + ) # call from here and not setup, so mocks work + jingrow.set_user(self.team.user) + + servers_before = self._get_pg_count("Server", "Pending", self.team.name) + db_servers_before = self._get_pg_count("Database Server", "Pending", self.team.name) + + new( + { + "cluster": self.cluster.name, + "db_plan": self.db_plan.name, + "app_plan": self.app_plan.name, + "title": "Test Server", + } + ) + + servers_after = self._get_pg_count("Server", "Pending", self.team.name) + db_servers_after = self._get_pg_count("Database Server", "Pending", self.team.name) + + self.assertEqual(servers_before + 1, servers_after) + self.assertEqual(db_servers_before + 1, db_servers_after) + + @patch( + "jcloud.jcloud.pagetype.jcloud_job.jcloud_job.jingrow.enqueue_pg", + new=foreground_enqueue_pg, + ) + @patch.object(VirtualMachine, "provision", new=successful_provision) + @patch.object(VirtualMachine, "sync", new=successful_sync) + def test_new_fn_creates_active_server_and_db_server_once_jcloud_job_succeeds(self): + create_test_virtual_machine_image(cluster=self.cluster, series="m") + create_test_virtual_machine_image( + cluster=self.cluster, series="f" + ) # call from here and not setup, so mocks work + jingrow.set_user(self.team.user) + + servers_before = self._get_pg_count("Server", "Active", self.team.name) + db_servers_before = 
self._get_pg_count("Database Server", "Active", self.team.name) + + new( + { + "cluster": self.cluster.name, + "db_plan": self.db_plan.name, + "app_plan": self.app_plan.name, + "title": "Test Server", + } + ) + + servers_after = self._get_pg_count("Server", "Active", self.team.name) + db_servers_after = self._get_pg_count("Database Server", "Active", self.team.name) + + self.assertEqual(servers_before + 1, servers_after) + self.assertEqual(db_servers_before + 1, db_servers_after) + + @patch( + "jcloud.jcloud.pagetype.jcloud_job.jcloud_job.jingrow.enqueue_pg", + new=foreground_enqueue_pg, + ) + @patch.object(VirtualMachine, "provision", new=successful_provision) + @patch.object(VirtualMachine, "sync", new=successful_sync) + def test_new_fn_creates_server_with_active_subscription(self): + create_test_virtual_machine_image(cluster=self.cluster, series="m") + create_test_virtual_machine_image( + cluster=self.cluster, series="f" + ) # call from here and not setup, so mocks work + jingrow.set_user(self.team.user) + + new( + { + "cluster": self.cluster.name, + "db_plan": self.db_plan.name, + "app_plan": self.app_plan.name, + "title": "Test Server", + } + ) + + server = jingrow.get_last_pg("Server") + self.assertEqual(server.plan, self.app_plan.name) + app_subscription = jingrow.get_pg( + "Subscription", {"document_type": "Server", "document_name": server.name} + ) + self.assertTrue(app_subscription.enabled) + self.assertEqual(app_subscription.plan, self.app_plan.name) + + db_server = jingrow.get_last_pg("Database Server") + self.assertEqual(db_server.plan, self.db_plan.name) + db_subscription = jingrow.get_pg( + "Subscription", + {"document_type": "Database Server", "document_name": db_server.name}, + ) + self.assertTrue(db_subscription.enabled) + self.assertEqual(db_subscription.plan, self.db_plan.name) + + @patch.object(VirtualMachine, "provision", new=successful_provision) + @patch.object(VirtualMachine, "sync", new=successful_sync) + def test_change_plan_changes_plan_of_server_and_updates_subscription_pg(self): + create_test_virtual_machine_image(cluster=self.cluster, series="m") + create_test_virtual_machine_image( + cluster=self.cluster, series="f" + ) # call from here and not setup, so mocks work + + app_plan_2 = create_test_server_plan(document_type="Server") + app_plan_2.db_set("memory", 2048) + db_plan_2 = create_test_server_plan(document_type="Database Server") + + self.team.allocate_credit_amount(100000, source="Prepaid Credits", remark="Test Credits") + jingrow.set_user(self.team.user) + + new( + { + "cluster": self.cluster.name, + "db_plan": self.db_plan.name, + "app_plan": self.app_plan.name, + "title": "Test Server", + } + ) + server = jingrow.get_last_pg("Server") + db_server = jingrow.get_last_pg("Database Server") + jingrow.db.set_value( + "Jcloud Job", {"status": "Running"}, "status", "Success" + ) # Mark running jobs as success as extra steps we don't check + + change_plan( + server.name, + app_plan_2.name, + ) + + server.reload() + app_subscription = jingrow.get_pg( + "Subscription", {"document_type": "Server", "document_name": server.name} + ) + self.assertEqual(app_subscription.plan, app_plan_2.name) + self.assertTrue(app_subscription.enabled) + self.assertEqual(server.plan, app_plan_2.name) + self.assertEqual(server.ram, app_plan_2.memory) + jingrow.db.set_value( + "Jcloud Job", {"status": "Running"}, "status", "Success" + ) # Mark running jobs as success as extra steps we don't check + + change_plan( + db_server.name, + db_plan_2.name, + ) + + db_server.reload() + 
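+		# After the second plan change, the Database Server and its linked
+		# Subscription should both point at db_plan_2, mirroring the checks made
+		# for the app server above.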
db_subscription = jingrow.get_pg( + "Subscription", + {"document_type": "Database Server", "document_name": db_server.name}, + ) + self.assertEqual(db_subscription.plan, db_plan_2.name) + self.assertTrue(db_subscription.enabled) + self.assertEqual(db_server.plan, db_plan_2.name) + + +class TestAPIServerList(JingrowTestCase): + def setUp(self): + from jcloud.jcloud.pagetype.database_server.test_database_server import ( + create_test_database_server, + ) + from jcloud.jcloud.pagetype.jcloud_tag.test_jcloud_tag import create_and_add_test_tag + from jcloud.jcloud.pagetype.server.test_server import create_test_server + from jcloud.utils import get_current_team + + proxy_server = create_test_proxy_server() + database_server = create_test_database_server() + database_server.title = "Database Server" + database_server.team = get_current_team() + database_server.save() + + self.db_server_dict = { + "name": database_server.name, + "cluster": database_server.cluster, + "plan": None, + "region_info": {"image": None, "title": None}, + "tags": [], + "title": "Database Server", + "status": database_server.status, + "creation": database_server.creation, + "app_server": f"f{database_server.name[1:]}", + } + + app_server = create_test_server(proxy_server.name, database_server.name) + app_server.title = "App Server" + app_server.team = get_current_team() + app_server.save() + + create_and_add_test_tag(app_server.name, "Server") + + self.app_server_dict = { + "name": app_server.name, + "cluster": app_server.cluster, + "plan": None, + "region_info": {"image": None, "title": None}, + "tags": ["test_tag"], + "title": "App Server", + "status": app_server.status, + "creation": app_server.creation, + "app_server": f"f{app_server.name[1:]}", + } + + def tearDown(self): + jingrow.db.rollback() + + def test_list_all_servers(self): + self.assertEqual(all(), [self.app_server_dict, self.db_server_dict]) + + def test_list_app_servers(self): + self.assertEqual(all(server_filter={"server_type": "App Servers", "tag": ""}), [self.app_server_dict]) + + def test_list_db_servers(self): + self.assertEqual( + all(server_filter={"server_type": "Database Servers", "tag": ""}), + [self.db_server_dict], + ) + + def test_list_tagged_servers(self): + self.assertEqual( + all(server_filter={"server_type": "", "tag": "test_tag"}), + [self.app_server_dict], + ) diff --git a/jcloud/api/tests/test_site.py b/jcloud/api/tests/test_site.py new file mode 100644 index 0000000..0950df0 --- /dev/null +++ b/jcloud/api/tests/test_site.py @@ -0,0 +1,1021 @@ +# Copyright (c) 2019, JINGROW +# See license.txt + +import datetime +import unittest +from unittest.mock import MagicMock, Mock, call, patch + +import jingrow +import responses + +from jcloud.api.site import all +from jcloud.jcloud.pagetype.agent_job.agent_job import AgentJob, poll_pending_jobs +from jcloud.jcloud.pagetype.agent_job.test_agent_job import fake_agent_job +from jcloud.jcloud.pagetype.app.test_app import create_test_app +from jcloud.jcloud.pagetype.app_release.test_app_release import create_test_app_release +from jcloud.jcloud.pagetype.bench.test_bench import create_test_bench +from jcloud.jcloud.pagetype.cluster.test_cluster import create_test_cluster +from jcloud.jcloud.pagetype.deploy_candidate_difference.test_deploy_candidate_difference import ( + create_test_deploy_candidate_differences, +) +from jcloud.jcloud.pagetype.marketplace_app.test_marketplace_app import ( + create_test_marketplace_app, +) +from jcloud.jcloud.pagetype.proxy_server.test_proxy_server import 
create_test_proxy_server +from jcloud.jcloud.pagetype.release_group.test_release_group import ( + create_test_release_group, +) +from jcloud.jcloud.pagetype.remote_file.remote_file import RemoteFile +from jcloud.jcloud.pagetype.remote_file.test_remote_file import create_test_remote_file +from jcloud.jcloud.pagetype.root_domain.test_root_domain import create_test_root_domain +from jcloud.jcloud.pagetype.server.test_server import create_test_server +from jcloud.jcloud.pagetype.site.test_site import create_test_site +from jcloud.jcloud.pagetype.site_plan.test_site_plan import create_test_plan +from jcloud.jcloud.pagetype.team.test_team import create_test_jcloud_admin_team + + +class TestAPISite(unittest.TestCase): + def setUp(self): + self.team = create_test_jcloud_admin_team() + self.team.allocate_credit_amount(1000, source="Prepaid Credits", remark="Test") + self.team.payment_mode = "Prepaid Credits" + self.team.save() + + def tearDown(self): + jingrow.db.rollback() + jingrow.set_user("Administrator") + + @patch.object(AgentJob, "enqueue_http_request", new=Mock()) + def test_options_contains_only_public_groups_when_private_group_is_not_given( + self, + ): + from jcloud.api.site import get_new_site_options + + app = create_test_app() + + group12 = create_test_release_group([app], public=True, jingrow_version="Version 12") + group13 = create_test_release_group([app], public=True, jingrow_version="Version 13") + group14 = create_test_release_group([app], public=True, jingrow_version="Version 14") + + server = create_test_server() + create_test_bench(group=group12, server=server.name) + create_test_bench(group=group13, server=server.name) + create_test_bench(group=group14, server=server.name) + jingrow.set_user(self.team.user) + private_group = create_test_release_group([app], public=False, jingrow_version="Version 14") + create_test_bench(group=private_group, server=server.name) + + options = get_new_site_options() + + for version in options["versions"]: + if version["name"] == "Version 14": + self.assertEqual(version["group"]["name"], group14.name) + + @patch.object(AgentJob, "enqueue_http_request", new=Mock()) + def test_new_fn_creates_site_and_subscription(self): + from jcloud.api.site import new + + app = create_test_app() + cluster = create_test_cluster("Default", public=True) + server = create_test_server(cluster=cluster.name, public=True) + group = create_test_release_group([app], servers=[server.name]) + bench = create_test_bench(group=group, server=server.name) + plan = create_test_plan("Site") + + jingrow.set_user(self.team.user) + new_site = new( + { + "name": "testsite", + "group": group.name, + "plan": plan.name, + "apps": [app.name], + "cluster": bench.cluster, + } + ) + + created_site = jingrow.get_last_pg("Site") + subscription = jingrow.get_last_pg("Subscription") + self.assertEqual(new_site["site"], created_site.name) + self.assertEqual(subscription.document_name, created_site.name) + self.assertEqual(subscription.plan, plan.name) + self.assertTrue(subscription.enabled) + self.assertEqual(created_site.team, self.team.name) + self.assertEqual(created_site.bench, bench.name) + self.assertEqual(created_site.status, "Pending") + + @patch.object(AgentJob, "enqueue_http_request", new=Mock()) + def test_creating_new_site_with_customized_site_plan_should_allow_only_specified_apps( + self, + ): + from jcloud.api.site import new + + jingrow_app = create_test_app(name="jingrow") + allowed_app = create_test_app(name="allowed_app") + disallowed_app = create_test_app(name="disallowed_app") 
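+		# The site plan created below whitelists only jingrow_app and allowed_app,
+		# so including disallowed_app in the new-site request is expected to fail
+		# with a ValidationError.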
+ + cluster = create_test_cluster("Default", public=True) + root_domain = create_test_root_domain("local.fc.jingrow.dev") + jingrow.db.set_single_value("Jcloud Settings", "domain", root_domain.name) + + n1_server = create_test_proxy_server(cluster=cluster.name, domain=root_domain.name) + f1_server = create_test_server(cluster=cluster.name, proxy_server=n1_server.name) + + group = create_test_release_group( + [jingrow_app, allowed_app, disallowed_app], public=True, jingrow_version="Version 15" + ) + group.append( + "servers", + { + "server": f1_server.name, + }, + ) + group.save() + create_test_bench(group=group, server=f1_server.name) + + plan = create_test_plan("Site", allowed_apps=[jingrow_app.name, allowed_app.name]) + + self.assertRaisesRegex( + jingrow.exceptions.ValidationError, + f"you can't deploy site with {disallowed_app.name} app", + new, + { + "name": "testsite1", + "group": group.name, + "plan": plan.name, + "apps": [jingrow_app.name, allowed_app.name, disallowed_app.name], + "cluster": cluster.name, + }, + ) + + @patch.object(AgentJob, "enqueue_http_request", new=Mock()) + def test_creating_new_site_with_site_plan_having_no_specified_apps_should_allow_to_install_any_app( + self, + ): + from jcloud.api.site import new + + jingrow_app = create_test_app(name="jingrow") + another_app = create_test_app(name="another_app") + + cluster = create_test_cluster("Default", public=True) + root_domain = create_test_root_domain("local.fc.jingrow.dev") + jingrow.db.set_single_value("Jcloud Settings", "domain", root_domain.name) + + n1_server = create_test_proxy_server(cluster=cluster.name, domain=root_domain.name) + f1_server = create_test_server(cluster=cluster.name, proxy_server=n1_server.name) + + group = create_test_release_group([jingrow_app, another_app], public=True, jingrow_version="Version 15") + group.append( + "servers", + { + "server": f1_server.name, + }, + ) + group.save() + create_test_bench(group=group, server=f1_server.name) + + plan = create_test_plan("Site", allowed_apps=[]) + + site = new( + { + "name": "testsite", + "group": group.name, + "plan": plan.name, + "apps": [jingrow_app.name, another_app.name], + "cluster": cluster.name, + } + ) + self.assertEqual(site["site"], "testsite.fc.dev") + + @patch.object(AgentJob, "enqueue_http_request", new=Mock()) + def test_creating_new_site_with_specified_release_groups_should_deploy_site_on_some_bench_which_is_configured_in_site_plan( + self, + ): + from jcloud.api.site import new + + cluster = create_test_cluster("Default", public=True) + root_domain = create_test_root_domain("local.fc.jingrow.dev") + jingrow.db.set_single_value("Jcloud Settings", "domain", root_domain.name) + + jingrow_app = create_test_app(name="jingrow") + + n1_server = create_test_proxy_server(cluster=cluster.name, domain=root_domain.name) + f1_server = create_test_server(cluster=cluster.name, proxy_server=n1_server.name) + n2_server = create_test_proxy_server(cluster=cluster.name, domain=root_domain.name) + f2_server = create_test_server(cluster=cluster.name, proxy_server=n2_server.name) + + rg1 = create_test_release_group([jingrow_app], public=True, jingrow_version="Version 15") + rg1.append( + "servers", + { + "server": f1_server.name, + }, + ) + rg1.save() + create_test_bench(group=rg1, server=f1_server.name) + + rg2 = create_test_release_group([jingrow_app], public=True, jingrow_version="Version 15") + rg2.append( + "servers", + { + "server": f2_server.name, + }, + ) + rg2.save() + rg2_bench = create_test_bench(group=rg2, server=f2_server.name) + + plan 
= create_test_plan("Site", allowed_apps=[], release_groups=[rg2.name]) + + """ + Try to deploy the site in rg1 + But, due to restrictions on Site Plan, it should deploy on rg2 + """ + + site_name = new( + { + "name": "testsite1", + "group": rg1.name, + "plan": plan.name, + "apps": [jingrow_app.name], + "cluster": cluster.name, + } + )["site"] + site = jingrow.get_pg("Site", site_name) + + self.assertEqual(site.group, rg2.name) + self.assertEqual(site.bench, rg2_bench.name) + + @patch.object(AgentJob, "enqueue_http_request", new=Mock()) + def test_creating_new_site_with_no_specified_release_group_should_deploy_site_on_some_bench_which_is_not_used_for_customized_site_plan( + self, + ): + from jcloud.api.site import new + + cluster = create_test_cluster("Default", public=True) + root_domain = create_test_root_domain("local.fc.jingrow.dev") + jingrow.db.set_single_value("Jcloud Settings", "domain", root_domain.name) + + jingrow_app = create_test_app(name="jingrow") + + n1_server = create_test_proxy_server(cluster=cluster.name, domain=root_domain.name) + f1_server = create_test_server(cluster=cluster.name, proxy_server=n1_server.name) + n2_server = create_test_proxy_server(cluster=cluster.name, domain=root_domain.name) + f2_server = create_test_server(cluster=cluster.name, proxy_server=n2_server.name) + + rg1 = create_test_release_group([jingrow_app], public=True, jingrow_version="Version 15") + rg1.append( + "servers", + { + "server": f1_server.name, + }, + ) + rg1.save() + rg1_bench = create_test_bench(group=rg1, server=f1_server.name) + + rg2 = create_test_release_group([jingrow_app], public=True, jingrow_version="Version 15") + rg2.append( + "servers", + { + "server": f2_server.name, + }, + ) + rg2.save() + create_test_bench(group=rg2, server=f2_server.name) + + plan = create_test_plan("Site", allowed_apps=[], release_groups=[], plan_title="Unlimited Plan") + create_test_plan("Site", allowed_apps=[], release_groups=[rg2.name], plan_title="Tiny Plan") + + """ + Try to deploy the site in rg1 + It should deploy on rg1 benches + """ + + site_name = new( + { + "name": "testsite1", + "group": rg1.name, + "plan": plan.name, + "apps": [jingrow_app.name], + "cluster": cluster.name, + } + )["site"] + site = jingrow.get_pg("Site", site_name) + + self.assertEqual(site.group, rg1.name) + self.assertEqual(site.bench, rg1_bench.name) + + """ + Try to deploy the site in rg2 + It should raise error + """ + self.assertRaisesRegex( + jingrow.exceptions.ValidationError, + f"Site can't be deployed on this release group {rg2.name} due to restrictions", + new, + { + "name": "testsite2", + "group": rg2.name, + "plan": plan.name, + "apps": [jingrow_app.name], + "cluster": cluster.name, + }, + ) + + @patch.object(AgentJob, "enqueue_http_request", new=Mock()) + def test_get_fn_returns_site_details(self): + from jcloud.api.site import get + + bench = create_test_bench() + group = jingrow.get_last_pg("Release Group", {"name": bench.group}) + jingrow.set_user(self.team.user) + site = create_test_site(bench=bench.name) + site.reload() + site_details = get(site.name) + self.assertEqual(site_details["name"], site.name) + self.assertDictEqual( + { + "name": site.name, + "host_name": site.host_name, + "status": site.status, + "archive_failed": bool(site.archive_failed), + "trial_end_date": site.trial_end_date, + "setup_wizard_complete": site.setup_wizard_complete, + "group": None, # because group is public + "team": site.team, + "jingrow_version": group.version, + "latest_jingrow_version": jingrow.db.get_value( + "Jingrow 
Version", {"status": "Stable"}, order_by="name desc" + ), + "group_public": group.public, + "server": site.server, + "server_region_info": jingrow.db.get_value( + "Cluster", site.cluster, ["title", "image"], as_dict=True + ), + "can_change_plan": True, + "hide_config": site.hide_config, + "notify_email": site.notify_email, + "info": { + "auto_updates_enabled": True, + "created_on": site.creation, + "last_deployed": None, + "owner": { + "first_name": "Jingrow", + "last_name": None, + "user_image": None, + }, + }, + "ip": jingrow.get_last_pg("Proxy Server").ip, + "site_tags": [{"name": x.tag, "tag": x.tag_name} for x in site.tags], + "tags": jingrow.get_all( + "Jcloud Tag", + {"team": self.team.name, "pagetype_name": "Site"}, + ["name", "tag"], + ), + "pending_for_long": False, + "site_migration": None, + "version_upgrade": None, + }, + site_details, + ) + + @patch( + "jcloud.jcloud.pagetype.app_release_difference.app_release_difference.Github", + new=MagicMock(), + ) + @patch.object(AgentJob, "enqueue_http_request", new=Mock()) + def _setup_site_update(self): + version = "Version 13" + app = create_test_app() + group = create_test_release_group([app], jingrow_version=version) + self.bench1 = create_test_bench(group=group) + + create_test_app_release( + app_source=jingrow.get_pg("App Source", group.apps[0].source), + ) # creates pull type release diff only but args are same + + self.bench2 = create_test_bench(group=group, server=self.bench1.server) + + self.assertNotEqual(self.bench1, self.bench2) + # No need to create app release differences as it'll get autofilled by geo.json + create_test_deploy_candidate_differences(self.bench2.candidate) # for site update to be available + + @patch.object(AgentJob, "enqueue_http_request", new=Mock()) + def test_check_for_updates_shows_update_available_when_site_update_available(self): + from jcloud.api.site import check_for_updates + + self._setup_site_update() + jingrow.set_user(self.team.user) + site = create_test_site(bench=self.bench1.name) + out = check_for_updates(site.name) + self.assertEqual(out["update_available"], True) + + @patch.object(AgentJob, "enqueue_http_request", new=Mock()) + def test_check_for_updates_shows_update_unavailable_when_no_new_bench(self): + from jcloud.api.site import check_for_updates + + bench = create_test_bench() + + jingrow.set_user(self.team.user) + site = create_test_site(bench=bench.name) + out = check_for_updates(site.name) + self.assertEqual(out["update_available"], False) + + @patch.object(AgentJob, "enqueue_http_request", new=Mock()) + def test_installed_apps_returns_installed_apps_of_site(self): + from jcloud.api.site import installed_apps + + app1 = create_test_app() + app2 = create_test_app("jerp", "JERP") + group = create_test_release_group([app1, app2]) + bench = create_test_bench(group=group) + + jingrow.set_user(self.team.user) + site = create_test_site(bench=bench.name) + out = installed_apps(site.name) + self.assertEqual(len(out), 2) + self.assertEqual(out[0]["name"], group.apps[0].source) + self.assertEqual(out[1]["name"], group.apps[1].source) + self.assertEqual(out[0]["app"], group.apps[0].app) + self.assertEqual(out[1]["app"], group.apps[1].app) + + @patch.object(AgentJob, "enqueue_http_request", new=Mock()) + def test_available_apps_shows_apps_installed_in_bench_but_not_in_site(self): + from jcloud.api.site import available_apps + + app1 = create_test_app() + app2 = create_test_app("jerp", "JERP") + app3 = create_test_app("insights", "Insights") + group = create_test_release_group([app1, app2]) + 
bench = create_test_bench(group=group) + + group2 = create_test_release_group([app1, app3]) + create_test_bench(group=group2, server=bench.server) # app3 shouldn't show in available_apps + + jingrow.set_user(self.team.user) + site = create_test_site(bench=bench.name, apps=[app1.name]) + out = available_apps(site.name) + self.assertEqual(len(out), 1) + self.assertEqual(out[0]["name"], group.apps[1].source) + self.assertEqual(out[0]["app"], group.apps[1].app) + + def test_check_dns_(self): + pass + + def test_install_app_adds_to_app_list_only_on_successful_job(self): + from jcloud.api.site import install_app + + app = create_test_app() + app2 = create_test_app("jerp", "JERP") + group = create_test_release_group([app, app2]) + bench = create_test_bench(group=group) + + jingrow.set_user(self.team.user) + site = create_test_site(bench=bench.name, apps=[app.name]) + with fake_agent_job("Install App on Site", "Success"): + install_app(site.name, app2.name) + poll_pending_jobs() + site.reload() + self.assertEqual(len(site.apps), 2) + self.assertEqual(site.status, "Active") + + site = create_test_site(bench=bench.name, apps=[app.name]) + with fake_agent_job("Install App on Site", "Failure"): + install_app(site.name, app2.name) + poll_pending_jobs() + site.reload() + self.assertEqual(len(site.apps), 1) + self.assertEqual(site.status, "Active") + + def test_uninstall_app_removes_from_list_only_on_success(self): + from jcloud.api.site import uninstall_app + + app = create_test_app() + app2 = create_test_app("jerp", "JERP") + group = create_test_release_group([app, app2]) + bench = create_test_bench(group=group) + + jingrow.set_user(self.team.user) + site = create_test_site(bench=bench.name, apps=[app.name, app2.name]) + with fake_agent_job("Uninstall App from Site", "Success"): + uninstall_app(site.name, app2.name) + poll_pending_jobs() + site.reload() + self.assertEqual(len(site.apps), 1) + self.assertEqual(site.status, "Active") + + site = create_test_site(bench=bench.name, apps=[app.name, app2.name]) + with fake_agent_job("Uninstall App from Site", "Failure"): + uninstall_app(site.name, app2.name) + poll_pending_jobs() + site.reload() + self.assertEqual(len(site.apps), 2) + self.assertEqual(site.status, "Active") + + @patch.object(RemoteFile, "exists", new=Mock(return_value=True)) + @patch.object(RemoteFile, "download_link", new="http://test.com") + def test_restore_job_updates_apps_table_with_output_from_job(self): + from jcloud.api.site import restore + + app = create_test_app() + app2 = create_test_app("jerp", "JERP") + app3 = create_test_app("insights", "Insights") + group = create_test_release_group([app, app2, app3]) + bench = create_test_bench(group=group) + + jingrow.set_user(self.team.user) + site = create_test_site(bench=bench.name, apps=[app.name, app2.name]) + database = create_test_remote_file(site.name).name + public = create_test_remote_file(site.name).name + private = create_test_remote_file(site.name).name + + self.assertEqual(len(site.apps), 2) + self.assertEqual(site.apps[0].app, "jingrow") + self.assertEqual(site.apps[1].app, "jerp") + self.assertEqual(site.status, "Active") + + with fake_agent_job( + "Restore Site", + "Success", + data=jingrow._dict( + output="""jingrow 15.0.0-dev HEAD +insights 0.8.3 HEAD +""" + ), + ): + restore( + site.name, + { + "database": database, + "public": public, + "private": private, + }, + ) + poll_pending_jobs() + + site.reload() + self.assertEqual(len(site.apps), 2) + self.assertEqual(site.apps[0].app, "jingrow") + 
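+        # the fake restore job reported jingrow and insights in its output, so
+        # jerp should have been swapped out for insights in the apps child table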
self.assertEqual(site.apps[1].app, "insights") + self.assertEqual(site.status, "Active") + + @patch.object(RemoteFile, "exists", new=Mock(return_value=True)) + @patch.object(RemoteFile, "download_link", new="http://test.com") + def test_restore_job_updates_apps_table_when_only_jingrow_is_installed(self): + from jcloud.api.site import restore + + app = create_test_app() + app2 = create_test_app("jerp", "JERP") + group = create_test_release_group([app, app2]) + bench = create_test_bench(group=group) + + jingrow.set_user(self.team.user) + site = create_test_site(bench=bench.name, apps=[app.name, app2.name]) + database = create_test_remote_file(site.name).name + public = create_test_remote_file(site.name).name + private = create_test_remote_file(site.name).name + + self.assertEqual(len(site.apps), 2) + self.assertEqual(site.apps[0].app, "jingrow") + self.assertEqual(site.apps[1].app, "jerp") + self.assertEqual(site.status, "Active") + + with fake_agent_job( + "Restore Site", "Success", data=jingrow._dict(output="""jingrow 15.0.0-dev HEAD""") + ): + restore( + site.name, + { + "database": database, + "public": public, + "private": private, + }, + ) + poll_pending_jobs() + + site.reload() + self.assertEqual(len(site.apps), 1) + self.assertEqual(site.apps[0].app, "jingrow") + self.assertEqual(site.status, "Active") + + @patch.object(RemoteFile, "exists", new=Mock(return_value=True)) + @patch.object(RemoteFile, "download_link", new="http://test.com") + @patch("jcloud.jcloud.pagetype.site.site.marketplace_app_hook") + def test_restore_job_runs_marketplace_hooks_for_apps_found_in_backup( + self, mock_marketplace_app_hook: Mock + ): + from jcloud.api.site import restore + + app = create_test_app() + app2 = create_test_app("jerp", "JERP") + create_test_marketplace_app("jerp") + app3 = create_test_app("insights", "Insights") + create_test_marketplace_app("insights") + group = create_test_release_group([app, app2, app3]) + bench = create_test_bench(group=group) + + jingrow.set_user(self.team.user) + site = create_test_site(bench=bench.name, apps=[app.name, app2.name]) + database = create_test_remote_file(site.name).name + public = create_test_remote_file(site.name).name + private = create_test_remote_file(site.name).name + + self.assertEqual(len(site.apps), 2) + self.assertEqual(site.apps[0].app, "jingrow") + self.assertEqual(site.apps[1].app, "jerp") + self.assertEqual(site.status, "Active") + + with fake_agent_job( + "Restore Site", + "Success", + data=jingrow._dict( + output="""jingrow 15.0.0-dev HEAD +insights 0.8.3 HEAD +""" + ), + ): + restore( + site.name, + { + "database": database, + "public": public, + "private": private, + }, + ) + poll_pending_jobs() + + mock_marketplace_app_hook.assert_has_calls( + [ + call(app="insights", site=site, op="install"), + call(app="jerp", site=site, op="uninstall"), + ] + ) + + @patch.object(RemoteFile, "exists", new=Mock(return_value=True)) + @patch.object(RemoteFile, "download_link", new="http://test.com") + def test_new_site_from_backup_job_updates_apps_table_with_output_from_job(self): + from jcloud.api.site import new + + app = create_test_app() + app2 = create_test_app("jerp", "JERP") + group = create_test_release_group([app, app2]) + plan = create_test_plan("Site") + create_test_bench(group=group) + subdomain = "testsite" + + # jingrow.set_user(self.team.user) # can't this due to weird perm error with ignore_perimssions in new site + database = create_test_remote_file().name + public = create_test_remote_file().name + private = 
create_test_remote_file().name + with fake_agent_job( + "New Site from Backup", + "Success", + data=jingrow._dict( + output="""jingrow 15.0.0-dev HEAD +jerp 0.8.3 HEAD +""" + ), + ), fake_agent_job( + "Add Site to Upstream", + "Success", + ): + new( + { + "name": subdomain, + "group": group.name, + "plan": plan.name, + "apps": [app.name], # giving 1 app only + "files": { + "database": database, + "public": public, + "private": private, + }, + "cluster": "Default", + } + ) + poll_pending_jobs() + + site = jingrow.get_last_pg("Site", {"subdomain": subdomain}) + self.assertEqual(len(site.apps), 2) + self.assertEqual(site.apps[0].app, "jingrow") + self.assertEqual(site.apps[1].app, "jerp") + self.assertEqual(site.status, "Active") + + def test_change_group_changes_group_and_bench_of_site(self): + from jcloud.api.site import change_group, change_group_options + from jcloud.jcloud.pagetype.site_update.site_update import process_update_site_job_update + + app = create_test_app() + server = create_test_server() + group1 = create_test_release_group([app]) + group2 = create_test_release_group([app]) + bench1 = create_test_bench(group=group1, server=server) + bench2 = create_test_bench(group=group2, server=server) + site = create_test_site( + bench=bench1.name, team=self.team, plan=create_test_plan("Site", private_benches=True).name + ) + + self.assertEqual(change_group_options(site.name), [{"name": group2.name, "title": group2.title}]) + + with fake_agent_job( + "Update Site Migrate", + "Success", + steps=[{"name": "Move Site", "status": "Success"}], + ): + change_group(site.name, group2.name) + + responses.get( + f"https://{site.host_name}/", + status=200, + ) + poll_pending_jobs() + + site_update = jingrow.get_last_pg("Site Update") + job = jingrow.get_pg("Agent Job", site_update.update_job) + + process_update_site_job_update(job) + + site.reload() + + self.assertEqual(site.group, group2.name) + self.assertEqual(site.bench, bench2.name) + + @patch( + "jcloud.jcloud.pagetype.agent_job.agent_job.process_site_migration_job_update", + new=Mock(), + ) + @patch("jcloud.jcloud.pagetype.site.site.create_dns_record", new=Mock()) + @patch("jcloud.jcloud.pagetype.site_migration.site_migration.jingrow.db.commit", new=MagicMock) + def test_site_change_region(self): + from jcloud.api.site import change_region, change_region_options + + app = create_test_app() + tokyo_cluster = create_test_cluster("Tokyo", public=True) + seoul_cluster = create_test_cluster("Seoul", public=True) + tokyo_server = create_test_server(cluster=tokyo_cluster.name) + seoul_server = create_test_server(cluster=seoul_cluster.name) + group = create_test_release_group([app]) + group.append( + "servers", + { + "server": tokyo_server.name, + }, + ) + group.save() + bench = create_test_bench(group=group, server=tokyo_server.name) + + group.append( + "servers", + { + "server": seoul_server.name, + }, + ) + group.save() + + create_test_bench(group=group, server=seoul_server.name) + site = create_test_site(bench=bench.name) + + options = change_region_options(site.name) + + self.assertEqual( + options["regions"], + [jingrow.get_value("Cluster", seoul_server.cluster, ["name", "title", "image"], as_dict=True)], + ) + self.assertEqual(options["current_region"], tokyo_server.cluster) + + with fake_agent_job("Update Site Migrate"): + responses.post( + f"https://{site.server}:443/agent/benches/{site.bench}/sites/{site.host_name}/config", + json={"jobs": []}, + status=200, + ) + change_region(site.name, seoul_server.cluster) + site_migration = 
jingrow.get_last_pg("Site Migration") + site_migration.update_site_record_fields() + + site.reload() + self.assertEqual(site.cluster, seoul_server.cluster) + + def test_version_upgrade_api_upgrades_site(self): + from jcloud.api.site import get_private_groups_for_upgrade, version_upgrade + from jcloud.jcloud.pagetype.site_update.site_update import process_update_site_job_update + + app = create_test_app() + server = create_test_server() + + v14_group = create_test_release_group([app], jingrow_version="Version 14") + v14_group.append( + "servers", + { + "server": server, + }, + ) + v14_group.save() + + v15_group = create_test_release_group([app], jingrow_version="Version 15") + v15_group.append( + "servers", + { + "server": server, + }, + ) + v15_group.save() + + v14_bench = create_test_bench(group=v14_group, server=server) + create_test_bench(group=v15_group, server=server) + site = create_test_site(bench=v14_bench.name) + + self.assertEqual( + get_private_groups_for_upgrade(site.name, v14_group.version), + [ + {"name": v15_group.name, "title": v15_group.title}, + ], + ) + + with fake_agent_job( + "Update Site Migrate", + "Success", + steps=[{"name": "Move Site", "status": "Success"}], + ): + version_upgrade(site.name, v15_group.name) + + responses.get( + f"https://{site.host_name}/", + status=200, + ) + poll_pending_jobs() + + site_update = jingrow.get_last_pg("Site Update") + job = jingrow.get_pg("Agent Job", site_update.update_job) + + process_update_site_job_update(job) + + site.reload() + site_version = jingrow.db.get_value("Release Group", site.group, "version") + self.assertEqual(site_version, v15_group.version) + + @patch( + "jcloud.jcloud.pagetype.agent_job.agent_job.process_site_migration_job_update", + new=Mock(), + ) + @patch("jcloud.jcloud.pagetype.site_migration.site_migration.jingrow.db.commit", new=MagicMock) + def test_site_change_server(self): + from jcloud.api.site import ( + change_server, + change_server_options, + is_server_added_in_group, + ) + from jcloud.utils import get_current_team + + app = create_test_app() + team = get_current_team() + server = create_test_server(team=team) + + group = create_test_release_group([app]) + group.append( + "servers", + { + "server": server, + }, + ) + group.save() + + bench = create_test_bench(group=group, server=server.name) + other_server = create_test_server(team=team) + create_test_bench(group=group, server=other_server.name) + + group.append( + "servers", + { + "server": other_server, + }, + ) + group.save() + + site = create_test_site(bench=bench.name) + + self.assertEqual( + change_server_options(site.name), + [{"name": other_server.name, "title": None}], + ) + + self.assertEqual( + is_server_added_in_group(site.name, other_server.name), + True, + ) + + with fake_agent_job("Update Site Migrate"): + responses.post( + f"https://{site.server}:443/agent/benches/{site.bench}/sites/{site.host_name}/config", + json={"jobs": []}, + status=200, + ) + + change_server(site.name, other_server.name) + site_migration = jingrow.get_last_pg("Site Migration") + site_migration.update_site_record_fields() + + site.reload() + self.assertEqual(site.server, other_server.name) + + def test_update_config(self): + pass + + def test_get_upload_link(self): + pass + + +class TestAPISiteList(unittest.TestCase): + def setUp(self): + from jcloud.jcloud.pagetype.jcloud_tag.test_jcloud_tag import create_and_add_test_tag + from jcloud.jcloud.pagetype.site.test_site import create_test_site + + app = create_test_app() + group = create_test_release_group([app]) 
+ bench = create_test_bench(group=group) + + broken_site = create_test_site(bench=bench.name) + broken_site.status = "Broken" + broken_site.save() + self.broken_site_dict = { + "name": broken_site.name, + "cluster": broken_site.cluster, + "group": broken_site.group, + "plan": None, + "public": 0, + "server_region_info": {"image": None, "title": None}, + "tags": [], + "host_name": broken_site.host_name, + "status": broken_site.status, + "creation": broken_site.creation, + "bench": broken_site.bench, + "current_cpu_usage": broken_site.current_cpu_usage, + "current_database_usage": broken_site.current_database_usage, + "current_disk_usage": broken_site.current_disk_usage, + "trial_end_date": broken_site.trial_end_date, + "team": broken_site.team, + "title": group.title, + "version": group.version, + } + + trial_site = create_test_site(bench=bench.name) + trial_site.trial_end_date = datetime.datetime.now() + trial_site.save() + + self.trial_site_dict = { + "name": trial_site.name, + "cluster": trial_site.cluster, + "group": trial_site.group, + "plan": None, + "public": 0, + "server_region_info": {"image": None, "title": None}, + "tags": [], + "host_name": trial_site.host_name, + "status": trial_site.status, + "creation": trial_site.creation, + "bench": trial_site.bench, + "current_cpu_usage": trial_site.current_cpu_usage, + "current_database_usage": trial_site.current_database_usage, + "current_disk_usage": trial_site.current_disk_usage, + "trial_end_date": trial_site.trial_end_date.date(), + "team": trial_site.team, + "title": group.title, + "version": group.version, + } + + tagged_site = create_test_site(bench=bench.name) + create_and_add_test_tag(tagged_site.name, "Site") + + self.tagged_site_dict = { + "name": tagged_site.name, + "cluster": tagged_site.cluster, + "group": tagged_site.group, + "plan": None, + "public": 0, + "server_region_info": {"image": None, "title": None}, + "tags": ["test_tag"], + "host_name": tagged_site.host_name, + "status": tagged_site.status, + "creation": tagged_site.creation, + "bench": tagged_site.bench, + "current_cpu_usage": tagged_site.current_cpu_usage, + "current_database_usage": tagged_site.current_database_usage, + "current_disk_usage": tagged_site.current_disk_usage, + "trial_end_date": tagged_site.trial_end_date, + "team": tagged_site.team, + "title": group.title, + "version": group.version, + } + + def tearDown(self): + jingrow.db.rollback() + + def test_list_all_sites(self): + self.assertCountEqual(all(), [self.broken_site_dict, self.trial_site_dict, self.tagged_site_dict]) + + def test_list_broken_sites(self): + self.assertEqual(all(site_filter={"status": "Broken", "tag": ""}), [self.broken_site_dict]) + + def test_list_trial_sites(self): + self.assertEqual(all(site_filter={"status": "Trial", "tag": ""}), [self.trial_site_dict]) + + def test_list_tagged_sites(self): + self.assertEqual(all(site_filter={"status": "", "tag": "test_tag"}), [self.tagged_site_dict]) diff --git a/jcloud/api/webhook.py b/jcloud/api/webhook.py new file mode 100644 index 0000000..7f57d2c --- /dev/null +++ b/jcloud/api/webhook.py @@ -0,0 +1,83 @@ +# Copyright (c) 2022, JINGROW +# For license information, please see license.txt + +from __future__ import annotations + +import json + +import jingrow + +from jcloud.jcloud.pagetype.jcloud_role.jcloud_role import check_role_permissions + + +@jingrow.whitelist(allow_guest=True) +def available_events(): + return jingrow.get_all( + "Jcloud Webhook Event", + fields=["name", "description"], + filters={"enabled": 1}, + order_by="creation 
desc", + ) + + +@jingrow.whitelist() +def add(endpoint: str, secret: str, events: list[str]): + check_role_permissions("Jcloud Webhook") + pg = jingrow.new_pg("Jcloud Webhook") + pg.endpoint = endpoint + pg.secret = secret + pg.team = jingrow.local.team().name + for event in events: + pg.append("events", {"event": event}) + pg.save() + + +@jingrow.whitelist() +def update(name: str, endpoint: str, secret: str, events: list[str]): + check_role_permissions("Jcloud Webhook") + pg = jingrow.get_pg("Jcloud Webhook", name) + pg.endpoint = endpoint + if secret: + pg.secret = secret + # reset event list + pg.events = [] + # add new events + for event in events: + pg.append("events", {"event": event}) + pg.save() + + +@jingrow.whitelist() +def attempts(webhook: str): + check_role_permissions("Jcloud Webhook Log") + pg = jingrow.get_pg("Jcloud Webhook", webhook) + pg.has_permission("read") + + JcloudWebhookAttempt = jingrow.qb.PageType("Jcloud Webhook Attempt") + JcloudWebhookLog = jingrow.qb.PageType("Jcloud Webhook Log") + query = ( + jingrow.qb.from_(JcloudWebhookAttempt) + .select( + JcloudWebhookAttempt.name, + JcloudWebhookAttempt.endpoint, + JcloudWebhookLog.event, + JcloudWebhookAttempt.status, + JcloudWebhookAttempt.response_status_code, + JcloudWebhookAttempt.timestamp, + ) + .left_join(JcloudWebhookLog) + .on(JcloudWebhookAttempt.parent == JcloudWebhookLog.name) + .where(JcloudWebhookAttempt.webhook == pg.name) + .orderby(JcloudWebhookAttempt.timestamp, order=jingrow.qb.desc) + ) + return query.run(as_dict=1) + + +@jingrow.whitelist() +def attempt(name: str): + check_role_permissions("Jcloud Webhook Attempt") + pg = jingrow.get_pg("Jcloud Webhook Attempt", name) + pg.has_permission("read") + data = pg.as_dict() + data.request_payload = json.loads(jingrow.get_value("Jcloud Webhook Log", pg.parent, "request_payload")) + return data diff --git a/jcloud/auth.py b/jcloud/auth.py new file mode 100644 index 0000000..03c14e4 --- /dev/null +++ b/jcloud/auth.py @@ -0,0 +1,130 @@ +# Copyright (c) 2022, JINGROW +# For license information, please see license.txt + +import json +import os +import traceback + +import jingrow + +JCLOUD_AUTH_KEY = "jcloud-auth-logs" +JCLOUD_AUTH_MAX_ENTRIES = 1000000 + + +ALLOWED_PATHS = [ + "/api/method/create-site-migration", + "/api/method/create-version-upgrade", + "/api/method/migrate-to-private-bench", + "/api/method/find-my-sites", + "/api/method/jingrow.core.pagetype.communication.email.mark_email_as_seen", + "/api/method/jingrow.realtime.get_user_info", + "/api/method/jingrow.realtime.can_subscribe_pg", + "/api/method/jingrow.realtime.can_subscribe_pagetype", + "/api/method/jingrow.realtime.has_permission", + "/api/method/jingrow.www.login.login_via_jingrow", + "/api/method/jingrow.integrations.oauth2.authorize", + "/api/method/jingrow.integrations.oauth2.approve", + "/api/method/jingrow.integrations.oauth2.get_token", + "/api/method/jingrow.integrations.oauth2.openid_profile", + "/api/method/jingrow.integrations.oauth2_logins.login_via_jingrow", + "/api/method/jingrow.website.pagetype.web_page_view.web_page_view.make_view_log", + "/api/method/get-user-sites-list-for-new-ticket", + "/api/method/ping", + "/api/method/login", + "/api/method/logout", + "/api/method/jcloud.jcloud.pagetype.razorpay_webhook_log.razorpay_webhook_log.razorpay_webhook_handler", + "/api/method/jcloud.jcloud.pagetype.razorpay_webhook_log.razorpay_webhook_log.razorpay_authorized_payment_handler", + 
"/api/method/jcloud.jcloud.pagetype.stripe_webhook_log.stripe_webhook_log.stripe_webhook_handler", + "/api/method/upload_file", + "/api/method/jingrow.search.web_search", + "/api/method/jingrow.email.queue.unsubscribe", + "/api/method/jcloud.utils.telemetry.capture_read_event", + "/api/method/validate_plan_change", + "/api/method/marketplace-apps", + "/api/method/jcloud.www.dashboard.get_context_for_dev", + "/api/method/jingrow.website.pagetype.web_form.web_form.accept", + "/api/method/jingrow.core.pagetype.user.user.test_password_strength", + "/api/method/jingrow.core.pagetype.user.user.update_password", + "/api/method/get_central_migration_data", +] + +ALLOWED_WILDCARD_PATHS = [ + "/api/method/jcloud.api.", + "/api/method/jcloud.saas.", + "/api/method/wiki.", + "/api/method/jingrow.integrations.oauth2_logins.", + "/api/method/jcloud.www.marketplace.index.", +] + +DENIED_PATHS = [ + # Added from jingrow/wwww/.. + "/printview", + "/printpreview", +] + + +DENIED_WILDCARD_PATHS = [ + "/api/", +] + + +def hook(): # noqa: C901 + if jingrow.form_dict.cmd: + path = f"/api/method/{jingrow.form_dict.cmd}" + else: + path = jingrow.request.path + + user_type = jingrow.get_cached_value("User", jingrow.session.user, "user_type") + + # Allow unchecked access to System Users + if user_type == "System User": + return + + if path in DENIED_PATHS: + log(path, user_type) + jingrow.throw("Access not allowed for this URL", jingrow.AuthenticationError) + + for denied in DENIED_WILDCARD_PATHS: + if path.startswith(denied): + for allowed in ALLOWED_WILDCARD_PATHS: + if path.startswith(allowed): + return + if path in ALLOWED_PATHS: + return + + log(path, user_type) + jingrow.throw("Access not allowed for this URL", jingrow.AuthenticationError) + + return + + +def log(path, user_type): + data = { + "ip": jingrow.local.request_ip, + "timestamp": jingrow.utils.now(), + "user_type": user_type, + "path": path, + "user": jingrow.session.user, + "referer": jingrow.request.headers.get("Referer", ""), + } + + if jingrow.cache().llen(JCLOUD_AUTH_KEY) > JCLOUD_AUTH_MAX_ENTRIES: + jingrow.cache().ltrim(JCLOUD_AUTH_KEY, 1, -1) + serialized = json.dumps(data, sort_keys=True, default=str) + jingrow.cache().rpush(JCLOUD_AUTH_KEY, serialized) + + +def flush(): + log_file = os.path.join(jingrow.utils.get_bench_path(), "logs", "jcloud.auth.json.log") + try: + # Fetch all entries without removing from cache + logs = jingrow.cache().lrange(JCLOUD_AUTH_KEY, 0, -1) + if logs: + logs = list(map(jingrow.safe_decode, logs)) + with open(log_file, "a", os.O_NONBLOCK) as f: + f.write("\n".join(logs)) + f.write("\n") + # Remove fetched entries from cache + jingrow.cache().ltrim(JCLOUD_AUTH_KEY, len(logs) - 1, -1) + except Exception: + traceback.print_exc() diff --git a/jcloud/bootstrap.py b/jcloud/bootstrap.py new file mode 100644 index 0000000..fb8b969 --- /dev/null +++ b/jcloud/bootstrap.py @@ -0,0 +1,344 @@ +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + +import os +import time + +import jingrow +from jingrow.desk.page.setup_wizard.setup_wizard import setup_complete + +ADMIN_EMAIL = "" +HOME_DIRECTORY = "" +CERTBOT_DIRECTORY = os.path.join(HOME_DIRECTORY, ".certbot") +WEBROOT_DIRECTORY = os.path.join(HOME_DIRECTORY, ".webroot") + +# We've already configured Route 53 zone for this +# Don't change this unless you know what you're doing +ROOT_DOMAIN = "local.jingrow.dev" +AWS_ACCESS_KEY_ID = "" +AWS_SECRET_ACCESS_KEY = "" + +BENCH_PATH = "" +CLONE_DIRECTORY = os.path.join(BENCH_PATH, "clones") 
+BUILD_DIRECTORY = os.path.join(BENCH_PATH, "builds") + +TELEGRAM_CHAT_ID = "" +TELEGRAM_BOT_TOKEN = "" + +AGENT_REPOSITORY_OWNER = "" +GITHUB_ACCESS_TOKEN = "" + +STRIPE_PUBLISHABLE_KEY = "" +STRIPE_SECRET_KEY = "" +NGROK_AUTH_TOKEN = "" + +MAIL_SERVER = "" +MAIL_PORT = "" +MAIL_LOGIN = "" +MAIL_PASSWORD = "" + + +def prepare(): + complete_setup_wizard() + settings = jingrow.get_single("Jcloud Settings") + setup_certbot(settings) + setup_root_domain(settings) + setup_stripe(settings) + + setup_agent(settings) + + setup_proxy_server() + setup_database_server() + setup_server() + + setup_registry(settings) + + setup_logging(settings) + setup_monitoring(settings) + setup_tracing(settings) + + setup_apps() + setup_teams() + setup_plans() + + +def complete_setup_wizard(): + setup_complete( + { + "language": "English", + "country": "China", + "timezone": "Asia/Kolkata", + "currency": "CNY", + } + ) + + +def setup_certbot(settings): + settings.eff_registration_email = ADMIN_EMAIL + settings.webroot_directory = WEBROOT_DIRECTORY + settings.certbot_directory = CERTBOT_DIRECTORY + settings.save() + settings.reload() + + +def setup_root_domain(settings): + if jingrow.db.get_value("Root Domain", ROOT_DOMAIN): + domain = jingrow.get_pg("Root Domain", ROOT_DOMAIN) + else: + domain = jingrow.get_pg( + { + "pagetype": "Root Domain", + "name": ROOT_DOMAIN, + "default_cluster": "Default", + "aws_access_key_id": AWS_ACCESS_KEY_ID, + "aws_secret_access_key": AWS_SECRET_ACCESS_KEY, + } + ).insert() + jingrow.db.commit() + while not jingrow.db.exists( + "TLS Certificate", {"wildcard": True, "domain": ROOT_DOMAIN, "status": "Active"} + ): + print("Waiting for TLS certificate") + time.sleep(1) + jingrow.db.commit() + + settings.domain = domain.name + settings.cluster = domain.default_cluster + settings.save() + settings.reload() + + +def setup_stripe(settings): + settings.stripe_publishable_key = STRIPE_PUBLISHABLE_KEY + settings.stripe_secret_key = STRIPE_SECRET_KEY + settings.ngrok_auth_token = NGROK_AUTH_TOKEN + settings.save() + settings.reload() + + +def setup_registry(settings): + registry = jingrow.get_pg( + { + "pagetype": "Registry Server", + "hostname": "registry", + "ip": "10.0.4.101", + "private_ip": "10.1.4.101", + } + ).insert() + + settings.clone_directory = CLONE_DIRECTORY + settings.build_directory = BUILD_DIRECTORY + + settings.docker_registry_url = registry.name + settings.docker_registry_username = registry.registry_username + settings.docker_registry_password = registry.get_password("registry_password") + + settings.save() + settings.reload() + + +def setup_logging(settings): + log = jingrow.get_pg( + { + "pagetype": "Log Server", + "hostname": "log", + "ip": "10.0.4.102", + "private_ip": "10.1.4.102", + } + ).insert() + + settings.log_server = log.name + + settings.save() + settings.reload() + + +def setup_monitoring(settings): + monitor = jingrow.get_pg( + { + "pagetype": "Monitor Server", + "hostname": "monitor", + "ip": "10.0.4.103", + "private_ip": "10.1.4.103", + } + ).insert() + + settings.monitor_server = monitor.name + settings.monitor_token = jingrow.generate_hash() + settings.jcloud_monitoring_password = jingrow.generate_hash() + + jingrow.get_pg( + { + "pagetype": "Telegram Group", + "name": "Alerts", + "chat_id": TELEGRAM_CHAT_ID, + } + ).insert() + + settings.telegram_alerts_chat_group = "Alerts" + settings.telegram_bot_token = TELEGRAM_BOT_TOKEN + + settings.save() + settings.reload() + + +def setup_tracing(settings): + redirect_uri = f"https://trace.{ROOT_DOMAIN}/auth/sso/" + 
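+    # register an OAuth Client so the Sentry instance on the Trace Server can
+    # use this site as its SSO provider; its client id/secret are handed to the
+    # Trace Server record created below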
oauth_client = jingrow.get_pg( + { + "pagetype": "OAuth Client", + "app_name": "Sentry", + "scopes": "all openid email", + "default_redirect_uri": redirect_uri, + "redirect_uris": redirect_uri, + "skip_authorization": True, + } + ).insert() + + jingrow.get_pg( + { + "pagetype": "Trace Server", + "hostname": "trace", + "ip": "10.0.4.105", + "private_ip": "10.1.4.105", + "sentry_admin_email": ADMIN_EMAIL, + "sentry_admin_password": jingrow.generate_hash(), + "sentry_mail_server": MAIL_SERVER, + "sentry_mail_port": MAIL_PORT, + "sentry_mail_login": MAIL_LOGIN, + "sentry_mail_password": MAIL_PASSWORD, + "sentry_oauth_server_url": jingrow.utils.get_url(), + "sentry_oauth_client_id": oauth_client.client_id, + "sentry_oauth_client_secret": oauth_client.client_secret, + } + ).insert() + + +def setup_agent(settings): + settings.agent_repository_owner = AGENT_REPOSITORY_OWNER + settings.agent_github_access_token = GITHUB_ACCESS_TOKEN + settings.github_access_token = GITHUB_ACCESS_TOKEN + settings.save() + settings.reload() + + +def setup_proxy_server(): + jingrow.get_pg( + { + "pagetype": "Proxy Server", + "hostname": "n1", + "ip": "10.0.1.101", + "private_ip": "10.1.1.101", + } + ).insert() + + +def setup_database_server(): + jingrow.get_pg( + { + "pagetype": "Database Server", + "title": "First - Database", + "hostname": "m1", + "ip": "10.0.3.101", + "private_ip": "10.1.3.101", + } + ).insert() + + +def setup_server(): + jingrow.get_pg( + { + "pagetype": "Server", + "title": "First - Application", + "hostname": "f1", + "ip": "10.0.2.101", + "private_ip": "10.1.2.101", + "proxy_server": f"n1.{ROOT_DOMAIN}", + "database_server": f"m1.{ROOT_DOMAIN}", + } + ).insert() + + +def setup(): + servers = [ + ("Proxy Server", f"n1.{ROOT_DOMAIN}"), + ("Database Server", f"m1.{ROOT_DOMAIN}"), + ("Server", f"f1.{ROOT_DOMAIN}"), + ("Registry Server", f"registry.{ROOT_DOMAIN}"), + ("Log Server", f"log.{ROOT_DOMAIN}"), + ("Monitor Server", f"monitor.{ROOT_DOMAIN}"), + ("Trace Server", f"trace.{ROOT_DOMAIN}"), + ] + for server_type, server in servers: + jingrow.get_pg(server_type, server).setup_server() + + +def setup_teams(): + from jcloud.api.account import signup + from jcloud.jcloud.pagetype.team.team import Team + + signup("cloud@jingrow.com") + request = jingrow.get_all( + "Account Request", ["*"], {"email": "cloud@jingrow.com"}, limit=1 + )[0] + cloud = Team.create_new(request, "Jingrow", "Cloud", "JingrowCloud@1", "China", False) + + signup("dev@jingrow.com") + request = jingrow.get_all( + "Account Request", ["*"], {"email": "dev@jingrow.com"}, limit=1 + )[0] + aditya = Team.create_new(request, "Aditya", "Hase", "AdityaHase@1", "China", False) + + cloud.append("team_members", {"user": aditya.name}) + cloud.save() + + +def setup_plans(): + plans = [("Free", 0), ("USD 10", 10), ("USD 25", 25)] + for index, plan in enumerate(plans, 1): + if jingrow.db.exists("Site Plan", plan[0]): + continue + jingrow.get_pg( + { + "pagetype": "Site Plan", + "name": plan[0], + "document_type": "Site", + "plan_title": plan[0], + "price_usd": plan[1], + "price_cny": plan[1] * 80, + "cpu_time_per_day": index, + "max_database_usage": 1024 * index, + "max_storage_usage": 10240 * index, + "roles": [ + {"role": "System Manager"}, + {"role": "Jcloud Admin"}, + {"role": "Jcloud Member"}, + ], + } + ).insert() + + +def setup_apps(): + app = jingrow.get_pg( + {"pagetype": "App", "name": "jingrow", "title": "Jingrow Framework", "jingrow": True} + ).insert() + source = jingrow.get_pg( + { + "pagetype": "App Source", + "app": app.name, + 
"branch": "develop", + "repository_url": "http://git.jingrow.com:3000/jingrow/jingrow", + "public": True, + "team": "Administrator", + "versions": [{"version": "Nightly"}], + } + ).insert() + jingrow.get_pg( + { + "pagetype": "Release Group", + "title": "Jingrow", + "version": "Nightly", + "team": "Administrator", + "apps": [{"app": app.name, "source": source.name}], + } + ).insert() diff --git a/jcloud/commands.py b/jcloud/commands.py new file mode 100644 index 0000000..8faebe4 --- /dev/null +++ b/jcloud/commands.py @@ -0,0 +1,51 @@ +from __future__ import absolute_import + +import click +import jingrow +from jingrow.commands import get_site, pass_context + + +@click.command("ngrok-webhook") +@pass_context +def start_ngrok_and_set_webhook(context): + from pyngrok import ngrok + + from jcloud.api.billing import get_stripe + + site = get_site(context) + jingrow.init(site=site) + jingrow.connect() + + # Set ngrok auth token + auth_token = jingrow.db.get_single_value("Jcloud Settings", "ngrok_auth_token") + + if auth_token: + ngrok.set_auth_token(auth_token) + + port = jingrow.conf.http_port or jingrow.conf.webserver_port + tunnel = ngrok.connect(port, host_header=site) + public_url = tunnel.public_url + print() + print(f"{public_url} -> http://{site}:{port}") + print(f"Inspect logs at {tunnel.api_url}") + + stripe = get_stripe() + url = f"{public_url}/api/method/jcloud.jcloud.pagetype.stripe_webhook_log.stripe_webhook_log.stripe_webhook_handler" + stripe.WebhookEndpoint.modify( + jingrow.db.get_single_value("Jcloud Settings", "stripe_webhook_endpoint_id"), url=url + ) + print("Updated Stripe Webhook Endpoint") + + ngrok_process = ngrok.get_ngrok_process() + try: + # Block until CTRL-C or some other terminating event + ngrok_process.proc.wait() + except KeyboardInterrupt: + print("Shutting down server...") + jingrow.destroy() + ngrok.kill() + + +commands = [ + start_ngrok_and_set_webhook, +] diff --git a/jcloud/config/__init__.py b/jcloud/config/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/config/desktop.py b/jcloud/config/desktop.py new file mode 100644 index 0000000..a4275c0 --- /dev/null +++ b/jcloud/config/desktop.py @@ -0,0 +1,18 @@ +# -*- coding: utf-8 -*- + +from jingrow import _ + + +def get_data(): + return [ + { + "module_name": "Jcloud", + "category": "Modules", + "color": "grey", + "description": "Managed Jingrow Hosting", + "icon": "octicon octicon-rocket", + "type": "module", + "label": _("Jcloud"), + "reverse": 1, + } + ] diff --git a/jcloud/config/docs.py b/jcloud/config/docs.py new file mode 100644 index 0000000..e88b26c --- /dev/null +++ b/jcloud/config/docs.py @@ -0,0 +1,12 @@ +""" +Configuration for docs +""" + +# source_link = "https://github.com/[org_name]/jcloud" +# docs_base_url = "https://[org_name].github.io/jcloud" +# headline = "App that does everything" +# sub_heading = "Yes, you got that right the first time, everything" + + +def get_context(context): + context.brand_html = "Jcloud" diff --git a/jcloud/config/jcloud.py b/jcloud/config/jcloud.py new file mode 100755 index 0000000..2f9687f --- /dev/null +++ b/jcloud/config/jcloud.py @@ -0,0 +1,51 @@ +from jingrow import _ + + +def get_data(): + return [ + { + "label": _("Components"), + "items": [ + {"type": "pagetype", "name": "Proxy Server"}, + {"type": "pagetype", "name": "Server"}, + {"type": "pagetype", "name": "Bench"}, + {"type": "pagetype", "name": "Site"}, + {"type": "pagetype", "name": "Account Request"}, + ], + }, + { + "label": _("Agent"), + "items": [ + {"type": 
"pagetype", "name": "Agent Job"}, + {"type": "pagetype", "name": "Agent Job Step"}, + {"type": "pagetype", "name": "Agent Job Type"}, + ], + }, + { + "label": _("Setup"), + "items": [ + {"type": "pagetype", "name": "Site Plan"}, + {"type": "pagetype", "name": "App"}, + {"type": "pagetype", "name": "Release Group"}, + {"type": "pagetype", "name": "App Release"}, + {"type": "pagetype", "name": "Deploy Candidate"}, + ], + }, + {"label": _("Domains"), "items": [{"type": "pagetype", "name": "Custom Domain"}]}, + { + "label": _("Payments"), + "items": [ + {"type": "pagetype", "name": "Credit Ledger Entry"}, + {"type": "pagetype", "name": "Payment"}, + {"type": "pagetype", "name": "Payment Provision"}, + {"type": "pagetype", "name": "Usage Report"}, + ], + }, + { + "label": _("Deploy"), + "items": [ + {"type": "pagetype", "name": "Site Deploy"}, + {"type": "pagetype", "name": "Bench Deploy"}, + ], + }, + ] diff --git a/jcloud/docker/.vimrc b/jcloud/docker/.vimrc new file mode 100644 index 0000000..5a39bcf --- /dev/null +++ b/jcloud/docker/.vimrc @@ -0,0 +1,9 @@ +set noexpandtab +set shiftwidth=4 +set tabstop=4 +set number +set backspace=indent,eol,start +set complete-=i +set smarttab +set incsearch +set ruler diff --git a/jcloud/docker/Dockerfile b/jcloud/docker/Dockerfile new file mode 100644 index 0000000..ffde2a7 --- /dev/null +++ b/jcloud/docker/Dockerfile @@ -0,0 +1,261 @@ +# syntax = docker/dockerfile:experimental +FROM ubuntu:20.04 + +ENV LANG C.UTF-8 +ENV DEBIAN_FRONTEND noninteractive + +ENV OPENBLAS_NUM_THREADS 1 +ENV MKL_NUM_THREADS 1 + +# Install essential packages +RUN --mount=type=cache,target=/var/cache/apt apt-get update \ + && apt-get install --yes --no-install-suggests --no-install-recommends \ + # Essentials + build-essential \ + git \ + mariadb-client \ + libmariadb-dev \ + pv \ + ntp \ + wget \ + curl \ + supervisor \ + file \ + # Dependencies for SSH access + openssh-server \ + nano \ + vim \ + less \ + htop \ + iputils-ping \ + telnet \ + # Dependencies for adding Python PPA + software-properties-common \ + gnupg \ + # weasyprint dependencies + libpango-1.0-0 \ + libharfbuzz0b \ + libpangoft2-1.0-0 \ + libpangocairo-1.0-0 \ + # wkhtmltopdf dependencies + ca-certificates \ + fontconfig \ + libfreetype6 \ + libjpeg-turbo8 \ + libpng16-16 \ + libx11-6 \ + libxcb1 \ + libxext6 \ + libxrender1 \ + xfonts-75dpi \ + xfonts-base \ + # pycups dependencies + gcc \ + libcups2-dev \ + # s3-attachment dependencies + libmagic1 \ + && rm -rf /var/lib/apt/lists/* \ + `#stage-pre-essentials` + +COPY --chown=root:root supervisord.conf /etc/supervisor/supervisord.conf + +# Install Redis from PPA +RUN --mount=type=cache,target=/var/cache/apt curl -fsSL https://packages.redis.io/gpg | gpg --dearmor -o /usr/share/keyrings/redis-archive-keyring.gpg \ + && echo "deb [signed-by=/usr/share/keyrings/redis-archive-keyring.gpg] https://packages.redis.io/deb focal main" | tee /etc/apt/sources.list.d/redis.list \ + && apt-get update \ + && apt-get install --yes --no-install-suggests --no-install-recommends \ + redis-server \ + && rm -rf /var/lib/apt/lists/* `#stage-pre-redis` + +# Install Python from DeadSnakes PPA +ENV {{ pg.get_dependency_version("python", True) }} +RUN --mount=type=cache,target=/var/cache/apt add-apt-repository ppa:deadsnakes/ppa \ + && apt-get update \ + && apt-get install --yes --no-install-suggests --no-install-recommends \ + python${PYTHON_VERSION} \ + python${PYTHON_VERSION}-dev \ + python${PYTHON_VERSION}-venv \ + python${PYTHON_VERSION}-distutils \ + && rm -rf 
/var/lib/apt/lists/* \ + `#stage-pre-python` + + +# Install wkhtmltopdf +ENV {{ pg.get_dependency_version("wkhtmltopdf", True) }} +{% if pg.get_dependency_version("wkhtmltopdf") == '0.12.6' %} +RUN wget https://github.com/wkhtmltopdf/packaging/releases/download/0.12.6-1/wkhtmltox_0.12.6-1.focal_amd64.deb \ + && dpkg -i wkhtmltox_0.12.6-1.focal_amd64.deb \ + && rm wkhtmltox_0.12.6-1.focal_amd64.deb \ + `#stage-pre-wkhtmltopdf` +{% elif pg.get_dependency_version("wkhtmltopdf") == '0.12.5' %} +RUN wget https://github.com/wkhtmltopdf/wkhtmltopdf/releases/download/0.12.5/wkhtmltox_0.12.5-1.focal_amd64.deb \ + && dpkg -i wkhtmltox_0.12.5-1.focal_amd64.deb \ + && rm wkhtmltox_0.12.5-1.focal_amd64.deb \ + `#stage-pre-wkhtmltopdf` +{% elif pg.get_dependency_version("wkhtmltopdf") == '0.12.4' %} +RUN wget https://github.com/wkhtmltopdf/wkhtmltopdf/releases/download/0.12.4/wkhtmltox-0.12.4_linux-generic-amd64.tar.xz \ + && tar -xvf wkhtmltox-0.12.4_linux-generic-amd64.tar.xz \ + && mv wkhtmltox/bin/wkhtmlto* /usr/local/bin/ \ + && rm -rf wkhtmltox-0.12.4_linux-generic-amd64.tar.xz wkhtmltox \ + `#stage-pre-wkhtmltopdf` +{% endif %} + +{% if pg.is_code_server_enabled %} +RUN curl -fsSL https://code-server.dev/install.sh | sh `#stage-pre-code-server` +{% endif %} + +# Install Fonts +RUN git clone --progress --depth 1 http://git.jingrow.com:3000/jingrow/fonts.git /tmp/fonts \ + && rm -rf /etc/fonts && mv /tmp/fonts/etc_fonts /etc/fonts \ + && rm -rf /usr/share/fonts && mv /tmp/fonts/usr_share_fonts /usr/share/fonts \ + && rm -rf /tmp/fonts \ + && fc-cache -fv \ + `#stage-pre-fonts` + + +# Set max_allowed_packet to 512 MB for mysqldump +RUN echo "[mysqldump]\nmax_allowed_packet = 512M" > /etc/mysql/conf.d/mysqldump.cnf + +# Add jingrow user +RUN useradd -ms /bin/bash jingrow + +# Install Additional Packages +{% for p in pg.additional_packages %} + +# Run before install scripts +{% if p.prerequisites %} +RUN --mount=type=cache,target=/var/cache/apt {{ p.prerequisites }} \ + `#stage-pre_before-{{ p.package }}` +{% endif %} + +# Install non Ubuntu packages +{% if p.package_manager not in ["apt-get", "apt"] %} +RUN {{ p.package_manager }} install {{ p.package }} \ + `#stage-pre-{{ p.package }}` + +# Install Ubuntu packages +{% else %} +RUN --mount=type=cache,target=/var/cache/apt apt-get update \ + && apt-get install --yes --no-install-suggests --no-install-recommends {{ p.package }} \ + && rm -rf /var/lib/apt/lists/* \ + `#stage-pre-{{ p.package }}` +{% endif %} + +# Run after install scripts +{% if p.after_install %} +RUN --mount=type=cache,target=/var/cache/apt {{ p.after_install }} \ + && rm -rf /var/lib/apt/lists/* \ + `#stage-pre_after-{{ p.package }}` +{% endif %} + +{% endfor %} + + +# symlink mysqldump to mariadb-dump +RUN ln -s /usr/bin/mysqldump /usr/bin/mariadb-dump + +# Switch to jingrow +USER jingrow +WORKDIR /home/jingrow + + +# Install Node using NVM +ENV NVM_DIR /home/jingrow/.nvm +ENV {{ pg.get_dependency_version("nvm", True) }} +ENV {{ pg.get_dependency_version("node", True) }} + +RUN wget https://raw.githubusercontent.com/nvm-sh/nvm/v${NVM_VERSION}/install.sh \ + && bash install.sh \ + && . 
"/home/jingrow/.nvm/nvm.sh" \ + && nvm install ${NODE_VERSION} \ + && nvm use v${NODE_VERSION} \ + && nvm alias default v${NODE_VERSION} \ + && rm install.sh \ + && nvm cache clear \ + `#stage-pre-node` + +ENV PATH "$PATH:/home/jingrow/.nvm/versions/node/v${NODE_VERSION}/bin" + +# Install Yarn +RUN --mount=type=cache,target=/home/jingrow/.cache,uid=1000,gid=1000 npm install -g yarn `#stage-pre-yarn` + + +# Install Bench +ENV PATH "$PATH:/home/jingrow/.local/bin" + +RUN wget https://bootstrap.pypa.io/get-pip.py && python${PYTHON_VERSION} get-pip.py `#stage-pre-pip` +ENV {{ pg.get_dependency_version("bench", True) }} +RUN --mount=type=cache,target=/home/jingrow/.cache,uid=1000,gid=1000 python${PYTHON_VERSION} -m pip install --upgrade http://npm.jingrow.com:105/jingrow-bench-5.23.0.tar.gz `#stage-bench-bench` + +RUN --mount=type=cache,target=/home/jingrow/.cache,uid=1000,gid=1000 python${PYTHON_VERSION} -m pip install Jinja2~=3.0.3 +RUN --mount=type=cache,target=/home/jingrow/.cache,uid=1000,gid=1000 python${PYTHON_VERSION} -m pip install --upgrade setuptools + +RUN git config --global advice.detachedHead false + +ENV PYTHONUNBUFFERED 1 + +# For the sake of completing the step +RUN `#stage-bench-env` + +# Set environment variables +{% for v in pg.environment_variables %} +ENV {{v.key}} {{ v.value }} +{% endfor %} + +# Install Jingrow app +RUN --mount=type=cache,sharing=locked,target=/home/jingrow/.cache,uid=1000,gid=1000 --mount=type=bind,source=apps/jingrow,target=/home/jingrow/context/apps/jingrow bench init --python /usr/bin/python${PYTHON_VERSION} --no-backups --jingrow-path file:///home/jingrow/context/apps/jingrow jingrow-bench `#stage-apps-jingrow` +WORKDIR /home/jingrow/jingrow-bench + +RUN --mount=type=cache,target=/home/jingrow/.cache,uid=1000,gid=1000 /home/jingrow/jingrow-bench/env/bin/pip install pycups==2.0.1 + +# Install Redisearch 2.0 from precompiled binaries +COPY --chown=jingrow:jingrow redis /home/jingrow/jingrow-bench/redis +COPY --chown=jingrow:jingrow .vimrc /home/jingrow/.vimrc +COPY --chown=jingrow:jingrow common_site_config.json /home/jingrow/jingrow-bench/sites/common_site_config.json + +# Install other apps +{% for app in pg.apps %} +{% if app.app != "jingrow" %} + +RUN --mount=type=cache,sharing=locked,target=/home/jingrow/.cache,uid=1000,gid=1000 \ + --mount=type=bind,source=apps/{{ app.app }},target=/home/jingrow/context/apps/{{ app.app }} \ + bench get-app file:///home/jingrow/context/apps/{{ app.app }} \ + {% if app.use_cached %} + # Bench get-app flags to use get-app cache + --cache-key {{ app.hash }} {% if pg.compress_app_cache %}--compress-artifacts{% endif %} \ + {% endif %} + `#stage-apps-{{ app.app }}` + +{% endif %} +{% endfor %} + +# Pull app updates from mounted repository +{% for app in pg.apps %} +{% if app.pullable_release %} +RUN --mount=type=bind,source=app_updates/{{ app.app }},target=/home/jingrow/context/app_updates/{{ app.app }} \ + cd /home/jingrow/jingrow-bench/apps/{{ app.app }} \ + && git reset --hard \ + && git remote add -f app_update /home/jingrow/context/app_updates/{{ app.app }} \ + && git fetch --depth 1 app_update {{ app.pullable_hash }} \ + && git checkout {{ app.pullable_hash }} \ + && git remote remove app_update \ + `#stage-pull-{{ app.app }}` +{% endif %} +{% endfor %} + + +COPY --chown=jingrow:jingrow config /home/jingrow/jingrow-bench/config +COPY --chown=jingrow:jingrow apps.txt /home/jingrow/jingrow-bench/sites/apps.txt + +# Create custom mounts +{% for m in pg.container_mounts %} +RUN mkdir -p {{ m.destination }} && \ + 
chown -R jingrow:jingrow {{ m.destination }} \ + `#stage-mounts-create` +{% endfor %} + +ENV JINGROW_HARD_LINK_ASSETS True +ENV HISTTIMEFORMAT "%Y-%m-%d %T " + +EXPOSE 8000 9000 2200 8088 +CMD ["supervisord"] diff --git a/jcloud/docker/Dockerfile_Bench_5_2_1 b/jcloud/docker/Dockerfile_Bench_5_2_1 new file mode 100644 index 0000000..7ec22bb --- /dev/null +++ b/jcloud/docker/Dockerfile_Bench_5_2_1 @@ -0,0 +1,176 @@ +# syntax = docker/dockerfile:experimental +FROM ubuntu:20.04 + +{% for d in pg.dependencies %} +ENV {{ d.dependency }} {{ d.version }} +{% else %} +ENV NVM_VERSION 0.36.0 +ENV NODE_VERSION 14.19.0 +ENV PYTHON_VERSION 3.7 +ENV WKHTMLTOPDF_VERSION 0.12.5 +ENV BENCH_VERSION 5.2.1 +{% endfor %} + +ENV LANG C.UTF-8 +ENV DEBIAN_FRONTEND noninteractive + +ENV OPENBLAS_NUM_THREADS 1 +ENV MKL_NUM_THREADS 1 + +# Install essential packages +RUN --mount=type=cache,target=/var/cache/apt apt-get update \ + && apt-get install --yes --no-install-suggests --no-install-recommends \ + # Essentials + build-essential \ + git \ + mariadb-client \ + libmariadb-dev \ + pv \ + ntp \ + wget \ + curl \ + supervisor \ + # Dependencies for SSH access + openssh-server \ + nano \ + vim \ + less \ + htop \ + # Dependencies for adding Python PPA + software-properties-common \ + gnupg \ + # weasyprint dependencies + libpango-1.0-0 \ + libharfbuzz0b \ + libpangoft2-1.0-0 \ + libpangocairo-1.0-0 \ + # wkhtmltopdf dependencies + ca-certificates \ + fontconfig \ + libfreetype6 \ + libjpeg-turbo8 \ + libpng16-16 \ + libx11-6 \ + libxcb1 \ + libxext6 \ + libxrender1 \ + xfonts-75dpi \ + xfonts-base \ + # pycups dependencies + gcc \ + libcups2-dev \ + # s3-attachment dependencies + libmagic1 \ + && rm -rf /var/lib/apt/lists/* \ + `#stage-pre-essentials` + +COPY --chown=root:root supervisord.conf /etc/supervisor/supervisord.conf + +# Install Redis from PPA +RUN --mount=type=cache,target=/var/cache/apt add-apt-repository ppa:redislabs/redis \ + && apt-get update \ + && apt-get install --yes --no-install-suggests --no-install-recommends \ + redis-server \ + && rm -rf /var/lib/apt/lists/* `#stage-pre-redis` + +# Install Python from DeadSnakes PPA +RUN --mount=type=cache,target=/var/cache/apt add-apt-repository ppa:deadsnakes/ppa \ + && apt-get update \ + && apt-get install --yes --no-install-suggests --no-install-recommends \ + python${PYTHON_VERSION} \ + python${PYTHON_VERSION}-dev \ + python${PYTHON_VERSION}-distutils \ + && rm -rf /var/lib/apt/lists/* \ + `#stage-pre-python` + + +# Install wkhtmltopdf +RUN wget https://github.com/wkhtmltopdf/wkhtmltopdf/releases/download/${WKHTMLTOPDF_VERSION}/wkhtmltox_${WKHTMLTOPDF_VERSION}-1.focal_amd64.deb \ + && dpkg -i wkhtmltox_${WKHTMLTOPDF_VERSION}-1.focal_amd64.deb \ + && rm wkhtmltox_${WKHTMLTOPDF_VERSION}-1.focal_amd64.deb \ + `#stage-pre-wkhtmltopdf` + +RUN curl -fsSL https://code-server.dev/install.sh | sh `#stage-pre-code-server` + +# Install Fonts +RUN git clone --progress --depth 1 http://git.jingrow.com:3000/jingrow/fonts.git /tmp/fonts \ + && rm -rf /etc/fonts && mv /tmp/fonts/etc_fonts /etc/fonts \ + && rm -rf /usr/share/fonts && mv /tmp/fonts/usr_share_fonts /usr/share/fonts \ + && rm -rf /tmp/fonts \ + && fc-cache -fv \ + `#stage-pre-fonts` + + +# Set max_allowed_packet to 512 MB for mysqldump +RUN echo "[mysqldump]\nmax_allowed_packet = 512M" > /etc/mysql/conf.d/mysqldump.cnf + + +RUN useradd -ms /bin/bash jingrow +USER jingrow +WORKDIR /home/jingrow + + +# Install Node using NVM +ENV NVM_DIR /home/jingrow/.nvm +RUN wget 
https://raw.githubusercontent.com/nvm-sh/nvm/v${NVM_VERSION}/install.sh \ + && bash install.sh \ + && . "/home/jingrow/.nvm/nvm.sh" \ + && nvm install ${NODE_VERSION} \ + && nvm use v${NODE_VERSION} \ + && nvm alias default v${NODE_VERSION} \ + && rm install.sh \ + && nvm cache clear \ + `#stage-pre-node` + +ENV PATH "$PATH:/home/jingrow/.nvm/versions/node/v${NODE_VERSION}/bin" + +# Install Yarn +RUN --mount=type=cache,target=/home/jingrow/.cache,uid=1000,gid=1000 npm install -g yarn `#stage-pre-yarn` + + +# Install Bench +ENV PATH "$PATH:/home/jingrow/.local/bin" + +RUN wget https://bootstrap.pypa.io/get-pip.py && python${PYTHON_VERSION} get-pip.py `#stage-pre-pip` +RUN --mount=type=cache,target=/home/jingrow/.cache,uid=1000,gid=1000 pip3 install --upgrade jingrow-bench==${BENCH_VERSION} `#stage-bench-bench` + +RUN --mount=type=cache,target=/home/jingrow/.cache,uid=1000,gid=1000 pip3 install Jinja2~=3.0.3 +RUN --mount=type=cache,target=/home/jingrow/.cache,uid=1000,gid=1000 pip3 install --upgrade virtualenv setuptools +RUN mkdir /home/jingrow/jingrow-bench +WORKDIR /home/jingrow/jingrow-bench +RUN mkdir -p apps logs sites config/pids redis + +# Install Redisearch 2.0 from precompiled binaries +COPY --chown=jingrow:jingrow redis /home/jingrow/jingrow-bench/redis + +# Create virtual environment for bench +RUN virtualenv -p python${PYTHON_VERSION} env `#stage-bench-env` + +COPY --chown=jingrow:jingrow common_site_config.json /home/jingrow/jingrow-bench/sites/common_site_config.json +RUN git config --global advice.detachedHead false + +ENV PYTHONUNBUFFERED 1 + +# Install Jingrow app +RUN echo '["build"]' > .bench.cmd +RUN --mount=type=cache,target=/home/jingrow/.cache,uid=1000,gid=1000 --mount=type=bind,source=apps/jingrow,target=/home/jingrow/context/apps/jingrow bench get-app /home/jingrow/context/apps/jingrow `#stage-apps-jingrow` +RUN rm .bench.cmd + +RUN --mount=type=cache,target=/home/jingrow/.cache,uid=1000,gid=1000 /home/jingrow/jingrow-bench/env/bin/pip install pycups==2.0.1 + +# Install other apps +{% for app in pg.apps %} +{% if app.app != "jingrow" %} + +RUN --mount=type=cache,target=/home/jingrow/.cache,uid=1000,gid=1000 --mount=type=bind,source=apps/{{ app.app }},target=/home/jingrow/context/apps/{{ app.app }} bench get-app /home/jingrow/context/apps/{{ app.app }} `#stage-apps-{{ app.app }}` + +{% endif %} +{% endfor %} + +COPY --chown=jingrow:jingrow config /home/jingrow/jingrow-bench/config +COPY --chown=jingrow:jingrow apps.txt /home/jingrow/jingrow-bench/sites/apps.txt + +ENV JINGROW_HARD_LINK_ASSETS True + +EXPOSE 8000 8088 9000 2200 +CMD ["supervisord"] diff --git a/jcloud/docker/common_site_config.json b/jcloud/docker/common_site_config.json new file mode 100644 index 0000000..c21d077 --- /dev/null +++ b/jcloud/docker/common_site_config.json @@ -0,0 +1,9 @@ +{ + "monitor": true, + "redis_cache": "redis://localhost:13000", + "redis_queue": "redis://localhost:11000", + "redis_socketio": "redis://localhost:13000", + "shallow_clone": true, + "socketio_port": 9000, + "webserver_port": 8000 +} \ No newline at end of file diff --git a/jcloud/docker/config/redis-cache.conf b/jcloud/docker/config/redis-cache.conf new file mode 100644 index 0000000..860748e --- /dev/null +++ b/jcloud/docker/config/redis-cache.conf @@ -0,0 +1,13 @@ +dbfilename redis-cache.rdb +dir /home/jingrow/jingrow-bench/config/pids +pidfile /home/jingrow/jingrow-bench/config/pids/redis-cache.pid +bind 127.0.0.1 +port 13000 +maxmemory {{ pg.redis_cache_size }}mb +maxmemory-policy allkeys-lru +appendonly no +save 
"" + +{% if pg.is_redisearch_enabled %} +loadmodule /home/jingrow/jingrow-bench/redis/redisearch.so +{% endif %} diff --git a/jcloud/docker/config/redis-queue.conf b/jcloud/docker/config/redis-queue.conf new file mode 100644 index 0000000..92a1846 --- /dev/null +++ b/jcloud/docker/config/redis-queue.conf @@ -0,0 +1,10 @@ +dbfilename redis-queue.rdb +dir /home/jingrow/jingrow-bench/config/pids +pidfile /home/jingrow/jingrow-bench/config/pids/redis-queue.pid +bind 0.0.0.0 +port 11000 +protected-mode no + +{% if pg.is_redisearch_enabled %} +loadmodule /home/jingrow/jingrow-bench/redis/redisearch.so +{% endif %} diff --git a/jcloud/docker/config/ssh/sshd_config b/jcloud/docker/config/ssh/sshd_config new file mode 100644 index 0000000..14a5dff --- /dev/null +++ b/jcloud/docker/config/ssh/sshd_config @@ -0,0 +1,67 @@ +ListenAddress 0.0.0.0 +PidFile /home/jingrow/jingrow-bench/config/ssh/sshd.pid +Port 2200 + + +# Logging +LogLevel VERBOSE +SyslogFacility AUTH + + +# Authentication +PermitRootLogin no +StrictModes yes + +AuthenticationMethods publickey +PubkeyAuthentication yes + + +# Disable Other Authentication Methods +ChallengeResponseAuthentication no +GSSAPIAuthentication no +HostbasedAuthentication no +KbdInteractiveAuthentication no +KerberosAuthentication no +PasswordAuthentication no +PermitEmptyPasswords no +UsePAM no + + +# Certificates +AuthorizedKeysFile none +TrustedUserCAKeys /home/jingrow/jingrow-bench/config/ssh/ca.pub +AuthorizedPrincipalsFile /home/jingrow/jingrow-bench/config/ssh/principals + +HostKey /home/jingrow/jingrow-bench/config/ssh/ssh_host_rsa_key +HostCertificate /home/jingrow/jingrow-bench/config/ssh/ssh_host_rsa_key-cert.pub + + +# Capability Limits +AllowAgentForwarding no +AllowStreamLocalForwarding no +AllowTcpForwarding no + +GatewayPorts no + +PermitListen none +PermitOpen none + +PermitTunnel no +PermitUserEnvironment no +PermitUserRC no + +PrintMotd no + +X11Forwarding no +X11UseLocalhost yes + + +# Interactive Terminal +PermitTTY yes + + +# Rate Limit +LoginGraceTime 20 +MaxAuthTries 3 +MaxSessions 10 +MaxStartups 10:30:100 diff --git a/jcloud/docker/config/supervisor.conf b/jcloud/docker/config/supervisor.conf new file mode 100644 index 0000000..ac5e52b --- /dev/null +++ b/jcloud/docker/config/supervisor.conf @@ -0,0 +1,239 @@ +{% if pg.environment_variables %} + +[supervisord] +environment={% for v in pg.environment_variables %}{{v.key}}="{{v.value}}",{% endfor %} +{% endif %} + +[program:jingrow-bench-jingrow-web] +command=/home/jingrow/jingrow-bench/env/bin/gunicorn --bind 0.0.0.0:8000 --workers 2 --timeout 120 --graceful-timeout 30 --worker-tmp-dir /dev/shm jingrow.app:application --preload --max-requests 5000 --max-requests-jitter 1000 + +environment=FORWARDED_ALLOW_IPS="*" +priority=4 +autostart=true +autorestart=true +stopwaitsecs=40 +killasgroup=true +stdout_logfile=/home/jingrow/jingrow-bench/logs/web.log +stderr_logfile=/home/jingrow/jingrow-bench/logs/web.error.log +user=jingrow +directory=/home/jingrow/jingrow-bench/sites + +[program:jingrow-bench-jingrow-schedule] +command=bench schedule +priority=9 +startsecs=0 +autostart=true +stdout_logfile=/home/jingrow/jingrow-bench/logs/schedule.log +stderr_logfile=/home/jingrow/jingrow-bench/logs/schedule.error.log +user=jingrow +directory=/home/jingrow/jingrow-bench + +{% if pg.use_rq_workerpool and pg.merge_all_rq_queues %} + +[program:jingrow-bench-jingrow-worker] +command=bench worker-pool --queue short,default,long +priority=4 +autostart=true +autorestart=true 
+stdout_logfile=/home/jingrow/jingrow-bench/logs/worker.log +stderr_logfile=/home/jingrow/jingrow-bench/logs/worker.error.log +user=jingrow +stopwaitsecs=1560 +directory=/home/jingrow/jingrow-bench +killasgroup=true +process_name=%(program_name)s + +{% elif pg.use_rq_workerpool %} + +[program:jingrow-bench-jingrow-short-worker] +command=bench worker-pool --queue short,default +priority=4 +autostart=true +autorestart=true +stdout_logfile=/home/jingrow/jingrow-bench/logs/worker.log +stderr_logfile=/home/jingrow/jingrow-bench/logs/worker.error.log +user=jingrow +stopwaitsecs=360 +directory=/home/jingrow/jingrow-bench +killasgroup=true +process_name=%(program_name)s + +[program:jingrow-bench-jingrow-long-worker] +command=bench worker-pool --queue long,default,short +priority=4 +autostart=true +autorestart=true +stdout_logfile=/home/jingrow/jingrow-bench/logs/worker.log +stderr_logfile=/home/jingrow/jingrow-bench/logs/worker.error.log +user=jingrow +stopwaitsecs=1560 +directory=/home/jingrow/jingrow-bench +killasgroup=true +process_name=%(program_name)s + +{% elif pg.merge_all_rq_queues %} + +[program:jingrow-bench-jingrow-worker] +command=bench worker --queue short,default,long +priority=4 +autostart=true +autorestart=true +stdout_logfile=/home/jingrow/jingrow-bench/logs/worker.log +stderr_logfile=/home/jingrow/jingrow-bench/logs/worker.error.log +user=jingrow +stopwaitsecs=1560 +directory=/home/jingrow/jingrow-bench +killasgroup=true +numprocs=1 +process_name=%(program_name)s-%(process_num)d + +{% elif pg.merge_default_and_short_rq_queues %} + +[program:jingrow-bench-jingrow-short-worker] +command=bench worker --queue short,default +priority=4 +autostart=true +autorestart=true +stdout_logfile=/home/jingrow/jingrow-bench/logs/worker.log +stderr_logfile=/home/jingrow/jingrow-bench/logs/worker.error.log +user=jingrow +stopwaitsecs=360 +directory=/home/jingrow/jingrow-bench +killasgroup=true +numprocs=1 +process_name=%(program_name)s-%(process_num)d + +[program:jingrow-bench-jingrow-long-worker] +command=bench worker --queue long,default,short +priority=4 +autostart=true +autorestart=true +stdout_logfile=/home/jingrow/jingrow-bench/logs/worker.log +stderr_logfile=/home/jingrow/jingrow-bench/logs/worker.error.log +user=jingrow +stopwaitsecs=1560 +directory=/home/jingrow/jingrow-bench +killasgroup=true +numprocs=1 +process_name=%(program_name)s-%(process_num)d + +{% else %} + +[program:jingrow-bench-jingrow-default-worker] +command=bench worker --queue default +priority=4 +autostart=true +autorestart=true +stdout_logfile=/home/jingrow/jingrow-bench/logs/worker.log +stderr_logfile=/home/jingrow/jingrow-bench/logs/worker.error.log +user=jingrow +stopwaitsecs=1560 +directory=/home/jingrow/jingrow-bench +killasgroup=true +numprocs=1 +process_name=%(program_name)s-%(process_num)d + +[program:jingrow-bench-jingrow-short-worker] +command=bench worker --queue short +priority=4 +autostart=true +autorestart=true +stdout_logfile=/home/jingrow/jingrow-bench/logs/worker.log +stderr_logfile=/home/jingrow/jingrow-bench/logs/worker.error.log +user=jingrow +stopwaitsecs=360 +directory=/home/jingrow/jingrow-bench +killasgroup=true +numprocs=1 +process_name=%(program_name)s-%(process_num)d + +[program:jingrow-bench-jingrow-long-worker] +command=bench worker --queue long +priority=4 +autostart=true +autorestart=true +stdout_logfile=/home/jingrow/jingrow-bench/logs/worker.log +stderr_logfile=/home/jingrow/jingrow-bench/logs/worker.error.log +user=jingrow +stopwaitsecs=1560 +directory=/home/jingrow/jingrow-bench 
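+; the default and long queue workers use stopwaitsecs=1560 while short queue
+; workers use 360, giving longer-running jobs more time to finish after a stop
+; signal before supervisor kills the process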
+killasgroup=true +numprocs=1 +process_name=%(program_name)s-%(process_num)d + +{% endif %} + +[program:jingrow-bench-redis-cache] +command=redis-server /home/jingrow/jingrow-bench/config/redis-cache.conf +priority=1 +autostart=true +autorestart=true +stdout_logfile=/home/jingrow/jingrow-bench/logs/redis-cache.log +stderr_logfile=/home/jingrow/jingrow-bench/logs/redis-cache.error.log +user=jingrow +directory=/home/jingrow/jingrow-bench/sites + +[program:jingrow-bench-redis-queue] +command=redis-server /home/jingrow/jingrow-bench/config/redis-queue.conf +priority=1 +autostart=true +autorestart=true +stdout_logfile=/home/jingrow/jingrow-bench/logs/redis-queue.log +stderr_logfile=/home/jingrow/jingrow-bench/logs/redis-queue.error.log +user=jingrow +directory=/home/jingrow/jingrow-bench/sites + +[program:jingrow-bench-node-socketio] +command=node /home/jingrow/jingrow-bench/apps/jingrow/socketio.js +priority=4 +autostart=true +autorestart=true +stdout_logfile=/home/jingrow/jingrow-bench/logs/node-socketio.log +stderr_logfile=/home/jingrow/jingrow-bench/logs/node-socketio.error.log +user=jingrow +directory=/home/jingrow/jingrow-bench + +[group:jingrow-bench-web] +programs=jingrow-bench-jingrow-web,jingrow-bench-node-socketio + +{% if pg.merge_all_rq_queues %} + +[group:jingrow-bench-workers] +programs=jingrow-bench-jingrow-schedule,jingrow-bench-jingrow-worker + +{% elif pg.merge_default_and_short_rq_queues or pg.use_rq_workerpool %} + +[group:jingrow-bench-workers] +programs=jingrow-bench-jingrow-schedule,jingrow-bench-jingrow-short-worker,jingrow-bench-jingrow-long-worker + +{% else %} + +[group:jingrow-bench-workers] +programs=jingrow-bench-jingrow-schedule,jingrow-bench-jingrow-default-worker,jingrow-bench-jingrow-short-worker,jingrow-bench-jingrow-long-worker + +{% endif %} + +[group:jingrow-bench-redis] +programs=jingrow-bench-redis-cache,jingrow-bench-redis-queue + +{% if pg.is_code_server_enabled %} +[program:code-server] +command=code-server --bind-addr 0.0.0.0:8088 . 
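
> Editor's aside: the `{% if %}` branches above reduce to a small decision table. The sketch below (purely illustrative; this helper does not exist in the repo) shows which programs end up in the `jingrow-bench-workers` group defined above for a given combination of the `pg` flags.

```python
# Illustration only: mirrors the supervisor.conf template branches.
def workers_group(use_rq_workerpool: bool, merge_all_rq_queues: bool,
                  merge_default_and_short_rq_queues: bool) -> list[str]:
    group = ["jingrow-bench-jingrow-schedule"]
    if merge_all_rq_queues:
        # one worker (or worker pool) consuming short,default,long
        group += ["jingrow-bench-jingrow-worker"]
    elif use_rq_workerpool or merge_default_and_short_rq_queues:
        # short worker also drains default; long worker backs up both
        group += ["jingrow-bench-jingrow-short-worker",
                  "jingrow-bench-jingrow-long-worker"]
    else:
        # default layout: one worker per queue
        group += ["jingrow-bench-jingrow-default-worker",
                  "jingrow-bench-jingrow-short-worker",
                  "jingrow-bench-jingrow-long-worker"]
    return group

assert workers_group(True, True, False) == [
    "jingrow-bench-jingrow-schedule", "jingrow-bench-jingrow-worker"
]
```
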
+autostart=false +autorestart=true +stdout_logfile=/home/jingrow/jingrow-bench/logs/code-server.log +stderr_logfile=/home/jingrow/jingrow-bench/logs/code-server.error.log +user=jingrow +directory=/home/jingrow/jingrow-bench/apps +{% endif %} + +{% if pg.is_ssh_enabled %} +[program:sshd] +command=/usr/sbin/sshd -f /home/jingrow/jingrow-bench/config/ssh/sshd_config -D -e +autostart=true +autorestart=true +stdout_logfile=/home/jingrow/jingrow-bench/logs/ssh.log +stderr_logfile=/home/jingrow/jingrow-bench/logs/ssh.error.log +user=jingrow +directory=/home/jingrow +{% endif %} diff --git a/jcloud/docker/docker-compose.yml b/jcloud/docker/docker-compose.yml new file mode 100644 index 0000000..5c45ca6 --- /dev/null +++ b/jcloud/docker/docker-compose.yml @@ -0,0 +1,114 @@ +version: "3.8" +services: + # TODO: Remove NGINX service in production + nginx: + image: nginx:1.19 + ports: + - "18080:80" + - "80:80" + volumes: + - nginx:/etc/nginx/conf.d:ro + - sites:/etc/nginx/sites:ro + - assets:/etc/nginx/sites/assets:ro + depends_on: + - web + web: + image: backbone:latest + command: + [ + "/home/jingrow/jingrow-bench/env/bin/gunicorn", + "--bind", + "0.0.0.0:8000", + "--timeout", + "120", + "--workers", + "4", + "--worker-tmp-dir", + "/dev/shm", + "--preload", + "jingrow.app:application", + "--log-level", + "DEBUG", + ] + working_dir: /home/jingrow/jingrow-bench/sites + # ports: + # - "18000:8000" + volumes: + # - nginx:/home/jingrow/jingrow-bench/nginx:rw + - logs:/home/jingrow/jingrow-bench/logs:rw + - sites:/home/jingrow/jingrow-bench/sites:rw + - assets:/home/jingrow/jingrow-bench/sites/assets:ro + depends_on: + - redis-cache + - redis-queue + - redis-socketio + socketio: + image: backbone:latest + command: ["node", "/home/jingrow/jingrow-bench/apps/jingrow/socketio.js"] + working_dir: /home/jingrow/jingrow-bench + # ports: + # - "19000:9000" + volumes: + - logs:/home/jingrow/jingrow-bench/logs:rw + - sites:/home/jingrow/jingrow-bench/sites:ro + depends_on: + - redis-socketio + - web + worker_default: + image: backbone:latest + command: ["bench", "worker", "--queue", "default"] + volumes: + - sites:/home/jingrow/jingrow-bench/sites:rw + - logs:/home/jingrow/jingrow-bench/logs:rw + depends_on: + - redis-cache + - redis-queue + - redis-socketio + worker_long: + image: backbone:latest + command: ["bench", "worker", "--queue", "long"] + volumes: + - sites:/home/jingrow/jingrow-bench/sites:rw + - logs:/home/jingrow/jingrow-bench/logs:rw + depends_on: + - redis-cache + - redis-queue + - redis-socketio + worker_short: + image: backbone:latest + command: ["bench", "worker", "--queue", "short"] + volumes: + - sites:/home/jingrow/jingrow-bench/sites:rw + - logs:/home/jingrow/jingrow-bench/logs:rw + depends_on: + - redis-cache + - redis-queue + - redis-socketio + redis-cache: + image: redis:6.0 + redis-queue: + image: redis:6.0 + redis-socketio: + image: redis:6.0 + +volumes: + nginx: + driver_opts: + type: none + o: bind + device: /home/aditya/Jingrow/benches/jcloud/apps/jcloud/mason/run/nginx + logs: + driver_opts: + type: none + o: bind + device: /home/aditya/Jingrow/benches/jcloud/apps/jcloud/mason/run/logs + sites: + driver_opts: + type: none + o: bind + device: /home/aditya/Jingrow/benches/jcloud/apps/jcloud/mason/run/sites + assets: + driver_opts: + type: none + o: bind + device: /home/aditya/Jingrow/benches/jcloud/apps/jcloud/mason/run/assets diff --git a/jcloud/docker/registry.conf b/jcloud/docker/registry.conf new file mode 100644 index 0000000..8a3479a --- /dev/null +++ b/jcloud/docker/registry.conf 
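
> Editor's aside on the docker-compose.yml above: its named volumes are bind mounts (`driver_opts: type: none, o: bind`), so the host directories must already exist before `docker compose up`. A small helper sketch (not part of the repo) that pre-creates them from the compose file:

```python
# Read docker-compose.yml and create the bind-mount source directories.
from pathlib import Path
import yaml  # PyYAML

compose = yaml.safe_load(Path("jcloud/docker/docker-compose.yml").read_text())

for name, spec in (compose.get("volumes") or {}).items():
    opts = (spec or {}).get("driver_opts", {})
    if opts.get("o") == "bind":
        device = Path(opts["device"])
        device.mkdir(parents=True, exist_ok=True)
        print(f"ensured {name} -> {device}")
```

Running this once before bringing the stack up avoids the bind-mount failure on first run.
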
@@ -0,0 +1,79 @@ +upstream docker-registry { + server 127.0.0.1:5000; +} + +upstream docker-registry-ui { + server 127.0.0.1:6000; +} + +## Set a variable to help us decide if we need to add the +## 'Docker-Distribution-Api-Version' header. +## The registry always sets this header. +## In the case of nginx performing auth, the header is unset +## since nginx is auth-ing before proxying. +map $upstream_http_docker_distribution_api_version $docker_distribution_api_version { + '' 'registry/2.0'; +} + +server { + listen 443 ssl; + server_name registry.jingrow.cloud; + + # SSL + ssl_certificate /etc/letsencrypt/live/registry.jingrow.cloud/fullchain.pem; + ssl_certificate_key /etc/letsencrypt/live/registry.jingrow.cloud/privkey.pem; + + # Recommendations from https://raymii.org/s/tutorials/Strong_SSL_Security_On_nginx.html + ssl_protocols TLSv1.1 TLSv1.2; + ssl_ciphers 'EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH'; + ssl_prefer_server_ciphers on; + ssl_session_cache shared:SSL:10m; + + # disable any limits to avoid HTTP 413 for large image uploads + client_max_body_size 0; + + # required to avoid HTTP 411: see Issue #1486 (https://github.com/moby/moby/issues/1486) + chunked_transfer_encoding on; + + location /v2/ { + # Do not allow connections from docker 1.5 and earlier + # docker pre-1.6.0 did not properly set the user agent on ping, catch "Go *" user agents + if ($http_user_agent ~ "^(docker\/1\.(3|4|5(?!\.[0-9]-dev))|Go ).*$" ) { + return 404; + } + + # To add basic authentication to v2 use auth_basic setting. + auth_basic "Registry realm"; + auth_basic_user_file /etc/nginx/conf.d/registry.htpasswd; + + ## If $docker_distribution_api_version is empty, the header is not added. + ## See the map directive above where this variable is defined. + add_header 'Docker-Distribution-Api-Version' $docker_distribution_api_version always; + + add_header Access-Control-Allow-Origin '*'; + add_header Access-Control-Allow-Credentials 'true'; + add_header Access-Control-Allow-Headers 'Authorization, Accept'; + add_header Access-Control-Allow-Methods 'HEAD, GET, OPTIONS'; + + proxy_pass http://docker-registry; + proxy_set_header Host $http_host; # required for docker client's sake + proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_read_timeout 900; + } + + location / { + # To add basic authentication to v2 use auth_basic setting. + auth_basic "Registry realm"; + auth_basic_user_file /home/aditya/Jingrow/benches/jcloud/registry/registry.htpasswd; + + proxy_set_header Host $http_host; # required for docker client's sake + proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto "http"; + proxy_read_timeout 900; + + proxy_pass http://docker-registry-ui; + } +} diff --git a/jcloud/docker/registry.md b/jcloud/docker/registry.md new file mode 100644 index 0000000..9818bc7 --- /dev/null +++ b/jcloud/docker/registry.md @@ -0,0 +1,18 @@ +1. Start registry with + + ```shell + docker run -d -p 5000:5000 --restart=always --name registry registry:2 + ``` + +1. Create `.htpasswd` file with + + ``` + htpasswd -Bbn user password > registry.htpasswd + ``` + +1. Start Registry UI with + ``` + docker run -d --name registry-ui -p 6000:80 -e REGISTRY_URL=https://registry.jingrow.cloud -e DELETE_IMAGES=true -e REGISTRY_TITLE="My registry" joxit/docker-registry-ui:static + ``` + +1. 
Use registry.conf as NGINX config \ No newline at end of file diff --git a/jcloud/docker/ssh_proxy/Dockerfile b/jcloud/docker/ssh_proxy/Dockerfile new file mode 100644 index 0000000..534077e --- /dev/null +++ b/jcloud/docker/ssh_proxy/Dockerfile @@ -0,0 +1,32 @@ +# syntax = docker/dockerfile:experimental +FROM ubuntu:20.04 + +ENV LANG C.UTF-8 +ENV DEBIAN_FRONTEND noninteractive + +# Install essential packages +RUN --mount=type=cache,target=/var/cache/apt apt-get update \ + && apt-get install --yes --no-install-suggests --no-install-recommends \ + openssh-server \ + && rm -rf /var/lib/apt/lists/* \ + `#stage-pre-essentials` + +# Remove existing host keys +RUN rm /etc/ssh/ssh_host_* + +COPY sshd_config /etc/ssh/sshd_config +COPY known_hosts /etc/ssh/ssh_known_hosts +COPY ca.pub /etc/ssh/ca.pub + +COPY ssh_host_rsa_key /etc/ssh/ssh_host_rsa_key +COPY ssh_host_rsa_key.pub /etc/ssh/ssh_host_rsa_key.pub +COPY ssh_host_rsa_key-cert.pub /etc/ssh/ssh_host_rsa_key-cert.pub + +RUN mkdir /run/sshd +RUN mkdir /etc/ssh/principals + +RUN useradd -m -p '*' jingrow + +EXPOSE 22 +VOLUME ["/home", "/etc"] +CMD ["/usr/sbin/sshd", "-D", "-e"] diff --git a/jcloud/docker/ssh_proxy/sshd_config b/jcloud/docker/ssh_proxy/sshd_config new file mode 100644 index 0000000..9f25dbe --- /dev/null +++ b/jcloud/docker/ssh_proxy/sshd_config @@ -0,0 +1,62 @@ +# Logging +LogLevel VERBOSE +SyslogFacility AUTH + + +# Authentication +PermitRootLogin no +StrictModes yes + +AuthenticationMethods publickey +PubkeyAuthentication yes + + +# Disable Other Authentication Methods +ChallengeResponseAuthentication no +GSSAPIAuthentication no +HostbasedAuthentication no +KbdInteractiveAuthentication no +KerberosAuthentication no +PasswordAuthentication no +PermitEmptyPasswords no +UsePAM no + + +# Certificates +AuthorizedKeysFile none +TrustedUserCAKeys /etc/ssh/ca.pub +AuthorizedPrincipalsFile /etc/ssh/principals/%u + +HostKey /etc/ssh/ssh_host_rsa_key +HostCertificate /etc/ssh/ssh_host_rsa_key-cert.pub + + +# Capability Limits +AllowAgentForwarding no +AllowStreamLocalForwarding no +AllowTcpForwarding no + +GatewayPorts no + +PermitListen none +PermitOpen none + +PermitTunnel no +PermitUserEnvironment no +PermitUserRC no + +PrintMotd no + +X11Forwarding no +X11UseLocalhost yes + + +# Interactive Terminal +PermitTTY yes + + +# Rate Limit +LoginGraceTime 20 +MaxAuthTries 3 +MaxSessions 10 +MaxStartups 10:30:100 diff --git a/jcloud/docker/supervisord.conf b/jcloud/docker/supervisord.conf new file mode 100644 index 0000000..934f1b6 --- /dev/null +++ b/jcloud/docker/supervisord.conf @@ -0,0 +1,21 @@ +[unix_http_server] +file=/tmp/supervisor.sock + +[supervisord] +logfile=/tmp/supervisord.log +logfile_maxbytes=50MB +logfile_backups=10 +loglevel=info +pidfile=/tmp/supervisord.pid +nodaemon=true +minfds=1024 +minprocs=200 + +[rpcinterface:supervisor] +supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface + +[supervisorctl] +serverurl=unix:///tmp/supervisor.sock + +[include] +files = /home/jingrow/jingrow-bench/config/supervisor.conf diff --git a/jcloud/exceptions.py b/jcloud/exceptions.py new file mode 100644 index 0000000..60a8ddc --- /dev/null +++ b/jcloud/exceptions.py @@ -0,0 +1,69 @@ +from jingrow.exceptions import ValidationError + + +class CentralServerNotSet(ValidationError): + pass + + +class JingrowioServerNotSet(ValidationError): + pass + + +class CannotChangePlan(ValidationError): + pass + + +class OngoingAgentJob(ValidationError): + pass + + +class MissingAppsInBench(ValidationError): + pass + + +class 
InsufficientSpaceOnServer(ValidationError): + pass + + +class VolumeResizeLimitError(ValidationError): + pass + + +class AAAARecordExists(ValidationError): + pass + + +class ConflictingCAARecord(ValidationError): + pass + + +class ConflictingDNSRecord(ValidationError): + pass + + +class MultipleARecords(ValidationError): + pass + + +class MultipleCNAMERecords(ValidationError): + pass + + +class TeamHeaderNotInRequestError(ValidationError): + pass + + +class AlertRuleNotEnabled(ValidationError): + pass + + +class SiteUnderMaintenance(ValidationError): + pass + + +class SiteAlreadyArchived(ValidationError): + pass + + +class InactiveDomains(ValidationError): + pass diff --git a/jcloud/experimental/__init__.py b/jcloud/experimental/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/experimental/pagetype/__init__.py b/jcloud/experimental/pagetype/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/experimental/pagetype/referral_bonus/__init__.py b/jcloud/experimental/pagetype/referral_bonus/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/experimental/pagetype/referral_bonus/referral_bonus.js b/jcloud/experimental/pagetype/referral_bonus/referral_bonus.js new file mode 100644 index 0000000..682e82a --- /dev/null +++ b/jcloud/experimental/pagetype/referral_bonus/referral_bonus.js @@ -0,0 +1,20 @@ +// Copyright (c) 2021, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Referral Bonus', { + refresh: function (frm) { + if (!frm.pg.credits_allocated) { + let btn = frm.add_custom_button('Allocate Credits', () => { + frm + .call({ + pg: frm.pg, + method: 'allocate_credits', + btn, + }) + .then(() => { + frm.refresh(); + }); + }); + } + }, +}); diff --git a/jcloud/experimental/pagetype/referral_bonus/referral_bonus.json b/jcloud/experimental/pagetype/referral_bonus/referral_bonus.json new file mode 100644 index 0000000..4dbe3b1 --- /dev/null +++ b/jcloud/experimental/pagetype/referral_bonus/referral_bonus.json @@ -0,0 +1,65 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2021-11-10 12:24:19.654513", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "for_team", + "referred_by", + "credits_allocated" + ], + "fields": [ + { + "fieldname": "for_team", + "fieldtype": "Link", + "label": "For Team", + "options": "Team", + "read_only": 1 + }, + { + "fieldname": "referred_by", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Referred By", + "options": "Team", + "read_only": 1 + }, + { + "default": "0", + "fieldname": "credits_allocated", + "fieldtype": "Check", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Credits Allocated", + "read_only": 1 + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2021-11-11 00:10:21.936375", + "modified_by": "Administrator", + "module": "Experimental", + "name": "Referral Bonus", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "title_field": "referred_by", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/experimental/pagetype/referral_bonus/referral_bonus.py b/jcloud/experimental/pagetype/referral_bonus/referral_bonus.py new file mode 100644 index 0000000..984844f --- /dev/null +++ 
b/jcloud/experimental/pagetype/referral_bonus/referral_bonus.py @@ -0,0 +1,65 @@ +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + +import jingrow +from jingrow.model.document import Document + + +class ReferralBonus(Document): + @jingrow.whitelist() + def allocate_credits(self): + # Credits have already been allocated + if self.credits_allocated: + return + + # Team hasn't spent 25$/1800CNY money yet + if not team_has_spent(self.for_team): + self.add_comment( + text="Cannot credit referral bonus. The team hasn't spent 25$/1800CNY yet." + ) + return + + team = jingrow.get_pg("Team", self.referred_by) + credits_field = "free_credits_cny" if team.currency == "CNY" else "free_credits_usd" + credit_amount = jingrow.db.get_single_value("Jcloud Settings", credits_field) + if not credit_amount: + return + + team.allocate_credit_amount(credit_amount, source="Referral Bonus") + + self.credits_allocated = True + self.save() + self.reload() + + +# TODO: Remove hardcoded values and add fields in Jcloud Settings +def team_has_spent(team, usd_amount=25.0, cny_amount=1800.0): + """Has the team spent atleast the given amount yet (on stripe)""" + team_currency = jingrow.db.get_value("Team", team, "currency") + total_paid = sum( + jingrow.db.get_all( + "Invoice", + filters={"team": team, "status": "Paid", "transaction_amount": (">", 0)}, + pluck="transaction_amount", + ) + ) + + if team_currency == "CNY": + return total_paid >= cny_amount + + return total_paid >= usd_amount + + +def credit_referral_bonuses(): + unallocated_referral_bonuses = jingrow.get_all( + "Referral Bonus", + filters={"credits_allocated": False}, + fields=["name", "for_team", "referred_by"], + ) + + for rb in unallocated_referral_bonuses: + if team_has_spent(rb.for_team): + try: + jingrow.get_pg("Referral Bonus", rb.name).allocate_credits() + except Exception: + pass diff --git a/jcloud/experimental/pagetype/referral_bonus/test_referral_bonus.py b/jcloud/experimental/pagetype/referral_bonus/test_referral_bonus.py new file mode 100644 index 0000000..6c29452 --- /dev/null +++ b/jcloud/experimental/pagetype/referral_bonus/test_referral_bonus.py @@ -0,0 +1,9 @@ +# Copyright (c) 2021, JINGROW +# See license.txt + +# import jingrow +import unittest + + +class TestReferralBonus(unittest.TestCase): + pass diff --git a/jcloud/fixtures/agent_job_type.json b/jcloud/fixtures/agent_job_type.json new file mode 100644 index 0000000..537fcf5 --- /dev/null +++ b/jcloud/fixtures/agent_job_type.json @@ -0,0 +1,1784 @@ +[ + { + "disabled_auto_retry": 1, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 3, + "modified": "2024-08-20 12:32:56.100866", + "name": "Update In Place", + "request_method": "POST", + "request_path": "/bench/{bench}/update_inplace", + "steps": [ + { + "step_name": "Pull App Changes" + }, + { + "step_name": "Migrate Sites" + }, + { + "step_name": "Rebuild Bench Assets" + }, + { + "step_name": "Commit Container Changes" + }, + { + "step_name": "Bench Restart" + } + ] + }, + { + "disabled_auto_retry": 0, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2025-02-13 13:25:40.384742", + "name": "Add Domain to Upstream", + "request_method": "POST", + "request_path": "/proxy/upstreams/{upstream}/domains", + "steps": [ + { + "step_name": "Add Site File to Upstream Directory" + }, + { + "step_name": "Generate NGINX Configuration" + }, + { + "step_name": "Reload NGINX" + } + ] + }, + { + "disabled_auto_retry": 0, + "docstatus": 0, + "pagetype": "Agent 
Job Type", + "max_retry_count": 6, + "modified": "2023-11-06 07:28:19.228754", + "name": "Rename Upstream", + "request_method": "POST", + "request_path": "/proxy/upstreams/{ip}/rename", + "steps": [ + { + "step_name": "Rename Upstream Directory" + }, + { + "step_name": "Generate NGINX Configuration" + }, + { + "step_name": "Reload NGINX" + } + ] + }, + { + "disabled_auto_retry": 0, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2024-08-09 10:07:51.319214", + "name": "Rename Site", + "request_method": "POST", + "request_path": "benches/{site.bench}/sites/{site.name}/rename", + "steps": [ + { + "step_name": "Enable Maintenance Mode" + }, + { + "step_name": "Wait for Enqueued Jobs" + }, + { + "step_name": "Update Site Configuration" + }, + { + "step_name": "Rename Site" + }, + { + "step_name": "Bench Setup NGINX" + }, + { + "step_name": "Reload NGINX" + }, + { + "step_name": "Disable Maintenance Mode" + }, + { + "step_name": "Enable Scheduler" + }, + { + "step_name": "Create User" + } + ] + }, + { + "disabled_auto_retry": 0, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2023-11-06 07:28:18.549528", + "name": "Recover Failed Site Migration", + "request_method": null, + "request_path": null, + "steps": [ + { + "step_name": "Move Site" + }, + { + "step_name": "Restore Touched Tables" + }, + { + "step_name": "Disable Maintenance Mode" + } + ] + }, + { + "disabled_auto_retry": 0, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2023-11-06 07:28:18.927111", + "name": "Add Domain", + "request_method": "POST", + "request_path": "benches/{bench}/sites/{site}/domains", + "steps": [ + { + "step_name": "Update Site Configuration" + }, + { + "step_name": "Bench Setup NGINX" + }, + { + "step_name": "Reload NGINX" + } + ] + }, + { + "disabled_auto_retry": 0, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2023-11-06 07:28:18.946695", + "name": "Remove Domain", + "request_method": "DELETE", + "request_path": "benches/{bench}/sites/{site}/domains/{domain}", + "steps": [ + { + "step_name": "Update Site Configuration" + }, + { + "step_name": "Bench Setup NGINX" + }, + { + "step_name": "Reload NGINX" + } + ] + }, + { + "disabled_auto_retry": 0, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2023-11-06 07:28:18.762138", + "name": "Update Site Pull", + "request_method": "POST", + "request_path": "/benches/{bench}/sites/{site}/update/pull", + "steps": [ + { + "step_name": "Enable Maintenance Mode" + }, + { + "step_name": "Wait for Enqueued Jobs" + }, + { + "step_name": "Move Site" + }, + { + "step_name": "Bench Setup NGINX" + }, + { + "step_name": "Bench Setup NGINX Target" + }, + { + "step_name": "Reload NGINX" + }, + { + "step_name": "Disable Maintenance Mode" + } + ] + }, + { + "disabled_auto_retry": 0, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2023-11-06 07:28:19.585210", + "name": "Update Site Migrate", + "request_method": "POST", + "request_path": "/benches/{bench}/sites/{site}/update/migrate", + "steps": [ + { + "step_name": "Enable Maintenance Mode" + }, + { + "step_name": "Wait for Enqueued Jobs" + }, + { + "step_name": "Clear Backup Directory" + }, + { + "step_name": "Backup Site Tables" + }, + { + "step_name": "Move Site" + }, + { + "step_name": "Bench Setup NGINX" + }, + { + "step_name": "Bench Setup NGINX Target" + }, + { + "step_name": "Reload NGINX" 
+ }, + { + "step_name": "Run App Specific Scripts" + }, + { + "step_name": "Migrate Site" + }, + { + "step_name": "Log Touched Tables" + }, + { + "step_name": "Disable Maintenance Mode" + }, + { + "step_name": "Build Search Index" + } + ] + }, + { + "disabled_auto_retry": 0, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2023-11-06 07:28:18.646447", + "name": "Add Host to Proxy", + "request_method": "POST", + "request_path": "/proxy/hosts", + "steps": [ + { + "step_name": "Add Host to Proxy" + }, + { + "step_name": "Generate NGINX Configuration" + }, + { + "step_name": "Reload NGINX" + } + ] + }, + { + "disabled_auto_retry": 0, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2023-11-06 07:28:19.671517", + "name": "Add Code Server to Upstream", + "request_method": "POST", + "request_path": "/proxy/upstreams/{upstream}/sites", + "steps": [ + { + "step_name": "Add Site File to Upstream Directory" + }, + { + "step_name": "Generate NGINX Configuration" + }, + { + "step_name": "Reload NGINX" + } + ] + }, + { + "disabled_auto_retry": 0, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2023-11-06 07:28:19.651948", + "name": "Remove Code Server from Upstream", + "request_method": "POST", + "request_path": "/proxy/upstreams/{upstream}/sites", + "steps": [ + { + "step_name": "Remove Site File from Upstream Directory" + }, + { + "step_name": "Generate NGINX Configuration" + }, + { + "step_name": "Reload NGINX" + } + ] + }, + { + "disabled_auto_retry": 0, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2023-11-06 07:28:19.724970", + "name": "Setup Code Server", + "request_method": "POST", + "request_path": "benches/{bench}/codeserver", + "steps": [ + { + "step_name": "Create Code Server Config" + }, + { + "step_name": "Start Code Server" + }, + { + "step_name": "Bench Setup NGINX" + }, + { + "step_name": "Reload NGINX" + } + ] + }, + { + "disabled_auto_retry": 0, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2023-11-06 07:28:19.707766", + "name": "Start Code Server", + "request_method": "POST", + "request_path": "benches/{bench}/codeserver/start", + "steps": [ + { + "step_name": "Start Code Server" + } + ] + }, + { + "disabled_auto_retry": 0, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2023-11-06 07:28:19.743617", + "name": "Stop Code Server", + "request_method": "POST", + "request_path": "benches/{bench}/codeserver/stop", + "steps": [ + { + "step_name": "Stop Code Server" + } + ] + }, + { + "disabled_auto_retry": 0, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2023-11-06 07:28:19.690936", + "name": "Archive Code Server", + "request_method": "POST", + "request_path": "benches/{bench}/codeserver/archive", + "steps": [ + { + "step_name": "Remove Code Server" + }, + { + "step_name": "Generate NGINX Configuration" + }, + { + "step_name": "Reload NGINX" + } + ] + }, + { + "disabled_auto_retry": 0, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2023-11-06 07:28:18.607088", + "name": "Add Upstream to Proxy", + "request_method": "POST", + "request_path": "/proxy/upstreams", + "steps": [ + { + "step_name": "Add Upstream Directory" + }, + { + "step_name": "Generate NGINX Configuration" + }, + { + "step_name": "Reload NGINX" + } + ] + }, + { + "disabled_auto_retry": 0, + 
"docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2023-11-06 07:28:18.677444", + "name": "Install App on Site", + "request_method": "POST", + "request_path": "/benches/{bench}/sites/{site}/apps", + "steps": [ + { + "step_name": "Install App on Site" + } + ] + }, + { + "disabled_auto_retry": 0, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2023-11-06 07:28:18.839292", + "name": "New Site from Backup", + "request_method": "POST", + "request_path": "/benches/{bench}/sites/restore", + "steps": [ + { + "step_name": "Download Backup Files" + }, + { + "step_name": "New Site" + }, + { + "step_name": "Update Site Configuration" + }, + { + "step_name": "Restore Site" + }, + { + "step_name": "Delete Downloaded Backup Files" + }, + { + "step_name": "Uninstall Unavailable Apps" + }, + { + "step_name": "Migrate Site" + }, + { + "step_name": "Set Administrator Password" + }, + { + "step_name": "Enable Scheduler" + }, + { + "step_name": "Bench Setup NGINX" + }, + { + "step_name": "Reload NGINX" + } + ] + }, + { + "disabled_auto_retry": 0, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2023-11-06 07:28:18.690831", + "name": "Reinstall Site", + "request_method": "POST", + "request_path": "benches/{bench}/sites/{site}/reinstall", + "steps": [ + { + "step_name": "Reinstall Site" + } + ] + }, + { + "disabled_auto_retry": 0, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2023-11-06 07:28:18.706871", + "name": "Update Site Status", + "request_method": "POST", + "request_path": "/proxy/upstreams/{upstream}/sites/{site}/status", + "steps": [ + { + "step_name": "Update Site File" + }, + { + "step_name": "Generate NGINX Configuration" + }, + { + "step_name": "Reload NGINX" + } + ] + }, + { + "disabled_auto_retry": 0, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2023-11-06 07:28:18.740351", + "name": "Uninstall App from Site", + "request_method": "DELETE", + "request_path": "/benches/{bench}/sites/{site}/apps", + "steps": [ + { + "step_name": "Uninstall App from Site" + } + ] + }, + { + "disabled_auto_retry": 0, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2023-11-06 07:28:18.805097", + "name": "Recover Failed Site Pull", + "request_method": "POST", + "request_path": "/benches/{bench}/sites/{site}/update/pull/recover", + "steps": [ + { + "step_name": "Move Site" + }, + { + "step_name": "Bench Setup NGINX" + }, + { + "step_name": "Bench Setup NGINX Target" + }, + { + "step_name": "Reload NGINX" + }, + { + "step_name": "Disable Maintenance Mode" + } + ] + }, + { + "disabled_auto_retry": 0, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2023-11-06 07:28:18.786058", + "name": "Recover Failed Site Update", + "request_method": "POST", + "request_path": "/benches/{bench}/sites/{site}/update/recover", + "steps": [ + { + "step_name": "Disable Maintenance Mode" + } + ] + }, + { + "disabled_auto_retry": 0, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2023-11-06 07:28:19.625189", + "name": "Recover Failed Site Migrate", + "request_method": "POST", + "request_path": "/benches/{bench}/sites/{site}/update/migrate/recover", + "steps": [ + { + "step_name": "Move Site" + }, + { + "step_name": "Bench Setup NGINX" + }, + { + "step_name": "Bench Setup NGINX Target" + }, + { + "step_name": "Reload NGINX" + 
}, + { + "step_name": "Restore Touched Tables" + }, + { + "step_name": "Run App Specific Scripts" + }, + { + "step_name": "Disable Maintenance Mode" + } + ] + }, + { + "disabled_auto_retry": 0, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2023-11-06 07:28:18.893005", + "name": "Remove Host from Proxy", + "request_method": "DELETE", + "request_path": "/proxy/hosts/{host}", + "steps": [ + { + "step_name": "Remove Host from Proxy" + }, + { + "step_name": "Generate NGINX Configuration" + }, + { + "step_name": "Reload NGINX" + } + ] + }, + { + "disabled_auto_retry": 0, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2023-11-06 07:28:18.911153", + "name": "Migrate Site", + "request_method": "POST", + "request_path": "/benches/{bench}/sites/{site}/migrate", + "steps": [ + { + "step_name": "Migrate Site" + } + ] + }, + { + "disabled_auto_retry": 0, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2023-11-06 07:28:18.963183", + "name": "Fetch Sites Info", + "request_method": "POST", + "request_path": "/benches/{bench}/info", + "steps": [ + { + "step_name": "Fetch Sites Info" + } + ] + }, + { + "disabled_auto_retry": 0, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2023-11-06 07:28:19.000340", + "name": "Setup Redirects on Hosts", + "request_method": "POST", + "request_path": "/proxy/hosts/redirects", + "steps": [ + { + "step_name": "Remove Redirect on Host" + }, + { + "step_name": "Setup Redirect on Host" + }, + { + "step_name": "Generate NGINX Configuration" + }, + { + "step_name": "Reload NGINX" + } + ] + }, + { + "disabled_auto_retry": 0, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2023-11-06 07:28:18.979580", + "name": "Remove Redirects on Hosts", + "request_method": "DELETE", + "request_path": "/proxy/hosts/redirects", + "steps": [ + { + "step_name": "Remove Redirect on Host" + }, + { + "step_name": "Generate NGINX Configuration" + }, + { + "step_name": "Reload NGINX" + } + ] + }, + { + "disabled_auto_retry": 0, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2023-11-06 07:28:19.100535", + "name": "Rename Site on Upstream", + "request_method": "POST", + "request_path": "/proxy/upstreams/{upstream}/sites/{site}/rename", + "steps": [ + { + "step_name": "Rename Site File in Upstream Directory" + }, + { + "step_name": "Rename Host Directory" + }, + { + "step_name": "Rename Site in Host Directory" + }, + { + "step_name": "Generate NGINX Configuration" + }, + { + "step_name": "Reload NGINX" + } + ] + }, + { + "disabled_auto_retry": 0, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2023-11-06 07:28:19.131920", + "name": "Add Wildcard Hosts to Proxy", + "request_method": "POST", + "request_path": "/proxy/wildcards", + "steps": [ + { + "step_name": "Add Wildcard Hosts to Proxy" + }, + { + "step_name": "Generate NGINX Configuration" + }, + { + "step_name": "Reload NGINX" + } + ] + }, + { + "disabled_auto_retry": 0, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2023-11-06 07:28:19.180358", + "name": "Setup JERP", + "request_method": "POST", + "request_path": "/benches/{site.bench}/sites/{site.name}/jerp", + "steps": [ + { + "step_name": "Create User" + }, + { + "step_name": "Update JERP Configuration" + } + ] + }, + { + "disabled_auto_retry": 0, + "docstatus": 0, + 
"pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2023-11-06 07:28:19.157390", + "name": "Clear Cache", + "request_method": "DELETE", + "request_path": "/benches/{bench}/sites/{site}/cache", + "steps": [ + { + "step_name": "Clear Cache" + }, + { + "step_name": "Clear Website Cache" + } + ] + }, + { + "disabled_auto_retry": 0, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2023-11-06 07:28:19.203468", + "name": "Restore Site Tables", + "request_method": "POST", + "request_path": "/benches/{bench}/sites/{site}/update/migrate/restore", + "steps": [ + { + "step_name": "Restore Site Tables" + }, + { + "step_name": "Disable Maintenance Mode" + } + ] + }, + { + "disabled_auto_retry": 0, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2023-11-06 07:28:19.283667", + "name": "Add User to Proxy", + "request_method": "POST", + "request_path": "/ssh/users", + "steps": [ + { + "step_name": "Add User to Proxy" + }, + { + "step_name": "Add Certificate to User" + }, + { + "step_name": "Add Principal to User" + } + ] + }, + { + "disabled_auto_retry": 0, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2023-11-06 07:28:19.308026", + "name": "Remove User from Proxy", + "request_method": "DELETE", + "request_path": "/ssh/users/{user}", + "steps": [ + { + "step_name": "Remove User from Proxy" + }, + { + "step_name": "Remove Principal from User" + } + ] + }, + { + "disabled_auto_retry": 0, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2023-11-06 07:28:19.327716", + "name": "Add User to ProxySQL", + "request_method": "POST", + "request_path": "/proxysql/users", + "steps": [ + { + "step_name": "Add User to ProxySQL" + } + ] + }, + { + "disabled_auto_retry": 0, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2023-11-06 07:28:19.345276", + "name": "Remove User from ProxySQL", + "request_method": "DELETE", + "request_path": "/proxysql/users/{username}", + "steps": [ + { + "step_name": "Remove User from ProxySQL" + } + ] + }, + { + "disabled_auto_retry": 0, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2023-11-06 07:28:19.362722", + "name": "Create Minio User", + "request_method": "POST", + "request_path": "/minio/create", + "steps": [ + { + "step_name": "Create Minio User" + }, + { + "step_name": "Create Minio Policy" + }, + { + "step_name": "Add Minio Policy" + } + ] + }, + { + "disabled_auto_retry": 0, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2023-11-06 07:28:19.379874", + "name": "Remove Minio User", + "request_method": "POST", + "request_path": "/minio/remove", + "steps": [ + { + "step_name": "Remove Minio User" + } + ] + }, + { + "disabled_auto_retry": 0, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2023-11-06 07:28:19.392780", + "name": "Enable Minio User", + "request_method": "POST", + "request_path": "/minio/update", + "steps": [ + { + "step_name": "Enable Minio User" + } + ] + }, + { + "disabled_auto_retry": 0, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2023-11-06 07:28:19.405690", + "name": "Disable Minio User", + "request_method": "POST", + "request_path": "/minio/update", + "steps": [ + { + "step_name": "Disable Minio User" + } + ] + }, + { + "disabled_auto_retry": 0, + "docstatus": 0, + "pagetype": "Agent 
Job Type", + "max_retry_count": 6, + "modified": "2023-11-06 07:28:19.256524", + "name": "Cleanup Unused Files", + "request_method": "POST", + "request_path": "server/cleanup", + "steps": [ + { + "step_name": "Remove Archived Benches" + }, + { + "step_name": "Remove Temporary Files" + }, + { + "step_name": "Remove Unused Docker Artefacts" + } + ] + }, + { + "disabled_auto_retry": 0, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2023-11-06 07:28:19.446023", + "name": "Add Backend to ProxySQL", + "request_method": "POST", + "request_path": "/proxysql/backends", + "steps": [ + { + "step_name": "Add Backend to ProxySQL" + } + ] + }, + { + "disabled_auto_retry": 0, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2023-11-06 07:28:19.419313", + "name": "Update Saas Plan", + "request_method": "POST", + "request_path": "/benches/{bench}/sites/{site}/update/saas", + "steps": [ + { + "step_name": "Update Saas Plan" + } + ] + }, + { + "disabled_auto_retry": 0, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2023-11-06 07:28:19.502147", + "name": "Run After Migrate Steps", + "request_method": "POST", + "request_path": "benches/{bench}/sites/{site}/run_after_migrate_steps", + "steps": [ + { + "step_name": "Set Administrator Password" + }, + { + "step_name": "Bench Setup NGINX" + }, + { + "step_name": "Reload NGINX" + }, + { + "step_name": "Disable Maintenance Mode" + }, + { + "step_name": "Enable Scheduler" + } + ] + }, + { + "disabled_auto_retry": 0, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2023-11-06 07:28:19.531987", + "name": "Move Site to Bench", + "request_method": "POST", + "request_path": "/benches/{bench}/sites/{site}/move_to_bench", + "steps": [ + { + "step_name": "Enable Maintenance Mode" + }, + { + "step_name": "Wait for Enqueued Jobs" + }, + { + "step_name": "Move Site" + }, + { + "step_name": "Bench Setup NGINX" + }, + { + "step_name": "Bench Setup NGINX Target" + }, + { + "step_name": "Reload NGINX" + }, + { + "step_name": "Migrate Site" + }, + { + "step_name": "Disable Maintenance Mode" + } + ] + }, + { + "disabled_auto_retry": 0, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2023-11-06 07:28:19.556954", + "name": "Reset Site Usage", + "request_method": "DELETE", + "request_path": "benches/{bench}/sites/{site}/usage", + "steps": [ + { + "step_name": "Reset Site Usage" + } + ] + }, + { + "disabled_auto_retry": 0, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2023-11-06 07:28:19.759939", + "name": "Reload NGINX Job", + "request_method": "POST", + "request_path": "/proxy/reload", + "steps": [ + { + "step_name": "Generate NGINX Configuration" + }, + { + "step_name": "Reload NGINX" + } + ] + }, + { + "disabled_auto_retry": 0, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2023-11-06 07:28:18.725087", + "name": "Backup Site", + "request_method": null, + "request_path": null, + "steps": [ + { + "step_name": "Backup Site" + }, + { + "step_name": "Upload Site Backup to S3" + } + ] + }, + { + "disabled_auto_retry": 0, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2023-11-06 07:28:19.432717", + "name": "Bench Restart", + "request_method": "POST", + "request_path": "/benches/{bench}/restart", + "steps": [ + { + "step_name": "Bench Restart" + } + ] + }, + { + 
"disabled_auto_retry": 0, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2023-11-06 07:28:18.568610", + "name": "Archive Bench", + "request_method": "POST", + "request_path": "/benches/{bench}/archive", + "steps": [ + { + "step_name": "Bench Disable Production" + }, + { + "step_name": "Move Bench to Archived Directory" + } + ] + }, + { + "disabled_auto_retry": 0, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2023-11-06 07:28:19.023648", + "name": "New Bench", + "request_method": "POST", + "request_path": "/benches", + "steps": [ + { + "step_name": "Initialize Bench" + }, + { + "step_name": "Update Bench Configuration" + }, + { + "step_name": "Deploy Bench" + }, + { + "step_name": "Bench Setup NGINX" + } + ] + }, + { + "disabled_auto_retry": 0, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2023-11-06 07:28:19.845251", + "name": "Force Update Bench Limits", + "request_method": "POST", + "request_path": "benches/{bench}/limit", + "steps": [ + { + "step_name": "Stop Bench" + }, + { + "step_name": "Update Bench Memory Limits" + }, + { + "step_name": "Start Bench" + } + ] + }, + { + "disabled_auto_retry": 0, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2023-11-06 07:28:19.822202", + "name": "Update Bench Configuration", + "request_method": "POST", + "request_path": "/benches/{bench}/config", + "steps": [ + { + "step_name": "Update Bench Configuration" + }, + { + "step_name": "Bench Setup NGINX" + }, + { + "step_name": "Generate Docker Compose File" + }, + { + "step_name": "Update Bench Memory Limits" + }, + { + "step_name": "Deploy Bench" + } + ] + }, + { + "disabled_auto_retry": 0, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2023-11-06 07:28:19.803361", + "name": "Rebuild Bench Assets", + "request_method": "POST", + "request_path": "benches/{bench}/rebuild", + "steps": [ + { + "step_name": "Rebuild Bench Assets" + } + ] + }, + { + "disabled_auto_retry": 0, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2023-11-06 07:28:19.861985", + "name": "Optimize Tables", + "request_method": "POST", + "request_path": "benches/{bench}/sites/{site}/optimize", + "steps": [ + { + "step_name": "Optimize Tables" + } + ] + }, + { + "disabled_auto_retry": 1, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2024-02-22 20:01:05.606166", + "name": "Docker Image Build", + "request_method": "POST", + "request_path": "builder/build", + "steps": [ + { + "step_name": "Docker Image Build" + } + ] + }, + { + "disabled_auto_retry": 1, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2024-02-27 13:51:17.142316", + "name": "Patch App", + "request_method": "POST", + "request_path": "/bench/{bench}/patch/{app}", + "steps": [ + { + "step_name": "Git Apply" + }, + { + "step_name": "Rebuild Bench Assets" + }, + { + "step_name": "Bench Restart" + } + ] + }, + { + "disabled_auto_retry": 1, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2024-03-19 11:40:26.237476", + "name": "Call Bench Supervisorctl", + "request_method": "POST", + "request_path": "/benches/{bench}/supervisorctl", + "steps": [ + { + "step_name": "Run Supervisorctl Command" + } + ] + }, + { + "disabled_auto_retry": 1, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + 
"modified": "2024-03-26 10:28:39.750285", + "name": "Run Remote Builder", + "request_method": "POST", + "request_path": "/builder/build", + "steps": [ + { + "step_name": "Build Image" + }, + { + "step_name": "Push Docker Image" + }, + { + "step_name": "Cleanup Context" + } + ] + }, + { + "disabled_auto_retry": 0, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 3, + "modified": "2024-07-11 12:04:37.550558", + "name": "Add Database Index", + "request_method": "POST", + "request_path": "/benches/{bench}/sites/{site}/add-database-index", + "steps": [ + { + "step_name": "Add Database Index With Bench Command" + } + ] + }, + { + "disabled_auto_retry": 0, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2023-11-06 07:28:19.470276", + "name": "Restore Site", + "request_method": "POST", + "request_path": "/benches/{bench}/sites/{site}/restore", + "steps": [ + { + "step_name": "Download Backup Files" + }, + { + "step_name": "Restore Site" + }, + { + "step_name": "Checksum of Downloaded Backup Files" + }, + { + "step_name": "Delete Downloaded Backup Files" + }, + { + "step_name": "Uninstall Unavailable Apps" + }, + { + "step_name": "Migrate Site" + }, + { + "step_name": "Set Administrator Password" + }, + { + "step_name": "Enable Scheduler" + }, + { + "step_name": "Bench Setup NGINX" + }, + { + "step_name": "Reload NGINX" + } + ] + }, + { + "disabled_auto_retry": 1, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 3, + "modified": "2024-07-18 17:49:54.294402", + "name": "Column Statistics", + "request_method": "POST", + "request_path": "/database/column-stats", + "steps": [ + { + "step_name": "Fetch Column Statistics" + } + ] + }, + { + "disabled_auto_retry": 1, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 3, + "modified": "2024-08-09 12:03:58.145567", + "name": "Complete Setup Wizard", + "request_method": "POST", + "request_path": "/benches/{bench}/sites/{site}/complete-setup-wizard", + "steps": [ + { + "step_name": "Complete Setup Wizard" + } + ] + }, + { + "disabled_auto_retry": 0, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 3, + "modified": "2024-06-11 19:43:45.877056", + "name": "Create User", + "request_method": "POST", + "request_path": "/benches/{bench}/sites/{site}/create-user", + "steps": [ + { + "step_name": "Create User" + } + ] + }, + { + "disabled_auto_retry": 0, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2024-08-09 10:10:41.301700", + "name": "New Site", + "request_method": "POST", + "request_path": "/benches/{bench}/sites", + "steps": [ + { + "step_name": "New Site" + }, + { + "step_name": "Install Apps" + }, + { + "step_name": "Update Site Configuration" + }, + { + "step_name": "Enable Scheduler" + }, + { + "step_name": "Create User" + }, + { + "step_name": "Bench Setup NGINX" + }, + { + "step_name": "Reload NGINX" + } + ] + }, + { + "disabled_auto_retry": 0, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2023-11-06 07:28:18.626647", + "name": "Add Site to Upstream", + "request_method": "POST", + "request_path": "/proxy/upstreams/{upstream}/sites", + "steps": [ + { + "step_name": "Add Site File to Upstream Directory" + }, + { + "step_name": "Generate NGINX Configuration" + }, + { + "step_name": "Reload NGINX" + } + ] + }, + { + "disabled_auto_retry": 0, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2023-11-06 
07:28:18.663533", + "name": "Update Site Configuration", + "request_method": "POST", + "request_path": "/benches/{bench}/sites/{site}/config", + "steps": [ + { + "step_name": "Update Site Configuration" + } + ] + }, + { + "disabled_auto_retry": 0, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2023-11-06 07:28:18.587094", + "name": "Remove Site from Upstream", + "request_method": "DELETE", + "request_path": "/proxy/upstreams/{upstream}/sites/{site}", + "steps": [ + { + "step_name": "Remove Site File from Upstream Directory" + }, + { + "step_name": "Generate NGINX Configuration" + }, + { + "step_name": "Reload NGINX" + } + ] + }, + { + "disabled_auto_retry": 0, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 6, + "modified": "2023-11-06 07:28:18.510770", + "name": "Archive Site", + "request_method": null, + "request_path": null, + "steps": [ + { + "step_name": "Archive Site" + }, + { + "step_name": "Bench Setup NGINX" + }, + { + "step_name": "Reload NGINX" + } + ] + }, + { + "disabled_auto_retry": 1, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 3, + "modified": "2024-09-10 12:11:38.844112", + "name": "Recover Update In Place", + "request_method": "POST", + "request_path": "/bench/{bench}/recover_update_inplace", + "steps": [ + { + "step_name": "Enable Maintenance Mode" + }, + { + "step_name": "Deploy Bench" + }, + { + "step_name": "Bench Setup NGINX" + }, + { + "step_name": "Recover Sites" + } + ] + }, + { + "disabled_auto_retry": 1, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 3, + "modified": "2024-09-11 13:28:32.120581", + "name": "Ping Job", + "request_method": "POST", + "request_path": "ping_job", + "steps": [ + { + "step_name": "Ping Step" + } + ] + }, + { + "disabled_auto_retry": 1, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 3, + "modified": "2024-08-20 12:51:05.293373", + "name": "Update Bench In Place", + "request_method": "POST", + "request_path": "/bench/{bench}/update_inplace", + "steps": [ + { + "step_name": "Pull App Changes" + }, + { + "step_name": "Migrate Sites" + }, + { + "step_name": "Rebuild Bench Assets" + }, + { + "step_name": "Commit Container Changes" + }, + { + "step_name": "Bench Restart" + } + ] + }, + { + "disabled_auto_retry": 1, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 1, + "modified": "2024-11-04 14:49:18.592247", + "name": "Create Database User", + "request_method": "POST", + "request_path": "/benches/{bench}/sites/{site}/database/users", + "steps": [ + { + "step_name": "Create Database User" + } + ] + }, + { + "disabled_auto_retry": 1, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 1, + "modified": "2024-11-04 14:49:18.592247", + "name": "Remove Database User", + "request_method": "DELETE", + "request_path": "/benches/{bench}/sites/{site}/database/users/{username}", + "steps": [ + { + "step_name": "Remove Database User" + } + ] + }, + { + "disabled_auto_retry": 1, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 1, + "modified": "2024-11-04 14:49:18.592247", + "name": "Modify Database User Permissions", + "request_method": "POST", + "request_path": "/benches/{bench}/sites/{site}/database/users/{db_user}/permissions", + "steps": [ + { + "step_name": "Modify Database User Permissions" + } + ] + }, + { + "disabled_auto_retry": 1, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 1, + "modified": "2024-10-28 
14:49:19.894247", + "name": "Fetch Database Table Schema", + "request_method": "POST", + "request_path": "/benches/{bench}/sites/{site}/database/schema", + "steps": [ + { + "step_name": "Fetch Database Table Schema" + } + ] + }, + { + "disabled_auto_retry": 1, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 3, + "modified": "2024-12-19 17:21:14.136650", + "name": "Analyze Slow Queries", + "request_method": "POST", + "request_path": "/benches/{bench}/sites/{site}/database/analyze-slow-queries", + "steps": [ + { + "step_name": "Analyze Slow Queries" + } + ] + }, + { + "disabled_auto_retry": 1, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 3, + "modified": "2025-01-09 17:07:45.359754", + "name": "Physical Backup Database", + "request_method": "POST", + "request_path": "/database/physical-backup", + "steps": [ + { + "step_name": "Fetch Database Tables Information" + }, + { + "step_name": "Flush Database Tables" + }, + { + "step_name": "Flush Changes to Disk" + }, + { + "step_name": "Export Table Schema" + }, + { + "step_name": "Collect Files Metadata" + }, + { + "step_name": "Store Backup Metadata" + }, + { + "step_name": "Create Database Snapshot" + }, + { + "step_name": "Unlock Tables" + } + ] + }, + { + "disabled_auto_retry": 1, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 3, + "modified": "2025-01-19 22:57:23.072792", + "name": "Physical Restore Database", + "request_method": "POST", + "request_path": "/database/physical-restore", + "steps": [ + { + "step_name": "Validate Backup Files" + }, + { + "step_name": "Validate Connection to Target Database" + }, + { + "step_name": "Warmup MyISAM Files" + }, + { + "step_name": "Check and Fix MyISAM Table Files" + }, + { + "step_name": "Warmup InnoDB Files" + }, + { + "step_name": "Prepare Database for Restoration" + }, + { + "step_name": "Create Tables from Table Schema" + }, + { + "step_name": "Discard InnoDB Tablespaces" + }, + { + "step_name": "Copying InnoDB Table Files" + }, + { + "step_name": "Import InnoDB Tablespaces" + }, + { + "step_name": "Hold Write Lock on MyISAM Tables" + }, + { + "step_name": "Copying MyISAM Table Files" + }, + { + "step_name": "Validate And Fix Tables" + }, + { + "step_name": "Unlock All Tables" + } + ] + }, + { + "disabled_auto_retry": 1, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 3, + "modified": "2025-01-21 15:31:19.602466", + "name": "Activate Site", + "request_method": "POST", + "request_path": "/benches/{bench}/sites/{site}/activate", + "steps": [ + { + "step_name": "Disable Maintenance Mode" + } + ] + }, + { + "disabled_auto_retry": 1, + "docstatus": 0, + "pagetype": "Agent Job Type", + "max_retry_count": 3, + "modified": "2025-01-21 15:32:21.823510", + "name": "Deactivate Site", + "request_method": "POST", + "request_path": "/benches/{bench}/sites/{site}/deactivate", + "steps": [ + { + "step_name": "Enable Maintenance Mode" + }, + { + "step_name": "Wait for Enqueued Jobs" + } + ] + } +] \ No newline at end of file diff --git a/jcloud/fixtures/bench_dependency.json b/jcloud/fixtures/bench_dependency.json new file mode 100644 index 0000000..ea3f860 --- /dev/null +++ b/jcloud/fixtures/bench_dependency.json @@ -0,0 +1,444 @@ +[ + { + "docstatus": 0, + "pagetype": "Bench Dependency", + "internal": 0, + "modified": "2023-09-26 19:45:32.017532", + "name": "PYTHON_VERSION", + "supported_versions": [ + { + "is_custom": 0, + "parent": "PYTHON_VERSION", + "parentfield": "supported_versions", + "parenttype": "Bench 
Dependency", + "supported_jingrow_version": "Version 12", + "version": "3.7" + }, + { + "is_custom": 0, + "parent": "PYTHON_VERSION", + "parentfield": "supported_versions", + "parenttype": "Bench Dependency", + "supported_jingrow_version": "Version 13", + "version": "3.7" + }, + { + "is_custom": 0, + "parent": "PYTHON_VERSION", + "parentfield": "supported_versions", + "parenttype": "Bench Dependency", + "supported_jingrow_version": "Version 13", + "version": "3.8" + }, + { + "is_custom": 0, + "parent": "PYTHON_VERSION", + "parentfield": "supported_versions", + "parenttype": "Bench Dependency", + "supported_jingrow_version": "Nightly", + "version": "3.10" + }, + { + "is_custom": 0, + "parent": "PYTHON_VERSION", + "parentfield": "supported_versions", + "parenttype": "Bench Dependency", + "supported_jingrow_version": "Version 14", + "version": "3.10" + }, + { + "is_custom": 0, + "parent": "PYTHON_VERSION", + "parentfield": "supported_versions", + "parenttype": "Bench Dependency", + "supported_jingrow_version": "Version 15", + "version": "3.10" + }, + { + "is_custom": 0, + "parent": "PYTHON_VERSION", + "parentfield": "supported_versions", + "parenttype": "Bench Dependency", + "supported_jingrow_version": "Nightly", + "version": "3.11" + }, + { + "is_custom": 0, + "parent": "PYTHON_VERSION", + "parentfield": "supported_versions", + "parenttype": "Bench Dependency", + "supported_jingrow_version": "Version 14", + "version": "3.11" + }, + { + "is_custom": 0, + "parent": "PYTHON_VERSION", + "parentfield": "supported_versions", + "parenttype": "Bench Dependency", + "supported_jingrow_version": "Version 15", + "version": "3.11" + } + ], + "title": "Python Version" + }, + { + "docstatus": 0, + "pagetype": "Bench Dependency", + "internal": 0, + "modified": "2023-10-06 15:45:18.593014", + "name": "NODE_VERSION", + "supported_versions": [ + { + "is_custom": 0, + "parent": "NODE_VERSION", + "parentfield": "supported_versions", + "parenttype": "Bench Dependency", + "supported_jingrow_version": "Nightly", + "version": "18.16.0" + }, + { + "is_custom": 0, + "parent": "NODE_VERSION", + "parentfield": "supported_versions", + "parenttype": "Bench Dependency", + "supported_jingrow_version": "Version 15", + "version": "18.16.0" + }, + { + "is_custom": 0, + "parent": "NODE_VERSION", + "parentfield": "supported_versions", + "parenttype": "Bench Dependency", + "supported_jingrow_version": "Version 14", + "version": "16.11.0" + }, + { + "is_custom": 0, + "parent": "NODE_VERSION", + "parentfield": "supported_versions", + "parenttype": "Bench Dependency", + "supported_jingrow_version": "Version 14", + "version": "16.16.0" + }, + { + "is_custom": 0, + "parent": "NODE_VERSION", + "parentfield": "supported_versions", + "parenttype": "Bench Dependency", + "supported_jingrow_version": "Version 13", + "version": "14.19.0" + }, + { + "is_custom": 0, + "parent": "NODE_VERSION", + "parentfield": "supported_versions", + "parenttype": "Bench Dependency", + "supported_jingrow_version": "Version 12", + "version": "12.19.0" + } + ], + "title": "Node Version" + }, + { + "docstatus": 0, + "pagetype": "Bench Dependency", + "internal": 0, + "modified": "2023-09-27 09:37:02.358511", + "name": "WKHTMLTOPDF_VERSION", + "supported_versions": [ + { + "is_custom": 0, + "parent": "WKHTMLTOPDF_VERSION", + "parentfield": "supported_versions", + "parenttype": "Bench Dependency", + "supported_jingrow_version": "Nightly", + "version": "0.12.4" + }, + { + "is_custom": 0, + "parent": "WKHTMLTOPDF_VERSION", + "parentfield": "supported_versions", + 
"parenttype": "Bench Dependency", + "supported_jingrow_version": "Version 12", + "version": "0.12.4" + }, + { + "is_custom": 0, + "parent": "WKHTMLTOPDF_VERSION", + "parentfield": "supported_versions", + "parenttype": "Bench Dependency", + "supported_jingrow_version": "Version 13", + "version": "0.12.4" + }, + { + "is_custom": 0, + "parent": "WKHTMLTOPDF_VERSION", + "parentfield": "supported_versions", + "parenttype": "Bench Dependency", + "supported_jingrow_version": "Version 14", + "version": "0.12.4" + }, + { + "is_custom": 0, + "parent": "WKHTMLTOPDF_VERSION", + "parentfield": "supported_versions", + "parenttype": "Bench Dependency", + "supported_jingrow_version": "Version 15", + "version": "0.12.4" + }, + { + "is_custom": 0, + "parent": "WKHTMLTOPDF_VERSION", + "parentfield": "supported_versions", + "parenttype": "Bench Dependency", + "supported_jingrow_version": "Nightly", + "version": "0.12.5" + }, + { + "is_custom": 0, + "parent": "WKHTMLTOPDF_VERSION", + "parentfield": "supported_versions", + "parenttype": "Bench Dependency", + "supported_jingrow_version": "Version 12", + "version": "0.12.5" + }, + { + "is_custom": 0, + "parent": "WKHTMLTOPDF_VERSION", + "parentfield": "supported_versions", + "parenttype": "Bench Dependency", + "supported_jingrow_version": "Version 13", + "version": "0.12.5" + }, + { + "is_custom": 0, + "parent": "WKHTMLTOPDF_VERSION", + "parentfield": "supported_versions", + "parenttype": "Bench Dependency", + "supported_jingrow_version": "Version 14", + "version": "0.12.5" + }, + { + "is_custom": 0, + "parent": "WKHTMLTOPDF_VERSION", + "parentfield": "supported_versions", + "parenttype": "Bench Dependency", + "supported_jingrow_version": "Version 15", + "version": "0.12.5" + }, + { + "is_custom": 0, + "parent": "WKHTMLTOPDF_VERSION", + "parentfield": "supported_versions", + "parenttype": "Bench Dependency", + "supported_jingrow_version": "Nightly", + "version": "0.12.6" + }, + { + "is_custom": 0, + "parent": "WKHTMLTOPDF_VERSION", + "parentfield": "supported_versions", + "parenttype": "Bench Dependency", + "supported_jingrow_version": "Version 12", + "version": "0.12.6" + }, + { + "is_custom": 0, + "parent": "WKHTMLTOPDF_VERSION", + "parentfield": "supported_versions", + "parenttype": "Bench Dependency", + "supported_jingrow_version": "Version 13", + "version": "0.12.6" + }, + { + "is_custom": 0, + "parent": "WKHTMLTOPDF_VERSION", + "parentfield": "supported_versions", + "parenttype": "Bench Dependency", + "supported_jingrow_version": "Version 14", + "version": "0.12.6" + }, + { + "is_custom": 0, + "parent": "WKHTMLTOPDF_VERSION", + "parentfield": "supported_versions", + "parenttype": "Bench Dependency", + "supported_jingrow_version": "Version 15", + "version": "0.12.6" + } + ], + "title": "Wkhtmltopdf Version" + }, + { + "docstatus": 0, + "pagetype": "Bench Dependency", + "internal": 0, + "modified": "2023-10-28 22:08:23.853052", + "name": "BENCH_VERSION", + "supported_versions": [ + { + "is_custom": 0, + "parent": "BENCH_VERSION", + "parentfield": "supported_versions", + "parenttype": "Bench Dependency", + "supported_jingrow_version": "Version 14", + "version": "5.15.2" + }, + { + "is_custom": 0, + "parent": "BENCH_VERSION", + "parentfield": "supported_versions", + "parenttype": "Bench Dependency", + "supported_jingrow_version": "Version 13", + "version": "5.15.2" + }, + { + "is_custom": 0, + "parent": "BENCH_VERSION", + "parentfield": "supported_versions", + "parenttype": "Bench Dependency", + "supported_jingrow_version": "Version 12", + "version": "5.15.2" + 
}, + { + "is_custom": 0, + "parent": "BENCH_VERSION", + "parentfield": "supported_versions", + "parenttype": "Bench Dependency", + "supported_jingrow_version": "Version 15", + "version": "5.16.0" + }, + { + "is_custom": 0, + "parent": "BENCH_VERSION", + "parentfield": "supported_versions", + "parenttype": "Bench Dependency", + "supported_jingrow_version": "Nightly", + "version": "5.17.2" + }, + { + "is_custom": 0, + "parent": "BENCH_VERSION", + "parentfield": "supported_versions", + "parenttype": "Bench Dependency", + "supported_jingrow_version": "Version 15", + "version": "5.17.2" + }, + { + "is_custom": 0, + "parent": "BENCH_VERSION", + "parentfield": "supported_versions", + "parenttype": "Bench Dependency", + "supported_jingrow_version": "Version 14", + "version": "5.17.2" + }, + { + "is_custom": 0, + "parent": "BENCH_VERSION", + "parentfield": "supported_versions", + "parenttype": "Bench Dependency", + "supported_jingrow_version": "Version 13", + "version": "5.17.2" + }, + { + "is_custom": 0, + "parent": "BENCH_VERSION", + "parentfield": "supported_versions", + "parenttype": "Bench Dependency", + "supported_jingrow_version": "Version 12", + "version": "5.17.2" + }, + { + "is_custom": 0, + "parent": "BENCH_VERSION", + "parentfield": "supported_versions", + "parenttype": "Bench Dependency", + "supported_jingrow_version": "Nightly", + "version": "5.18.0" + }, + { + "is_custom": 0, + "parent": "BENCH_VERSION", + "parentfield": "supported_versions", + "parenttype": "Bench Dependency", + "supported_jingrow_version": "Version 15", + "version": "5.18.0" + }, + { + "is_custom": 0, + "parent": "BENCH_VERSION", + "parentfield": "supported_versions", + "parenttype": "Bench Dependency", + "supported_jingrow_version": "Version 14", + "version": "5.18.0" + }, + { + "is_custom": 0, + "parent": "BENCH_VERSION", + "parentfield": "supported_versions", + "parenttype": "Bench Dependency", + "supported_jingrow_version": "Version 13", + "version": "5.18.0" + }, + { + "is_custom": 0, + "parent": "BENCH_VERSION", + "parentfield": "supported_versions", + "parenttype": "Bench Dependency", + "supported_jingrow_version": "Version 12", + "version": "5.18.0" + } + ], + "title": "Bench Version" + }, + { + "docstatus": 0, + "pagetype": "Bench Dependency", + "internal": 1, + "modified": "2023-09-26 15:46:26.816012", + "name": "NVM_VERSION", + "supported_versions": [ + { + "is_custom": 0, + "parent": "NVM_VERSION", + "parentfield": "supported_versions", + "parenttype": "Bench Dependency", + "supported_jingrow_version": "Nightly", + "version": "0.39.1" + }, + { + "is_custom": 0, + "parent": "NVM_VERSION", + "parentfield": "supported_versions", + "parenttype": "Bench Dependency", + "supported_jingrow_version": "Version 12", + "version": "0.39.1" + }, + { + "is_custom": 0, + "parent": "NVM_VERSION", + "parentfield": "supported_versions", + "parenttype": "Bench Dependency", + "supported_jingrow_version": "Version 13", + "version": "0.39.1" + }, + { + "is_custom": 0, + "parent": "NVM_VERSION", + "parentfield": "supported_versions", + "parenttype": "Bench Dependency", + "supported_jingrow_version": "Version 14", + "version": "0.39.1" + }, + { + "is_custom": 0, + "parent": "NVM_VERSION", + "parentfield": "supported_versions", + "parenttype": "Bench Dependency", + "supported_jingrow_version": "Version 15", + "version": "0.39.1" + } + ], + "title": "NVM Version" + } +] \ No newline at end of file diff --git a/jcloud/fixtures/cloud_region.json b/jcloud/fixtures/cloud_region.json new file mode 100644 index 0000000..f4bc7b5 --- 
/dev/null +++ b/jcloud/fixtures/cloud_region.json @@ -0,0 +1,98 @@ +[ + { + "docstatus": 0, + "pagetype": "Cloud Region", + "modified": "2023-08-08 12:33:57.631284", + "name": "ap-south-1", + "provider": "AWS EC2", + "region_name": "ap-south-1" + }, + { + "docstatus": 0, + "pagetype": "Cloud Region", + "modified": "2023-08-08 12:34:06.958325", + "name": "eu-west-2", + "provider": "AWS EC2", + "region_name": "eu-west-2" + }, + { + "docstatus": 0, + "pagetype": "Cloud Region", + "modified": "2023-08-08 12:34:15.834702", + "name": "eu-central-2", + "provider": "AWS EC2", + "region_name": "eu-central-2" + }, + { + "docstatus": 0, + "pagetype": "Cloud Region", + "modified": "2023-08-08 12:34:23.204769", + "name": "ap-southeast-1", + "provider": "AWS EC2", + "region_name": "ap-southeast-1" + }, + { + "docstatus": 0, + "pagetype": "Cloud Region", + "modified": "2023-08-08 12:34:40.385001", + "name": "me-south-1", + "provider": "AWS EC2", + "region_name": "me-south-1" + }, + { + "docstatus": 0, + "pagetype": "Cloud Region", + "modified": "2023-08-08 12:34:53.773228", + "name": "af-south-1", + "provider": "AWS EC2", + "region_name": "af-south-1" + }, + { + "docstatus": 0, + "pagetype": "Cloud Region", + "modified": "2023-08-08 12:34:59.528123", + "name": "us-east-1", + "provider": "AWS EC2", + "region_name": "us-east-1" + }, + { + "docstatus": 0, + "pagetype": "Cloud Region", + "modified": "2023-08-08 12:34:33.152010", + "name": "eu-central-1", + "provider": "AWS EC2", + "region_name": "eu-central-1" + }, + { + "docstatus": 0, + "pagetype": "Cloud Region", + "modified": "2024-09-23 10:47:04.247556", + "name": "fsn1", + "provider": "Hetzner", + "region_name": "fsn1" + }, + { + "docstatus": 0, + "pagetype": "Cloud Region", + "modified": "2024-09-23 10:48:01.851504", + "name": "sin", + "provider": "Hetzner", + "region_name": "sin" + }, + { + "docstatus": 0, + "pagetype": "Cloud Region", + "modified": "2024-09-23 10:47:41.953263", + "name": "ash", + "provider": "Hetzner", + "region_name": "ash" + }, + { + "docstatus": 0, + "pagetype": "Cloud Region", + "modified": "2024-09-23 10:48:48.686461", + "name": "nbg1", + "provider": "Hetzner", + "region_name": "nbg1" + } +] \ No newline at end of file diff --git a/jcloud/fixtures/jcloud_job_type.json b/jcloud/fixtures/jcloud_job_type.json new file mode 100644 index 0000000..11cf959 --- /dev/null +++ b/jcloud/fixtures/jcloud_job_type.json @@ -0,0 +1,314 @@ +[ + { + "docstatus": 0, + "pagetype": "Jcloud Job Type", + "modified": "2025-01-02 15:31:03.095292", + "name": "Create Server", + "steps": [ + { + "script": "machine = jingrow.get_pg(\"Virtual Machine\", pg.virtual_machine)\nmachine.provision()\n", + "step_name": "Create Server", + "wait_until_true": 0 + }, + { + "script": "machine = jingrow.get_pg(\"Virtual Machine\", pg.virtual_machine)\nmachine.sync()\nresult = (machine.status == \"Running\", False)\n", + "step_name": "Wait for Server to start", + "wait_until_true": 1 + }, + { + "script": "server = jingrow.get_pg(pg.server_type, pg.server)\nserver.ping_ansible()\n\nplays = jingrow.get_all(\"Ansible Play\", {\"server\": pg.server, \"play\": \"Ping Server\"}, [\"status\"], order_by=\"creation desc\", limit=1)\nresult = (plays and plays[0].status == \"Success\", False)\n", + "step_name": "Wait for Server to be accessible", + "wait_until_true": 1 + }, + { + "script": "server = jingrow.get_pg(pg.server_type, pg.server)\nserver.update_tls_certificate()", + "step_name": "Update TLS Certificate", + "wait_until_true": 0 + }, + { + "script": "plays = 
jingrow.get_all(\"Ansible Play\", {\"server\": pg.server, \"play\": \"Setup TLS Certificates\"}, [\"status\"], order_by=\"creation desc\", limit=1)\nresult = (plays and plays[0].status in (\"Success\", \"Failure\"), False)", + "step_name": "Wait for TLS Certificate to be updated", + "wait_until_true": 1 + }, + { + "script": "server = jingrow.get_pg(pg.server_type, pg.server)\nserver.wait_for_cloud_init()", + "step_name": "Check Cloud Init status", + "wait_until_true": 0 + }, + { + "script": "plays = jingrow.get_all(\"Ansible Play\", {\"server\": pg.server, \"play\": \"Wait for Cloud Init to finish\"}, [\"status\"], order_by=\"creation desc\", limit=1)\nresult = (plays and plays[0].status in (\"Success\", \"Failure\"), False)", + "step_name": "Wait for Cloud Init to finish", + "wait_until_true": 1 + }, + { + "script": "server = jingrow.get_pg(pg.server_type, pg.server)\nserver.update_agent_ansible()", + "step_name": "Update Agent Ansible", + "wait_until_true": 0 + }, + { + "script": "plays = jingrow.get_all(\"Ansible Play\", {\"server\": pg.server, \"play\": \"Update Agent\"}, [\"status\"], order_by=\"creation desc\", limit=1)\nresult = (plays and plays[0].status in (\"Success\", \"Failure\"), False)", + "step_name": "Wait for Agent to be updated", + "wait_until_true": 1 + }, + { + "script": "if pg.server_type == \"Database Server\":\n server = jingrow.get_pg(\"Database Server\", pg.server)\n server.upgrade_mariadb()", + "step_name": "Upgrade MariaDB", + "wait_until_true": 0 + }, + { + "script": "if pg.server_type == \"Database Server\":\n plays = jingrow.get_all(\"Ansible Play\", {\"server\": pg.server, \"play\": \"Upgrade MariaDB\"}, [\"status\"], order_by=\"creation desc\", limit=1)\n result = (plays and plays[0].status == \"Success\", plays and plays[0].status == \"Failure\")\nelse:\n result = (True,)", + "step_name": "Wait for MariaDB Upgrade to Complete", + "wait_until_true": 1 + }, + { + "script": "if pg.server_type == \"Database Server\":\n default_variables = jingrow.get_all(\"MariaDB Variable\", {\"set_on_new_servers\":1}, pluck=\"name\")\n for var_name in default_variables:\n var = jingrow.get_pg(\"MariaDB Variable\", var_name)\n var.set_on_server(pg.server)\n\nserver = jingrow.get_pg(pg.server_type, pg.server)\nserver.set_swappiness()\nserver.add_glass_file()\nserver.update_filebeat()\n\nif pg.server_type == \"Server\":\n server.setup_mysqldump()\n server.install_earlyoom()\n\nserver.validate_mounts()\nserver.save()", + "step_name": "Set additional config", + "wait_until_true": 0 + } + ] + }, + { + "docstatus": 0, + "pagetype": "Jcloud Job Type", + "modified": "2025-01-02 15:33:27.252601", + "name": "Resize Server", + "steps": [ + { + "script": "machine = jingrow.get_pg(\"Virtual Machine\", pg.virtual_machine)\nmachine.stop()", + "step_name": "Stop Virtual Machine", + "wait_until_true": 0 + }, + { + "script": "machine = jingrow.get_pg(\"Virtual Machine\", pg.virtual_machine)\nmachine.sync()\nresult = (machine.status == \"Stopped\", False)", + "step_name": "Wait for Virtual Machine to Stop", + "wait_until_true": 1 + }, + { + "script": "machine = jingrow.get_pg(\"Virtual Machine\", pg.virtual_machine)\nmachine.resize(arguments.machine_type)", + "step_name": "Resize Virtual Machine", + "wait_until_true": 0 + }, + { + "script": "machine = jingrow.get_pg(\"Virtual Machine\", pg.virtual_machine)\ntry:\n machine.start()\n result = (True, False)\nexcept:\n result = (False, False)", + "step_name": "Start Virtual Machine", + "wait_until_true": 1 + }, + { + "script": "machine = 
jingrow.get_pg(\"Virtual Machine\", pg.virtual_machine)\nmachine.sync()\nresult = (machine.status == \"Running\", False)", + "step_name": "Wait for Virtual Machine to Start", + "wait_until_true": 1 + }, + { + "script": "server = jingrow.get_pg(pg.server_type, pg.server)\nserver.ping_ansible()\n\nplays = jingrow.get_all(\"Ansible Play\", {\"server\": pg.server, \"play\": \"Ping Server\"}, [\"status\"], order_by=\"creation desc\", limit=1)\nresult = (plays and plays[0].status == \"Success\", False)\n", + "step_name": "Wait for Server to be accessible", + "wait_until_true": 1 + }, + { + "script": "if pg.server_type == \"Database Server\":\n server = jingrow.get_pg(pg.server_type, pg.server)\n server.adjust_memory_config()\nelif pg.server_type == \"Server\":\n server = jingrow.get_pg(pg.server_type, pg.server)\n server.auto_scale_workers()", + "step_name": "Set additional config", + "wait_until_true": 0 + }, + { + "script": "machine = jingrow.get_pg(\"Virtual Machine\", pg.virtual_machine)\n\nserver = jingrow.get_pg(pg.server_type, pg.server)\nif server.plan:\n plan_disk_size = jingrow.db.get_value(\"Server Plan\", server.plan, \"disk\")\n if plan_disk_size and plan_disk_size > machine.disk_size:\n try:\n server.increase_disk_size(increment=plan_disk_size - machine.disk_size)\n except:\n pass", + "step_name": "Increase Disk Size", + "wait_until_true": 0 + } + ] + }, + { + "docstatus": 0, + "pagetype": "Jcloud Job Type", + "modified": "2024-02-05 17:08:00.514456", + "name": "Create Server Snapshot", + "steps": [ + { + "script": "machine = jingrow.get_pg(\"Virtual Machine\", pg.virtual_machine)\nmachine.stop()\n", + "step_name": "Stop Virtual Machine", + "wait_until_true": 0 + }, + { + "script": "machine = jingrow.get_pg(\"Virtual Machine\", pg.virtual_machine)\nmachine.sync()\nresult = (machine.status == \"Stopped\", False)", + "step_name": "Wait for Virtual Machine to Stop", + "wait_until_true": 1 + }, + { + "script": "machine = jingrow.get_pg(\"Virtual Machine\", pg.virtual_machine)\narguments.image = machine.create_image()", + "step_name": "Create Snapshot", + "wait_until_true": 0 + }, + { + "script": "machine = jingrow.get_pg(\"Virtual Machine\", pg.virtual_machine)\ntry:\n machine.start()\n result = (True, False)\nexcept:\n result = (False, False)", + "step_name": "Start Virtual Machine", + "wait_until_true": 1 + }, + { + "script": "machine = jingrow.get_pg(\"Virtual Machine\", pg.virtual_machine)\nmachine.sync()\nresult = (machine.status == \"Running\", False)", + "step_name": "Wait for Virtual Machine to Start", + "wait_until_true": 1 + }, + { + "script": "image = jingrow.get_pg(\"Virtual Machine Image\", arguments.image)\nimage.sync()\nresult = (image.status == \"Available\", False)", + "step_name": "Wait for Snapshot to Complete", + "wait_until_true": 1 + } + ] + }, + { + "docstatus": 0, + "pagetype": "Jcloud Job Type", + "modified": "2022-11-01 14:35:52.936503", + "name": "Archive Server", + "steps": [ + { + "script": "machine = jingrow.get_pg(\"Virtual Machine\", pg.virtual_machine)\nmachine.disable_termination_protection()", + "step_name": "Disable Termination Protection", + "wait_until_true": 0 + }, + { + "script": "machine = jingrow.get_pg(\"Virtual Machine\", pg.virtual_machine)\nmachine.terminate()", + "step_name": "Terminate Virtual Machine", + "wait_until_true": 0 + }, + { + "script": "machine = jingrow.get_pg(\"Virtual Machine\", pg.virtual_machine)\nmachine.sync()\nresult = (machine.status == \"Terminated\", False)", + "step_name": "Wait for Virtual Machine to Terminate", + 
"wait_until_true": 1 + } + ] + }, + { + "docstatus": 0, + "pagetype": "Jcloud Job Type", + "modified": "2024-01-05 13:40:21.038901", + "name": "Upgrade MariaDB", + "steps": [ + { + "script": "server = jingrow.get_pg(\"Database Server\", pg.server)\nserver.stop_mariadb()", + "step_name": "Stop MariaDB", + "wait_until_true": 0 + }, + { + "script": "plays = jingrow.get_all(\"Ansible Play\", {\"server\": pg.server, \"play\": \"Stop MariaDB\"}, [\"status\"], order_by=\"creation desc\", limit=1)\nresult = (plays and plays[0].status == \"Success\", False)\n", + "step_name": "Wait for MariaDB to Stop", + "wait_until_true": 1 + }, + { + "script": "machine = jingrow.get_pg(\"Virtual Machine\", pg.virtual_machine)\nmachine.create_snapshots()\nsnapshot = jingrow.get_last_pg(\"Virtual Disk Snapshot\", {\"virtual_machine\": pg.virtual_machine})\nsnapshot.add_comment(text=\"Before MariaDB Upgrade\")", + "step_name": "Create Server Snapshot", + "wait_until_true": 0 + }, + { + "script": "server = jingrow.get_pg(\"Database Server\", pg.server)\nserver.upgrade_mariadb()", + "step_name": "Upgrade MariaDB", + "wait_until_true": 0 + }, + { + "script": "plays = jingrow.get_all(\"Ansible Play\", {\"server\": pg.server, \"play\": \"Upgrade MariaDB\"}, [\"status\"], order_by=\"creation desc\", limit=1)\nresult = (plays and plays[0].status == \"Success\", plays and plays[0].status == \"Failure\")\n", + "step_name": "Wait for MariaDB Upgrade to Complete", + "wait_until_true": 1 + } + ] + }, + { + "docstatus": 0, + "pagetype": "Jcloud Job Type", + "modified": "2025-02-14 14:30:00.676187", + "name": "Increase Disk Size", + "steps": [ + { + "script": "server = jingrow.get_pg(pg.server_type, pg.server)\nmountpoint = arguments.labels.get(\"mountpoint\")\nserver.calculated_increase_disk_size(mountpoint=mountpoint)", + "step_name": "Increase Disk Size", + "wait_until_true": 0 + }, + { + "script": "if jingrow.db.get_value(pg.server_type, pg.server, \"provider\") == \"AWS EC2\":\n plays = jingrow.get_all(\"Ansible Play\", {\"server\": pg.server, \"play\": \"Extend EC2 Volume\"}, [\"status\"], order_by=\"creation desc\", limit=1)\n result = (plays and plays[0].status == \"Success\", plays and plays[0].status == \"Failure\")\nelse:\n result = (True, False)", + "step_name": "Wait for partition to resize (AWS)", + "wait_until_true": 1 + }, + { + "script": "server = jingrow.get_pg(pg.server_type, pg.server)\nif server.provider == \"OCI\":\n machine = jingrow.get_pg(\"Virtual Machine\", pg.virtual_machine)\n machine.sync()\n result = (machine.status == \"Running\", False)\nelse:\n result = (True, False)", + "step_name": "Wait for server to start (OCI)", + "wait_until_true": 1 + }, + { + "script": "server = jingrow.get_pg(pg.server_type, pg.server)\nif server.provider == \"OCI\":\n server.ping_ansible()\n\n plays = jingrow.get_all(\"Ansible Play\", {\"server\": pg.server, \"play\": \"Ping Server\"}, [\"status\"], order_by=\"creation desc\", limit=1)\n result = (plays and plays[0].status == \"Success\", False)\nelse:\n result = (True, False)\n", + "step_name": "Wait for server to be accessible (OCI)", + "wait_until_true": 1 + }, + { + "script": "server = jingrow.get_pg(pg.server_type, pg.server)\nif server.provider == \"OCI\":\n server.add_glass_file()\nelse:\n result = (True, False)\n # handled for aws already in extend playbook", + "step_name": "Add glass file back (OCI)", + "wait_until_true": 0 + } + ] + }, + { + "docstatus": 0, + "pagetype": "Jcloud Job Type", + "modified": "2024-05-27 11:29:41.787719", + "name": "Prune Docker 
system", + "steps": [ + { + "script": "server = jingrow.get_pg(pg.server_type, pg.server)\ntelegram = jingrow.get_pg(\"Jcloud Settings\").telegram(\"Information\")\ntelegram.send(f\"Pruning docker cache on [{server.name}]({jingrow.utils.get_url_to_form(server.pagetype, server.name)})\")\nserver.prune_docker_system()", + "step_name": "Prune Docker system", + "wait_until_true": 0 + }, + { + "script": "plays = jingrow.get_all(\"Ansible Play\", {\"server\": pg.server, \"play\": \"Prune Docker System\"}, [\"status\"], order_by=\"creation desc\", limit=1)\nresult = (plays and plays[0].status == \"Success\", plays and plays[0].status == \"Failure\")\n", + "step_name": "Wait for docker system prune", + "wait_until_true": 1 + } + ] + }, + { + "docstatus": 0, + "pagetype": "Jcloud Job Type", + "modified": "2024-06-18 16:03:25.166898", + "name": "Increase Swap", + "steps": [ + { + "script": "server = jingrow.get_pg(pg.server_type, pg.server)\ntelegram = jingrow.get_pg(\"Jcloud Settings\").telegram(\"Information\")\ntelegram.send(f\"Increasing swap on [{server.name}]({jingrow.utils.get_url_to_form(server.pagetype, server.name)})\")\nserver.increase_swap(4)", + "step_name": "Add swap on server", + "wait_until_true": 0 + }, + { + "script": "plays = jingrow.get_all(\"Ansible Play\", {\"server\": pg.server, \"play\": \"Increase Swap\"}, [\"status\"], order_by=\"creation desc\", limit=1)\nresult = (plays and plays[0].status == \"Success\", plays and plays[0].status == \"Failure\")", + "step_name": "Wait for swap to be added", + "wait_until_true": 1 + } + ] + }, + { + "docstatus": 0, + "pagetype": "Jcloud Job Type", + "modified": "2024-12-06 10:59:08.032149", + "name": "Stop and Start Server", + "steps": [ + { + "script": "machine = jingrow.get_pg(\"Virtual Machine\", pg.virtual_machine)\nmachine.stop()", + "step_name": "Stop Virtual Machine", + "wait_until_true": 0 + }, + { + "script": "machine = jingrow.get_pg(\"Virtual Machine\", pg.virtual_machine)\nmachine.sync()\nresult = (machine.status == \"Stopped\", False)", + "step_name": "Wait for Virtual Machine to Stop", + "wait_until_true": 1 + }, + { + "script": "machine = jingrow.get_pg(\"Virtual Machine\", pg.virtual_machine)\ntry:\n machine.start()\n result = (True, False)\nexcept:\n result = (False, False)", + "step_name": "Start Virtual Machine", + "wait_until_true": 1 + }, + { + "script": "machine = jingrow.get_pg(\"Virtual Machine\", pg.virtual_machine)\nmachine.sync()\nresult = (machine.status == \"Running\", False)", + "step_name": "Wait for Virtual Machine to Start", + "wait_until_true": 1 + }, + { + "script": "server = jingrow.get_pg(pg.server_type, pg.server)\nserver.ping_ansible()\n\nplays = jingrow.get_all(\"Ansible Play\", {\"server\": pg.server, \"play\": \"Ping Server\"}, [\"status\"], order_by=\"creation desc\", limit=1)\nresult = (plays and plays[0].status == \"Success\", False)\n", + "step_name": "Wait for Server to be accessible", + "wait_until_true": 1 + } + ] + } +] \ No newline at end of file diff --git a/jcloud/fixtures/jcloud_method_permission.json b/jcloud/fixtures/jcloud_method_permission.json new file mode 100644 index 0000000..3557b67 --- /dev/null +++ b/jcloud/fixtures/jcloud_method_permission.json @@ -0,0 +1,218 @@ +[ + { + "checkbox_label": "View", + "docstatus": 0, + "pagetype": "Jcloud Method Permission", + "document_type": "Server", + "method": "jcloud.api.server.overview", + "modified": "2023-10-30 00:06:29.049045", + "name": "592c9b7d00" + }, + { + "checkbox_label": "Reboot", + "docstatus": 0, + "pagetype": "Jcloud 
Method Permission", + "document_type": "Server", + "method": "jcloud.api.server.reboot", + "modified": "2023-10-28 20:24:00.088011", + "name": "bae89e803c" + }, + { + "checkbox_label": "Login as Admin", + "docstatus": 0, + "pagetype": "Jcloud Method Permission", + "document_type": "Site", + "method": "jcloud.api.site.login", + "modified": "2023-09-04 21:22:19.952471", + "name": "9b1cac0bc7" + }, + { + "checkbox_label": "Database Access", + "docstatus": 0, + "pagetype": "Jcloud Method Permission", + "document_type": "Site", + "method": "jcloud.api.site.enable_database_access", + "modified": "2023-09-04 22:20:23.131654", + "name": "e98deda64d" + }, + { + "checkbox_label": "Update", + "docstatus": 0, + "pagetype": "Jcloud Method Permission", + "document_type": "Release Group", + "method": "jcloud.api.bench.update", + "modified": "2023-08-09 23:22:21.493409", + "name": "3a824a0bfc" + }, + { + "checkbox_label": "Update", + "docstatus": 0, + "pagetype": "Jcloud Method Permission", + "document_type": "Site", + "method": "jcloud.api.site.update", + "modified": "2023-08-09 23:22:16.445271", + "name": "56a21e93ad" + }, + { + "checkbox_label": "View", + "docstatus": 0, + "pagetype": "Jcloud Method Permission", + "document_type": "Site", + "method": "jcloud.api.site.overview", + "modified": "2023-10-30 00:28:47.065270", + "name": "10805b1854" + }, + { + "checkbox_label": "Restore", + "docstatus": 0, + "pagetype": "Jcloud Method Permission", + "document_type": "Site", + "method": "jcloud.api.site.restore", + "modified": "2023-08-10 23:21:37.969947", + "name": "8d80a2c6cb" + }, + { + "checkbox_label": "Migrate", + "docstatus": 0, + "pagetype": "Jcloud Method Permission", + "document_type": "Site", + "method": "jcloud.api.site.migrate", + "modified": "2023-09-25 11:41:46.915505", + "name": "f408433d45" + }, + { + "checkbox_label": "View", + "docstatus": 0, + "pagetype": "Jcloud Method Permission", + "document_type": "Release Group", + "method": "jcloud.api.bench.get", + "modified": "2023-08-11 11:08:02.298748", + "name": "bfd7914b01" + }, + { + "checkbox_label": "Update", + "docstatus": 0, + "pagetype": "Jcloud Method Permission", + "document_type": "Site", + "method": "jcloud.api.site.update", + "modified": "2023-08-11 11:14:41.597516", + "name": "1807bf655e" + }, + { + "checkbox_label": "Deploy", + "docstatus": 0, + "pagetype": "Jcloud Method Permission", + "document_type": "Release Group", + "method": "jcloud.api.bench.deploy_and_update", + "modified": "2023-08-16 00:02:26.396406", + "name": "7cb4ecfd3a" + }, + { + "checkbox_label": "Archive", + "docstatus": 0, + "pagetype": "Jcloud Method Permission", + "document_type": "Site", + "method": "jcloud.api.site.archive", + "modified": "2023-09-25 21:34:34.964150", + "name": "ab2bec02b1" + }, + { + "checkbox_label": "Reset", + "docstatus": 0, + "pagetype": "Jcloud Method Permission", + "document_type": "Site", + "method": "jcloud.api.site.reset", + "modified": "2023-09-25 21:36:30.485983", + "name": "17c4806fba" + }, + { + "checkbox_label": "Deactivate", + "docstatus": 0, + "pagetype": "Jcloud Method Permission", + "document_type": "Site", + "method": "jcloud.api.site.deactivate", + "modified": "2023-09-25 21:36:56.303022", + "name": "91dcf28e26" + }, + { + "checkbox_label": "SSH Access", + "docstatus": 0, + "pagetype": "Jcloud Method Permission", + "document_type": "Release Group", + "method": "jcloud.api.bench.generate_certificate", + "modified": "2023-09-25 21:40:01.706266", + "name": "a1b64a0abb" + }, + { + "checkbox_label": "Restart", + "docstatus": 
0, + "pagetype": "Jcloud Method Permission", + "document_type": "Release Group", + "method": "jcloud.api.bench.restart", + "modified": "2023-09-25 21:40:40.280318", + "name": "07cba0fe8f" + }, + { + "checkbox_label": "Change Plan", + "docstatus": 0, + "pagetype": "Jcloud Method Permission", + "document_type": "Site", + "method": "jcloud.api.site.change_plan", + "modified": "2023-09-25 21:42:47.001379", + "name": "977ecf4b96" + }, + { + "checkbox_label": "Download Backups", + "docstatus": 0, + "pagetype": "Jcloud Method Permission", + "document_type": "Site", + "method": "jcloud.api.site.get_backup_link", + "modified": "2023-09-25 21:46:24.621413", + "name": "084fe1db67" + }, + { + "checkbox_label": "View", + "docstatus": 0, + "pagetype": "Jcloud Method Permission", + "document_type": "Database Server", + "method": "jcloud.api.server.overview", + "modified": "2023-10-30 00:06:13.793641", + "name": "3376d83172" + }, + { + "checkbox_label": "Drop", + "docstatus": 0, + "pagetype": "Jcloud Method Permission", + "document_type": "Server", + "method": "jcloud.api.server.archive", + "modified": "2023-10-30 00:04:57.685127", + "name": "f636248627" + }, + { + "checkbox_label": "Drop", + "docstatus": 0, + "pagetype": "Jcloud Method Permission", + "document_type": "Database Server", + "method": "jcloud.api.server.archive", + "modified": "2023-10-30 00:05:06.341786", + "name": "3eacc28055" + }, + { + "checkbox_label": "Reboot", + "docstatus": 0, + "pagetype": "Jcloud Method Permission", + "document_type": "Database Server", + "method": "jcloud.api.server.reboot", + "modified": "2023-10-30 00:05:29.046083", + "name": "58dee80d9c" + }, + { + "checkbox_label": "View", + "docstatus": 0, + "pagetype": "Jcloud Method Permission", + "document_type": "Marketplace App", + "method": "jcloud.api.marketplace.get_app", + "modified": "2023-10-30 00:15:04.814190", + "name": "fbec67f5d7" + } +] \ No newline at end of file diff --git a/jcloud/fixtures/jcloud_webhook_event.json b/jcloud/fixtures/jcloud_webhook_event.json new file mode 100644 index 0000000..f74521b --- /dev/null +++ b/jcloud/fixtures/jcloud_webhook_event.json @@ -0,0 +1,20 @@ +[ + { + "description": "获取站点订阅计划变更的通知", + "docstatus": 0, + "pagetype": "Jcloud Webhook Event", + "enabled": 1, + "modified": "2024-09-23 12:06:21.293809", + "name": "Site Plan Change", + "title": "站点套餐变更" + }, + { + "description": "等待中、安装中、更新中、活跃、不活跃、异常、已归档、已暂停", + "docstatus": 0, + "pagetype": "Jcloud Webhook Event", + "enabled": 1, + "modified": "2024-09-23 11:47:30.206054", + "name": "Site Status Update", + "title": "站点状态更新" + } +] \ No newline at end of file diff --git a/jcloud/fixtures/jingrow_version.json b/jcloud/fixtures/jingrow_version.json new file mode 100644 index 0000000..d7a94f5 --- /dev/null +++ b/jcloud/fixtures/jingrow_version.json @@ -0,0 +1,237 @@ +[ + { + "default": 0, + "dependencies": [ + { + "dependency": "NVM_VERSION", + "parent": "Version 12", + "parentfield": "dependencies", + "parenttype": "Jingrow Version", + "version": "0.36.0" + }, + { + "dependency": "NODE_VERSION", + "parent": "Version 12", + "parentfield": "dependencies", + "parenttype": "Jingrow Version", + "version": "12.19.0" + }, + { + "dependency": "PYTHON_VERSION", + "parent": "Version 12", + "parentfield": "dependencies", + "parenttype": "Jingrow Version", + "version": "3.7" + }, + { + "dependency": "WKHTMLTOPDF_VERSION", + "parent": "Version 12", + "parentfield": "dependencies", + "parenttype": "Jingrow Version", + "version": "0.12.5" + }, + { + "dependency": "BENCH_VERSION", + "parent": 
"Version 12", + "parentfield": "dependencies", + "parenttype": "Jingrow Version", + "version": "5.15.2" + } + ], + "docstatus": 0, + "pagetype": "Jingrow Version", + "modified": "2024-06-27 14:45:44.660933", + "name": "Version 12", + "number": 12, + "public": 0, + "status": "End of Life" + }, + { + "default": 0, + "dependencies": [ + { + "dependency": "NVM_VERSION", + "parent": "Version 13", + "parentfield": "dependencies", + "parenttype": "Jingrow Version", + "version": "0.36.0" + }, + { + "dependency": "NODE_VERSION", + "parent": "Version 13", + "parentfield": "dependencies", + "parenttype": "Jingrow Version", + "version": "14.19.0" + }, + { + "dependency": "PYTHON_VERSION", + "parent": "Version 13", + "parentfield": "dependencies", + "parenttype": "Jingrow Version", + "version": "3.8" + }, + { + "dependency": "WKHTMLTOPDF_VERSION", + "parent": "Version 13", + "parentfield": "dependencies", + "parenttype": "Jingrow Version", + "version": "0.12.5" + }, + { + "dependency": "BENCH_VERSION", + "parent": "Version 13", + "parentfield": "dependencies", + "parenttype": "Jingrow Version", + "version": "5.15.2" + } + ], + "docstatus": 0, + "pagetype": "Jingrow Version", + "modified": "2024-06-27 14:45:38.192207", + "name": "Version 13", + "number": 13, + "public": 1, + "status": "End of Life" + }, + { + "default": 0, + "dependencies": [ + { + "dependency": "NVM_VERSION", + "parent": "Nightly", + "parentfield": "dependencies", + "parenttype": "Jingrow Version", + "version": "0.36.0" + }, + { + "dependency": "NODE_VERSION", + "parent": "Nightly", + "parentfield": "dependencies", + "parenttype": "Jingrow Version", + "version": "18.16.0" + }, + { + "dependency": "PYTHON_VERSION", + "parent": "Nightly", + "parentfield": "dependencies", + "parenttype": "Jingrow Version", + "version": "3.11" + }, + { + "dependency": "WKHTMLTOPDF_VERSION", + "parent": "Nightly", + "parentfield": "dependencies", + "parenttype": "Jingrow Version", + "version": "0.12.5" + }, + { + "dependency": "BENCH_VERSION", + "parent": "Nightly", + "parentfield": "dependencies", + "parenttype": "Jingrow Version", + "version": "5.22.6" + } + ], + "docstatus": 0, + "pagetype": "Jingrow Version", + "modified": "2023-07-21 19:46:37.813960", + "name": "Nightly", + "number": 15, + "public": 1, + "status": "Develop" + }, + { + "default": 0, + "dependencies": [ + { + "dependency": "NVM_VERSION", + "parent": "Version 14", + "parentfield": "dependencies", + "parenttype": "Jingrow Version", + "version": "0.36.0" + }, + { + "dependency": "NODE_VERSION", + "parent": "Version 14", + "parentfield": "dependencies", + "parenttype": "Jingrow Version", + "version": "14.19.0" + }, + { + "dependency": "PYTHON_VERSION", + "parent": "Version 14", + "parentfield": "dependencies", + "parenttype": "Jingrow Version", + "version": "3.10" + }, + { + "dependency": "WKHTMLTOPDF_VERSION", + "parent": "Version 14", + "parentfield": "dependencies", + "parenttype": "Jingrow Version", + "version": "0.12.5" + }, + { + "dependency": "BENCH_VERSION", + "parent": "Version 14", + "parentfield": "dependencies", + "parenttype": "Jingrow Version", + "version": "5.15.2" + } + ], + "docstatus": 0, + "pagetype": "Jingrow Version", + "modified": "2023-07-21 19:44:34.122005", + "name": "Version 14", + "number": 14, + "public": 1, + "status": "Stable" + }, + { + "default": 1, + "dependencies": [ + { + "dependency": "NVM_VERSION", + "parent": "Version 15", + "parentfield": "dependencies", + "parenttype": "Jingrow Version", + "version": "0.36.0" + }, + { + "dependency": "NODE_VERSION", + 
"parent": "Version 15", + "parentfield": "dependencies", + "parenttype": "Jingrow Version", + "version": "18.16.0" + }, + { + "dependency": "PYTHON_VERSION", + "parent": "Version 15", + "parentfield": "dependencies", + "parenttype": "Jingrow Version", + "version": "3.11" + }, + { + "dependency": "WKHTMLTOPDF_VERSION", + "parent": "Version 15", + "parentfield": "dependencies", + "parenttype": "Jingrow Version", + "version": "0.12.5" + }, + { + "dependency": "BENCH_VERSION", + "parent": "Version 15", + "parentfield": "dependencies", + "parenttype": "Jingrow Version", + "version": "5.22.6" + } + ], + "docstatus": 0, + "pagetype": "Jingrow Version", + "modified": "2023-07-19 17:31:05.044895", + "name": "Version 15", + "number": 15, + "public": 1, + "status": "Stable" + } +] \ No newline at end of file diff --git a/jcloud/fixtures/mariadb_variable.json b/jcloud/fixtures/mariadb_variable.json new file mode 100644 index 0000000..068dcd8 --- /dev/null +++ b/jcloud/fixtures/mariadb_variable.json @@ -0,0 +1,434 @@ +[ + { + "datatype": "Str", + "default_value": "0", + "pg_section": "server", + "docstatus": 0, + "pagetype": "MariaDB Variable", + "dynamic": 0, + "modified": "2023-07-27 18:21:51.984252", + "name": "performance_schema", + "set_on_new_servers": 0, + "skippable": 0 + }, + { + "datatype": "Str", + "default_value": "\"\"", + "pg_section": "server", + "docstatus": 0, + "pagetype": "MariaDB Variable", + "dynamic": 0, + "modified": "2023-07-27 17:55:19.751992", + "name": "performance-schema-instrument", + "set_on_new_servers": 0, + "skippable": 0 + }, + { + "datatype": "Str", + "default_value": "OFF", + "pg_section": "server", + "docstatus": 0, + "pagetype": "MariaDB Variable", + "dynamic": 0, + "modified": "2023-07-27 17:55:19.766945", + "name": "performance-schema-consumer-events-stages-current", + "set_on_new_servers": 0, + "skippable": 0 + }, + { + "datatype": "Str", + "default_value": "OFF", + "pg_section": "server", + "docstatus": 0, + "pagetype": "MariaDB Variable", + "dynamic": 0, + "modified": "2023-07-27 17:55:19.784028", + "name": "performance-schema-consumer-events-stages-history", + "set_on_new_servers": 0, + "skippable": 0 + }, + { + "datatype": "Str", + "default_value": "OFF", + "pg_section": "server", + "docstatus": 0, + "pagetype": "MariaDB Variable", + "dynamic": 0, + "modified": "2023-07-27 17:55:19.809367", + "name": "performance-schema-consumer-events-stages-history-long", + "set_on_new_servers": 0, + "skippable": 0 + }, + { + "datatype": "Str", + "default_value": "OFF", + "pg_section": "server", + "docstatus": 0, + "pagetype": "MariaDB Variable", + "dynamic": 0, + "modified": "2023-07-27 17:55:19.833060", + "name": "performance-schema-consumer-events-statements-current", + "set_on_new_servers": 0, + "skippable": 0 + }, + { + "datatype": "Str", + "default_value": "OFF", + "pg_section": "server", + "docstatus": 0, + "pagetype": "MariaDB Variable", + "dynamic": 0, + "modified": "2023-07-27 17:55:19.851892", + "name": "performance-schema-consumer-events-statements-history", + "set_on_new_servers": 0, + "skippable": 0 + }, + { + "datatype": "Str", + "default_value": "OFF", + "pg_section": "server", + "docstatus": 0, + "pagetype": "MariaDB Variable", + "dynamic": 0, + "modified": "2023-07-27 17:55:19.870716", + "name": "performance-schema-consumer-events-statements-history-long", + "set_on_new_servers": 0, + "skippable": 0 + }, + { + "datatype": "Str", + "default_value": "OFF", + "pg_section": "server", + "docstatus": 0, + "pagetype": "MariaDB Variable", + "dynamic": 0, + 
"modified": "2023-07-27 17:55:19.951921", + "name": "performance-schema-consumer-events-waits-current", + "set_on_new_servers": 0, + "skippable": 0 + }, + { + "datatype": "Str", + "default_value": "OFF", + "pg_section": "server", + "docstatus": 0, + "pagetype": "MariaDB Variable", + "dynamic": 0, + "modified": "2023-07-27 17:55:19.971205", + "name": "performance-schema-consumer-events-waits-history", + "set_on_new_servers": 0, + "skippable": 0 + }, + { + "datatype": "Str", + "default_value": "OFF", + "pg_section": "server", + "docstatus": 0, + "pagetype": "MariaDB Variable", + "dynamic": 0, + "modified": "2023-07-27 17:55:19.987419", + "name": "performance-schema-consumer-events-waits-history-long", + "set_on_new_servers": 0, + "skippable": 0 + }, + { + "datatype": "Int", + "default_value": "5120", + "pg_section": "server", + "docstatus": 0, + "pagetype": "MariaDB Variable", + "dynamic": 1, + "modified": "2024-11-22 12:51:29.101315", + "name": "tmp_disk_table_size", + "set_on_new_servers": 0, + "skippable": 0 + }, + { + "datatype": "Str", + "default_value": "5", + "pg_section": "server", + "docstatus": 0, + "pagetype": "MariaDB Variable", + "dynamic": 1, + "modified": "2024-11-22 12:52:00.473677", + "name": "extra_max_connections", + "set_on_new_servers": 0, + "skippable": 0 + }, + { + "datatype": "Str", + "default_value": "200", + "pg_section": "server", + "docstatus": 0, + "pagetype": "MariaDB Variable", + "dynamic": 1, + "modified": "2024-11-22 12:50:54.084797", + "name": "max_connections", + "set_on_new_servers": 0, + "skippable": 0 + }, + { + "datatype": "Str", + "default_value": null, + "pg_section": "server", + "docstatus": 0, + "pagetype": "MariaDB Variable", + "dynamic": 1, + "modified": "2024-01-12 14:21:03.949888", + "name": "max_user_connections", + "set_on_new_servers": 0, + "skippable": 0 + }, + { + "datatype": "Str", + "default_value": null, + "pg_section": "replication-and-binary-log", + "docstatus": 0, + "pagetype": "MariaDB Variable", + "dynamic": 1, + "modified": "2024-03-05 12:59:31.069472", + "name": "binlog_expire_logs_seconds", + "set_on_new_servers": 0, + "skippable": 0 + }, + { + "datatype": "Int", + "default_value": "32", + "pg_section": "server", + "docstatus": 0, + "pagetype": "MariaDB Variable", + "dynamic": 1, + "modified": "2024-06-12 10:30:47.403256", + "name": "max_heap_table_size", + "set_on_new_servers": 0, + "skippable": 0 + }, + { + "datatype": "Str", + "default_value": "", + "pg_section": "replication-and-binary-log", + "docstatus": 0, + "pagetype": "MariaDB Variable", + "dynamic": 1, + "modified": "2023-08-03 14:20:47.702196", + "name": "expire_logs_days", + "set_on_new_servers": 0, + "skippable": 0 + }, + { + "datatype": "Str", + "default_value": "37", + "pg_section": "innodb", + "docstatus": 0, + "pagetype": "MariaDB Variable", + "dynamic": 1, + "modified": "2023-09-15 15:21:13.086214", + "name": "innodb_old_blocks_pct", + "set_on_new_servers": 0, + "skippable": 0 + }, + { + "datatype": "Int", + "default_value": null, + "pg_section": "innodb", + "docstatus": 0, + "pagetype": "MariaDB Variable", + "dynamic": 1, + "modified": "2023-06-15 14:41:36.038646", + "name": "innodb_buffer_pool_size", + "set_on_new_servers": 0, + "skippable": 0 + }, + { + "datatype": "Str", + "default_value": "/var/lib/mysql/mysql-bin", + "pg_section": "replication-and-binary-log", + "docstatus": 0, + "pagetype": "MariaDB Variable", + "dynamic": 0, + "modified": "2023-06-15 14:41:09.156699", + "name": "log_bin", + "set_on_new_servers": 0, + "skippable": 1 + }, + { + "datatype": 
"Str", + "default_value": "5000", + "pg_section": "innodb", + "docstatus": 0, + "pagetype": "MariaDB Variable", + "dynamic": 1, + "modified": "2024-11-22 12:50:46.547631", + "name": "innodb_old_blocks_time", + "set_on_new_servers": 0, + "skippable": 0 + }, + { + "datatype": "Int", + "default_value": "512", + "pg_section": "server", + "docstatus": 0, + "pagetype": "MariaDB Variable", + "dynamic": 1, + "modified": "2024-11-22 12:51:34.994866", + "name": "max_allowed_packet", + "set_on_new_servers": 0, + "skippable": 0 + }, + { + "datatype": "Str", + "default_value": "3600", + "pg_section": "server", + "docstatus": 0, + "pagetype": "MariaDB Variable", + "dynamic": 1, + "modified": "2024-11-22 12:50:58.514076", + "name": "max_statement_time", + "set_on_new_servers": 0, + "skippable": 0 + }, + { + "datatype": "Str", + "default_value": "ON", + "pg_section": "innodb", + "docstatus": 0, + "pagetype": "MariaDB Variable", + "dynamic": 1, + "modified": "2024-12-04 16:30:16.720727", + "name": "innodb_print_all_deadlocks", + "set_on_new_servers": 0, + "skippable": 0 + }, + { + "datatype": "Str", + "default_value": null, + "pg_section": "innodb", + "docstatus": 0, + "pagetype": "MariaDB Variable", + "dynamic": 1, + "modified": "2024-08-31 13:53:59.926965", + "name": "innodb_strict_mode", + "set_on_new_servers": 0, + "skippable": 0 + }, + { + "datatype": "Str", + "default_value": "50", + "pg_section": "server", + "docstatus": 0, + "pagetype": "MariaDB Variable", + "dynamic": 1, + "modified": "2024-08-14 15:54:38.037523", + "name": "innodb_lock_wait_timeout", + "set_on_new_servers": 0, + "skippable": 0 + }, + { + "datatype": "Str", + "default_value": null, + "pg_section": "server", + "docstatus": 0, + "pagetype": "MariaDB Variable", + "dynamic": 1, + "modified": "2024-10-09 09:00:46.202412", + "name": "net_buffer_length", + "set_on_new_servers": 0, + "skippable": 0 + }, + { + "datatype": "Str", + "default_value": null, + "pg_section": "server", + "docstatus": 0, + "pagetype": "MariaDB Variable", + "dynamic": 1, + "modified": "2024-10-21 13:02:20.343316", + "name": "net_read_timeout", + "set_on_new_servers": 0, + "skippable": 0 + }, + { + "datatype": "Str", + "default_value": null, + "pg_section": "server", + "docstatus": 0, + "pagetype": "MariaDB Variable", + "dynamic": 1, + "modified": "2024-10-21 13:03:05.958798", + "name": "net_write_timeout", + "set_on_new_servers": 0, + "skippable": 0 + }, + { + "datatype": "Str", + "default_value": "FORCE", + "pg_section": "server", + "docstatus": 0, + "pagetype": "MariaDB Variable", + "dynamic": 0, + "modified": "2024-11-22 12:31:31.593757", + "name": "myisam_recover_options", + "set_on_new_servers": 0, + "skippable": 0 + }, + { + "datatype": "Str", + "default_value": "3307", + "pg_section": "server", + "docstatus": 0, + "pagetype": "MariaDB Variable", + "dynamic": 0, + "modified": "2024-11-22 12:52:35.958089", + "name": "extra_port", + "set_on_new_servers": 0, + "skippable": 0 + }, + { + "datatype": "Str", + "default_value": "MIXED", + "pg_section": "replication-and-binary-log", + "docstatus": 0, + "pagetype": "MariaDB Variable", + "dynamic": 0, + "modified": "2024-12-12 12:59:33.579411", + "name": "binlog_format", + "set_on_new_servers": 0, + "skippable": 0 + }, + { + "datatype": "Str", + "default_value": null, + "pg_section": "server", + "docstatus": 0, + "pagetype": "MariaDB Variable", + "dynamic": 1, + "modified": "2025-01-13 12:24:41.969815", + "name": "long_query_time", + "set_on_new_servers": 0, + "skippable": 0 + }, + { + "datatype": "Str", + 
"default_value": null, + "pg_section": "server", + "docstatus": 0, + "pagetype": "MariaDB Variable", + "dynamic": 0, + "modified": "2025-01-24 15:40:01.244238", + "name": "tmpdir", + "set_on_new_servers": 0, + "skippable": 0 + }, + { + "datatype": "Str", + "default_value": "ON", + "pg_section": "innodb", + "docstatus": 0, + "pagetype": "MariaDB Variable", + "dynamic": 1, + "modified": "2025-03-04 16:43:33.176673", + "name": "innodb_status_output_locks", + "set_on_new_servers": 0, + "skippable": 0 + } +] \ No newline at end of file diff --git a/jcloud/fixtures/print_format.json b/jcloud/fixtures/print_format.json new file mode 100644 index 0000000..a58b6d4 --- /dev/null +++ b/jcloud/fixtures/print_format.json @@ -0,0 +1,28 @@ +[ + { + "align_labels_right": 0, + "css": ".print-format {\n margin-top: 1cm;\n}\n\n.print-format td, .print-format th {\n padding-left: 0 !important;\n padding-right: 0 !important;\n}\n\n.print-format th {\n background-color: white !important;\n border-bottom: 1px solid #d2d2d2 !important;\n}\n\n.print-format .table > tbody > tr > td {\n border-top-color: transparent;\n}", + "custom_format": 0, + "default_print_language": null, + "disabled": 0, + "pg_type": "Invoice", + "docstatus": 0, + "pagetype": "Print Format", + "font": "Default", + "format_data": "[{\"fieldname\": \"print_heading_template\", \"fieldtype\": \"Custom HTML\", \"options\": \"{% set months = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December'] %}\\n\\n
\\n Invoice for {{ months[pg.month - 1] }} {{ pg.year}} billing period\\n
\"}, {\"fieldtype\": \"Section Break\", \"label\": \"\"}, {\"fieldtype\": \"Column Break\"}, {\"fieldname\": \"_custom_html\", \"print_hide\": 0, \"label\": \"Custom HTML\", \"fieldtype\": \"HTML\", \"options\": \"
\\n \\n
\\n Jingrow Technologies Pvt. Ltd.
\\n D/324, Neelkanth Business Park,
\\n Vidyavihar (W), Mumbai 4000 86, China.\\n
\\n
\\n\\n
\\n \\n
\\n {% set address = jingrow.db.get_value('Team', pg.team, 'billing_address') %}\\n {{ jingrow.get_pg('Address', address).get_display() if address else '' }}\\n
\\n
\"}, {\"fieldtype\": \"Column Break\"}, {\"fieldname\": \"_custom_html\", \"print_hide\": 0, \"label\": \"Custom HTML\", \"fieldtype\": \"HTML\", \"options\": \"{% set details = {\\n 'Invoice': pg.name,\\n 'Payment due on': jingrow.utils.global_date_format(pg.due_date)\\n} %}\\n{% for d in details %}\\n
\\n
\\n \\n
\\n
\\n {{ details[d] }}\\n
\\n
\\n{% endfor %}\"}, {\"fieldtype\": \"Section Break\", \"label\": \"\"}, {\"fieldtype\": \"Column Break\"}, {\"fieldname\": \"_custom_html\", \"print_hide\": 0, \"label\": \"Custom HTML\", \"fieldtype\": \"HTML\", \"options\": \"

\\n Total due {{ pg.get_formatted('amount_due') }} on {{\\n jingrow.utils.global_date_format(pg.due_date) }}\\n

\\n\\n

Usage Charges

\\n\\n\\n \\n \\n \\n \\n \\n \\n \\n \\n \\n {% for row in pg.site_usage %} {% set lineitem = pg.items[loop.index - 1]\\n %}\\n \\n \\n \\n \\n \\n \\n {% endfor %}\\n \\n \\n \\n \\n \\n \\n \\n \\n {% if pg.amount_due > 0 %}\\n \\n \\n \\n \\n \\n \\n \\n \\n {% endif %}\\n
SitePlanDaysAmount
{{ row.site }}{{ jingrow.db.get_value('Plan', row.plan, 'plan_title') }}{{ jingrow.utils.cint(row.days_active) }}{{ lineitem.get_formatted('amount') }}
\\n Total\\n {{ pg.get_formatted('total') }}
\\n Applied Credit Balance\\n {{ pg.get_formatted('starting_balance') }}
\\n Amount Due\\n {{ pg.get_formatted('amount_due') }}
\\n\"}, {\"fieldtype\": \"Section Break\", \"label\": \"Stripe Billing\"}, {\"fieldtype\": \"Column Break\"}, {\"fieldname\": \"_custom_html\", \"print_hide\": 0, \"label\": \"Custom HTML\", \"fieldtype\": \"HTML\", \"options\": \"
\\n If you have added a card on file, then it will automatically be charged.
If not, pay using the following link: {{ pg.stripe_invoice_url }}\\n
\"}]", + "html": null, + "line_breaks": 0, + "modified": "2020-07-23 12:04:26.876744", + "module": "Jcloud", + "name": "Jingrow Brand 2020", + "parent": null, + "parentfield": null, + "parenttype": null, + "print_format_builder": 1, + "print_format_type": "Jinja", + "raw_commands": null, + "raw_printing": 0, + "show_section_headings": 0, + "standard": "No" + } +] \ No newline at end of file diff --git a/jcloud/fixtures/role.json b/jcloud/fixtures/role.json new file mode 100644 index 0000000..d334f4b --- /dev/null +++ b/jcloud/fixtures/role.json @@ -0,0 +1,65 @@ +[ + { + "bulk_actions": 0, + "dashboard": 0, + "desk_access": 0, + "disabled": 0, + "docstatus": 0, + "pagetype": "Role", + "form_sidebar": 0, + "home_page": null, + "is_custom": 0, + "list_sidebar": 0, + "modified": "2024-05-27 11:14:43.545958", + "name": "Jcloud Support Agent", + "notifications": 0, + "restrict_to_domain": null, + "role_name": "Jcloud Support Agent", + "search_bar": 0, + "timeline": 0, + "two_factor_auth": 0, + "view_switcher": 0 + }, + { + "bulk_actions": 0, + "dashboard": 0, + "desk_access": 0, + "disabled": 0, + "docstatus": 0, + "pagetype": "Role", + "form_sidebar": 0, + "home_page": null, + "is_custom": 0, + "list_sidebar": 0, + "modified": "2020-04-06 22:48:03.538506", + "name": "Jcloud Admin", + "notifications": 0, + "restrict_to_domain": null, + "role_name": "Jcloud Admin", + "search_bar": 0, + "timeline": 0, + "two_factor_auth": 0, + "view_switcher": 0 + }, + { + "bulk_actions": 0, + "dashboard": 0, + "desk_access": 0, + "disabled": 0, + "docstatus": 0, + "pagetype": "Role", + "form_sidebar": 0, + "home_page": null, + "is_custom": 0, + "list_sidebar": 0, + "modified": "2020-04-06 22:48:33.006868", + "name": "Jcloud Member", + "notifications": 0, + "restrict_to_domain": null, + "role_name": "Jcloud Member", + "search_bar": 0, + "timeline": 0, + "two_factor_auth": 0, + "view_switcher": 0 + } +] \ No newline at end of file diff --git a/jcloud/fixtures/server_storage_plan.json b/jcloud/fixtures/server_storage_plan.json new file mode 100644 index 0000000..7081e2f --- /dev/null +++ b/jcloud/fixtures/server_storage_plan.json @@ -0,0 +1,12 @@ +[ + { + "docstatus": 0, + "pagetype": "Server Storage Plan", + "enabled": 1, + "modified": "2024-06-27 12:34:02.292945", + "name": "Add-on Storage plan", + "price_cny": 16.4, + "price_usd": 0.2, + "title": "Add-on Storage plan" + } +] \ No newline at end of file diff --git a/jcloud/fixtures/site_config_key.json b/jcloud/fixtures/site_config_key.json new file mode 100644 index 0000000..eebe9e0 --- /dev/null +++ b/jcloud/fixtures/site_config_key.json @@ -0,0 +1,730 @@ +[ + { + "description": null, + "docstatus": 0, + "pagetype": "Site Config Key", + "internal": 0, + "key": "dropbox_secret_key", + "modified": "2020-09-15 14:08:54.908669", + "name": "dropbox_secret_key", + "parent": null, + "parentfield": null, + "parenttype": null, + "title": "Dropbox Secret Key", + "type": "Password" + }, + { + "description": null, + "docstatus": 0, + "pagetype": "Site Config Key", + "internal": 0, + "key": "google_analytics_id", + "modified": "2020-09-23 13:20:06.853336", + "name": "google_analytics_id", + "parent": null, + "parentfield": null, + "parenttype": null, + "title": "Google Analytics ID", + "type": "String" + }, + { + "description": null, + "docstatus": 0, + "pagetype": "Site Config Key", + "internal": 0, + "key": "server_script_enabled", + "modified": "2020-09-23 13:19:31.392060", + "name": "server_script_enabled", + "parent": null, + "parentfield": null, + "parenttype": null, + 
"title": "Enable Server Script", + "type": "Boolean" + }, + { + "description": "For MixPanel analytics on desk", + "docstatus": 0, + "pagetype": "Site Config Key", + "internal": 0, + "key": "mixpanel_id", + "modified": "2020-09-23 13:20:06.665519", + "name": "mixpanel_id", + "parent": null, + "parentfield": null, + "parenttype": null, + "title": "Mixpanel ID", + "type": "String" + }, + { + "description": "Utilise jingrow.debug_log via jingrow.log to enable different levels of logging. At level 2, shows queries and results run at every action.", + "docstatus": 0, + "pagetype": "Site Config Key", + "internal": 0, + "key": "logging", + "modified": "2020-09-15 15:04:51.652890", + "name": "logging", + "parent": null, + "parentfield": null, + "parenttype": null, + "title": "Logging", + "type": "Number" + }, + { + "description": null, + "docstatus": 0, + "pagetype": "Site Config Key", + "internal": 0, + "key": "paypal_password", + "modified": "2020-09-15 14:08:54.811020", + "name": "paypal_password", + "parent": null, + "parentfield": null, + "parenttype": null, + "title": "PayPal Password", + "type": "Password" + }, + { + "description": null, + "docstatus": 0, + "pagetype": "Site Config Key", + "internal": 0, + "key": "disable_scheduler", + "modified": "2020-09-23 13:19:29.813690", + "name": "disable_scheduler", + "parent": null, + "parentfield": null, + "parenttype": null, + "title": "Disable Scheduler", + "type": "Boolean" + }, + { + "description": "Skips socket.IO setup on frontend", + "docstatus": 0, + "pagetype": "Site Config Key", + "internal": 0, + "key": "disable_async", + "modified": "2020-09-23 13:19:29.424739", + "name": "disable_async", + "parent": null, + "parentfield": null, + "parenttype": null, + "title": "Disable Async", + "type": "Boolean" + }, + { + "description": null, + "docstatus": 0, + "pagetype": "Site Config Key", + "internal": 0, + "key": "mail_server", + "modified": "2020-09-23 13:20:09.314591", + "name": "mail_server", + "parent": null, + "parentfield": null, + "parenttype": null, + "title": "Mail Server", + "type": "String" + }, + { + "description": null, + "docstatus": 0, + "pagetype": "Site Config Key", + "internal": 0, + "key": "pause_scheduler", + "modified": "2020-09-23 13:19:31.595626", + "name": "pause_scheduler", + "parent": null, + "parentfield": null, + "parenttype": null, + "title": "Pause Scheduler", + "type": "Boolean" + }, + { + "description": null, + "docstatus": 0, + "pagetype": "Site Config Key", + "internal": 0, + "key": "email_sender_name", + "modified": "2020-09-23 13:20:09.161227", + "name": "email_sender_name", + "parent": null, + "parentfield": null, + "parenttype": null, + "title": "Email Sender Name", + "type": "String" + }, + { + "description": null, + "docstatus": 0, + "pagetype": "Site Config Key", + "internal": 0, + "key": "sandbox_api_secret", + "modified": "2020-09-15 14:08:01.105018", + "name": "sandbox_api_secret", + "parent": null, + "parentfield": null, + "parenttype": null, + "title": "Sandbox API Secret", + "type": "Password" + }, + { + "description": null, + "docstatus": 0, + "pagetype": "Site Config Key", + "internal": 0, + "key": "error_report_email", + "modified": "2020-09-23 13:20:09.049932", + "name": "error_report_email", + "parent": null, + "parentfield": null, + "parenttype": null, + "title": "Error Report Email", + "type": "String" + }, + { + "description": null, + "docstatus": 0, + "pagetype": "Site Config Key", + "internal": 0, + "key": "mail_login", + "modified": "2020-09-23 13:20:08.959200", + "name": "mail_login", + 
"parent": null, + "parentfield": null, + "parenttype": null, + "title": "Mail Login", + "type": "String" + }, + { + "description": null, + "docstatus": 0, + "pagetype": "Site Config Key", + "internal": 0, + "key": "sandbox_api_key", + "modified": "2020-09-23 13:20:08.873969", + "name": "sandbox_api_key", + "parent": null, + "parentfield": null, + "parenttype": null, + "title": "Sandbox API Key", + "type": "String" + }, + { + "description": null, + "docstatus": 0, + "pagetype": "Site Config Key", + "internal": 0, + "key": "dropbox_broker_site", + "modified": "2020-09-23 13:20:06.896234", + "name": "dropbox_broker_site", + "parent": null, + "parentfield": null, + "parenttype": null, + "title": "Dropbox Broker Site", + "type": "String" + }, + { + "description": null, + "docstatus": 0, + "pagetype": "Site Config Key", + "internal": 0, + "key": "dropbox_access_key", + "modified": "2020-09-23 13:20:08.191030", + "name": "dropbox_access_key", + "parent": null, + "parentfield": null, + "parenttype": null, + "title": "Dropbox Access Key", + "type": "String" + }, + { + "description": null, + "docstatus": 0, + "pagetype": "Site Config Key", + "internal": 0, + "key": "mail_password", + "modified": "2020-09-15 16:24:58.138559", + "name": "mail_password", + "parent": null, + "parentfield": null, + "parenttype": null, + "title": "Mail Password", + "type": "Password" + }, + { + "description": null, + "docstatus": 0, + "pagetype": "Site Config Key", + "internal": 0, + "key": "sandbox_publishable_key", + "modified": "2020-09-23 13:20:08.046283", + "name": "sandbox_publishable_key", + "parent": null, + "parentfield": null, + "parenttype": null, + "title": "Sandbox Publishable Key", + "type": "String" + }, + { + "description": "Load Data from local infile for mysql", + "docstatus": 0, + "pagetype": "Site Config Key", + "internal": 0, + "key": "local_infile", + "modified": "2020-09-23 13:20:07.338841", + "name": "local_infile", + "parent": null, + "parentfield": null, + "parenttype": null, + "title": "Local Infile", + "type": "String" + }, + { + "description": null, + "docstatus": 0, + "pagetype": "Site Config Key", + "internal": 0, + "key": "paypal_username", + "modified": "2020-09-23 13:20:07.720966", + "name": "paypal_username", + "parent": null, + "parentfield": null, + "parenttype": null, + "title": "Paypal Username", + "type": "String" + }, + { + "description": null, + "docstatus": 0, + "pagetype": "Site Config Key", + "internal": 0, + "key": "disable_session_cache", + "modified": "2020-09-23 13:19:30.483940", + "name": "disable_session_cache", + "parent": null, + "parentfield": null, + "parenttype": null, + "title": "Disable Session Cache", + "type": "Boolean" + }, + { + "description": null, + "docstatus": 0, + "pagetype": "Site Config Key", + "internal": 0, + "key": "sandbox_api_password", + "modified": "2020-09-15 14:08:54.867414", + "name": "sandbox_api_password", + "parent": null, + "parentfield": null, + "parenttype": null, + "title": "Sandbox API Password", + "type": "Password" + }, + { + "description": null, + "docstatus": 0, + "pagetype": "Site Config Key", + "internal": 0, + "key": "always_use_account_email_id_as_sender", + "modified": "2020-09-23 13:19:31.704114", + "name": "always_use_account_email_id_as_sender", + "parent": null, + "parentfield": null, + "parenttype": null, + "title": "Use Account Email ID As Sender", + "type": "Boolean" + }, + { + "description": null, + "docstatus": 0, + "pagetype": "Site Config Key", + "internal": 0, + "key": "mail_port", + "modified": "2020-09-23 
13:20:07.292141", + "name": "mail_port", + "parent": null, + "parentfield": null, + "parenttype": null, + "title": "Mail Port", + "type": "String" + }, + { + "description": null, + "docstatus": 0, + "pagetype": "Site Config Key", + "internal": 0, + "key": "use_tls", + "modified": "2020-09-23 13:19:31.190810", + "name": "use_tls", + "parent": null, + "parentfield": null, + "parenttype": null, + "title": "Use TLS", + "type": "Boolean" + }, + { + "description": null, + "docstatus": 0, + "pagetype": "Site Config Key", + "internal": 0, + "key": "skip_setup_wizard", + "modified": "2020-09-23 13:19:30.992492", + "name": "skip_setup_wizard", + "parent": null, + "parentfield": null, + "parenttype": null, + "title": "Skip Setup Wizard", + "type": "Boolean" + }, + { + "description": "", + "docstatus": 0, + "pagetype": "Site Config Key", + "internal": 0, + "key": "ignore_csrf", + "modified": "2020-09-23 13:19:29.953144", + "name": "ignore_csrf", + "parent": null, + "parentfield": null, + "parenttype": null, + "title": "Ignore CSRF", + "type": "Boolean" + }, + { + "description": null, + "docstatus": 0, + "pagetype": "Site Config Key", + "internal": 0, + "key": "disable_global_search", + "modified": "2020-09-23 13:19:30.364289", + "name": "disable_global_search", + "parent": null, + "parentfield": null, + "parenttype": null, + "title": "Disable Global Search", + "type": "Boolean" + }, + { + "description": null, + "docstatus": 0, + "pagetype": "Site Config Key", + "internal": 0, + "key": "disable_error_snapshot", + "modified": "2020-09-23 13:19:30.291418", + "name": "disable_error_snapshot", + "parent": null, + "parentfield": null, + "parenttype": null, + "title": "Disable Error Snapshot", + "type": "Boolean" + }, + { + "description": null, + "docstatus": 0, + "pagetype": "Site Config Key", + "internal": 0, + "key": "sandbox_api_username", + "modified": "2020-09-23 13:20:06.789882", + "name": "sandbox_api_username", + "parent": null, + "parentfield": null, + "parenttype": null, + "title": "Sandbox API Username", + "type": "String" + }, + { + "description": null, + "docstatus": 0, + "pagetype": "Site Config Key", + "internal": 0, + "key": "deny_multiple_sessions", + "modified": "2020-09-23 13:19:30.124051", + "name": "deny_multiple_sessions", + "parent": null, + "parentfield": null, + "parenttype": null, + "title": "Deny Multiple Sessions", + "type": "Boolean" + }, + { + "description": null, + "docstatus": 0, + "pagetype": "Site Config Key", + "internal": 0, + "key": "use_ssl", + "modified": "2020-09-23 13:19:30.603957", + "name": "use_ssl", + "parent": null, + "parentfield": null, + "parenttype": null, + "title": "Use SSL", + "type": "Boolean" + }, + { + "description": null, + "docstatus": 0, + "pagetype": "Site Config Key", + "internal": 0, + "key": "sandbox_signature", + "modified": "2020-09-23 13:20:07.221238", + "name": "sandbox_signature", + "parent": null, + "parentfield": null, + "parenttype": null, + "title": "Sandbox Signature", + "type": "String" + }, + { + "description": "for RazorPay Settings", + "docstatus": 0, + "pagetype": "Site Config Key", + "internal": 0, + "key": "converted_rupee_to_paisa", + "modified": "2020-09-23 13:19:29.608942", + "name": "converted_rupee_to_paisa", + "parent": null, + "parentfield": null, + "parenttype": null, + "title": "Converted Rupee To Paisa", + "type": "Boolean" + }, + { + "description": null, + "docstatus": 0, + "pagetype": "Site Config Key", + "internal": 0, + "key": "auto_email_id", + "modified": "2020-09-23 13:20:08.539873", + "name": "auto_email_id", + 
"parent": null, + "parentfield": null, + "parenttype": null, + "title": "Auto Email ID", + "type": "String" + }, + { + "description": null, + "docstatus": 0, + "pagetype": "Site Config Key", + "internal": 1, + "key": "host_name", + "modified": "2020-09-23 13:20:07.065755", + "name": "host_name", + "parent": null, + "parentfield": null, + "parenttype": null, + "title": "Host Name", + "type": "String" + }, + { + "description": null, + "docstatus": 0, + "pagetype": "Site Config Key", + "internal": 0, + "key": "always_use_account_name_as_sender_name", + "modified": "2020-09-23 13:19:30.881690", + "name": "always_use_account_name_as_sender_name", + "parent": null, + "parentfield": null, + "parenttype": null, + "title": "Use Account Name as Sender Name", + "type": "Boolean" + }, + { + "description": null, + "docstatus": 0, + "pagetype": "Site Config Key", + "internal": 0, + "key": "paypal_signature", + "modified": "2020-09-23 13:20:06.930263", + "name": "paypal_signature", + "parent": null, + "parentfield": null, + "parenttype": null, + "title": "Paypal Signature", + "type": "String" + }, + { + "description": null, + "docstatus": 0, + "pagetype": "Site Config Key", + "internal": 0, + "key": "disable_website_cache", + "modified": "2020-09-23 13:19:30.743853", + "name": "disable_website_cache", + "parent": null, + "parentfield": null, + "parenttype": null, + "title": "Disable Website Cache", + "type": "Boolean" + }, + { + "description": null, + "docstatus": 0, + "pagetype": "Site Config Key", + "internal": 0, + "key": "pop_timeout", + "modified": "2020-09-15 13:39:59.489299", + "name": "pop_timeout", + "parent": null, + "parentfield": null, + "parenttype": null, + "title": "POP Timeout", + "type": "Number" + }, + { + "description": null, + "docstatus": 0, + "pagetype": "Site Config Key", + "internal": 0, + "key": "data_import_batch_size", + "modified": "2020-09-15 13:39:45.559768", + "name": "data_import_batch_size", + "parent": null, + "parentfield": null, + "parenttype": null, + "title": "Batch Size for Data Import", + "type": "Number" + }, + { + "description": "Key used to encrypt Passwords", + "docstatus": 0, + "pagetype": "Site Config Key", + "internal": 0, + "key": "encryption_key", + "modified": "2020-09-15 14:54:18.042444", + "name": "encryption_key", + "parent": null, + "parentfield": null, + "parenttype": null, + "title": "Encryption Key", + "type": "Password" + }, + { + "description": null, + "docstatus": 0, + "pagetype": "Site Config Key", + "internal": 0, + "key": "mute_emails", + "modified": "2020-09-23 13:19:29.237748", + "name": "mute_emails", + "parent": null, + "parentfield": null, + "parenttype": null, + "title": "Mute Emails", + "type": "Boolean" + }, + { + "description": null, + "docstatus": 0, + "pagetype": "Site Config Key", + "internal": 0, + "key": "max_file_size", + "modified": "2020-09-15 14:56:18.188966", + "name": "max_file_size", + "parent": null, + "parentfield": null, + "parenttype": null, + "title": "Max File Size", + "type": "Number" + }, + { + "description": "At FC, we use this to manage site usages.", + "docstatus": 0, + "pagetype": "Site Config Key", + "internal": 1, + "key": "rate_limit", + "modified": "2020-09-18 15:06:55.725912", + "name": "rate_limit", + "parent": null, + "parentfield": null, + "parenttype": null, + "title": "Rate Limit", + "type": "JSON" + }, + { + "description": "We use this to suspend or deactivate sites in FC.", + "docstatus": 0, + "pagetype": "Site Config Key", + "internal": 1, + "key": "maintenance_mode", + "modified": "2020-09-23 
13:19:29.122411", + "name": "maintenance_mode", + "parent": null, + "parentfield": null, + "parenttype": null, + "title": "Enable Maintenance Mode", + "type": "Boolean" + }, + { + "description": "Maintains a list of domains for site", + "docstatus": 0, + "pagetype": "Site Config Key", + "internal": 1, + "key": "domains", + "modified": "2020-09-30 11:04:28.883458", + "name": "domains", + "parent": null, + "parentfield": null, + "parenttype": null, + "title": "Domains", + "type": "JSON" + }, + { + "description": null, + "docstatus": 0, + "pagetype": "Site Config Key", + "internal": 1, + "key": "db_name", + "modified": "2023-04-10 15:09:26.815014", + "name": "db_name", + "parent": null, + "parentfield": null, + "parenttype": null, + "title": "Database Name", + "type": "String" + }, + { + "description": null, + "docstatus": 0, + "pagetype": "Site Config Key", + "internal": 1, + "key": "db_host", + "modified": "2023-04-10 15:09:26.815014", + "name": "db_host", + "parent": null, + "parentfield": null, + "parenttype": null, + "title": "Database Host", + "type": "String" + }, + { + "description": null, + "docstatus": 0, + "pagetype": "Site Config Key", + "internal": 1, + "key": "db_password", + "modified": "2023-04-10 15:09:26.815014", + "name": "db_password", + "parent": null, + "parentfield": null, + "parenttype": null, + "title": "Database Password", + "type": "String" + }, + { + "description": null, + "docstatus": 0, + "pagetype": "Site Config Key", + "internal": 1, + "key": "db_type", + "modified": "2023-04-10 15:09:26.815014", + "name": "db_type", + "parent": null, + "parentfield": null, + "parenttype": null, + "title": "Database Type", + "type": "String" + } +] \ No newline at end of file diff --git a/jcloud/fixtures/site_config_key_blacklist.json b/jcloud/fixtures/site_config_key_blacklist.json new file mode 100644 index 0000000..9cf9a7d --- /dev/null +++ b/jcloud/fixtures/site_config_key_blacklist.json @@ -0,0 +1,210 @@ +[ + { + "docstatus": 0, + "pagetype": "Site Config Key Blacklist", + "key": "ssl_certificate", + "modified": "2020-10-01 21:10:07.925888", + "name": "ssl_certificate", + "reason": null + }, + { + "docstatus": 0, + "pagetype": "Site Config Key Blacklist", + "key": "ssl_certificate_key", + "modified": "2020-10-01 21:10:31.635305", + "name": "ssl_certificate_key", + "reason": null + }, + { + "docstatus": 0, + "pagetype": "Site Config Key Blacklist", + "key": "jingrowcloud_url", + "modified": "2020-09-15 13:02:11.917220", + "name": "jingrowcloud_url", + "reason": null + }, + { + "docstatus": 0, + "pagetype": "Site Config Key Blacklist", + "key": "db_ssl_key", + "modified": "2020-09-15 13:02:12.019502", + "name": "db_ssl_key", + "reason": null + }, + { + "docstatus": 0, + "pagetype": "Site Config Key Blacklist", + "key": "db_password", + "modified": "2020-09-15 13:02:12.032911", + "name": "db_password", + "reason": null + }, + { + "docstatus": 0, + "pagetype": "Site Config Key Blacklist", + "key": "monitor", + "modified": "2020-09-15 13:02:12.044695", + "name": "monitor", + "reason": null + }, + { + "docstatus": 0, + "pagetype": "Site Config Key Blacklist", + "key": "root_login", + "modified": "2020-09-15 14:47:30.453481", + "name": "root_login", + "reason": "only used at postgres setup_db #L41...else used as flags" + }, + { + "docstatus": 0, + "pagetype": "Site Config Key Blacklist", + "key": "db_port", + "modified": "2020-09-15 13:02:12.066262", + "name": "db_port", + "reason": null + }, + { + "docstatus": 0, + "pagetype": "Site Config Key Blacklist", + "key": 
"webserver_port", + "modified": "2020-09-15 13:02:12.078783", + "name": "webserver_port", + "reason": null + }, + { + "docstatus": 0, + "pagetype": "Site Config Key Blacklist", + "key": "restart_systemd_on_update", + "modified": "2020-09-15 13:02:12.088768", + "name": "restart_systemd_on_update", + "reason": null + }, + { + "docstatus": 0, + "pagetype": "Site Config Key Blacklist", + "key": "db_name", + "modified": "2020-09-15 13:02:12.098600", + "name": "db_name", + "reason": null + }, + { + "docstatus": 0, + "pagetype": "Site Config Key Blacklist", + "key": "db_ssl_cert", + "modified": "2020-09-15 13:02:12.111550", + "name": "db_ssl_cert", + "reason": null + }, + { + "docstatus": 0, + "pagetype": "Site Config Key Blacklist", + "key": "db_ssl_ca", + "modified": "2020-09-15 13:02:12.121237", + "name": "db_ssl_ca", + "reason": null + }, + { + "docstatus": 0, + "pagetype": "Site Config Key Blacklist", + "key": "keep_backups_for_hours", + "modified": "2020-09-15 14:48:35.698767", + "name": "keep_backups_for_hours", + "reason": "retain backups while deleting old ones : BackupGenerator" + }, + { + "docstatus": 0, + "pagetype": "Site Config Key Blacklist", + "key": "socketio_port", + "modified": "2020-09-15 13:02:12.142390", + "name": "socketio_port", + "reason": null + }, + { + "docstatus": 0, + "pagetype": "Site Config Key Blacklist", + "key": "db_type", + "modified": "2020-09-15 13:02:12.164240", + "name": "db_type", + "reason": null + }, + { + "docstatus": 0, + "pagetype": "Site Config Key Blacklist", + "key": "db_host", + "modified": "2020-09-15 13:02:12.173858", + "name": "db_host", + "reason": null + }, + { + "docstatus": 0, + "pagetype": "Site Config Key Blacklist", + "key": "http_port", + "modified": "2020-09-15 13:02:12.183172", + "name": "http_port", + "reason": null + }, + { + "docstatus": 0, + "pagetype": "Site Config Key Blacklist", + "key": "developer_mode", + "modified": "2020-09-15 13:02:12.202427", + "name": "developer_mode", + "reason": null + }, + { + "docstatus": 0, + "pagetype": "Site Config Key Blacklist", + "key": "root_password", + "modified": "2020-09-15 14:47:48.878756", + "name": "root_password", + "reason": "Used under mysql and postgres setup_db methods" + }, + { + "docstatus": 0, + "pagetype": "Site Config Key Blacklist", + "key": "allow_tests", + "modified": "2020-09-15 13:02:12.228708", + "name": "allow_tests", + "reason": null + }, + { + "docstatus": 0, + "pagetype": "Site Config Key Blacklist", + "key": "install_apps", + "modified": "2020-09-15 14:48:13.132821", + "name": "install_apps", + "reason": "apps to install at site creation" + }, + { + "docstatus": 0, + "pagetype": "Site Config Key Blacklist", + "key": "admin_password", + "modified": "2020-09-15 13:02:12.250826", + "name": "admin_password", + "reason": null + }, + { + "docstatus": 0, + "pagetype": "Site Config Key Blacklist", + "key": "restart_supervisor_on_update", + "modified": "2020-09-15 13:02:12.259476", + "name": "restart_supervisor_on_update", + "reason": null + }, + { + "docstatus": 0, + "pagetype": "Site Config Key Blacklist", + "key": "rds_db", + "modified": "2020-09-15 14:47:00.852841", + "name": "rds_db", + "reason": "for only granting certain perms on databases. named because of AWS RDS?" 
+ }, + { + "docstatus": 0, + "pagetype": "Site Config Key Blacklist", + "key": "rate_limit", + "modified": "2020-09-18 11:54:37.058867", + "name": "rate_limit", + "reason": null + } +] \ No newline at end of file diff --git a/jcloud/hooks.py b/jcloud/hooks.py new file mode 100644 index 0000000..bb5cccd --- /dev/null +++ b/jcloud/hooks.py @@ -0,0 +1,421 @@ +from jcloud.api.account import get_jingrow_io_auth_url + +from . import __version__ as app_version + +app_name = "jcloud" +app_title = "Jcloud" +app_publisher = "Jingrow" +app_description = "Managed Jingrow Hosting" +app_icon = "octicon octicon-rocket" +app_color = "grey" +app_email = "aditya@framework.jingrow.com" +app_license = "GNU Affero General Public License v3.0" +version = app_version + +# Includes in +# ------------------ + +# include js, css files in header of desk.html +# app_include_css = "/assets/jcloud/css/jcloud.css" +# app_include_js = "/assets/jcloud/js/jcloud.js" +app_include_js = [ + "jcloud.bundle.js", +] + +# include js, css files in header of web template +# web_include_css = "/assets/jcloud/css/jcloud.css" +# web_include_js = "/assets/jcloud/js/jcloud.js" + +# include js in page +# page_js = {"page" : "public/js/file.js"} + +# include js in pagetype views +# pagetype_js = {"pagetype" : "public/js/pagetype.js"} +# pagetype_list_js = {"pagetype" : "public/js/pagetype_list.js"} +# pagetype_tree_js = {"pagetype" : "public/js/pagetype_tree.js"} +# pagetype_calendar_js = {"pagetype" : "public/js/pagetype_calendar.js"} + +# Home Pages +# ---------- + +# application home page (will override Website Settings) +# home_page = "login" + +# website user home page (by Role) +# role_home_page = { +# "Role": "home_page" +# } + +# Website user home page (by function) +# get_website_user_home_page = "jcloud.utils.get_home_page" + +# Generators +# ---------- + +# automatically create page for each record of this pagetype +# website_generators = ["Web Page"] + +look_for_sidebar_json = True + +base_template_map = { + r"docs.*": "templates/pg.html", + r"internal.*": "templates/pg.html", +} + +update_website_context = ["jcloud.overrides.update_website_context"] + +website_route_rules = [ + {"from_route": "/dashboard/", "to_route": "dashboard"}, +] + +website_redirects = [ + {"source": "/dashboard/f-login", "target": get_jingrow_io_auth_url() or "/"}, + { + "source": "/suspended-site", + "target": "/api/method/jcloud.api.handle_suspended_site_redirection", + }, + {"source": "/f-login", "target": "/dashboard/f-login"}, + {"source": "/signup", "target": "/jerp/signup"}, +] + +email_css = ["/assets/jcloud/css/email.css"] + +jinja = { + "filters": ["jcloud.jcloud.pagetype.marketplace_app.utils.number_k_format"], + "methods": ["jcloud.utils.get_country_info"], +} + +# Installation +# ------------ + +# before_install = "jcloud.install.before_install" +after_install = "jcloud.install.after_install" +after_migrate = ["jcloud.api.account.clear_country_list_cache", "jcloud.sanity.checks"] + +# Desk Notifications +# ------------------ +# See jingrow.core.notifications.get_notification_config + +notification_config = "jcloud.notifications.get_notification_config" + +# Permissions +# ----------- +# Permissions evaluated in scripted ways + +permission_query_conditions = { + "Site": "jcloud.jcloud.pagetype.site.site.get_permission_query_conditions", + "Site Domain": ("jcloud.jcloud.pagetype.site_domain.site_domain.get_permission_query_conditions"), + "TLS Certificate": 
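# Each mapping entry pairs a PageType with the dotted path of a function that
# restricts list queries for non-privileged users. By the framework's usual
# hooks contract such a function returns an SQL fragment that gets ANDed onto
# the list query; a hypothetical sketch (the get_current_team helper and the
# exact signature are assumptions, not taken from this commit):
#
#     def get_permission_query_conditions(user=None):
#         user = user or jingrow.session.user
#         team = get_current_team(user)  # hypothetical helper
#         return f"(`tabSite`.`team` = {jingrow.db.escape(team)})"
#
# The real implementations live in the referenced jcloud.jcloud.pagetype.*
# modules.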
"jcloud.jcloud.pagetype.tls_certificate.tls_certificate.get_permission_query_conditions", + "Team": "jcloud.jcloud.pagetype.team.team.get_permission_query_conditions", + "Subscription": ("jcloud.jcloud.pagetype.subscription.subscription.get_permission_query_conditions"), + "Stripe Payment Method": "jcloud.jcloud.pagetype.stripe_payment_method.stripe_payment_method.get_permission_query_conditions", + "Balance Transaction": "jcloud.jcloud.pagetype.balance_transaction.balance_transaction.get_permission_query_conditions", + "Invoice": "jcloud.jcloud.pagetype.invoice.invoice.get_permission_query_conditions", + "App Source": ("jcloud.jcloud.pagetype.app_source.app_source.get_permission_query_conditions"), + "App Release": ("jcloud.jcloud.pagetype.app_release.app_release.get_permission_query_conditions"), + "Release Group": "jcloud.jcloud.pagetype.release_group.release_group.get_permission_query_conditions", + "Deploy Candidate": "jcloud.jcloud.pagetype.deploy_candidate.deploy_candidate.get_permission_query_conditions", + "Deploy Candidate Difference": "jcloud.jcloud.pagetype.deploy_candidate_difference.deploy_candidate_difference.get_permission_query_conditions", + "Deploy": "jcloud.jcloud.pagetype.deploy.deploy.get_permission_query_conditions", + "Bench": "jcloud.jcloud.pagetype.bench.bench.get_permission_query_conditions", + "Server": "jcloud.jcloud.pagetype.server.server.get_permission_query_conditions", + "Database Server": "jcloud.jcloud.pagetype.database_server.database_server.get_permission_query_conditions", + "Virtual Machine": "jcloud.jcloud.pagetype.virtual_machine.virtual_machine.get_permission_query_conditions", + "Jcloud Webhook": "jcloud.jcloud.pagetype.jcloud_webhook.jcloud_webhook.get_permission_query_conditions", + "Jcloud Webhook Log": "jcloud.jcloud.pagetype.jcloud_webhook_log.jcloud_webhook_log.get_permission_query_conditions", + "SQL Playground Log": "jcloud.jcloud.pagetype.sql_playground_log.sql_playground_log.get_permission_query_conditions", + "Site Database User": "jcloud.jcloud.pagetype.site_database_user.site_database_user.get_permission_query_conditions", +} +has_permission = { + "Site": "jcloud.overrides.has_permission", + "Site Domain": "jcloud.overrides.has_permission", + "TLS Certificate": "jcloud.overrides.has_permission", + "Team": "jcloud.jcloud.pagetype.team.team.has_permission", + "Subscription": "jcloud.overrides.has_permission", + "Stripe Payment Method": "jcloud.overrides.has_permission", + "Balance Transaction": "jcloud.overrides.has_permission", + "Invoice": "jcloud.jcloud.pagetype.invoice.invoice.has_permission", + "App Source": "jcloud.overrides.has_permission", + "App Release": "jcloud.jcloud.pagetype.app_release.app_release.has_permission", + "Release Group": "jcloud.overrides.has_permission", + "Deploy Candidate": "jcloud.overrides.has_permission", + "Deploy Candidate Difference": "jcloud.overrides.has_permission", + "Deploy": "jcloud.overrides.has_permission", + "Bench": "jcloud.overrides.has_permission", + "Server": "jcloud.overrides.has_permission", + "Database Server": "jcloud.overrides.has_permission", + "Jcloud Webhook": "jcloud.overrides.has_permission", + "Jcloud Webhook Log": "jcloud.overrides.has_permission", + "Jcloud Webhook Attempt": "jcloud.jcloud.pagetype.jcloud_webhook_attempt.jcloud_webhook_attempt.has_permission", + "SQL Playground Log": "jcloud.overrides.has_permission", + "Site Database User": "jcloud.overrides.has_permission", +} + +# Document Events +# --------------- +# Hook on document methods and events + +pg_events = { + 
"Stripe Webhook Log": { + "after_insert": [ + "jcloud.jcloud.pagetype.invoice.stripe_webhook_handler.handle_stripe_webhook_events", + "jcloud.jcloud.pagetype.team.team.process_stripe_webhook", + ], + }, + "Address": {"validate": "jcloud.api.billing.validate_gst"}, + "Site": {"before_insert": "jcloud.jcloud.pagetype.team.team.validate_site_creation"}, + "Marketplace App Subscription": { + "on_update": "jcloud.jcloud.pagetype.storage_integration_subscription.storage_integration_subscription.create_after_insert", + }, +} + +# Scheduled Tasks +# --------------- + +scheduler_events = { + "weekly_long": ["jcloud.jcloud.pagetype.marketplace_app.events.auto_review_for_missing_steps"], + "daily": [ + "jcloud.experimental.pagetype.referral_bonus.referral_bonus.credit_referral_bonuses", + "jcloud.jcloud.pagetype.log_counter.log_counter.record_counts", + "jcloud.jcloud.pagetype.incident.incident.notify_ignored_servers", + "jcloud.jcloud.pagetype.site.site.send_renew_notification", + ], + "daily_long": [ + "jcloud.jcloud.audit.check_bench_fields", + "jcloud.jcloud.audit.check_offsite_backups", + "jcloud.jcloud.audit.check_app_server_replica_benches", + "jcloud.jcloud.pagetype.invoice.invoice.finalize_unpaid_prepaid_credit_invoices", + "jcloud.jcloud.pagetype.bench.bench.sync_analytics", + "jcloud.saas.pagetype.saas_app_subscription.saas_app_subscription.suspend_prepaid_subscriptions", + "jcloud.jcloud.pagetype.payout_order.payout_order.create_marketplace_payout_orders", + "jcloud.jcloud.pagetype.root_domain.root_domain.cleanup_cname_records", + "jcloud.jcloud.pagetype.remote_file.remote_file.poll_file_statuses", + "jcloud.jcloud.pagetype.site_domain.site_domain.update_dns_type", + "jcloud.jcloud.pagetype.jcloud_webhook_log.jcloud_webhook_log.clean_logs_older_than_24_hours", + "jcloud.jcloud.pagetype.virtual_disk_snapshot.virtual_disk_snapshot.sync_all_snapshots_from_aws", + ], + "hourly": [ + "jcloud.jcloud.pagetype.site.backups.cleanup_local", + "jcloud.jcloud.pagetype.agent_job.agent_job.update_job_step_status", + "jcloud.jcloud.pagetype.bench.bench.archive_obsolete_benches", + "jcloud.jcloud.pagetype.site.backups.schedule_for_sites_with_backup_time", + "jcloud.jcloud.pagetype.tls_certificate.tls_certificate.renew_tls_certificates", + "jcloud.saas.pagetype.product_trial_request.product_trial_request.expire_long_pending_trial_requests", + ], + "hourly_long": [ + "jcloud.jcloud.pagetype.release_group.release_group.prune_servers_without_sites", + "jcloud.jcloud.pagetype.server.server.scale_workers", + "jcloud.jcloud.pagetype.usage_record.usage_record.link_unlinked_usage_records", + "jcloud.jcloud.pagetype.bench.bench.sync_benches", + "jcloud.jcloud.pagetype.invoice.invoice.finalize_draft_invoices", + "jcloud.jcloud.pagetype.app.app.poll_new_releases", + "jcloud.jcloud.pagetype.agent_job.agent_job.fail_old_jobs", + "jcloud.jcloud.pagetype.site_update.site_update.mark_stuck_updates_as_fatal", + "jcloud.jcloud.pagetype.deploy_candidate.deploy_candidate.cleanup_build_directories", + "jcloud.jcloud.pagetype.deploy_candidate.deploy_candidate.delete_draft_candidates", + "jcloud.jcloud.pagetype.deploy_candidate.deploy_candidate.check_builds_status", + "jcloud.jcloud.pagetype.virtual_machine.virtual_machine.snapshot_virtual_machines", + "jcloud.jcloud.pagetype.virtual_disk_snapshot.virtual_disk_snapshot.delete_old_snapshots", + "jcloud.jcloud.pagetype.app_release.app_release.cleanup_unused_releases", + "jcloud.jcloud.pagetype.jcloud_webhook.jcloud_webhook.auto_disable_high_delivery_failure_webhooks", + 
"jcloud.saas.pagetype.product_trial.product_trial.sync_product_site_users", + ], + "all": [ + "jcloud.auth.flush", + "jcloud.jcloud.pagetype.site.sync.sync_setup_wizard_status", + "jcloud.jcloud.pagetype.site.archive.archive_suspended_trial_sites", + "jcloud.jcloud.pagetype.agent_job.agent_job.flush", + ], + "cron": { + "1-59/2 * * * *": [ + "jcloud.jcloud.pagetype.incident.incident.validate_incidents", + ], + "*/2 * * * *": [ + "jcloud.jcloud.pagetype.incident.incident.resolve_incidents", + ], + "0 4 * * *": [ + "jcloud.jcloud.pagetype.site.backups.cleanup_offsite", + "jcloud.jcloud.cleanup.unlink_remote_files_from_site", + ], + "10 0 * * *": [ + "jcloud.jcloud.audit.check_backup_records", + ], + "0 3 * * *": [ + "jcloud.jcloud.pagetype.drip_email.drip_email.send_drip_emails", + ], + "* * * * * 0/5": [ + "jcloud.jcloud.pagetype.agent_job.agent_job.poll_pending_jobs", + "jcloud.jcloud.pagetype.jcloud_webhook_log.jcloud_webhook_log.process", + "jcloud.jcloud.pagetype.telegram_message.telegram_message.send_telegram_message", + ], + "0 */6 * * *": [ + "jcloud.jcloud.pagetype.server.server.cleanup_unused_files", + "jcloud.jcloud.pagetype.razorpay_payment_record.razorpay_payment_record.fetch_pending_payment_orders", + ], + "30 * * * *": ["jcloud.jcloud.pagetype.agent_job.agent_job.suspend_sites"], + "*/15 * * * *": [ + "jcloud.jcloud.pagetype.site_update.site_update.schedule_updates", + "jcloud.jcloud.pagetype.drip_email.drip_email.send_welcome_email", + "jcloud.jcloud.pagetype.site.backups.schedule", + "jcloud.jcloud.pagetype.site_update.site_update.run_scheduled_updates", + "jcloud.jcloud.pagetype.site_migration.site_migration.run_scheduled_migrations", + "jcloud.jcloud.pagetype.version_upgrade.version_upgrade.run_scheduled_upgrades", + "jcloud.jcloud.pagetype.subscription.subscription.create_usage_records", + "jcloud.jcloud.pagetype.virtual_machine.virtual_machine.sync_virtual_machines", + "jcloud.jcloud.pagetype.mariadb_stalk.mariadb_stalk.fetch_stalks", + ], + "*/5 * * * *": [ + "jcloud.jcloud.pagetype.version_upgrade.version_upgrade.update_from_site_update", + "jcloud.jcloud.pagetype.site_replication.site_replication.update_from_site", + "jcloud.jcloud.pagetype.virtual_disk_snapshot.virtual_disk_snapshot.sync_snapshots", + "jcloud.jcloud.pagetype.site.site.sync_sites_setup_wizard_complete_status", + ], + "* * * * *": [ + "jcloud.jcloud.pagetype.deploy_candidate.deploy_candidate.run_scheduled_builds", + "jcloud.jcloud.pagetype.agent_request_failure.agent_request_failure.remove_old_failures", + "jcloud.saas.pagetype.site_access_token.site_access_token.cleanup_expired_access_tokens", + ], + "*/10 * * * *": [ + "jcloud.saas.pagetype.product_trial.product_trial.replenish_standby_sites", + "jcloud.jcloud.pagetype.site.saas_pool.create", + ], + "*/30 * * * *": [ + "jcloud.jcloud.pagetype.site_update.scheduled_auto_updates.trigger", + "jcloud.jcloud.pagetype.team.suspend_sites.execute", + ], + "15,45 * * * *": [ + "jcloud.jcloud.pagetype.site.site_usages.update_cpu_usages", + "jcloud.jcloud.pagetype.site.site_usages.update_disk_usages", + ], + "15 2,4 * * *": [ + "jcloud.jcloud.pagetype.team_deletion_request.team_deletion_request.process_team_deletion_requests", + ], + "0 0 1 */3 *": ["jcloud.jcloud.pagetype.backup_restoration_test.backup_test.run_backup_restore_test"], + "0 8 * * *": [ + "jcloud.jcloud.pagetype.aws_savings_plan_recommendation.aws_savings_plan_recommendation.create", + "jcloud.jcloud.cleanup.reset_large_output_fields_from_ansible_tasks", + ], + "0 21 * * *": [ + 
"jcloud.jcloud.audit.billing_audit", + "jcloud.jcloud.audit.partner_billing_audit", + ], + "0 6 * * *": [ + "jcloud.jcloud.audit.suspend_sites_with_disabled_team", + "jcloud.jcloud.pagetype.tls_certificate.tls_certificate.retrigger_failed_wildcard_tls_callbacks", + "jcloud.jcloud.pagetype.aws_savings_plan_recommendation.aws_savings_plan_recommendation.refresh", + "jcloud.infrastructure.pagetype.ssh_access_audit.ssh_access_audit.run", + ], + }, +} + +deploy_hours = [1, 2, 3, 4, 5, 21, 22, 23] # Purposefully avoiding 0 + +fixtures = [ + "Agent Job Type", + "Jcloud Job Type", + "Jingrow Version", + "MariaDB Variable", + "Cloud Region", + {"dt": "Role", "filters": [["role_name", "like", "Jcloud%"]]}, + "Site Config Key Blacklist", + "Jcloud Method Permission", + "Bench Dependency", + "Server Storage Plan", + "Jcloud Webhook Event", +] +# Testing +# ------- + +before_tests = "jcloud.tests.before_test.execute" + +# Overriding Methods +# ------------------------------ +# +override_whitelisted_methods = {"upload_file": "jcloud.overrides.upload_file"} +# +# each overriding function accepts a `data` argument; +# generated from the base implementation of the pagetype dashboard, +# along with any modifications made in other Jingrow apps +# override_pagetype_dashboards = { +# "Task": "jcloud.task.get_dashboard_data" +# } + +override_pagetype_class = {"User": "jcloud.overrides.CustomUser"} + +on_session_creation = "jcloud.overrides.on_session_creation" +# on_logout = "jcloud.overrides.on_logout" + +before_request = "jcloud.overrides.before_request" +before_job = "jcloud.overrides.before_job" +after_job = "jcloud.overrides.after_job" + +# Data Deletion Privacy Docs + +user_data_fields = [ + {"pagetype": "Team", "strict": True}, +] + +auth_hooks = ["jcloud.auth.hook"] + +page_renderer = ["jcloud.metrics.MetricsRenderer"] + +export_python_type_annotations = True + + +# These are used for some business logic, they should be manually evicted. +__persistent_cache_keys = [ + "agent-jobs", + "monitor-transactions", + "google_oauth_flow*", + "fc_oauth_state*", + "one_time_login_key*", + "jcloud-auth-logs", + "rl:*", +] + +# `jingrow.rename_pg` erases all caches, this hook preserves some of them. +# Note: +# - These are only "most used" cache keys. This lessens the impact of renames but doesn't eliminate them. +# - Adding more keys here will slow down `jingrow.clear_cache` but it's "rare" enough. +# - This also means that other "valid" jingrow.clear_cache() usage won't clear these keys! +# - Use jingrow.cache.flushall() instead. +persistent_cache_keys = [ + *__persistent_cache_keys, + "agent_job_step_output", + "all_apps", + "app_hooks", + "assets_json", + "assignment_rule_map", + "bootinfo", + "builder.builder*", # path resolution, it has its own cache eviction. 
+ "db_tables", + "defaults", + "pagetype_form_meta", + "pagetype_meta", + "doctypes_with_web_view", + "document_cache::*", + "document_naming_rule_map", + "domain_restricted_doctypes", + "domain_restricted_pages", + "energy_point_rule_map", + "jingrow.utils.scheduler.schedule_jobs_based_on_activity*", # dormant checks + "jingrow.website.page_renderers*", # FW's routing + "home_page", + "information_schema:counts", + "installed_app_modules", + "ip_country_map", + "is_table", + "languages", + "last_db_session_update", + "marketplace_apps", + "merged_translations", + "metadata_version", + "server_script_map", # Routing and actual server scripts + "session", + "table_columns", + "website_page", + "website_route_rules", +] + +before_migrate = ["jcloud.overrides.before_after_migrate"] +after_migrate = ["jcloud.overrides.before_after_migrate"] diff --git a/jcloud/infrastructure/__init__.py b/jcloud/infrastructure/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/infrastructure/pagetype/__init__.py b/jcloud/infrastructure/pagetype/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/infrastructure/pagetype/ssh_access_audit/__init__.py b/jcloud/infrastructure/pagetype/ssh_access_audit/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/infrastructure/pagetype/ssh_access_audit/ssh_access_audit.js b/jcloud/infrastructure/pagetype/ssh_access_audit/ssh_access_audit.js new file mode 100644 index 0000000..4367d99 --- /dev/null +++ b/jcloud/infrastructure/pagetype/ssh_access_audit/ssh_access_audit.js @@ -0,0 +1,23 @@ +// Copyright (c) 2025, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on("SSH Access Audit", { + refresh(frm) { + [ + [__('Run'), 'run', frm.pg.status === 'Pending'], + ].forEach(([label, method, condition]) => { + if (condition) { + frm.add_custom_button( + label, + () => { + jingrow.confirm( + `Are you sure you want to ${label.toLowerCase()}?`, + () => frm.call(method).then(() => frm.refresh()), + ); + }, + __('Actions'), + ); + } + }); + }, +}); diff --git a/jcloud/infrastructure/pagetype/ssh_access_audit/ssh_access_audit.json b/jcloud/infrastructure/pagetype/ssh_access_audit/ssh_access_audit.json new file mode 100644 index 0000000..f8f2b89 --- /dev/null +++ b/jcloud/infrastructure/pagetype/ssh_access_audit/ssh_access_audit.json @@ -0,0 +1,121 @@ +{ + "actions": [], + "autoname": "autoincrement", + "creation": "2025-01-29 15:13:17.999712", + "pagetype": "PageType", + "engine": "InnoDB", + "field_order": [ + "status", + "total_hosts", + "reachable_hosts", + "column_break_knsp", + "total_violations", + "user_violations", + "inventory", + "section_break_sqln", + "violations", + "suspicious_users", + "hosts" + ], + "fields": [ + { + "fieldname": "status", + "fieldtype": "Select", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Status", + "options": "Pending\nRunning\nSuccess\nFailure", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "inventory", + "fieldtype": "Code", + "hidden": 1, + "label": "Inventory", + "read_only": 1 + }, + { + "fieldname": "violations", + "fieldtype": "Table", + "label": "Violations", + "options": "SSH Access Audit Violation", + "read_only": 1 + }, + { + "fieldname": "hosts", + "fieldtype": "Table", + "label": "Hosts", + "options": "SSH Access Audit Host", + "read_only": 1 + }, + { + "fieldname": "column_break_knsp", + "fieldtype": "Column Break" + }, + { + "fieldname": "total_hosts", + "fieldtype": "Int", + "in_list_view": 1, + "label": "Total 
Hosts", + "read_only": 1 + }, + { + "fieldname": "total_violations", + "fieldtype": "Int", + "in_list_view": 1, + "label": "Total Violations", + "read_only": 1 + }, + { + "fieldname": "user_violations", + "fieldtype": "Int", + "label": "User Violations", + "read_only": 1 + }, + { + "fieldname": "section_break_sqln", + "fieldtype": "Section Break" + }, + { + "depends_on": "eval:pg.suspicious_users !== \"[]\"", + "fieldname": "suspicious_users", + "fieldtype": "Code", + "label": "Suspicious Users", + "options": "JSON", + "read_only": 1 + }, + { + "fieldname": "reachable_hosts", + "fieldtype": "Int", + "in_list_view": 1, + "label": "Reachable Hosts", + "read_only": 1 + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2025-01-30 18:16:46.869475", + "modified_by": "Administrator", + "module": "Infrastructure", + "name": "SSH Access Audit", + "naming_rule": "Autoincrement", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "creation", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/infrastructure/pagetype/ssh_access_audit/ssh_access_audit.py b/jcloud/infrastructure/pagetype/ssh_access_audit/ssh_access_audit.py new file mode 100644 index 0000000..2d16e00 --- /dev/null +++ b/jcloud/infrastructure/pagetype/ssh_access_audit/ssh_access_audit.py @@ -0,0 +1,351 @@ +# Copyright (c) 2025, JINGROW +# For license information, please see license.txt + +from __future__ import annotations + +import json +import shutil +from functools import cached_property + +import jingrow +from ansible import constants, context +from ansible.executor.task_queue_manager import TaskQueueManager +from ansible.inventory.manager import InventoryManager +from ansible.module_utils.common.collections import ImmutableDict +from ansible.parsing.dataloader import DataLoader +from ansible.playbook.play import Play +from ansible.plugins.callback import CallbackBase +from ansible.vars.manager import VariableManager +from jingrow.model.document import Document + +from jcloud.utils import reconnect_on_failure + +SERVER_TYPES = [ + "Proxy Server", + "Server", + "Database Server", + "Analytics Server", + "Log Server", + "Monitor Server", + "Registry Server", + "Trace Server", +] + + +class SSHAccessAudit(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + from jcloud.infrastructure.pagetype.ssh_access_audit_host.ssh_access_audit_host import ( + SSHAccessAuditHost, + ) + from jcloud.infrastructure.pagetype.ssh_access_audit_violation.ssh_access_audit_violation import ( + SSHAccessAuditViolation, + ) + + hosts: DF.Table[SSHAccessAuditHost] + inventory: DF.Code | None + name: DF.Int | None + reachable_hosts: DF.Int + status: DF.Literal["Pending", "Running", "Success", "Failure"] + suspicious_users: DF.Code | None + total_hosts: DF.Int + total_violations: DF.Int + user_violations: DF.Int + violations: DF.Table[SSHAccessAuditViolation] + # end: auto-generated types + + def before_insert(self): + self.set_inventory() + + @jingrow.whitelist() + def run(self): + jingrow.only_for("System Manager") + self.status = "Running" + self.save() + jingrow.enqueue_pg( + self.pagetype, self.name, "_run", queue="long", timeout=3600, enqueue_after_commit=True + ) + + def _run(self): + self.fetch_keys_from_servers() + self.check_key_violations() + self.check_user_violations() + self.set_statistics() + self.set_status() + self.save() + + def fetch_keys_from_servers(self): + try: + ad_hoc = AnsibleAdHoc(sources=self.inventory) + hosts = ad_hoc.run() + sorted_hosts = sorted(hosts, key=lambda host: self.inventory.index(host["host"])) + for host in sorted_hosts: + self.append("hosts", host) + except Exception: + import traceback + + traceback.print_exc() + + def set_inventory(self): + all_servers = [] + domain = jingrow.db.get_value("Jcloud Settings", None, "domain") + for server_type in SERVER_TYPES: + # Skip self-hosted servers + filters = {"status": "Active", "domain": domain} + meta = jingrow.get_meta(server_type) + if meta.has_field("cluster"): + filters["cluster"] = ("!=", "Hybrid") + + if meta.has_field("is_self_hosted"): + filters["is_self_hosted"] = False + + servers = jingrow.get_all(server_type, filters=filters, pluck="name", order_by="creation asc") + all_servers.extend(servers) + + all_servers.extend(self.get_self_inventory()) + self.inventory = ",".join(all_servers) + + def get_self_inventory(self): + # Jcloud should audit itself + servers = [jingrow.local.site, f"db.{jingrow.local.site}"] + if jingrow.conf.replica_host: + servers.append(f"db2.{jingrow.local.site}") + return servers + + def get_acceptable_key_fields(self): + fields = [] + for server_type in SERVER_TYPES: + meta = jingrow.get_meta(server_type) + for key_field in ["root_public_key", "jingrow_public_key"]: + if meta.has_field(key_field): + fields.append([server_type, key_field]) + + fields.append(["SSH Key", "public_key"]) + return fields + + def get_known_key_fields(self): + fields = self.get_acceptable_key_fields() + fields.append(["User SSH Key", "ssh_public_key"]) + return fields + + @cached_property + def acceptable_keys(self): + keys = {} + domain = jingrow.db.get_value("Jcloud Settings", None, "domain") + fields = self.get_acceptable_key_fields() + for pagetype, field in fields: + filters = {} + if pagetype.endswith("Server"): # Skip self-hosted servers + filters = {"status": "Active", "domain": domain} + + meta = jingrow.get_meta(pagetype) + if meta.has_field("cluster"): + filters["cluster"] = ("!=", "Hybrid") + + if meta.has_field("is_self_hosted"): + filters["is_self_hosted"] = False + + documents = jingrow.get_all(pagetype, filters=filters, fields=["name", field]) + for document in documents: + key_string = document.get(field) + if not key_string: + continue + key = 
_extract_key_from_key_string(key_string) + keys[key] = { + "key_pagetype": pagetype, + "key_document": document.name, + "key_field": field, + } + return keys + + @cached_property + def known_keys(self): + keys = {} + fields = self.get_known_key_fields() + for pagetype, field in fields: + documents = jingrow.get_all(pagetype, fields=["name", field]) + for document in documents: + key_string = document.get(field) + if not key_string: + continue + key = _extract_key_from_key_string(key_string) + keys[key] = { + "key_pagetype": pagetype, + "key_document": document.name, + "key_field": field, + } + return keys + + def check_key_violations(self): + known_keys = self.known_keys + acceptable_keys = self.acceptable_keys + for host in self.hosts: + if not host.users: + continue + users = json.loads(host.users) + for user in users: + for key in user["keys"]: + if key in acceptable_keys: + continue + violation = {"host": host.host, "user": user["user"], "key": key} + if key in known_keys: + violation.update(known_keys[key]) + self.append("violations", violation) + + def check_user_violations(self): + suspicious_users = [] + acceptable_users = set(["jingrow", "root"]) + for host in self.hosts: + if not host.users: + continue + users = json.loads(host.users) + for user in users: + if user["user"] not in acceptable_users: + suspicious_users.append((host.host, user["user"])) + + self.suspicious_users = json.dumps(suspicious_users, indent=1, sort_keys=True) + + def set_statistics(self): + self.total_hosts = len(self.hosts) + self.reachable_hosts = len([host for host in self.hosts if host.status == "Completed"]) + self.total_violations = len(self.violations) + self.user_violations = len(json.loads(self.suspicious_users)) + + def set_status(self): + if self.violations or self.user_violations: + self.status = "Failure" + else: + self.status = "Success" + + +class AnsibleAdHoc: + def __init__(self, sources): + constants.HOST_KEY_CHECKING = False + context.CLIARGS = ImmutableDict( + become_method="sudo", + check=False, + connection="ssh", + extra_vars=[], + remote_user="root", + start_at_task=None, + syntax=False, + verbosity=3, + ) + + self.loader = DataLoader() + self.passwords = dict({}) + + self.inventory = InventoryManager(loader=self.loader, sources=sources) + self.variable_manager = VariableManager(loader=self.loader, inventory=self.inventory) + + self.callback = AnsibleCallback() + + def run(self): + self.tasks = [ + { + "action": {"module": "shell", "args": "grep '/bin/.*sh' /etc/passwd | cut -f 1,6 -d ':'"}, + "register": "users", + }, + { + "action": {"module": "shell", "args": "cat {{item.split(':')[1]}}/.ssh/authorized_keys"}, + "ignore_errors": True, + "with_items": "{{users.stdout_lines}}", + }, + ] + source = dict( + name="Ansible Play", + hosts="all", + gather_facts="no", + tasks=self.tasks, + ) + + self.play = Play().load(source, variable_manager=self.variable_manager, loader=self.loader) + + tqm = TaskQueueManager( + inventory=self.inventory, + variable_manager=self.variable_manager, + loader=self.loader, + passwords=self.passwords, + stdout_callback=self.callback, + forks=16, + ) + + try: + tqm.run(self.play) + finally: + tqm.cleanup() + self.loader.cleanup_all_tmp_files() + + shutil.rmtree(constants.DEFAULT_LOCAL_TMP, True) + + return list(self.callback.hosts.values()) + + +class AnsibleCallback(CallbackBase): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.hosts = {} + + def v2_runner_on_ok(self, result, *args, **kwargs): + self.update_task("Completed", 
result) + + def v2_runner_on_failed(self, result, *args, **kwargs): + self.update_task("Completed", result) + + def v2_runner_on_unreachable(self, result): + self.update_task("Unreachable", result) + + @reconnect_on_failure() + def update_task(self, status, result): + host, raw_result = self.parse_result(result) + if raw_result: + # Only update on the last task (that has results) + users = [] + for row in raw_result: + user = { + "user": row["item"].split(":")[0], + "command": row["cmd"], + "keys": [], + "raw_keys": [], + } + for key in row["stdout_lines"]: + stripped_key = key.strip() + if stripped_key and not stripped_key.startswith("#"): + user["raw_keys"].append(key) + user["keys"].append(_extract_key_from_key_string(stripped_key)) + + users.append(user) + + self.hosts[host] = { + "users": json.dumps(users, indent=1, sort_keys=True), + "host": host, + "status": status, + } + + elif status == "Unreachable": + self.hosts[host] = {"host": host, "status": status} + + def parse_result(self, result): + host = result._host.get_name() + _result = result._result + return host, _result.get("results") + + +def _extract_key_from_key_string(key_string): + try: + key_type, key, *_ = key_string.split() + return f"{key_type} {key}" + except Exception: + return key_string + + +def run(): + audit = jingrow.new_pg("SSH Access Audit") + audit.insert() + audit.run() diff --git a/jcloud/infrastructure/pagetype/ssh_access_audit/test_ssh_access_audit.py b/jcloud/infrastructure/pagetype/ssh_access_audit/test_ssh_access_audit.py new file mode 100644 index 0000000..f26ead6 --- /dev/null +++ b/jcloud/infrastructure/pagetype/ssh_access_audit/test_ssh_access_audit.py @@ -0,0 +1,29 @@ +# Copyright (c) 2025, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests import IntegrationTestCase, UnitTestCase + +# On IntegrationTestCase, the pagetype test records and all +# link-field test record depdendencies are recursively loaded +# Use these module variables to add/remove to/from that list +EXTRA_TEST_RECORD_DEPENDENCIES = [] # eg. ["User"] +IGNORE_TEST_RECORD_DEPENDENCIES = [] # eg. ["User"] + + +class UnitTestSSHAccessAudit(UnitTestCase): + """ + Unit tests for SSHAccessAudit. + Use this class for testing individual functions and methods. + """ + + pass + + +class IntegrationTestSSHAccessAudit(IntegrationTestCase): + """ + Integration tests for SSHAccessAudit. + Use this class for testing interactions between multiple components. 
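The helper _extract_key_from_key_string defined in ssh_access_audit.py above is a pure function, so it can be exercised with a plain unittest sketch like the one below. This is illustrative only: it reuses the module path from this commit, assumes the module's jingrow and ansible dependencies are importable, and deliberately skips the UnitTestCase fixture machinery used by the test classes here.

import unittest

from jcloud.infrastructure.pagetype.ssh_access_audit.ssh_access_audit import (
    _extract_key_from_key_string,
)


class TestExtractKeyFromKeyString(unittest.TestCase):
    def test_trailing_comment_is_stripped(self):
        # "type key comment" is normalized to "type key"
        self.assertEqual(
            _extract_key_from_key_string("ssh-ed25519 AAAAC3Nza root@example"),
            "ssh-ed25519 AAAAC3Nza",
        )

    def test_unparseable_string_is_returned_unchanged(self):
        # a single token cannot be unpacked into type + key,
        # so the helper falls back to the original string
        self.assertEqual(_extract_key_from_key_string("garbage"), "garbage")


if __name__ == "__main__":
    unittest.main()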
+ """ + + pass diff --git a/jcloud/infrastructure/pagetype/ssh_access_audit_host/__init__.py b/jcloud/infrastructure/pagetype/ssh_access_audit_host/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/infrastructure/pagetype/ssh_access_audit_host/ssh_access_audit_host.json b/jcloud/infrastructure/pagetype/ssh_access_audit_host/ssh_access_audit_host.json new file mode 100644 index 0000000..e3b0adb --- /dev/null +++ b/jcloud/infrastructure/pagetype/ssh_access_audit_host/ssh_access_audit_host.json @@ -0,0 +1,53 @@ +{ + "actions": [], + "autoname": "autoincrement", + "creation": "2025-01-29 17:22:44.021043", + "pagetype": "PageType", + "engine": "InnoDB", + "field_order": [ + "host", + "status", + "users" + ], + "fields": [ + { + "columns": 7, + "fieldname": "host", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Host", + "read_only": 1, + "reqd": 1 + }, + { + "columns": 3, + "fieldname": "status", + "fieldtype": "Select", + "in_list_view": 1, + "label": "Status", + "options": "Pending\nCompleted\nUnreachable", + "read_only": 1, + "reqd": 1 + }, + { + "columns": 6, + "fieldname": "users", + "fieldtype": "Code", + "label": "Users", + "read_only": 1 + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2025-01-29 17:58:47.917689", + "modified_by": "Administrator", + "module": "Infrastructure", + "name": "SSH Access Audit Host", + "naming_rule": "Autoincrement", + "owner": "Administrator", + "permissions": [], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/infrastructure/pagetype/ssh_access_audit_host/ssh_access_audit_host.py b/jcloud/infrastructure/pagetype/ssh_access_audit_host/ssh_access_audit_host.py new file mode 100644 index 0000000..a55cfdd --- /dev/null +++ b/jcloud/infrastructure/pagetype/ssh_access_audit_host/ssh_access_audit_host.py @@ -0,0 +1,28 @@ +# Copyright (c) 2025, JINGROW +# For license information, please see license.txt + +# import jingrow +from __future__ import annotations + +from jingrow.model.document import Document + + +class SSHAccessAuditHost(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + host: DF.Data + name: DF.Int | None + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + status: DF.Literal["Pending", "Completed", "Unreachable"] + users: DF.Code | None + # end: auto-generated types + + pass diff --git a/jcloud/infrastructure/pagetype/ssh_access_audit_violation/__init__.py b/jcloud/infrastructure/pagetype/ssh_access_audit_violation/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/infrastructure/pagetype/ssh_access_audit_violation/ssh_access_audit_violation.json b/jcloud/infrastructure/pagetype/ssh_access_audit_violation/ssh_access_audit_violation.json new file mode 100644 index 0000000..bea3569 --- /dev/null +++ b/jcloud/infrastructure/pagetype/ssh_access_audit_violation/ssh_access_audit_violation.json @@ -0,0 +1,83 @@ +{ + "actions": [], + "autoname": "autoincrement", + "creation": "2025-01-29 15:54:00.575633", + "pagetype": "PageType", + "engine": "InnoDB", + "field_order": [ + "host", + "user", + "key", + "column_break_sigr", + "key_pagetype", + "key_document", + "key_field" + ], + "fields": [ + { + "columns": 2, + "fieldname": "host", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Host", + "read_only": 1 + }, + { + "fieldname": "column_break_sigr", + "fieldtype": "Column Break" + }, + { + "columns": 1, + "fieldname": "user", + "fieldtype": "Data", + "in_list_view": 1, + "label": "User", + "read_only": 1 + }, + { + "columns": 2, + "fieldname": "key", + "fieldtype": "Code", + "in_list_view": 1, + "label": "Key", + "read_only": 1 + }, + { + "columns": 2, + "fieldname": "key_pagetype", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Key PageType", + "options": "PageType", + "read_only": 1 + }, + { + "columns": 2, + "fieldname": "key_document", + "fieldtype": "Dynamic Link", + "in_list_view": 1, + "label": "Key Document", + "options": "key_pagetype", + "read_only": 1 + }, + { + "fieldname": "key_field", + "fieldtype": "Data", + "label": "Key Field", + "read_only": 1 + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2025-01-30 17:37:44.830485", + "modified_by": "Administrator", + "module": "Infrastructure", + "name": "SSH Access Audit Violation", + "naming_rule": "Autoincrement", + "owner": "Administrator", + "permissions": [], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/infrastructure/pagetype/ssh_access_audit_violation/ssh_access_audit_violation.py b/jcloud/infrastructure/pagetype/ssh_access_audit_violation/ssh_access_audit_violation.py new file mode 100644 index 0000000..9aba5d2 --- /dev/null +++ b/jcloud/infrastructure/pagetype/ssh_access_audit_violation/ssh_access_audit_violation.py @@ -0,0 +1,31 @@ +# Copyright (c) 2025, JINGROW +# For license information, please see license.txt + +# import jingrow +from __future__ import annotations + +from jingrow.model.document import Document + + +class SSHAccessAuditViolation(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
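# Child-table row of SSH Access Audit ("violations" table): one row per
# unexpected authorized key. "key" holds the normalized "type key" string;
# when the key at least matches a known document (for example a User SSH Key),
# key_pagetype / key_document / key_field point back to its origin, otherwise
# they are left empty.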
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + host: DF.Data | None + key: DF.Code | None + key_pagetype: DF.Link | None + key_document: DF.DynamicLink | None + key_field: DF.Data | None + name: DF.Int | None + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + user: DF.Data | None + # end: auto-generated types + + pass diff --git a/jcloud/infrastructure/pagetype/virtual_disk_resize/__init__.py b/jcloud/infrastructure/pagetype/virtual_disk_resize/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/infrastructure/pagetype/virtual_disk_resize/test_virtual_disk_resize.py b/jcloud/infrastructure/pagetype/virtual_disk_resize/test_virtual_disk_resize.py new file mode 100644 index 0000000..7e31cad --- /dev/null +++ b/jcloud/infrastructure/pagetype/virtual_disk_resize/test_virtual_disk_resize.py @@ -0,0 +1,29 @@ +# Copyright (c) 2025, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests import IntegrationTestCase, UnitTestCase + +# On IntegrationTestCase, the pagetype test records and all +# link-field test record depdendencies are recursively loaded +# Use these module variables to add/remove to/from that list +EXTRA_TEST_RECORD_DEPENDENCIES = [] # eg. ["User"] +IGNORE_TEST_RECORD_DEPENDENCIES = [] # eg. ["User"] + + +class UnitTestVirtualDiskResize(UnitTestCase): + """ + Unit tests for VirtualDiskResize. + Use this class for testing individual functions and methods. + """ + + pass + + +class IntegrationTestVirtualDiskResize(IntegrationTestCase): + """ + Integration tests for VirtualDiskResize. + Use this class for testing interactions between multiple components. + """ + + pass diff --git a/jcloud/infrastructure/pagetype/virtual_disk_resize/virtual_disk_resize.js b/jcloud/infrastructure/pagetype/virtual_disk_resize/virtual_disk_resize.js new file mode 100644 index 0000000..4ec2a09 --- /dev/null +++ b/jcloud/infrastructure/pagetype/virtual_disk_resize/virtual_disk_resize.js @@ -0,0 +1,25 @@ +// Copyright (c) 2025, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on("Virtual Disk Resize", { + refresh(frm) { + [ + [__('Start'), 'execute', frm.pg.status === 'Pending'], + [__('Force Continue'), 'force_continue', frm.pg.status === 'Failure'], + [__('Force Fail'), 'force_fail', frm.pg.status === 'Running'], + ].forEach(([label, method, condition]) => { + if (condition) { + frm.add_custom_button( + label, + () => { + jingrow.confirm( + `Are you sure you want to ${label.toLowerCase()}?`, + () => frm.call(method).then(() => frm.refresh()), + ); + }, + __('Actions'), + ); + } + }); + }, +}); diff --git a/jcloud/infrastructure/pagetype/virtual_disk_resize/virtual_disk_resize.json b/jcloud/infrastructure/pagetype/virtual_disk_resize/virtual_disk_resize.json new file mode 100644 index 0000000..c8e4fa3 --- /dev/null +++ b/jcloud/infrastructure/pagetype/virtual_disk_resize/virtual_disk_resize.json @@ -0,0 +1,325 @@ +{ + "actions": [], + "autoname": "autoincrement", + "creation": "2025-01-21 14:22:10.675196", + "pagetype": "PageType", + "engine": "InnoDB", + "field_order": [ + "virtual_machine", + "status", + "column_break_tjrh", + "start", + "end", + "duration", + "column_break_puhr", + "downtime_start", + "downtime_end", + "downtime_duration", + "volumes_section", + "column_break_qwbl", + "old_volume_id", + "old_volume_status", + "old_volume_size", + "old_volume_iops", + "old_volume_throughput", + "virtual_disk_snapshot", + "column_break_fzja", + "new_volume_id", + "new_volume_status", 
+ "new_volume_size", + "new_volume_iops", + "new_volume_throughput", + "filesystems_section", + "filesystem_mount_point", + "filesystem_type", + "service", + "column_break_npqx", + "old_filesystem_uuid", + "old_filesystem_device", + "old_filesystem_size", + "old_filesystem_used", + "column_break_ublt", + "new_filesystem_uuid", + "new_filesystem_temporary_mount_point", + "section_break_mjhg", + "steps", + "section_break_nfeq", + "devices", + "filesystems" + ], + "fields": [ + { + "fieldname": "virtual_machine", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Virtual Machine", + "link_filters": "[[\"Virtual Machine\",\"status\",\"not in\",[\"Draft\",\"Terminated\",null]]]", + "options": "Virtual Machine", + "reqd": 1, + "set_only_once": 1 + }, + { + "default": "Pending", + "fieldname": "status", + "fieldtype": "Select", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Status", + "options": "Pending\nRunning\nSuccess\nFailure", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "column_break_tjrh", + "fieldtype": "Column Break" + }, + { + "fieldname": "start", + "fieldtype": "Datetime", + "label": "Start", + "read_only": 1 + }, + { + "fieldname": "end", + "fieldtype": "Datetime", + "label": "End", + "read_only": 1 + }, + { + "fieldname": "duration", + "fieldtype": "Duration", + "label": "Duration", + "read_only": 1 + }, + { + "fieldname": "section_break_mjhg", + "fieldtype": "Section Break", + "read_only": 1 + }, + { + "fieldname": "steps", + "fieldtype": "Table", + "label": "Steps", + "options": "Virtual Machine Migration Step", + "read_only": 1 + }, + { + "fieldname": "section_break_nfeq", + "fieldtype": "Section Break" + }, + { + "fieldname": "new_volume_id", + "fieldtype": "Data", + "label": "New Volume ID", + "read_only": 1 + }, + { + "fieldname": "new_volume_status", + "fieldtype": "Select", + "label": "New Volume Status", + "options": "Unprovisioned\nAttached", + "read_only": 1 + }, + { + "fieldname": "new_volume_size", + "fieldtype": "Int", + "label": "New Volume Size", + "read_only": 1 + }, + { + "fieldname": "volumes_section", + "fieldtype": "Section Break", + "label": "Volumes" + }, + { + "fieldname": "column_break_qwbl", + "fieldtype": "Column Break" + }, + { + "fieldname": "old_volume_id", + "fieldtype": "Data", + "label": "Old Volume ID", + "read_only": 1 + }, + { + "fieldname": "old_volume_status", + "fieldtype": "Select", + "label": "Old Volume Status", + "options": "Attached\nDeleted", + "read_only": 1 + }, + { + "fieldname": "old_volume_size", + "fieldtype": "Int", + "label": "Old Volume Size", + "read_only": 1 + }, + { + "fieldname": "new_volume_iops", + "fieldtype": "Int", + "label": "New Volume IOPS", + "read_only": 1 + }, + { + "fieldname": "new_volume_throughput", + "fieldtype": "Int", + "label": "New Volume Throughput", + "read_only": 1 + }, + { + "fieldname": "old_volume_iops", + "fieldtype": "Int", + "label": "Old Volume IOPS", + "read_only": 1 + }, + { + "fieldname": "old_volume_throughput", + "fieldtype": "Int", + "label": "Old Volume Throughput", + "read_only": 1 + }, + { + "fieldname": "column_break_fzja", + "fieldtype": "Column Break" + }, + { + "fieldname": "filesystems_section", + "fieldtype": "Section Break", + "label": "Filesystems" + }, + { + "fieldname": "old_filesystem_uuid", + "fieldtype": "Data", + "label": "Old Filesystem UUID", + "read_only": 1 + }, + { + "fieldname": "column_break_npqx", + "fieldtype": "Column Break" + }, + { + "fieldname": "new_filesystem_uuid", + "fieldtype": "Data", + "label": "New 
Filesystem UUID", + "read_only": 1 + }, + { + "fieldname": "old_filesystem_size", + "fieldtype": "Int", + "label": "Old Filesystem Size", + "read_only": 1 + }, + { + "fieldname": "old_filesystem_used", + "fieldtype": "Int", + "label": "Old Filesystem Used", + "read_only": 1 + }, + { + "fieldname": "old_filesystem_device", + "fieldtype": "Data", + "label": "Old Filesystem Device", + "read_only": 1 + }, + { + "fieldname": "filesystems", + "fieldtype": "Code", + "label": "Filesystems", + "options": "JSON", + "read_only": 1 + }, + { + "fieldname": "devices", + "fieldtype": "Code", + "label": "Devices", + "options": "JSON", + "read_only": 1 + }, + { + "fieldname": "filesystem_type", + "fieldtype": "Data", + "label": "Filesystem Type", + "read_only": 1 + }, + { + "fieldname": "filesystem_mount_point", + "fieldtype": "Data", + "label": "Filesystem Mount Point", + "read_only": 1 + }, + { + "fieldname": "column_break_ublt", + "fieldtype": "Column Break" + }, + { + "fieldname": "new_filesystem_temporary_mount_point", + "fieldtype": "Data", + "label": "New Filesystem Temporary Mount Point", + "read_only": 1 + }, + { + "fieldname": "service", + "fieldtype": "Data", + "label": "Service", + "read_only": 1 + }, + { + "fieldname": "column_break_puhr", + "fieldtype": "Column Break" + }, + { + "fieldname": "downtime_start", + "fieldtype": "Datetime", + "label": "Downtime Start", + "read_only": 1 + }, + { + "fieldname": "downtime_end", + "fieldtype": "Datetime", + "label": "Downtime End", + "read_only": 1 + }, + { + "fieldname": "downtime_duration", + "fieldtype": "Duration", + "label": "Downtime Duration", + "read_only": 1 + }, + { + "fieldname": "virtual_disk_snapshot", + "fieldtype": "Link", + "label": "Virtual Disk Snapshot", + "options": "Virtual Disk Snapshot", + "read_only": 1 + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2025-02-07 20:14:17.624669", + "modified_by": "Administrator", + "module": "Infrastructure", + "name": "Virtual Disk Resize", + "naming_rule": "Autoincrement", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "show_title_field_in_link": 1, + "sort_field": "creation", + "sort_order": "DESC", + "states": [], + "title_field": "virtual_machine", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/infrastructure/pagetype/virtual_disk_resize/virtual_disk_resize.py b/jcloud/infrastructure/pagetype/virtual_disk_resize/virtual_disk_resize.py new file mode 100644 index 0000000..356ead6 --- /dev/null +++ b/jcloud/infrastructure/pagetype/virtual_disk_resize/virtual_disk_resize.py @@ -0,0 +1,628 @@ +# Copyright (c) 2025, JINGROW +# For license information, please see license.txt + +from __future__ import annotations + +import json +import time +from enum import Enum + +import botocore +import jingrow +from jingrow.core.utils import find, find_all +from jingrow.model.document import Document + +from jcloud.jcloud.pagetype.ansible_console.ansible_console import AnsibleAdHoc + +SUPPORTED_FILESYSTEMS = ["ext4"] + + +class VirtualDiskResize(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + from jcloud.infrastructure.pagetype.virtual_machine_migration_step.virtual_machine_migration_step import ( + VirtualMachineMigrationStep, + ) + + devices: DF.Code | None + downtime_duration: DF.Duration | None + downtime_end: DF.Datetime | None + downtime_start: DF.Datetime | None + duration: DF.Duration | None + end: DF.Datetime | None + filesystem_mount_point: DF.Data | None + filesystem_type: DF.Data | None + filesystems: DF.Code | None + name: DF.Int | None + new_filesystem_temporary_mount_point: DF.Data | None + new_filesystem_uuid: DF.Data | None + new_volume_id: DF.Data | None + new_volume_iops: DF.Int + new_volume_size: DF.Int + new_volume_status: DF.Literal["Unprovisioned", "Attached"] + new_volume_throughput: DF.Int + old_filesystem_device: DF.Data | None + old_filesystem_size: DF.Int + old_filesystem_used: DF.Int + old_filesystem_uuid: DF.Data | None + old_volume_id: DF.Data | None + old_volume_iops: DF.Int + old_volume_size: DF.Int + old_volume_status: DF.Literal["Attached", "Deleted"] + old_volume_throughput: DF.Int + service: DF.Data | None + start: DF.Datetime | None + status: DF.Literal["Pending", "Running", "Success", "Failure"] + steps: DF.Table[VirtualMachineMigrationStep] + virtual_disk_snapshot: DF.Link | None + virtual_machine: DF.Link + # end: auto-generated types + + def before_insert(self): + self.validate_aws_only() + self.validate_existing_migration() + self.set_old_volume_attributes() + self.add_steps() + + def after_insert(self): + self.set_filesystem_attributes() + self.set_new_volume_attributes() + self.create_new_volume() + self.save() + + def add_steps(self): + for step in self.shrink_steps: + step.update({"status": StepStatus.Pending}) + self.append("steps", step) + + def validate_aws_only(self): + if self.machine.cloud_provider != "AWS EC2": + jingrow.throw("This feature is only available for AWS EC2") + + def validate_existing_migration(self): + if existing := jingrow.get_all( + self.pagetype, + { + "status": ("in", [Status.Pending, Status.Running]), + "virtual_machine": self.virtual_machine, + "name": ("!=", self.name), + }, + pluck="status", + limit=1, + ): + jingrow.throw(f"An existing shrink document is already {existing[0].lower()}.") + + def set_filesystem_attributes(self): + devices = self.fetch_devices() + if len(devices) != 1: + jingrow.throw("Multiple filesystems found on volume. 
Can't shrink") + + self.old_filesystem_device = f"/dev/{devices[0]['name']}" + + filesystems = self.fetch_filesystems() + + self.verify_mount_point(devices[0], filesystems[0]) + self.set_old_filesystem_attributes(devices[0], filesystems[0]) + + self.devices = json.dumps(devices, indent=2) + self.filesystems = json.dumps(filesystems, indent=2) + + def fetch_devices(self): + device_name = self._get_device_from_volume_id(self.old_volume_id) + command = f"lsblk --json --output name,type,uuid,mountpoint,size,fstype {device_name}" + output = self.ansible_run(command)["output"] + + """Sample outputs of the command + { + "blockdevices": [ + {"name":"nvme1n1", "type":"disk", "uuid":null, "mountpoint":null, "size":"200G", "fstype":null, + "children": [ + {"name":"nvme1n1p1", "type":"part", "uuid":"db7f5fbc-cf4b-45ae-985d-11e4b2222934", "mountpoint":"/opt/volumes/mariadb", "size":"199.9G", "fstype":"ext4"}, + {"name":"nvme1n1p14", "type":"part", "uuid":null, "mountpoint":null, "size":"4M", "fstype":null}, + {"name":"nvme1n1p15", "type":"part", "uuid":"1284-3BC2", "mountpoint":null, "size":"106M", "fstype":"vfat"} + ] + } + ] + } + + { + "blockdevices": [ + {"name":"nvme1n1", "type":"disk", "uuid":"d7ed9d71-e496-4ea7-b141-dffb3b1f4884", "mountpoint":"/opt/volumes/mariadb", "size":"20G", "fstype":"ext4"} + ] + } + """ + return self._parse_devices(json.loads(output)["blockdevices"]) + + def _get_device_from_volume_id(self, volume_id): + stripped_id = volume_id.replace("-", "") + return f"/dev/disk/by-id/nvme-Amazon_Elastic_Block_Store_{stripped_id}" + + def _parse_devices(self, devices): + parsed = [] + for device in devices: + # We only care about disks and partitions + if device["type"] != "disk": + continue + + # Disk has partitions. e.g root volume + if "children" in device: + for partition in device["children"]: + # We only care about data filesystems (ext4) + # Exclude tmpfs, squashfs, devtmpfs, etc + if partition["fstype"] not in SUPPORTED_FILESYSTEMS: + continue + if partition["type"] == "part": + parsed.append(partition) + else: + # Single partition. 
e.g data volume + # We only care about data filesystems (ext4) + # Exclude tmpfs, squashfs, devtmpfs, etc + if device["fstype"] not in SUPPORTED_FILESYSTEMS: + continue + parsed.append(device) + return parsed + + def fetch_filesystems(self): + command = "df -k --sync --local --print-type" + # Note: ansible run doesn't support --arg=value syntax + output = self.ansible_run(command)["output"] + """Sample output of the command + Filesystem Type 1K-blocks Used Available Use% Mounted on + /dev/root ext4 7950536 3179104 4755048 41% / + devtmpfs devtmpfs 1961436 0 1961436 0% /dev + tmpfs tmpfs 1966204 0 1966204 0% /dev/shm + tmpfs tmpfs 393244 1036 392208 1% /run + tmpfs tmpfs 5120 0 5120 0% /run/lock + tmpfs tmpfs 1966204 0 1966204 0% /sys/fs/cgroup + /dev/loop0 squashfs 23424 23424 0 100% /snap/amazon-ssm-agent/9882 + /dev/loop1 squashfs 60928 60928 0 100% /snap/core20/2437 + /dev/loop3 squashfs 94208 94208 0 100% /snap/lxd/29631 + /dev/loop2 squashfs 70528 70528 0 100% /snap/core22/1720 + /dev/loop4 squashfs 39680 39680 0 100% /snap/snapd/23546 + /dev/nvme0n1p15 vfat 99801 6427 93374 7% /boot/efi + /dev/nvme1n1 ext4 20466256 1602488 17798808 9% /opt/volumes/mariadb + /dev/loop5 squashfs 70528 70528 0 100% /snap/core22/1752 + tmpfs tmpfs 393240 0 393240 0% /run/user/0 + """ + return self._parse_filesystems(output) + + def _parse_filesystems(self, raw_filesystems): + filesystems = [] + for line in raw_filesystems.splitlines()[1:]: # Skip the header + if not line: + continue + filesystem, type, size, used, available, *_, mountpoint = line.split() + # We only care about data filesystems (ext4) + # Exclude tmpfs, squashfs, devtmpfs, etc + if type not in SUPPORTED_FILESYSTEMS: + continue + + if filesystem != self.old_filesystem_device: + continue + + # size and used are number of 1k blocks. We convert them to GB + # AWS sizing API deals with integer GB + filesystems.append( + { + "filesystem": filesystem, + "type": type, + "mount_point": mountpoint, + "size": jingrow.utils.rounded(int(size) / (1024 * 1024), 1), + "used": jingrow.utils.rounded(int(used) / (1024 * 1024), 1), + "available": jingrow.utils.rounded(int(available) / (1024 * 1024), 1), + } + ) + return filesystems + + def verify_mount_point(self, device, filesystem): + if device["mountpoint"] != filesystem["mount_point"]: + jingrow.throw("Device and Filesystem mount point don't match. Can't shrink") + + def set_old_filesystem_attributes(self, device, filesystem): + self.filesystem_mount_point = device["mountpoint"] + self.filesystem_type = device["fstype"] + self.old_filesystem_uuid = device["uuid"] + self.old_filesystem_size = filesystem["size"] + self.old_filesystem_used = filesystem["used"] + + SERVICES = { + "/opt/volumes/benches": "docker", + "/opt/volumes/mariadb": "mariadb", + } + self.service = SERVICES.get(self.filesystem_mount_point) + + def set_old_volume_id(self): + machine = self.machine + root_volume = machine.get_root_volume() + + volumes = find_all(machine.volumes, lambda v: v.volume_id != root_volume.volume_id) + if len(volumes) != 1: + jingrow.throw("Multiple volumes found. 
Please select the volume to shrink") + + self.old_volume_id = volumes[0].volume_id + + def set_old_volume_attributes(self): + if not self.old_volume_id: + self.set_old_volume_id() + + volume = self.old_volume + self.old_volume_size = volume.size + self.old_volume_iops = volume.iops + self.old_volume_throughput = volume.throughput + + def set_new_volume_attributes(self): + # Set size and performance attributes for new volume + # New volume should be roughly 85% full after copying files + new_size = int(self.old_filesystem_used * 100 / 85) + self.new_filesystem_size = max(new_size, 10) # Minimum 10 GB + self.new_volume_size = self.new_filesystem_size + self.new_volume_iops, self.new_volume_throughput = self.get_optimal_performance_attributes() + + def create_new_volume(self): + # Create new volume + self.new_volume_id = self.machine.attach_new_volume( + self.new_volume_size, iops=self.new_volume_iops, throughput=self.new_volume_throughput + ) + self.new_volume_status = "Attached" + + def get_optimal_performance_attributes(self): + MAX_THROUGHPUT = 1000 # 1000 MB/s + MAX_BLOCK_SIZE = 256 # 256k + BUFFER = 1.2 # 20% buffer iops for overhead + + throughput = MAX_THROUGHPUT + iops = int(BUFFER * throughput * 1024 / MAX_BLOCK_SIZE) + + return iops, throughput + + def increase_old_volume_performance(self) -> StepStatus: + "Increase performance of old volume" + iops, throughput = self.get_optimal_performance_attributes() + volume = self.old_volume + if volume.iops == iops and volume.throughput == throughput: + return StepStatus.Success + try: + self.machine.update_ebs_performance(volume.volume_id, iops, throughput) + except botocore.exceptions.ClientError as e: + if e.response.get("Error", {}).get("Code") == "VolumeModificationRateExceeded": + return StepStatus.Failure + return StepStatus.Success + + def wait_for_increased_performance(self) -> StepStatus: + "Wait for increased performance to take effect" + modification = self.machine.get_volume_modifications(self.old_volume.volume_id) + if modification and modification["ModificationState"] != "completed": + return StepStatus.Pending + return StepStatus.Success + + def format_new_volume(self) -> StepStatus: + "Format new volume" + device = self._get_device_from_volume_id(self.new_volume_id) + output = self.ansible_run(f"mkfs -t {self.filesystem_type} {device}")["output"] + """Sample output of the command + Creating filesystem with 2621440 4k blocks and 655360 inodes + Filesystem UUID: f82d5b68-765a-4a4c-8fda-67c224726afe + Superblock backups stored on blocks: + 32768, 98304, 163840, 229376, 294912, 819200, 884736, 1605632 + + Allocating group tables: done + Writing inode tables: done + Creating journal (16384 blocks): done + Writing superblocks and filesystem accounting information: done + """ + for line in output.splitlines(): + if "UUID" not in line: + continue + self.new_filesystem_uuid = line.split()[-1] + return StepStatus.Success + + def mount_new_volume(self) -> StepStatus: + "Mount new volume" + device = self._get_device_from_volume_id(self.new_volume_id) + self.new_filesystem_temporary_mount_point = "/opt/volumes/resize" + self.ansible_run(f"mkdir {self.new_filesystem_temporary_mount_point}") + self.ansible_run(f"mount {device} {self.new_filesystem_temporary_mount_point}") + return StepStatus.Success + + def stop_service(self) -> StepStatus: + "Stop service" + self.downtime_start = jingrow.utils.now_datetime() + if self.service: + self.ansible_run(f"systemctl stop {self.service}") + # Filebeat keeps the file open and prevents unmounting + 
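# Stopping it first means the later umount should not fail with
+ # "target is busy" because of filebeat's open file handles.
+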
self.ansible_run("systemctl stop filebeat") + return StepStatus.Success + + def unmount_bind_mounts(self) -> StepStatus: + "Unmount bind mounts" + output = self.ansible_run( + f"findmnt --json --source {self.old_filesystem_device} --output target,source" + )["output"] + mounts = json.loads(output)["filesystems"] + for mount in mounts: + if "[/" not in mount["source"]: + continue + self.ansible_run(f"umount {mount['target']}") + return StepStatus.Success + + def snapshot_machine(self) -> StepStatus: + "Snapshot machine" + machine = self.machine + machine.create_snapshots() + + snapshots = jingrow.get_all( + "Virtual Disk Snapshot", + {"name": ("in", machine.flags.created_snapshots), "volume_id": self.old_volume_id}, + pluck="name", + ) + if len(snapshots) == 0: + jingrow.throw("Failed to create a snapshot") + + self.virtual_disk_snapshot = snapshots[0] + return StepStatus.Success + + def start_copy(self) -> StepStatus: + "Start copying files" + server = self.machine.get_server() + server.copy_files( + source=self.filesystem_mount_point, + destination=self.new_filesystem_temporary_mount_point, + ) + return StepStatus.Success + + def wait_for_copy(self) -> StepStatus: + "Wait for files to be copied" + plays = jingrow.get_all( + "Ansible Play", + { + "server": self.machine.get_server().name, + "play": "Copy Files", + "creation": (">", self.creation), + }, + ["status"], + order_by="creation desc", + limit=1, + ) + if not plays: + return StepStatus.Running + + play_status = plays[0].status + if play_status == "Success": + return StepStatus.Success + if play_status in ("Failure", "Unreachable"): + return StepStatus.Failure + + return StepStatus.Running + + def unmount_old_volume(self) -> StepStatus: + "Unmount old volume" + self.ansible_run(f"umount {self.filesystem_mount_point}") + return StepStatus.Success + + def unmount_new_volume(self) -> StepStatus: + "Unmount new volume" + self.ansible_run(f"umount {self.new_filesystem_temporary_mount_point}") + return StepStatus.Success + + def update_mount(self) -> StepStatus: + "Mount new volume on old mount point" + # Mount the new volume using the new UUID + # Update fstab + # 1. Find mount matching the old UUID in fstab + # 2. 
Update UUID for this mountpoint + # Reference: https://stackoverflow.com/questions/16637799/sed-error-invalid-reference-1-on-s-commands-rhs#comment88576787_16637847 + self.ansible_run( + f"sed -Ei 's/^UUID\\={self.old_filesystem_uuid}\\s(.*$)/UUID\\={self.new_filesystem_uuid} \\1/g' /etc/fstab" + ) + self.ansible_run("systemctl daemon-reload") + return StepStatus.Success + + def start_service(self) -> StepStatus: + "Start service" + if self.service: + self.ansible_run(f"systemctl start {self.service}") + + # We had stopped filebeat, start it again + self.ansible_run("systemctl start filebeat") + self.downtime_end = jingrow.utils.now_datetime() + self.downtime_duration = (self.downtime_end - self.downtime_start).total_seconds() + return StepStatus.Success + + def reduce_performance_of_new_volume(self) -> StepStatus: + "Reduce performance of new volume" + self.machine.update_ebs_performance( + self.new_volume_id, self.old_volume_iops, self.old_volume_throughput + ) + return StepStatus.Success + + def delete_old_volume(self) -> StepStatus: + "Delete old volume" + self.machine.delete_volume(self.old_volume_id) + self.old_volume_status = "Deleted" + return StepStatus.Success + + @property + def machine(self): + return jingrow.get_pg("Virtual Machine", self.virtual_machine) + + @property + def old_volume(self): + return find(self.machine.volumes, lambda v: v.volume_id == self.old_volume_id) + + @property + def shrink_steps(self): + Wait, NoWait = True, False + methods = [ + (self.increase_old_volume_performance, NoWait), + (self.wait_for_increased_performance, Wait), + (self.format_new_volume, NoWait), + (self.mount_new_volume, NoWait), + (self.stop_service, NoWait), + (self.unmount_bind_mounts, NoWait), + (self.snapshot_machine, NoWait), + (self.start_copy, NoWait), + (self.wait_for_copy, Wait), + (self.unmount_old_volume, NoWait), + (self.unmount_new_volume, NoWait), + (self.update_mount, NoWait), + (self.start_service, NoWait), + (self.reduce_performance_of_new_volume, NoWait), + (self.delete_old_volume, NoWait), + ] + + steps = [] + for method, wait_for_completion in methods: + steps.append( + { + "step": method.__pg__, + "method": method.__name__, + "wait_for_completion": wait_for_completion, + } + ) + return steps + + @jingrow.whitelist() + def execute(self): + self.status = Status.Running + self.start = jingrow.utils.now_datetime() + self.save() + self.next() + + def fail(self) -> None: + self.status = Status.Failure + for step in self.steps: + if step.status in (StepStatus.Pending, StepStatus.Running): + step.status = StepStatus.Failure + self.end = jingrow.utils.now_datetime() + self.duration = (self.end - self.start).total_seconds() + self.save() + + def succeed(self) -> None: + self.status = Status.Success + self.end = jingrow.utils.now_datetime() + self.duration = (self.end - self.start).total_seconds() + self.save() + + @jingrow.whitelist() + def next(self, ignore_version=False) -> None: + self.status = Status.Running + self.save(ignore_version=ignore_version) + next_step = self.next_step + + if not next_step: + # We've executed everything + self.succeed() + return + + jingrow.enqueue_pg( + self.pagetype, + self.name, + "execute_step", + step_name=next_step.name, + enqueue_after_commit=True, + at_front=True, + ) + + @jingrow.whitelist() + def force_continue(self) -> None: + # Mark all failed and skipped steps as pending + for step in self.steps: + if step.status in (StepStatus.Failure, StepStatus.Skipped): + step.status = StepStatus.Pending + self.next() + + @jingrow.whitelist() + 
def force_fail(self) -> None: + # Mark all pending steps as failure + for step in self.steps: + if step.status in (StepStatus.Pending, StepStatus.Running): + step.status = StepStatus.Failure + self.status = Status.Failure + self.save() + + @property + def next_step(self) -> VirtualMachineMigrationStep | None: + for step in self.steps: + if step.status in (StepStatus.Pending, StepStatus.Running): + return step + return None + + @jingrow.whitelist() + def execute_step(self, step_name): + step = self.get_step(step_name) + + if not step.start: + step.start = jingrow.utils.now_datetime() + + step.status = StepStatus.Running + + self.save() + jingrow.db.commit() + + ignore_version_while_saving = False + try: + step.status = getattr(self, step.method)() + if step.wait_for_completion: + step.attempts = step.attempts + 1 + if step.status == StepStatus.Pending: + # Wait some time before the next run + ignore_version_while_saving = True + time.sleep(1) + except Exception: + step.status = StepStatus.Failure + step.traceback = jingrow.get_traceback(with_context=True) + + step.end = jingrow.utils.now_datetime() + step.duration = (step.end - step.start).total_seconds() + + ignore_version_while_saving = True + if step.status == StepStatus.Failure: + self.fail() + else: + self.next(ignore_version_while_saving) + + def get_step(self, step_name) -> VirtualMachineMigrationStep | None: + for step in self.steps: + if step.name == step_name: + return step + return None + + def ansible_run(self, command): + virtual_machine_ip = jingrow.db.get_value("Virtual Machine", self.virtual_machine, "public_ip_address") + inventory = f"{virtual_machine_ip}," + result = AnsibleAdHoc(sources=inventory).run(command, self.name)[0] + self.add_command(command, result) + return result + + def add_command(self, command, result): + pretty_result = json.dumps(result, indent=2, sort_keys=True, default=str) + comment = f"
{command}
{pretty_result}
" + self.add_comment(text=comment) + + +# TODO: Change (str, enum.Enum) to enum.StrEnum when migrating to Python 3.11 +class StepStatus(str, Enum): + Pending = "Pending" + Running = "Running" + Success = "Success" + Failure = "Failure" + Skipped = "Skipped" + + def __str__(self): + return self.value + + +class Status(str, Enum): + Pending = "Pending" + Running = "Running" + Success = "Success" + Failure = "Failure" + + def __str__(self): + return self.value diff --git a/jcloud/infrastructure/pagetype/virtual_machine_migration/README.md b/jcloud/infrastructure/pagetype/virtual_machine_migration/README.md new file mode 100644 index 0000000..3b3a079 --- /dev/null +++ b/jcloud/infrastructure/pagetype/virtual_machine_migration/README.md @@ -0,0 +1,96 @@ +# Explaining Choices + +Most commits/comments already explain the decisions. Just putting them here for sanity. + +## Mounts + +Going forward the data (mostly machine-independent directories) will be kept on a separate volume. + +For the migration we + +1. Shut down the machine +2. Start a new machine (with a new ARM image) +3. Attach the root volume from the old machine to the new machine +4. Do some mount magic so all services find data where they expect it to be + +### AWS Quirks + +1. We can't attach a volume at boot. The VM must be in the Running state. +2. You can't rely on device_name provided during run_instance. +3. The device will have an alias that looks something like "/dev/disk/by-id/......" + +### Bind Mounts + +Instead of directly mounting the volume to the target mount point, we + +1. Mount the volume to /opt/volumes// +2. Bind mount the relative location from this path. /opt/volumes/mariadb/a/b/c to /a/b/c + +This gives us the ability to + +1. Have two different mounts (/etc/mysql and /var/lib/mysql) +2. Use the same old volumes as-is without any custom mounting scheme. + +### Mount Dependency + +We don't want MariaDB / Docker to start unless the data volume is mounted correctly. +Add a systemd mount dependency (BindsTo) so the services start if and only if the data volume is mounted. + +Note: We define the dependency only on the bind mount. /opt/volumes... is left out as convenience. + +### Relabeling + +The base images are configured to mount partitions labeled UEFI and cloudimg-rootfs + +1. We change these labels so the new machine doesn't accidentally boot from these +2. We update fstab so the old machine can still boot with the modified labels + +Note: EFI partitions have a dirty bit set on them. fatlabel messes this up. We need to run fsck to fix this. + +### UUID + +When we spawn a new machine from the base image, all volumes get their own volume-id. If we rely on volume-id to determine the data volume then we'll have to do some extra work after the first boot. (To tell the machine about the volume) + +When we format the data volume we get a new UUID. This UUID remains the same (since it's part of the data itself) across boots (unless we reformat the volume). This is the easiest way to recognize a volume in fstab. + +During the migration, we need to do the extra step of updating fstab to use the old UUID (from the old root volume). + +We could have modified the UUID of the old root volume (so we don't need to do any work after the migration). But + +1. e2label needs a freshly checked disk (fsck) +2. fsck needs an unmounted partition. We can't unmount the root partition. + +## Misc + +### Hardcoded values + +This is only going to be used for app and db servers. 
+
+- App servers will have /home/jingrow/benches stored on the data volume
+- DB servers will have /var/lib/mysql and /etc/mysql stored on the data volume
+
+### Wait for ping + cloud init
+
+During the first boot we
+
+1. Delete old host keys (to avoid collisions between multiple hosts)
+2. Update SSH config
+3. Restart SSHD
+
+During this restart, for a short period, we can't start a new SSH session. (Sometimes we get lucky).
+To avoid this, explicitly wait for cloud-init to finish (and then check if sshd is running).
+
+---
+
+## TODO
+
+#### Disk Usage Alerts
+
+We'll need to add alerts for the modified mount points (old alerts rely on /)
+
+#### Disk Resize
+
+Resize logic resizes the first volume listed in the volumes table.
+
+1. This ordering isn't guaranteed to be [root, data]
+2. We need a way to specify exactly which volume we need to resize
diff --git a/jcloud/infrastructure/pagetype/virtual_machine_migration/__init__.py b/jcloud/infrastructure/pagetype/virtual_machine_migration/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/jcloud/infrastructure/pagetype/virtual_machine_migration/test_virtual_machine_migration.py b/jcloud/infrastructure/pagetype/virtual_machine_migration/test_virtual_machine_migration.py
new file mode 100644
index 0000000..756ff46
--- /dev/null
+++ b/jcloud/infrastructure/pagetype/virtual_machine_migration/test_virtual_machine_migration.py
@@ -0,0 +1,9 @@
+# Copyright (c) 2024, JINGROW
+# See license.txt
+
+# import jingrow
+from jingrow.tests.utils import JingrowTestCase
+
+
+class TestVirtualMachineMigration(JingrowTestCase):
+	pass
diff --git a/jcloud/infrastructure/pagetype/virtual_machine_migration/virtual_machine_migration.js b/jcloud/infrastructure/pagetype/virtual_machine_migration/virtual_machine_migration.js
new file mode 100644
index 0000000..d5ceedf
--- /dev/null
+++ b/jcloud/infrastructure/pagetype/virtual_machine_migration/virtual_machine_migration.js
@@ -0,0 +1,25 @@
+// Copyright (c) 2024, JINGROW
+// For license information, please see license.txt
+
+jingrow.ui.form.on('Virtual Machine Migration', {
+	refresh(frm) {
+		[
+			[__('Start'), 'execute', frm.pg.status === 'Pending'],
+			[__('Force Continue'), 'force_continue', frm.pg.status === 'Failure'],
+			[__('Force Fail'), 'force_fail', frm.pg.status === 'Running'],
+		].forEach(([label, method, condition]) => {
+			if (condition) {
+				frm.add_custom_button(
+					label,
+					() => {
+						jingrow.confirm(
+							`Are you sure you want to ${label.toLowerCase()}?`,
+							() => frm.call(method).then(() => frm.refresh()),
+						);
+					},
+					__('Actions'),
+				);
+			}
+		});
+	},
+});
diff --git a/jcloud/infrastructure/pagetype/virtual_machine_migration/virtual_machine_migration.json b/jcloud/infrastructure/pagetype/virtual_machine_migration/virtual_machine_migration.json
new file mode 100644
index 0000000..cbca63a
--- /dev/null
+++ b/jcloud/infrastructure/pagetype/virtual_machine_migration/virtual_machine_migration.json
@@ -0,0 +1,188 @@
+{
+ "actions": [],
+ "autoname": "autoincrement",
+ "creation": "2024-09-19 15:30:52.341880",
+ "pagetype": "PageType",
+ "engine": "InnoDB",
+ "field_order": [
+  "virtual_machine",
+  "status",
+  "new_plan",
+  "column_break_pega",
+  "virtual_machine_image",
+  "machine_type",
+  "copied_virtual_machine",
+  "column_break_tjrh",
+  "start",
+  "end",
+  "duration",
+  "section_break_pplo",
+  "volumes",
+  "mounts",
+  "raw_devices",
+  "parsed_devices",
+  "bind_mounts",
+  "section_break_mjhg",
+  "steps"
+ ],
+ 
"fields": [ + { + "fieldname": "virtual_machine", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Virtual Machine", + "link_filters": "[[\"Virtual Machine\",\"status\",\"not in\",[\"Draft\",\"Terminated\",null]]]", + "options": "Virtual Machine", + "reqd": 1, + "set_only_once": 1 + }, + { + "fieldname": "copied_virtual_machine", + "fieldtype": "Link", + "label": "Copied Virtual Machine", + "options": "Virtual Machine", + "read_only": 1 + }, + { + "default": "Pending", + "fieldname": "status", + "fieldtype": "Select", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Status", + "options": "Pending\nRunning\nSuccess\nFailure", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "column_break_pega", + "fieldtype": "Column Break" + }, + { + "fieldname": "steps", + "fieldtype": "Table", + "label": "Steps", + "options": "Virtual Machine Migration Step" + }, + { + "fieldname": "section_break_pplo", + "fieldtype": "Section Break" + }, + { + "fieldname": "section_break_mjhg", + "fieldtype": "Section Break", + "read_only": 1 + }, + { + "fieldname": "volumes", + "fieldtype": "Table", + "label": "Volumes", + "options": "Virtual Machine Migration Volume", + "read_only": 1 + }, + { + "fieldname": "virtual_machine_image", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Virtual Machine Image", + "link_filters": "[[\"Virtual Machine Image\",\"status\",\"=\",\"Available\"]]", + "options": "Virtual Machine Image", + "reqd": 1, + "set_only_once": 1 + }, + { + "fieldname": "machine_type", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Machine Type", + "reqd": 1, + "set_only_once": 1 + }, + { + "fieldname": "column_break_tjrh", + "fieldtype": "Column Break" + }, + { + "fieldname": "start", + "fieldtype": "Datetime", + "label": "Start", + "read_only": 1 + }, + { + "fieldname": "end", + "fieldtype": "Datetime", + "label": "End", + "read_only": 1 + }, + { + "fieldname": "duration", + "fieldtype": "Duration", + "label": "Duration", + "read_only": 1 + }, + { + "fieldname": "raw_devices", + "fieldtype": "Code", + "hidden": 1, + "label": "Raw Devices", + "options": "JSON", + "read_only": 1 + }, + { + "fieldname": "parsed_devices", + "fieldtype": "Code", + "hidden": 1, + "label": "Parsed Devices", + "options": "JSON", + "read_only": 1 + }, + { + "fieldname": "mounts", + "fieldtype": "Table", + "label": "Mounts", + "options": "Virtual Machine Migration Mount" + }, + { + "fieldname": "new_plan", + "fieldtype": "Link", + "label": "New Plan", + "options": "Server Plan", + "read_only": 1 + }, + { + "fieldname": "bind_mounts", + "fieldtype": "Table", + "label": "Bind Mounts", + "options": "Virtual Machine Migration Bind Mount" + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2024-12-10 10:51:08.790005", + "modified_by": "Administrator", + "module": "Infrastructure", + "name": "Virtual Machine Migration", + "naming_rule": "Autoincrement", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "show_title_field_in_link": 1, + "sort_field": "creation", + "sort_order": "DESC", + "states": [], + "title_field": "virtual_machine", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/infrastructure/pagetype/virtual_machine_migration/virtual_machine_migration.py b/jcloud/infrastructure/pagetype/virtual_machine_migration/virtual_machine_migration.py new file mode 
100644 index 0000000..3d77c8b --- /dev/null +++ b/jcloud/infrastructure/pagetype/virtual_machine_migration/virtual_machine_migration.py @@ -0,0 +1,629 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt +from __future__ import annotations + +import json +import shlex +import subprocess +import time +from enum import Enum +from typing import TYPE_CHECKING + +import jingrow +from jingrow.core.utils import find +from jingrow.model.document import Document + +from jcloud.jcloud.pagetype.ansible_console.ansible_console import AnsibleAdHoc + +if TYPE_CHECKING: + from jcloud.infrastructure.pagetype.virtual_machine_migration_step.virtual_machine_migration_step import ( + VirtualMachineMigrationStep, + ) + + +StepStatus = Enum("StepStatus", ["Pending", "Running", "Success", "Failure"]) + + +class VirtualMachineMigration(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + from jcloud.infrastructure.pagetype.virtual_machine_migration_bind_mount.virtual_machine_migration_bind_mount import ( + VirtualMachineMigrationBindMount, + ) + from jcloud.infrastructure.pagetype.virtual_machine_migration_mount.virtual_machine_migration_mount import ( + VirtualMachineMigrationMount, + ) + from jcloud.infrastructure.pagetype.virtual_machine_migration_step.virtual_machine_migration_step import ( + VirtualMachineMigrationStep, + ) + from jcloud.infrastructure.pagetype.virtual_machine_migration_volume.virtual_machine_migration_volume import ( + VirtualMachineMigrationVolume, + ) + + bind_mounts: DF.Table[VirtualMachineMigrationBindMount] + copied_virtual_machine: DF.Link | None + duration: DF.Duration | None + end: DF.Datetime | None + machine_type: DF.Data + mounts: DF.Table[VirtualMachineMigrationMount] + name: DF.Int | None + new_plan: DF.Link | None + parsed_devices: DF.Code | None + raw_devices: DF.Code | None + start: DF.Datetime | None + status: DF.Literal["Pending", "Running", "Success", "Failure"] + steps: DF.Table[VirtualMachineMigrationStep] + virtual_machine: DF.Link + virtual_machine_image: DF.Link + volumes: DF.Table[VirtualMachineMigrationVolume] + # end: auto-generated types + + def before_insert(self): + self.validate_aws_only() + self.validate_existing_migration() + self.add_steps() + self.add_volumes() + self.create_machine_copy() + self.set_new_plan() + + def after_insert(self): + self.add_devices() + self.set_default_mounts() + self.set_default_bind_mounts() + + def add_devices(self): + command = "lsblk --json --output name,type,uuid,mountpoint,size,label,fstype" + output = self.ansible_run(command)["output"] + + """Sample output of the command + { + "blockdevices": [ + {"name":"loop0", "type":"loop", "uuid":null, "mountpoint":"/snap/amazon-ssm-agent/9882", "size":"22.9M", "label":null, "fstype":null}, + {"name":"loop1", "type":"loop", "uuid":null, "mountpoint":"/snap/core20/2437", "size":"59.5M", "label":null, "fstype":null}, + {"name":"loop2", "type":"loop", "uuid":null, "mountpoint":"/snap/core22/1666", "size":"68.9M", "label":null, "fstype":null}, + {"name":"loop3", "type":"loop", "uuid":null, "mountpoint":"/snap/snapd/21761", "size":"33.7M", "label":null, "fstype":null}, + {"name":"loop4", "type":"loop", "uuid":null, "mountpoint":"/snap/lxd/29631", "size":"92M", "label":null, "fstype":null}, + {"name":"nvme0n1", "type":"disk", "uuid":null, "mountpoint":null, "size":"25G", "label":null, "fstype":null, + 
"children": [ + {"name":"nvme0n1p1", "type":"part", "uuid":"b8932e17-9ed7-47b7-8bf3-75ff6669e018", "mountpoint":"/", "size":"24.9G", "label":"cloudimg-rootfs", "fstype":"ext4"}, + {"name":"nvme0n1p15", "type":"part", "uuid":"7569-BCF0", "mountpoint":"/boot/efi", "size":"99M", "label":"UEFI", "fstype":"vfat"} + ] + }, + {"name":"nvme1n1", "type":"disk", "uuid":"41527fb0-f6e9-404e-9dba-0451dfa2195e", "mountpoint":"/opt/volumes/mariadb", "size":"10G", "label":null, "fstype":"ext4"} + ] + }""" + devices = json.loads(output)["blockdevices"] + self.raw_devices = json.dumps(devices, indent=2) + self.parsed_devices = json.dumps(self._parse_devices(devices), indent=2) + self.save() + + def _parse_devices(self, devices): + parsed = [] + for device in devices: + # We only care about disks and partitions + if device["type"] != "disk": + continue + + # Disk has partitions. e.g root volume + if "children" in device: + for partition in device["children"]: + if partition["type"] == "part": + parsed.append(partition) + else: + # Single partition. e.g data volume + parsed.append(device) + return parsed + + def set_default_mounts(self): + # Set root partition from old machine as the data partition in the new machine + + if self.mounts: + # We've already set the mounts + return + + parsed_devices = json.loads(self.parsed_devices) + device = find(parsed_devices, lambda x: x["mountpoint"] == "/") + if not device: + # No root volume found + return + + server_type = self.machine.get_server().pagetype + if server_type == "Server": + target_mount_point = "/opt/volumes/benches" + service = "docker" + elif server_type == "Database Server": + target_mount_point = "/opt/volumes/mariadb" + service = "mariadb" + else: + # Data volumes are only supported for Server and Database Server + return + + self.append( + "mounts", + { + "uuid": device["uuid"], + "source_mount_point": device["mountpoint"], + "target_mount_point": target_mount_point, + "service": service, + }, + ) + self.save() + + def set_default_bind_mounts(self): + # These are the same as Server.set_default_mount_points + if self.bind_mounts: + return + + server_type = self.machine.get_server().pagetype + if server_type == "Server": + self.append( + "bind_mounts", + { + "source_mount_point": "/opt/volumes/benches/home/jingrow/benches", + "service": "docker", + "mount_point_owner": "jingrow", + "mount_point_group": "jingrow", + }, + ) + elif server_type == "Database Server": + self.append( + "bind_mounts", + { + "source_mount_point": "/opt/volumes/mariadb/var/lib/mysql", + "service": "mariadb", + "mount_point_owner": "mysql", + "mount_point_group": "mysql", + }, + ) + # Don't worry about /etc/mysql + # It is going to be owned by root, uid=0 and gid=0 everywhere + else: + return + + self.save() + + def add_steps(self): + for step in self.migration_steps: + step.update({"status": "Pending"}) + self.append("steps", step) + + def add_volumes(self): + # Prepare volumes to attach to new machine + for index, volume in enumerate(self.machine.volumes): + device_name_index = chr(ord("f") + index) + self.append( + "volumes", + { + "status": "Unattached", + "volume_id": volume.volume_id, + # This is the device name that will be used in the new machine + # Only needed for the attach_volumes call + "device_name": f"/dev/sd{device_name_index}", + }, + ) + + def create_machine_copy(self): + # Create a copy of the current machine + # So we don't lose the instance ids + self.copied_virtual_machine = f"{self.virtual_machine}-copy" + + if jingrow.db.exists("Virtual Machine", 
self.copied_virtual_machine): + jingrow.delete_pg("Virtual Machine", self.copied_virtual_machine) + + copied_machine = jingrow.copy_pg(self.machine) + copied_machine.insert(set_name=self.copied_virtual_machine) + + def set_new_plan(self): + server = self.machine.get_server() + old_plan = jingrow.get_pg("Server Plan", server.plan) + matching_plans = jingrow.get_all( + "Server Plan", + { + "enabled": True, + "server_type": old_plan.server_type, + "cluster": old_plan.cluster, + "instance_type": self.machine_type, + "premium": old_plan.premium, + }, + pluck="name", + limit=1, + ) + if matching_plans: + self.new_plan = matching_plans[0] + + def validate_aws_only(self): + if self.machine.cloud_provider != "AWS EC2": + jingrow.throw("This feature is only available for AWS EC2") + + def validate_existing_migration(self): + if existing := jingrow.get_all( + self.pagetype, + { + "status": ("in", ["Pending", "Running"]), + "virtual_machine": self.virtual_machine, + "name": ("!=", self.name), + }, + pluck="status", + limit=1, + ): + jingrow.throw(f"An existing migration is already {existing[0].lower()}.") + + @property + def machine(self): + return jingrow.get_pg("Virtual Machine", self.virtual_machine) + + @property + def copied_machine(self): + return jingrow.get_pg("Virtual Machine", self.copied_virtual_machine) + + @property + def migration_steps(self): + Wait = True + NoWait = False + methods = [ + (self.update_partition_labels, NoWait), + (self.stop_machine, Wait), + (self.wait_for_machine_to_stop, Wait), + (self.disable_delete_on_termination_for_all_volumes, NoWait), + (self.terminate_previous_machine, Wait), + (self.wait_for_previous_machine_to_terminate, Wait), + (self.reset_virtual_machine_attributes, NoWait), + (self.provision_new_machine, NoWait), + (self.wait_for_machine_to_start, Wait), + (self.attach_volumes, NoWait), + (self.wait_for_machine_to_be_accessible, Wait), + (self.remove_old_host_key, NoWait), + (self.update_mounts, NoWait), + (self.update_bind_mount_permissions, NoWait), + (self.update_plan, NoWait), + (self.update_tls_certificate, NoWait), + ] + + steps = [] + for method, wait_for_completion in methods: + steps.append( + { + "step": method.__pg__, + "method": method.__name__, + "wait_for_completion": wait_for_completion, + } + ) + return steps + + def update_partition_labels(self) -> StepStatus: + "Update partition labels" + # Ubuntu images have labels for root (cloudimg-rootfs) and efi (UEFI) partitions + # Remove these labels from the old volume + # So the new machine doesn't mount these as root or efi partitions + # Important: Update fstab so we can still boot the old machine + parsed_devices = json.loads(self.parsed_devices) + for device in parsed_devices: + old_label = device["label"] + if not old_label: + continue + + labeler = {"ext4": "e2label", "vfat": "fatlabel"}[device["fstype"]] + new_label = {"cloudimg-rootfs": "old-rootfs", "UEFI": "OLD-UEFI"}[old_label] + commands = [ + # Reference: https://wiki.archlinux.org/title/Persistent_block_device_naming#by-label + f"{labeler} /dev/{device['name']} {new_label}", + f"sed -i 's/LABEL\\={old_label}/LABEL\\={new_label}/g' /etc/fstab", # Ansible implementation quirk + ] + if old_label == "UEFI": + # efi mounts have dirty bit set. This resets it. 
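+ # For the UEFI partition in the sample lsblk output above, this iteration's
+ # commands would expand to roughly (illustrative values only):
+ #   fatlabel /dev/nvme0n1p15 OLD-UEFI
+ #   sed -i 's/LABEL\=UEFI/LABEL\=OLD-UEFI/g' /etc/fstab
+ #   fsck -a /dev/nvme0n1p15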
+ commands.append(f"fsck -a /dev/{device['name']}") + + for command in commands: + result = self.ansible_run(command) + if result["status"] != "Success": + self.add_comment(text=f"Error updating partition labels: {result}") + return StepStatus.Failure + return StepStatus.Success + + def stop_machine(self) -> StepStatus: + "Stop machine" + machine = self.machine + machine.sync() + if machine.status == "Stopped": + return StepStatus.Success + if machine.status == "Pending": + return StepStatus.Pending + machine.stop() + return StepStatus.Success + + def wait_for_machine_to_stop(self) -> StepStatus: + "Wait for machine to stop" + # We need to make sure the machine is stopped before we proceed + machine = self.machine + machine.sync() + if machine.status == "Stopped": + return StepStatus.Success + return StepStatus.Pending + + def disable_delete_on_termination_for_all_volumes(self) -> StepStatus: + "Disable Delete-on-Termination for all volumes" + # After this we can safely terminate the instance without losing any data + copied_machine = self.copied_machine + if copied_machine.volumes: + copied_machine.disable_delete_on_termination_for_all_volumes() + return StepStatus.Success + + def terminate_previous_machine(self) -> StepStatus: + "Terminate previous machine" + copied_machine = self.copied_machine + if copied_machine.status == "Terminated": + return StepStatus.Success + if copied_machine.status == "Pending": + copied_machine.sync() + return StepStatus.Pending + + copied_machine.disable_termination_protection() + copied_machine.reload() + copied_machine.terminate() + return StepStatus.Success + + def wait_for_previous_machine_to_terminate(self) -> StepStatus: + "Wait for previous machine to terminate" + # Private ip address is released when the machine is terminated + copied_machine = self.copied_machine + copied_machine.sync() + if copied_machine.status == "Terminated": + return StepStatus.Success + return StepStatus.Pending + + def reset_virtual_machine_attributes(self) -> StepStatus: + "Reset virtual machine attributes" + machine = self.machine + machine.instance_id = None + machine.public_ip_address = None + machine.volumes = [] + + # Set new machine image and machine type + machine.virtual_machine_image = self.virtual_machine_image + machine.machine_image = None + machine.machine_type = self.machine_type + machine.root_disk_size = 10 # Default root disk size for new machines + machine.has_data_volume = True # VM Migration always adds a data volume + machine.save() + return StepStatus.Success + + def provision_new_machine(self) -> StepStatus: + "Provision new machine" + # Create new machine in place. So we retain Name, IP etc. 
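+ # instance_id, public IP address and volumes were cleared in
+ # reset_virtual_machine_attributes, so _provision_aws() presumably fills
+ # them back in on the same Virtual Machine document.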
+ self.machine._provision_aws() + return StepStatus.Success + + def wait_for_machine_to_start(self) -> StepStatus: + "Wait for new machine to start" + # We can't attach volumes to a machine that is not running + machine = self.machine + machine.sync() + if machine.status == "Running": + return StepStatus.Success + return StepStatus.Pending + + def attach_volumes(self) -> StepStatus: + "Attach volumes" + machine = self.machine + for volume in self.volumes: + try: + machine.client().attach_volume( + InstanceId=machine.instance_id, + Device=volume.device_name, + VolumeId=volume.volume_id, + ) + volume.status = "Attached" + except Exception as e: + self.add_comment(text=f"Error attaching volume {volume.volume_id}: {e}") + machine.sync() + return StepStatus.Success + + def wait_for_machine_to_be_accessible(self): + "Wait for machine to be accessible" + server = self.machine.get_server() + server.ping_ansible() + + plays = jingrow.get_all( + "Ansible Play", + {"server": server.name, "play": "Ping Server", "creation": (">", self.creation)}, + ["status"], + order_by="creation desc", + limit=1, + ) + if plays and plays[0].status == "Success": + return StepStatus.Success + return StepStatus.Pending + + def remove_old_host_key(self) -> StepStatus: + "Remove old host key" + command = f"ssh-keygen -R '{self.virtual_machine}'" + subprocess.check_call(shlex.split(command)) + return StepStatus.Success + + def update_mounts(self) -> StepStatus: + "Update mounts" + # Mount the volume using the old UUID + # Update fstab + # 1. Find mount matching the source mount point in fstab + # 2. Update UUID for this mountpoint + AllowFailure, DontAllowFailure = True, False + for mount in self.mounts: + escaped_mount_point = mount.target_mount_point.replace("/", "\\/") + # Reference: https://stackoverflow.com/questions/16637799/sed-error-invalid-reference-1-on-s-commands-rhs#comment88576787_16637847 + commands = [ + ( + f"sed -Ei 's/^UUID\\=.*\\s({escaped_mount_point}\\s.*$)/UUID\\={mount.uuid} \\1/g' /etc/fstab", + DontAllowFailure, + ), + ("systemctl daemon-reload", DontAllowFailure), + ] + if mount.service: + commands.append((f"systemctl start {mount.service}", AllowFailure)) + for command, allow_failure in commands: + result = self.ansible_run(command) + if allow_failure == DontAllowFailure and result["status"] != "Success": + self.add_comment(text=f"Error updating mounts: {result}") + return StepStatus.Failure + + return StepStatus.Success + + def update_bind_mount_permissions(self) -> StepStatus: + "Update bind mount permissions" + # linux uid / gid might not be the same in the new machine + for mount in self.bind_mounts: + commands = [ + f"chown -R {mount.mount_point_owner}:{mount.mount_point_group} {mount.source_mount_point}", + # The dependent service might have failed. 
Start it + f"systemctl start {mount.service}", + ] + for command in commands: + result = self.ansible_run(command) + if result["status"] != "Success": + self.add_comment(text=f"Error updating bind mount permissions: {result}") + return StepStatus.Failure + + return StepStatus.Success + + def update_plan(self) -> StepStatus: + "Update plan" + if self.new_plan: + server = self.machine.get_server() + plan = jingrow.get_pg("Server Plan", self.new_plan) + server._change_plan(plan) + return StepStatus.Success + + def update_tls_certificate(self) -> StepStatus: + "Update TLS certificate" + server = self.machine.get_server() + server.update_tls_certificate() + + plays = jingrow.get_all( + "Ansible Play", + {"server": server.name, "play": "Setup TLS Certificates", "creation": (">", self.creation)}, + ["status"], + order_by="creation desc", + limit=1, + ) + if not plays: + return StepStatus.Failure + if plays[0].status == "Success": + return StepStatus.Success + return StepStatus.Failure + + @jingrow.whitelist() + def execute(self): + self.status = "Running" + self.start = jingrow.utils.now_datetime() + self.save() + self.next() + + def fail(self) -> None: + self.status = "Failure" + for step in self.steps: + if step.status == "Pending": + step.status = "Skipped" + self.end = jingrow.utils.now_datetime() + self.duration = (self.end - self.start).total_seconds() + self.save() + + def succeed(self) -> None: + self.status = "Success" + self.end = jingrow.utils.now_datetime() + self.duration = (self.end - self.start).total_seconds() + self.save() + + @jingrow.whitelist() + def next(self, ignore_version=False) -> None: + self.status = "Running" + self.save(ignore_version=ignore_version) + next_step = self.next_step + + if not next_step: + # We've executed everything + self.succeed() + return + + jingrow.enqueue_pg( + self.pagetype, + self.name, + "execute_step", + step_name=next_step.name, + enqueue_after_commit=True, + at_front=True, + ) + + @jingrow.whitelist() + def force_continue(self) -> None: + # Mark all failed and skipped steps as pending + for step in self.steps: + if step.status in ("Failure", "Skipped"): + step.status = "Pending" + self.next() + + @jingrow.whitelist() + def force_fail(self) -> None: + # Mark all pending steps as failure + for step in self.steps: + if step.status == "Pending": + step.status = "Failure" + self.status = "Failure" + + @property + def next_step(self) -> VirtualMachineMigrationStep | None: + for step in self.steps: + if step.status == "Pending": + return step + return None + + @jingrow.whitelist() + def execute_step(self, step_name): + step = self.get_step(step_name) + + if not step.start: + step.start = jingrow.utils.now_datetime() + step.status = "Running" + ignore_version_while_saving = False + try: + result = getattr(self, step.method)() + step.status = result.name + if step.wait_for_completion: + step.attempts = step.attempts + 1 + if result == StepStatus.Pending: + # Wait some time before the next run + ignore_version_while_saving = True + time.sleep(1) + except Exception: + step.status = "Failure" + step.traceback = jingrow.get_traceback(with_context=True) + + step.end = jingrow.utils.now_datetime() + step.duration = (step.end - step.start).total_seconds() + + if step.status == "Failure": + self.fail() + else: + self.next(ignore_version_while_saving) + + def get_step(self, step_name) -> VirtualMachineMigrationStep | None: + for step in self.steps: + if step.name == step_name: + return step + return None + + def ansible_run(self, command): + virtual_machine_ip 
= jingrow.db.get_value("Virtual Machine", self.virtual_machine, "public_ip_address") + inventory = f"{virtual_machine_ip}," + result = AnsibleAdHoc(sources=inventory).run(command, self.name)[0] + self.add_command(command, result) + return result + + def add_command(self, command, result): + pretty_result = json.dumps(result, indent=2, sort_keys=True, default=str) + comment = f"
{command}
{pretty_result}
" + self.add_comment(text=comment) diff --git a/jcloud/infrastructure/pagetype/virtual_machine_migration_bind_mount/__init__.py b/jcloud/infrastructure/pagetype/virtual_machine_migration_bind_mount/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/infrastructure/pagetype/virtual_machine_migration_bind_mount/virtual_machine_migration_bind_mount.json b/jcloud/infrastructure/pagetype/virtual_machine_migration_bind_mount/virtual_machine_migration_bind_mount.json new file mode 100644 index 0000000..959f182 --- /dev/null +++ b/jcloud/infrastructure/pagetype/virtual_machine_migration_bind_mount/virtual_machine_migration_bind_mount.json @@ -0,0 +1,75 @@ +{ + "actions": [], + "autoname": "autoincrement", + "creation": "2024-12-10 10:47:58.739108", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "column_break_ygbk", + "source_mount_point", + "service", + "column_break_llpc", + "mount_point_owner", + "mount_point_group" + ], + "fields": [ + { + "fieldname": "column_break_ygbk", + "fieldtype": "Column Break" + }, + { + "columns": 2, + "default": "root", + "fieldname": "mount_point_owner", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Mount Point Owner", + "reqd": 1 + }, + { + "columns": 2, + "default": "root", + "fieldname": "mount_point_group", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Mount Point Group", + "reqd": 1 + }, + { + "columns": 1, + "fieldname": "service", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Service", + "reqd": 1 + }, + { + "fieldname": "column_break_llpc", + "fieldtype": "Column Break" + }, + { + "columns": 5, + "fieldname": "source_mount_point", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Source Mount Point", + "mandatory_depends_on": "eval: pg.mount_type === \"Bind\"", + "read_only_depends_on": "eval: pg.mount_type === \"Volume\"", + "reqd": 1 + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2024-12-10 10:54:24.835566", + "modified_by": "Administrator", + "module": "Infrastructure", + "name": "Virtual Machine Migration Bind Mount", + "naming_rule": "Autoincrement", + "owner": "Administrator", + "permissions": [], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/infrastructure/pagetype/virtual_machine_migration_bind_mount/virtual_machine_migration_bind_mount.py b/jcloud/infrastructure/pagetype/virtual_machine_migration_bind_mount/virtual_machine_migration_bind_mount.py new file mode 100644 index 0000000..bb70f58 --- /dev/null +++ b/jcloud/infrastructure/pagetype/virtual_machine_migration_bind_mount/virtual_machine_migration_bind_mount.py @@ -0,0 +1,29 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt + +# import jingrow +from __future__ import annotations + +from jingrow.model.document import Document + + +class VirtualMachineMigrationBindMount(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + mount_point_group: DF.Data + mount_point_owner: DF.Data + name: DF.Int | None + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + service: DF.Data + source_mount_point: DF.Data + # end: auto-generated types + + pass diff --git a/jcloud/infrastructure/pagetype/virtual_machine_migration_mount/__init__.py b/jcloud/infrastructure/pagetype/virtual_machine_migration_mount/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/infrastructure/pagetype/virtual_machine_migration_mount/virtual_machine_migration_mount.json b/jcloud/infrastructure/pagetype/virtual_machine_migration_mount/virtual_machine_migration_mount.json new file mode 100644 index 0000000..607bc1e --- /dev/null +++ b/jcloud/infrastructure/pagetype/virtual_machine_migration_mount/virtual_machine_migration_mount.json @@ -0,0 +1,56 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2024-11-26 15:14:54.328130", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "source_mount_point", + "target_mount_point", + "uuid", + "service" + ], + "fields": [ + { + "columns": 2, + "fieldname": "source_mount_point", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Source Mount Point" + }, + { + "columns": 3, + "fieldname": "target_mount_point", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Target Mount Point" + }, + { + "columns": 4, + "fieldname": "uuid", + "fieldtype": "Data", + "in_list_view": 1, + "label": "UUID" + }, + { + "columns": 1, + "fieldname": "service", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Service" + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2024-12-09 16:32:43.323509", + "modified_by": "Administrator", + "module": "Infrastructure", + "name": "Virtual Machine Migration Mount", + "owner": "Administrator", + "permissions": [], + "sort_field": "creation", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/infrastructure/pagetype/virtual_machine_migration_mount/virtual_machine_migration_mount.py b/jcloud/infrastructure/pagetype/virtual_machine_migration_mount/virtual_machine_migration_mount.py new file mode 100644 index 0000000..916016c --- /dev/null +++ b/jcloud/infrastructure/pagetype/virtual_machine_migration_mount/virtual_machine_migration_mount.py @@ -0,0 +1,28 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt + +# import jingrow +from __future__ import annotations + +from jingrow.model.document import Document + + +class VirtualMachineMigrationMount(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + service: DF.Data | None + source_mount_point: DF.Data | None + target_mount_point: DF.Data | None + uuid: DF.Data | None + # end: auto-generated types + + pass diff --git a/jcloud/infrastructure/pagetype/virtual_machine_migration_step/__init__.py b/jcloud/infrastructure/pagetype/virtual_machine_migration_step/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/infrastructure/pagetype/virtual_machine_migration_step/virtual_machine_migration_step.json b/jcloud/infrastructure/pagetype/virtual_machine_migration_step/virtual_machine_migration_step.json new file mode 100644 index 0000000..c3e11d9 --- /dev/null +++ b/jcloud/infrastructure/pagetype/virtual_machine_migration_step/virtual_machine_migration_step.json @@ -0,0 +1,120 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2024-09-19 16:13:15.738853", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "status_column", + "step", + "status", + "method", + "column_break_sobn", + "start", + "end", + "duration", + "column_break_uwto", + "wait_for_completion", + "attempts", + "section_break_jaoq", + "traceback" + ], + "fields": [ + { + "columns": 7, + "fieldname": "step", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Step", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "status_column", + "fieldtype": "Column Break", + "label": "Status" + }, + { + "columns": 2, + "default": "Pending", + "fieldname": "status", + "fieldtype": "Select", + "in_list_view": 1, + "label": "Status", + "options": "Pending\nRunning\nSkipped\nSuccess\nFailure", + "read_only": 1, + "reqd": 1 + }, + { + "columns": 1, + "default": "0", + "fieldname": "wait_for_completion", + "fieldtype": "Check", + "in_list_view": 1, + "label": "Wait for Completion", + "read_only": 1 + }, + { + "fieldname": "attempts", + "fieldtype": "Int", + "label": "Attempts", + "read_only": 1 + }, + { + "fieldname": "start", + "fieldtype": "Datetime", + "label": "Start", + "read_only": 1 + }, + { + "fieldname": "end", + "fieldtype": "Datetime", + "label": "End", + "read_only": 1 + }, + { + "fieldname": "duration", + "fieldtype": "Duration", + "label": "Duration", + "read_only": 1 + }, + { + "fieldname": "column_break_uwto", + "fieldtype": "Column Break" + }, + { + "fieldname": "column_break_sobn", + "fieldtype": "Column Break" + }, + { + "collapsible": 1, + "fieldname": "section_break_jaoq", + "fieldtype": "Section Break" + }, + { + "fieldname": "traceback", + "fieldtype": "Code", + "label": "Traceback", + "read_only": 1 + }, + { + "fieldname": "method", + "fieldtype": "Data", + "label": "Method", + "read_only": 1 + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2025-02-06 14:48:06.668009", + "modified_by": "Administrator", + "module": "Infrastructure", + "name": "Virtual Machine Migration Step", + "owner": "Administrator", + "permissions": [], + "sort_field": "creation", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/infrastructure/pagetype/virtual_machine_migration_step/virtual_machine_migration_step.py b/jcloud/infrastructure/pagetype/virtual_machine_migration_step/virtual_machine_migration_step.py new file mode 100644 index 0000000..8965faa --- /dev/null +++ b/jcloud/infrastructure/pagetype/virtual_machine_migration_step/virtual_machine_migration_step.py @@ -0,0 +1,33 @@ +# 
Copyright (c) 2024, JINGROW +# For license information, please see license.txt + +# import jingrow +from __future__ import annotations + +from jingrow.model.document import Document + + +class VirtualMachineMigrationStep(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + attempts: DF.Int + duration: DF.Duration | None + end: DF.Datetime | None + method: DF.Data | None + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + start: DF.Datetime | None + status: DF.Literal["Pending", "Running", "Skipped", "Success", "Failure"] + step: DF.Data + traceback: DF.Code | None + wait_for_completion: DF.Check + # end: auto-generated types + + pass diff --git a/jcloud/infrastructure/pagetype/virtual_machine_migration_volume/__init__.py b/jcloud/infrastructure/pagetype/virtual_machine_migration_volume/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/infrastructure/pagetype/virtual_machine_migration_volume/virtual_machine_migration_volume.json b/jcloud/infrastructure/pagetype/virtual_machine_migration_volume/virtual_machine_migration_volume.json new file mode 100644 index 0000000..71b659c --- /dev/null +++ b/jcloud/infrastructure/pagetype/virtual_machine_migration_volume/virtual_machine_migration_volume.json @@ -0,0 +1,50 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2024-09-19 16:14:46.399111", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "volume_id", + "status", + "device_name" + ], + "fields": [ + { + "fieldname": "volume_id", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Volume ID", + "reqd": 1 + }, + { + "fieldname": "device_name", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Device Name", + "reqd": 1 + }, + { + "default": "Unattached", + "fieldname": "status", + "fieldtype": "Select", + "in_list_view": 1, + "label": "Status", + "options": "Unattached\nAttached", + "reqd": 1 + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2024-09-19 18:23:29.834905", + "modified_by": "Administrator", + "module": "Infrastructure", + "name": "Virtual Machine Migration Volume", + "owner": "Administrator", + "permissions": [], + "sort_field": "creation", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/infrastructure/pagetype/virtual_machine_migration_volume/virtual_machine_migration_volume.py b/jcloud/infrastructure/pagetype/virtual_machine_migration_volume/virtual_machine_migration_volume.py new file mode 100644 index 0000000..f8e6128 --- /dev/null +++ b/jcloud/infrastructure/pagetype/virtual_machine_migration_volume/virtual_machine_migration_volume.py @@ -0,0 +1,25 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class VirtualMachineMigrationVolume(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + device_name: DF.Data + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + status: DF.Literal["Unattached", "Attached"] + volume_id: DF.Data + # end: auto-generated types + + pass diff --git a/jcloud/infrastructure/pagetype/virtual_machine_replacement/__init__.py b/jcloud/infrastructure/pagetype/virtual_machine_replacement/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/infrastructure/pagetype/virtual_machine_replacement/test_virtual_machine_replacement.py b/jcloud/infrastructure/pagetype/virtual_machine_replacement/test_virtual_machine_replacement.py new file mode 100644 index 0000000..759cd9a --- /dev/null +++ b/jcloud/infrastructure/pagetype/virtual_machine_replacement/test_virtual_machine_replacement.py @@ -0,0 +1,29 @@ +# Copyright (c) 2024, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests import IntegrationTestCase, UnitTestCase + +# On IntegrationTestCase, the pagetype test records and all +# link-field test record dependencies are recursively loaded +# Use these module variables to add/remove to/from that list +EXTRA_TEST_RECORD_DEPENDENCIES = [] # eg. ["User"] +IGNORE_TEST_RECORD_DEPENDENCIES = [] # eg. ["User"] + + +class UnitTestVirtualMachineReplacement(UnitTestCase): + """ + Unit tests for VirtualMachineReplacement. + Use this class for testing individual functions and methods. + """ + + pass + + +class IntegrationTestVirtualMachineReplacement(IntegrationTestCase): + """ + Integration tests for VirtualMachineReplacement. + Use this class for testing interactions between multiple components. + """ + + pass diff --git a/jcloud/infrastructure/pagetype/virtual_machine_replacement/virtual_machine_replacement.js b/jcloud/infrastructure/pagetype/virtual_machine_replacement/virtual_machine_replacement.js new file mode 100644 index 0000000..fa85b6d --- /dev/null +++ b/jcloud/infrastructure/pagetype/virtual_machine_replacement/virtual_machine_replacement.js @@ -0,0 +1,25 @@ +// Copyright (c) 2024, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Virtual Machine Replacement', { + refresh(frm) { + [ + [__('Start'), 'execute', frm.pg.status === 'Pending'], + [__('Force Continue'), 'force_continue', frm.pg.status === 'Failure'], + [__('Force Fail'), 'force_fail', frm.pg.status === 'Running'], + ].forEach(([label, method, condition]) => { + if (condition) { + frm.add_custom_button( + label, + () => { + jingrow.confirm( + `Are you sure you want to ${label.toLowerCase()}?`, + () => frm.call(method).then(() => frm.refresh()), + ); + }, + __('Actions'), + ); + } + }); + }, +}); diff --git a/jcloud/infrastructure/pagetype/virtual_machine_replacement/virtual_machine_replacement.json b/jcloud/infrastructure/pagetype/virtual_machine_replacement/virtual_machine_replacement.json new file mode 100644 index 0000000..0de4079 --- /dev/null +++ b/jcloud/infrastructure/pagetype/virtual_machine_replacement/virtual_machine_replacement.json @@ -0,0 +1,123 @@ +{ + "actions": [], + "autoname": "autoincrement", + "creation": "2024-12-15 14:26:26.029692", + "pagetype": "PageType", + "engine": "InnoDB", + "field_order": [ + "virtual_machine", + "status", + "column_break_pega", + "copied_virtual_machine", + "image", + "column_break_tjrh", + "start", + "end", + "duration", + "section_break_mjhg", + "steps" + ], + "fields": [ + { + "fieldname": "virtual_machine", 
+ "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Virtual Machine", + "link_filters": "[[\"Virtual Machine\",\"status\",\"not in\",[\"Draft\",\"Terminated\",null]]]", + "options": "Virtual Machine", + "reqd": 1, + "set_only_once": 1 + }, + { + "default": "Pending", + "fieldname": "status", + "fieldtype": "Select", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Status", + "options": "Pending\nRunning\nSuccess\nFailure", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "column_break_pega", + "fieldtype": "Column Break" + }, + { + "fieldname": "copied_virtual_machine", + "fieldtype": "Link", + "label": "Copied Virtual Machine", + "options": "Virtual Machine", + "read_only": 1 + }, + { + "fieldname": "column_break_tjrh", + "fieldtype": "Column Break" + }, + { + "fieldname": "start", + "fieldtype": "Datetime", + "label": "Start", + "read_only": 1 + }, + { + "fieldname": "end", + "fieldtype": "Datetime", + "label": "End", + "read_only": 1 + }, + { + "fieldname": "duration", + "fieldtype": "Duration", + "label": "Duration", + "read_only": 1 + }, + { + "fieldname": "section_break_mjhg", + "fieldtype": "Section Break", + "read_only": 1 + }, + { + "fieldname": "steps", + "fieldtype": "Table", + "label": "Steps", + "options": "Virtual Machine Migration Step" + }, + { + "fieldname": "image", + "fieldtype": "Link", + "label": "Image", + "options": "Virtual Machine Image", + "read_only": 1 + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2024-12-15 14:46:02.689500", + "modified_by": "Administrator", + "module": "Infrastructure", + "name": "Virtual Machine Replacement", + "naming_rule": "Autoincrement", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "show_title_field_in_link": 1, + "sort_field": "creation", + "sort_order": "DESC", + "states": [], + "title_field": "virtual_machine", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/infrastructure/pagetype/virtual_machine_replacement/virtual_machine_replacement.py b/jcloud/infrastructure/pagetype/virtual_machine_replacement/virtual_machine_replacement.py new file mode 100644 index 0000000..56ded8d --- /dev/null +++ b/jcloud/infrastructure/pagetype/virtual_machine_replacement/virtual_machine_replacement.py @@ -0,0 +1,336 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt +from __future__ import annotations + +import shlex +import subprocess +import time +from enum import Enum +from typing import TYPE_CHECKING + +import jingrow +from jingrow.model.document import Document + +if TYPE_CHECKING: + from jcloud.infrastructure.pagetype.virtual_machine_migration_step.virtual_machine_migration_step import ( + VirtualMachineMigrationStep, + ) + +StepStatus = Enum("StepStatus", ["Pending", "Running", "Success", "Failure"]) + + +class VirtualMachineReplacement(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + from jcloud.infrastructure.pagetype.virtual_machine_migration_step.virtual_machine_migration_step import ( + VirtualMachineMigrationStep, + ) + + copied_virtual_machine: DF.Link | None + duration: DF.Duration | None + end: DF.Datetime | None + image: DF.Link | None + name: DF.Int | None + start: DF.Datetime | None + status: DF.Literal["Pending", "Running", "Success", "Failure"] + steps: DF.Table[VirtualMachineMigrationStep] + virtual_machine: DF.Link + # end: auto-generated types + + def before_insert(self): + self.validate_aws_only() + self.validate_existing_replacement() + self.add_steps() + self.create_machine_copy() + + def add_steps(self): + for step in self.replacement_steps: + step.update({"status": "Pending"}) + self.append("steps", step) + + def create_machine_copy(self): + # Create a copy of the current machine + # So we don't lose the instance ids + self.copied_virtual_machine = f"{self.virtual_machine}-copy" + + if jingrow.db.exists("Virtual Machine", self.copied_virtual_machine): + jingrow.delete_pg("Virtual Machine", self.copied_virtual_machine) + + copied_machine = jingrow.copy_pg(self.machine) + copied_machine.insert(set_name=self.copied_virtual_machine) + + def validate_aws_only(self): + if self.machine.cloud_provider != "AWS EC2": + jingrow.throw("This feature is only available for AWS EC2") + + def validate_existing_replacement(self): + if existing := jingrow.get_all( + self.pagetype, + { + "status": ("in", ["Pending", "Running"]), + "virtual_machine": self.virtual_machine, + "name": ("!=", self.name), + }, + pluck="status", + limit=1, + ): + jingrow.throw(f"An existing replacement is already {existing[0].lower()}.") + + @property + def machine(self): + return jingrow.get_pg("Virtual Machine", self.virtual_machine) + + @property + def copied_machine(self): + return jingrow.get_pg("Virtual Machine", self.copied_virtual_machine) + + @property + def replacement_steps(self): + Wait = True + NoWait = False + methods = [ + (self.stop_machine, Wait), + (self.wait_for_machine_to_stop, Wait), + (self.create_image, NoWait), + (self.wait_for_image_to_be_available, Wait), + (self.disable_delete_on_termination_for_all_volumes, NoWait), + (self.terminate_previous_machine, Wait), + (self.wait_for_previous_machine_to_terminate, Wait), + (self.reset_virtual_machine_attributes, NoWait), + (self.provision_new_machine, NoWait), + (self.wait_for_machine_to_start, Wait), + (self.wait_for_machine_to_be_accessible, Wait), + (self.remove_old_host_key, NoWait), + ] + + steps = [] + for method, wait_for_completion in methods: + steps.append( + { + "step": method.__pg__, + "method": method.__name__, + "wait_for_completion": wait_for_completion, + } + ) + return steps + + def stop_machine(self) -> StepStatus: + "Stop machine" + machine = self.machine + machine.sync() + if machine.status == "Stopped": + return StepStatus.Success + if machine.status == "Pending": + return StepStatus.Pending + machine.stop() + return StepStatus.Success + + def wait_for_machine_to_stop(self) -> StepStatus: + "Wait for machine to stop" + # We need to make sure the machine is stopped before we proceed + machine = self.machine + machine.sync() + if machine.status == "Stopped": + return StepStatus.Success + return StepStatus.Pending + + def create_image(self) -> StepStatus: + "Create image" + machine = self.machine + self.image = machine.create_image(public=False) + return StepStatus.Success + + def 
wait_for_image_to_be_available(self) -> StepStatus: + "Wait for image to be available" + # We need to make sure image is ready before we proceed + # Otherwise we might not be able to create a new machine + image = jingrow.get_pg("Virtual Machine Image", self.image) + image.sync() + if image.status == "Available": + return StepStatus.Success + return StepStatus.Pending + + def disable_delete_on_termination_for_all_volumes(self) -> StepStatus: + "Disable Delete-on-Termination for all volumes" + # After this we can safely terminate the instance without losing any data + copied_machine = self.copied_machine + if copied_machine.volumes: + copied_machine.disable_delete_on_termination_for_all_volumes() + return StepStatus.Success + + def terminate_previous_machine(self) -> StepStatus: + "Terminate previous machine" + copied_machine = self.copied_machine + if copied_machine.status == "Terminated": + return StepStatus.Success + if copied_machine.status == "Pending": + return StepStatus.Pending + + copied_machine.disable_termination_protection() + copied_machine.reload() + copied_machine.terminate() + return StepStatus.Success + + def wait_for_previous_machine_to_terminate(self) -> StepStatus: + "Wait for previous machine to terminate" + # Private ip address is released when the machine is terminated + copied_machine = self.copied_machine + copied_machine.sync() + if copied_machine.status == "Terminated": + return StepStatus.Success + return StepStatus.Pending + + def reset_virtual_machine_attributes(self) -> StepStatus: + "Reset virtual machine attributes" + machine = self.machine + machine.instance_id = None + machine.public_ip_address = None + machine.volumes = [] + + # Set new machine image and machine type + machine.virtual_machine_image = self.image + machine.save() + return StepStatus.Success + + def provision_new_machine(self) -> StepStatus: + "Provision new machine" + # Create new machine in place. So we retain Name, IP etc. 
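+ # Assumption: _provision_aws() boots a fresh instance from the image set in the previous
+ # step (reset_virtual_machine_attributes), repopulating the instance_id, public_ip_address
+ # and volumes that were cleared there.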
+ self.machine._provision_aws() + return StepStatus.Success + + def wait_for_machine_to_start(self) -> StepStatus: + "Wait for new machine to start" + # We can't attach volumes to a machine that is not running + machine = self.machine + machine.sync() + if machine.status == "Running": + return StepStatus.Success + return StepStatus.Pending + + def wait_for_machine_to_be_accessible(self): + "Wait for machine to be accessible" + server = self.machine.get_server() + server.ping_ansible() + + plays = jingrow.get_all( + "Ansible Play", + {"server": server.name, "play": "Ping Server", "creation": (">", self.creation)}, + ["status"], + order_by="creation desc", + limit=1, + ) + if plays and plays[0].status == "Success": + return StepStatus.Success + return StepStatus.Pending + + def remove_old_host_key(self) -> StepStatus: + "Remove old host key" + command = f"ssh-keygen -R '{self.virtual_machine}'" + subprocess.check_call(shlex.split(command)) + return StepStatus.Success + + @jingrow.whitelist() + def execute(self): + self.status = "Running" + self.start = jingrow.utils.now_datetime() + self.save() + self.next() + + def fail(self) -> None: + self.status = "Failure" + for step in self.steps: + if step.status == "Pending": + step.status = "Skipped" + self.end = jingrow.utils.now_datetime() + self.duration = (self.end - self.start).total_seconds() + self.save() + + def succeed(self) -> None: + self.status = "Success" + self.end = jingrow.utils.now_datetime() + self.duration = (self.end - self.start).total_seconds() + self.save() + + @jingrow.whitelist() + def next(self, ignore_version=False) -> None: + self.status = "Running" + self.save(ignore_version=ignore_version) + next_step = self.next_step + + if not next_step: + # We've executed everything + self.succeed() + return + + jingrow.enqueue_pg( + self.pagetype, + self.name, + "execute_step", + step_name=next_step.name, + enqueue_after_commit=True, + at_front=True, + ) + + @jingrow.whitelist() + def force_continue(self) -> None: + # Mark all failed and skipped steps as pending + for step in self.steps: + if step.status in ("Failure", "Skipped"): + step.status = "Pending" + self.next() + + @jingrow.whitelist() + def force_fail(self) -> None: + # Mark all pending steps as failure + for step in self.steps: + if step.status == "Pending": + step.status = "Failure" + self.status = "Failure" + + @property + def next_step(self) -> VirtualMachineMigrationStep | None: + for step in self.steps: + if step.status == "Pending": + return step + return None + + @jingrow.whitelist() + def execute_step(self, step_name): + step = self.get_step(step_name) + + if not step.start: + step.start = jingrow.utils.now_datetime() + step.status = "Running" + ignore_version_while_saving = False + try: + result = getattr(self, step.method)() + step.status = result.name + if step.wait_for_completion: + step.attempts = step.attempts + 1 + if result == StepStatus.Pending: + # Wait some time before the next run + ignore_version_while_saving = True + time.sleep(1) + except Exception: + step.status = "Failure" + step.traceback = jingrow.get_traceback(with_context=True) + + step.end = jingrow.utils.now_datetime() + step.duration = (step.end - step.start).total_seconds() + + if step.status == "Failure": + self.fail() + else: + self.next(ignore_version_while_saving) + + def get_step(self, step_name) -> VirtualMachineMigrationStep | None: + for step in self.steps: + if step.name == step_name: + return step + return None diff --git a/jcloud/install.py b/jcloud/install.py new file mode 
100644 index 0000000..776557c --- /dev/null +++ b/jcloud/install.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2019, JINGROW +# For license information, please see license.txt + + +import jingrow + + +def after_install(): + create_administrator_team() + create_default_cluster() + + +def create_administrator_team(): + administrator_team = jingrow.get_pg( + { + "pagetype": "Team", + "name": "Administrator", + "user": "Administrator", + "enabled": 1, + "free_account": 1, + "team_members": [{"user": "Administrator"}], + } + ) + administrator_team.insert() + + +def create_default_cluster(): + default_cluster = jingrow.get_pg({"pagetype": "Cluster", "name": "Default"}) + default_cluster.insert() diff --git a/jcloud/jcloud/__init__.py b/jcloud/jcloud/__init__.py new file mode 100644 index 0000000..49e0fc1 --- /dev/null +++ b/jcloud/jcloud/__init__.py @@ -0,0 +1 @@ +__version__ = "0.7.0" diff --git a/jcloud/jcloud/audit.py b/jcloud/jcloud/audit.py new file mode 100644 index 0000000..b0a6bb1 --- /dev/null +++ b/jcloud/jcloud/audit.py @@ -0,0 +1,561 @@ +"""Functions for automated audit of jingrow cloud systems.""" + +from __future__ import annotations + +import json +from datetime import datetime, timedelta + +import jingrow +from jingrow.utils import rounded + +from jcloud.agent import Agent +from jcloud.jcloud.pagetype.server.server import Server +from jcloud.jcloud.pagetype.subscription.subscription import ( + created_usage_records, + paid_plans, + sites_with_free_hosting, +) + + +class Audit: + """ + Base class for all types of Audit. + + `audit_type` member variable needs to be set to log + """ + + audit_type = None + + def log( + self, log: dict, status: str, telegram_group: str | None = None, telegram_topic: str | None = None + ): + jingrow.get_pg( + { + "pagetype": "Audit Log", + "log": json.dumps(log, indent=2), + "status": status, + "audit_type": self.audit_type, + "telegram_group": telegram_group, + "telegram_topic": telegram_topic, + } + ).insert() + + +def get_benches_in_server(server: str) -> dict: + agent = Agent(server) + return agent.get("/benches") + + +class BenchFieldCheck(Audit): + """Audit to check fields of site in jcloud are correct.""" + + audit_type = "Bench Field Check" + + def __init__(self): + log = {} + self.server_map = {} + self.jcloud_map = {} + status = "Success" + + self.generate_server_map() + self.generate_jcloud_map() + + log = { + "Summary": None, + "potential_fixes": {}, + "sites_only_on_jcloud": self.get_sites_only_on_jcloud(), + "sites_only_on_server": self.get_sites_only_on_server(), + "sites_on_multiple_benches": self.get_sites_on_multiple_benches(), + } + if any(log.values()): + status = "Failure" + + log["potential_fixes"] = self.get_potential_fixes() + log["Summary"] = { + "Potential fixes": sum(len(sites) for sites in log["potential_fixes"].values()), + "Sites only on jcloud": len(log["sites_only_on_jcloud"]), + "Sites only on server": len(log["sites_only_on_server"]), + "Sites on multiple benches": len(log["sites_on_multiple_benches"]), + } + self.apply_potential_fixes() + + self.log(log, status) + + def generate_server_map(self): + servers = Server.get_all_primary_prod() + for server in servers: + benches = get_benches_in_server(server) + if not benches: + continue + for bench_name, bench_desc in benches.items(): + for site in bench_desc["sites"]: + self.server_map.setdefault(site, []).append(bench_name) + + def generate_jcloud_map(self): + jingrow.db.commit() + sites = jingrow.get_all("Site", ["name", "bench"], {"status": ("!=", 
"Archived")}) + self.jcloud_map = {site.name: site.bench for site in sites} + + def get_sites_only_on_jcloud(self): + sites = [] + for site, _ in self.jcloud_map.items(): + if site not in self.server_map: + sites.append(site) + return sites + + def get_sites_only_on_server(self): + sites = {} + for site, benches in self.server_map.items(): + if site not in self.jcloud_map: + sites[site] = benches[0] if len(benches) == 1 else benches + return sites + + def get_sites_on_multiple_benches(self): + sites = {} + for site, benches in self.server_map.items(): + if len(benches) > 1: + sites[site] = benches + return sites + + def get_potential_fixes(self): + def bench_field_updates(): + fixes = {} + for site, bench in self.jcloud_map.items(): + server_benches = self.server_map.get(site, []) + if len(server_benches) == 1 and server_benches[0] != bench: + fixes[site] = (bench, server_benches[0]) + return fixes + + return {"bench_field_updates": bench_field_updates()} + + def is_site_updating_or_moving(self, site): + """ + During SiteUpdate or SiteMigration, the status of the site is changed to Updating or Pending + """ + return jingrow.db.get_value("Site", site, "status", for_update=True).endswith("ing") + + def apply_potential_fixes(self): + fixes = self.get_potential_fixes() + for site, benches in fixes["bench_field_updates"].items(): + if self.is_site_updating_or_moving(site): + continue + jingrow.db.set_value("Site", site, "bench", benches[1]) + jingrow.db.commit() + + +class AppServerReplicaDirsCheck(Audit): + audit_type = "App Server Replica Dirs Check" + + def __init__(self): + log = {} + status = "Success" + replicas_and_primary = jingrow.get_all( + "Server", {"is_replication_setup": True}, ["name", "primary"], as_list=True + ) + for replica, primary in replicas_and_primary: + replica_benches = get_benches_in_server(replica) + primary_benches = get_benches_in_server(primary) + for bench, bench_desc in primary_benches.items(): + replica_bench_desc = replica_benches.get(bench) + if not replica_bench_desc: + status = "Failure" + log[bench] = {"Sites on primary only": bench_desc["sites"]} + continue + + sites_on_primary_only = list(set(bench_desc["sites"]) - set(replica_bench_desc["sites"])) + if sites_on_primary_only: + status = "Failure" + log[bench] = {"Sites on primary only": sites_on_primary_only} + self.log(log, status) + + +class BackupRecordCheck(Audit): + """Check if latest automated backup records for sites are created.""" + + audit_type = "Backup Record Check" + list_key = "Sites with no backup yesterday" + backup_summary = "Backup Summary" + + def get_sites_with_backup_in_interval(self, trial_plans: tuple[str]): + cond_filters = f" AND site.plan NOT IN {trial_plans}" if trial_plans else "" + return set( + jingrow.db.sql_list( + f""" + SELECT + site.name + FROM + `tabSite Backup` site_backup + JOIN + `tabSite` site + ON + site_backup.site = site.name + WHERE + site.status = "Active" and + site_backup.owner = "Administrator" and + DATE(site_backup.creation) >= "{self.yesterday}" + {cond_filters} + """ + ) + ) + + def get_all_sites(self, trial_plans: tuple[str]): + filters = { + "status": "Active", + "creation": ("<=", datetime.combine(self.yesterday, datetime.min.time())), + "is_standby": False, + "skip_scheduled_backups": False, + } + if trial_plans: + filters.update({"plan": ("not in", trial_plans)}) + return set( + jingrow.get_all( + "Site", + filters=filters, + pluck="name", + ) + ) + + def get_sites_activated_yesterday(self): + from pypika import functions as fn + + site_activites = 
jingrow.qb.PageType("Site Activity") + return set( + [ + t[0] + for t in jingrow.qb.from_(site_activites) + .select(site_activites.site) + .where(site_activites.action == "Activate Site") + .where(fn.Date(site_activites.creation) >= self.yesterday) + .run() + ] + ) + + def __init__(self): + log = {self.list_key: [], self.backup_summary: {}} + self.yesterday = jingrow.utils.now_datetime().date() - timedelta(days=1) + + trial_plans = tuple(jingrow.get_all("Site Plan", dict(is_trial_plan=1), pluck="name")) + sites_with_backup_in_interval = self.get_sites_with_backup_in_interval(trial_plans) + all_sites = self.get_all_sites(trial_plans) + sites_without_backups = ( + all_sites - sites_with_backup_in_interval - self.get_sites_activated_yesterday() + ) + try: + success_rate = (len(sites_with_backup_in_interval) / len(all_sites)) * 100 + except ZeroDivisionError: + success_rate = 0 + summary = { + "Successful Backups": len(sites_with_backup_in_interval), + "Failed Backups": len(sites_without_backups), + "Total Active Sites": len(all_sites), + "Success Rate": rounded(success_rate, 1), + } + log[self.backup_summary] = summary + + if sites_without_backups: + log[self.list_key] = list(sites_without_backups) + self.log(log, "Failure") + else: + self.log(log, "Success") + + +class OffsiteBackupCheck(Audit): + """Check if files for offsite backup exists on the offsite backup provider.""" + + audit_type = "Offsite Backup Check" + list_key = "Offsite Backup Remote Files unavailable in remote" + + def _get_all_files_in_s3(self) -> list[str]: + all_files = [] + settings = jingrow.get_single("Jcloud Settings") + s3 = settings.boto3_offsite_backup_session.resource("s3") + for s3_object in s3.Bucket(settings.aws_s3_bucket).objects.all(): + all_files.append(s3_object.key) + return all_files + + def __init__(self): + log = {self.list_key: []} + status = "Success" + all_files = self._get_all_files_in_s3() + offsite_remote_files = jingrow.db.sql( + """ + SELECT + remote_file.name, remote_file.file_path, site_backup.site + FROM + `tabRemote File` remote_file + JOIN + `tabSite Backup` site_backup + ON + site_backup.site = remote_file.site + WHERE + site_backup.status = "Success" and + site_backup.files_availability = "Available" and + site_backup.offsite = True + """, + as_dict=True, + ) + for remote_file in offsite_remote_files: + if remote_file["file_path"] not in all_files: + status = "Failure" + log[self.list_key].append(remote_file) + self.log(log, status) + + +def get_teams_with_paid_sites(): + return jingrow.get_all( + "Site", + { + "status": ("not in", ("Archived", "Suspended", "Inactive")), + "free": False, + "plan": ("in", paid_plans()), + "trial_end_date": ("is", "not set"), + }, + pluck="team", + distinct=True, + ) + + +class BillingAudit(Audit): + """Daily audit of billing related checks""" + + audit_type = "Billing Audit" + + def __init__(self): + self.paid_plans = paid_plans() + self.teams_with_paid_sites = jingrow.get_all( + "Site", + { + "status": ("not in", ("Archived", "Suspended", "Inactive")), + "free": False, + "plan": ("in", self.paid_plans), + "trial_end_date": ("is", "not set"), + }, + pluck="team", + distinct=True, + ) + audits = { + "Subscriptions with no usage records created": self.subscriptions_without_usage_record, + "Disabled teams with active sites": self.disabled_teams_with_active_sites, + "Sites active after trial": self.free_sites_after_trial, + "Teams with active sites and unpaid Invoices": self.teams_with_active_sites_and_unpaid_invoices, + "Prepaid Unpaid Invoices with Stripe 
Invoice ID set": self.prepaid_unpaid_invoices_with_stripe_invoice_id_set, + "Subscriptions with duplicate usage records created": self.subscriptions_with_duplicate_usage_records, + } + + log = {a: [] for a in audits} + status = "Success" + for audit_name in audits: + result = audits[audit_name]() + log[audit_name] += result + status = "Failure" if len(result) > 0 else status + + self.log(log=log, status=status, telegram_group="Billing", telegram_topic="Audits") + + def subscriptions_without_usage_record(self): + free_sites = sites_with_free_hosting() + free_teams = jingrow.get_all("Team", filters={"free_account": True, "enabled": True}, pluck="name") + + return jingrow.db.get_all( + "Subscription", + filters={ + "team": ("not in", free_teams), + "enabled": True, + "plan": ("in", self.paid_plans), + "name": ("not in", created_usage_records(free_sites, jingrow.utils.today())), + "document_name": ("not in", free_sites), + }, + pluck="name", + ) + + def subscriptions_with_duplicate_usage_records(self): + data = jingrow.db.sql( + """ + SELECT subscription, Count(name) as count + FROM `tabUsage Record` as UR + WHERE UR.date = CURDATE() + AND UR.docstatus = 1 + AND UR.plan NOT LIKE '%Marketplace%' + GROUP BY UR.document_name, UR.plan, UR.team + HAVING count > 1 + ORDER BY count DESC + """, + as_dict=True, + ) + + if not data: + return data + + result = [] + for d in data: + result.append(d.subscription) + return result + + def disabled_teams_with_active_sites(self): + return jingrow.get_all( + "Team", + {"name": ("in", self.teams_with_paid_sites), "enabled": False}, + pluck="name", + ) + + def free_sites_after_trial(self): + today = jingrow.utils.today() + free_teams = jingrow.get_all("Team", {"free_account": 1}, pluck="name") + + filters = { + "trial_end_date": ["is", "set"], + "is_standby": 0, + "plan": ["like", "%Trial%"], + "status": ("in", ["Active", "Broken"]), + "team": ("not in", free_teams), + } + + sites = jingrow.db.get_all("Site", filters=filters, fields=["name", "team"], pluck="name") + + # Flake doesn't allow use of duplicate keys in same dictionary + return jingrow.get_all("Site", {"trial_end_date": ["<", today], "name": ("in", sites)}, pluck="name") + + def teams_with_active_sites_and_unpaid_invoices(self): + today = jingrow.utils.getdate() + # last day of previous month + last_day = jingrow.utils.get_last_day(jingrow.utils.add_months(today, -1)) + + plan = jingrow.qb.PageType("Site Plan") + query = ( + jingrow.qb.from_(plan).select(plan.name).where((plan.enabled == 1) & (plan.is_jingrow_plan == 1)) + ).run(as_dict=True) + jingrow_plans = [d.name for d in query] + + invoice = jingrow.qb.PageType("Invoice") + team = jingrow.qb.PageType("Team") + site = jingrow.qb.PageType("Site") + + query = ( + jingrow.qb.from_(invoice) + .inner_join(team) + .on(invoice.team == team.name) + .inner_join(site) + .on(site.team == team.name) + .where( + (site.status).isin(["Active", "Inactive"]) + & (team.enabled == 1) + & (team.free_account == 0) + & (invoice.status == "Unpaid") + & (invoice.docstatus < 2) + & (invoice.type == "Subscription") + & (site.free == 0) + & (site.plan).notin(jingrow_plans) + & (invoice.period_end <= last_day) + ) + .select(invoice.team) + .distinct() + ).run(as_dict=True) + + return [d.team for d in query] + + def prepaid_unpaid_invoices_with_stripe_invoice_id_set(self): + active_teams = jingrow.get_all("Team", {"enabled": 1, "free_account": 0}, pluck="name") + return jingrow.get_all( + "Invoice", + { + "status": "Unpaid", + "payment_mode": "Prepaid Credits", + "type": 
"Subscription", + "team": ("in", active_teams), + "stripe_invoice_id": ("is", "set"), + }, + pluck="name", + ) + + +class PartnerBillingAudit(Audit): + """Daily Audit of Partner Billings""" + + audit_type = "Partner Billing Audit" + + def __init__(self): + audits = { + "Teams with Paid By Partner mode and billing team not set": self.teams_with_paid_by_partner_and_billing_team_not_set, + "Paid By Partner Teams with Unpaid Invoices": self.paid_by_partner_teams_with_unpaid_invoices, + } + + log = {a: [] for a in audits} + status = "Success" + for audit_name in audits: + result = audits[audit_name]() + log[audit_name] += result + status = "Failure" if len(result) > 0 else status + + self.log(log=log, status=status, telegram_group="Billing", telegram_topic="Audits") + + def teams_with_paid_by_partner_and_billing_team_not_set(self): + return jingrow.get_all( + "Team", + { + "enabled": True, + "payment_mode": "Paid By Partner", + "billing_team": ("is", "not set"), + }, + pluck="name", + ) + + def paid_by_partner_teams_with_unpaid_invoices(self): + paid_by_partner_teams = jingrow.get_all( + "Team", + { + "enabled": True, + "payment_mode": "Paid By Partner", + }, + pluck="name", + ) + return jingrow.get_all( + "Invoice", + { + "status": "Unpaid", + "team": ("in", paid_by_partner_teams), + "type": "Subscription", + }, + pluck="name", + ) + + +def check_bench_fields(): + BenchFieldCheck() + + +def check_backup_records(): + BackupRecordCheck() + + +def check_offsite_backups(): + OffsiteBackupCheck() + + +def check_app_server_replica_benches(): + AppServerReplicaDirsCheck() + + +def billing_audit(): + BillingAudit() + + +def partner_billing_audit(): + PartnerBillingAudit() + + +def suspend_sites_with_disabled_team(): + site = jingrow.qb.PageType("Site") + team = jingrow.qb.PageType("Team") + + disabled_teams_with_active_sites = ( + jingrow.qb.from_(site) + .inner_join(team) + .on(team.name == site.team) + .where((site.status).isin(["Active", "Broken", "Pending"]) & (team.enabled == 0)) + .select(site.team) + .distinct() + .run(pluck="team") + ) + + if disabled_teams_with_active_sites: + for team in disabled_teams_with_active_sites: + jingrow.get_pg("Team", team).suspend_sites(reason="Disabled Team") diff --git a/jcloud/jcloud/cleanup.py b/jcloud/jcloud/cleanup.py new file mode 100644 index 0000000..17e0328 --- /dev/null +++ b/jcloud/jcloud/cleanup.py @@ -0,0 +1,52 @@ +import jingrow + + +def unlink_remote_files_from_site(): + """Remove any remote files attached to the Site pg if older than 12 hours.""" + half_day = jingrow.utils.add_to_date(None, hours=-12) + or_filters = [ + ["remote_config_file", "!=", ""], + ["remote_database_file", "!=", ""], + ["remote_public_file", "!=", ""], + ["remote_private_file", "!=", ""], + ] + filters = [ + ["creation", "<", half_day], + ["status", "not in", "Pending,Installing,Updating,Active,Broken"], + ] + fields = [ + "remote_config_file", + "remote_database_file", + "remote_public_file", + "remote_private_file", + ] + sites = jingrow.get_all( + "Site", fields=["name", *fields], filters=filters, or_filters=or_filters, pluck="name" + ) + + # s3 uploads.jingrow.cloud has a 1 day expiry rule for all objects, so we'll unset those files here + for remote_file_type in fields: + jingrow.db.set_value("Site", {"name": ("in", sites)}, remote_file_type, None) + + +def reset_large_output_fields_from_ansible_tasks(): + # These ansible tasks can create very large output + # Cause table bloat, backup failure etc + # The output of these tasks isn't all that useful after some time + 
TASKS = [ + "Move Backup Directory to MariaDB Data Directory", + "Prepare MariaBackup", + "RSync Backup Directory From Primary", + "Run MariaDB Upgrade", + "Run migrate on site", + "Start MariaBackup", + ] + + tasks = jingrow.get_all( + "Ansible Task", {"task": ("in", TASKS), "creation": ("<=", jingrow.utils.add_days(None, -2))}, ["name"] + ) + for task in tasks: + jingrow.db.set_value( + "Ansible Task", task.name, {"output": "", "result": "", "exception": "", "error": ""} + ) + jingrow.db.commit() diff --git a/jcloud/jcloud/custom/address.json b/jcloud/jcloud/custom/address.json new file mode 100644 index 0000000..06b3346 --- /dev/null +++ b/jcloud/jcloud/custom/address.json @@ -0,0 +1,266 @@ +{ + "custom_fields": [ + { + "_assign": null, + "_comments": null, + "_liked_by": null, + "_user_tags": null, + "allow_in_quick_entry": 0, + "allow_on_submit": 0, + "bold": 0, + "collapsible": 0, + "collapsible_depends_on": null, + "columns": 0, + "creation": "2020-08-11 16:39:34.959731", + "default": null, + "depends_on": null, + "description": null, + "docstatus": 0, + "dt": "Address", + "fetch_from": null, + "fetch_if_empty": 0, + "fieldname": "gstin", + "fieldtype": "Data", + "hidden": 0, + "hide_border": 0, + "hide_days": 0, + "hide_seconds": 0, + "idx": 14, + "ignore_user_permissions": 0, + "ignore_xss_filter": 0, + "in_global_search": 0, + "in_list_view": 0, + "in_preview": 0, + "in_standard_filter": 0, + "insert_after": "fax", + "label": "GSTIN", + "length": 0, + "mandatory_depends_on": null, + "modified": "2020-08-11 16:39:34.959731", + "modified_by": "faris@jingrow.com", + "name": "Address-gstin", + "no_copy": 0, + "options": null, + "owner": "faris@jingrow.com", + "parent": null, + "parentfield": null, + "parenttype": null, + "permlevel": 0, + "precision": "", + "print_hide": 0, + "print_hide_if_no_value": 0, + "print_width": null, + "read_only": 0, + "read_only_depends_on": null, + "report_hide": 0, + "reqd": 0, + "search_index": 0, + "translatable": 1, + "unique": 0, + "width": null + } + ], + "custom_perms": [ + { + "_assign": null, + "_comments": null, + "_liked_by": null, + "_user_tags": null, + "amend": 0, + "cancel": 0, + "create": 1, + "creation": "2013-01-10 16:34:32", + "delete": 0, + "docstatus": 0, + "email": 1, + "export": 0, + "idx": 2, + "if_owner": 0, + "import": 0, + "modified": "2020-08-26 20:09:40.230586", + "modified_by": "faris@jingrow.com", + "name": "61826a5bb9", + "owner": "Administrator", + "parent": "Address", + "parentfield": "permissions", + "parenttype": "PageType", + "permlevel": 0, + "print": 1, + "read": 1, + "report": 1, + "role": "Purchase User", + "set_user_permissions": 0, + "share": 1, + "submit": 0, + "write": 1 + }, + { + "_assign": null, + "_comments": null, + "_liked_by": null, + "_user_tags": null, + "amend": 0, + "cancel": 0, + "create": 1, + "creation": "2013-01-10 16:34:32", + "delete": 1, + "docstatus": 0, + "email": 1, + "export": 1, + "idx": 5, + "if_owner": 0, + "import": 1, + "modified": "2020-08-26 20:09:40.339659", + "modified_by": "faris@jingrow.com", + "name": "e771a77459", + "owner": "Administrator", + "parent": "Address", + "parentfield": "permissions", + "parenttype": "PageType", + "permlevel": 0, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "set_user_permissions": 1, + "share": 1, + "submit": 0, + "write": 1 + }, + { + "_assign": null, + "_comments": null, + "_liked_by": null, + "_user_tags": null, + "amend": 0, + "cancel": 0, + "create": 1, + "creation": "2013-01-10 16:34:32", + "delete": 0, + 
"docstatus": 0, + "email": 1, + "export": 0, + "idx": 3, + "if_owner": 0, + "import": 0, + "modified": "2020-08-26 20:09:40.356382", + "modified_by": "faris@jingrow.com", + "name": "62f8500bfe", + "owner": "Administrator", + "parent": "Address", + "parentfield": "permissions", + "parenttype": "PageType", + "permlevel": 0, + "print": 1, + "read": 1, + "report": 1, + "role": "Maintenance User", + "set_user_permissions": 0, + "share": 1, + "submit": 0, + "write": 1 + }, + { + "_assign": null, + "_comments": null, + "_liked_by": null, + "_user_tags": null, + "amend": 0, + "cancel": 0, + "create": 1, + "creation": "2013-01-10 16:34:32", + "delete": 0, + "docstatus": 0, + "email": 1, + "export": 0, + "idx": 1, + "if_owner": 0, + "import": 0, + "modified": "2020-08-26 20:09:40.375437", + "modified_by": "faris@jingrow.com", + "name": "a5cb85bb0b", + "owner": "Administrator", + "parent": "Address", + "parentfield": "permissions", + "parenttype": "PageType", + "permlevel": 0, + "print": 1, + "read": 1, + "report": 1, + "role": "Sales User", + "set_user_permissions": 0, + "share": 1, + "submit": 0, + "write": 1 + }, + { + "_assign": null, + "_comments": null, + "_liked_by": null, + "_user_tags": null, + "amend": 0, + "cancel": 0, + "create": 1, + "creation": "2013-01-10 16:34:32", + "delete": 0, + "docstatus": 0, + "email": 1, + "export": 0, + "idx": 4, + "if_owner": 0, + "import": 0, + "modified": "2020-08-26 20:09:40.393834", + "modified_by": "faris@jingrow.com", + "name": "f8a821ccac", + "owner": "Administrator", + "parent": "Address", + "parentfield": "permissions", + "parenttype": "PageType", + "permlevel": 0, + "print": 1, + "read": 1, + "report": 1, + "role": "Accounts User", + "set_user_permissions": 0, + "share": 1, + "submit": 0, + "write": 1 + }, + { + "_assign": null, + "_comments": null, + "_liked_by": null, + "_user_tags": null, + "amend": 0, + "cancel": 0, + "create": 1, + "creation": "2020-08-26 20:09:40.417634", + "delete": 1, + "docstatus": 0, + "email": 0, + "export": 1, + "idx": 0, + "if_owner": 0, + "import": 0, + "modified": "2020-08-26 20:09:40.417634", + "modified_by": "faris@jingrow.com", + "name": "8297ade273", + "owner": "faris@jingrow.com", + "parent": "Address", + "parentfield": "permissions", + "parenttype": "PageType", + "permlevel": 0, + "print": 0, + "read": 1, + "report": 0, + "role": "Jcloud Admin", + "set_user_permissions": 0, + "share": 0, + "submit": 0, + "write": 1 + } + ], + "pagetype": "Address", + "property_setters": [], + "sync_on_migrate": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/custom/country.json b/jcloud/jcloud/custom/country.json new file mode 100644 index 0000000..a4da3d1 --- /dev/null +++ b/jcloud/jcloud/custom/country.json @@ -0,0 +1,68 @@ +{ + "custom_fields": [ + { + "_assign": null, + "_comments": null, + "_liked_by": null, + "_user_tags": null, + "allow_in_quick_entry": 0, + "allow_on_submit": 0, + "bold": 0, + "collapsible": 0, + "collapsible_depends_on": null, + "columns": 0, + "creation": "2021-04-26 17:02:54.786703", + "default": null, + "depends_on": null, + "description": null, + "docstatus": 0, + "dt": "Country", + "fetch_from": null, + "fetch_if_empty": 0, + "fieldname": "region", + "fieldtype": "Link", + "hidden": 0, + "hide_border": 0, + "hide_days": 0, + "hide_seconds": 0, + "idx": 1, + "ignore_user_permissions": 0, + "ignore_xss_filter": 0, + "in_global_search": 0, + "in_list_view": 0, + "in_preview": 0, + "in_standard_filter": 0, + "insert_after": null, + "label": "Region", + "length": 0, + "mandatory_depends_on": 
null, + "modified": "2021-04-26 17:02:54.786703", + "modified_by": "Administrator", + "name": "Country-region", + "no_copy": 0, + "non_negative": 0, + "options": "Region", + "owner": "Administrator", + "parent": null, + "parentfield": null, + "parenttype": null, + "permlevel": 0, + "precision": "", + "print_hide": 0, + "print_hide_if_no_value": 0, + "print_width": null, + "read_only": 0, + "read_only_depends_on": null, + "report_hide": 0, + "reqd": 0, + "search_index": 0, + "translatable": 0, + "unique": 0, + "width": null + } + ], + "custom_perms": [], + "pagetype": "Country", + "property_setters": [], + "sync_on_migrate": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/__init__.py b/jcloud/jcloud/pagetype/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/account_request/__init__.py b/jcloud/jcloud/pagetype/account_request/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/account_request/account_request.js b/jcloud/jcloud/pagetype/account_request/account_request.js new file mode 100644 index 0000000..ec7dd18 --- /dev/null +++ b/jcloud/jcloud/pagetype/account_request/account_request.js @@ -0,0 +1,10 @@ +// Copyright (c) 2019, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Account Request', { + refresh: function (frm) { + frm.add_custom_button('Send verification email', () => { + frm.call('send_verification_email'); + }); + }, +}); diff --git a/jcloud/jcloud/pagetype/account_request/account_request.json b/jcloud/jcloud/pagetype/account_request/account_request.json new file mode 100644 index 0000000..18b4ae8 --- /dev/null +++ b/jcloud/jcloud/pagetype/account_request/account_request.json @@ -0,0 +1,309 @@ +{ + "actions": [], + "creation": "2022-03-06 20:52:32.662775", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "request_key", + "team", + "email", + "oauth_signup", + "send_email", + "otp", + "otp_generated_at", + "column_break_4", + "role", + "invited_by", + "ip_address", + "invited_by_parent_team", + "jcloud_roles", + "saas_sign_up_section", + "saas", + "saas_app", + "product_trial", + "column_break_eaxp", + "first_name", + "last_name", + "country", + "section_break_8", + "jerp", + "subdomain", + "plan", + "phone_number", + "company", + "designation", + "column_break_14", + "no_of_users", + "no_of_employees", + "industry", + "state", + "is_us_eu", + "agreed_to_partner_consent", + "referrer_data", + "referral_source", + "url_args", + "referrer_id", + "section_break_31", + "geo_location" + ], + "fields": [ + { + "fieldname": "email", + "fieldtype": "Data", + "label": "Email", + "search_index": 1 + }, + { + "fieldname": "request_key", + "fieldtype": "Data", + "label": "Request Key" + }, + { + "fieldname": "team", + "fieldtype": "Data", + "label": "Team" + }, + { + "fieldname": "role", + "fieldtype": "Data", + "label": "Role" + }, + { + "fieldname": "invited_by", + "fieldtype": "Data", + "label": "Invited By" + }, + { + "fieldname": "ip_address", + "fieldtype": "Data", + "label": "IP Address" + }, + { + "fieldname": "country", + "fieldtype": "Data", + "label": "Country" + }, + { + "fieldname": "first_name", + "fieldtype": "Data", + "label": "名字" + }, + { + "fieldname": "last_name", + "fieldtype": "Data", + "label": "姓" + }, + { + "fieldname": "phone_number", + "fieldtype": "Data", + "label": "Phone Number" + }, + { + "fieldname": "subdomain", + "fieldtype": "Data", + "label": "Subdomain" + }, + { + "default": 
"0", + "fieldname": "jerp", + "fieldtype": "Check", + "hidden": 1, + "label": "JERP" + }, + { + "fieldname": "company", + "fieldtype": "Data", + "label": "Company" + }, + { + "fieldname": "designation", + "fieldtype": "Data", + "label": "Designation" + }, + { + "fieldname": "referral_source", + "fieldtype": "Data", + "label": "Referral Source" + }, + { + "fieldname": "no_of_employees", + "fieldtype": "Data", + "label": "Number of employees" + }, + { + "collapsible": 1, + "fieldname": "section_break_8", + "fieldtype": "Section Break", + "label": "JERP Sign Up" + }, + { + "fieldname": "column_break_14", + "fieldtype": "Column Break" + }, + { + "fieldname": "column_break_4", + "fieldtype": "Column Break" + }, + { + "collapsible": 1, + "fieldname": "referrer_data", + "fieldtype": "Section Break", + "label": "Referrer Data" + }, + { + "fieldname": "plan", + "fieldtype": "Link", + "label": "Plan", + "options": "Site Plan" + }, + { + "fieldname": "industry", + "fieldtype": "Data", + "label": "Industry" + }, + { + "fieldname": "no_of_users", + "fieldtype": "Int", + "label": "Number of Users" + }, + { + "fieldname": "url_args", + "fieldtype": "Code", + "label": "URL Args", + "options": "JSON", + "read_only": 1 + }, + { + "default": "0", + "fieldname": "agreed_to_partner_consent", + "fieldtype": "Check", + "label": "Agreed to Partner Consent", + "read_only": 1 + }, + { + "fieldname": "referrer_id", + "fieldtype": "Data", + "label": "Referrer ID" + }, + { + "default": "0", + "fieldname": "saas", + "fieldtype": "Check", + "hidden": 1, + "label": "Saas" + }, + { + "collapsible": 1, + "fieldname": "saas_sign_up_section", + "fieldtype": "Section Break", + "label": "Saas Sign Up" + }, + { + "collapsible": 1, + "fieldname": "section_break_31", + "fieldtype": "Section Break" + }, + { + "fieldname": "geo_location", + "fieldtype": "Code", + "label": "Geo Location", + "read_only": 1 + }, + { + "fieldname": "state", + "fieldtype": "Data", + "label": "State" + }, + { + "default": "0", + "fieldname": "send_email", + "fieldtype": "Check", + "label": "Send Email" + }, + { + "default": "0", + "fieldname": "is_us_eu", + "fieldtype": "Check", + "label": "Is State or Europe" + }, + { + "default": "0", + "fieldname": "invited_by_parent_team", + "fieldtype": "Check", + "label": "Invited By Parent Team" + }, + { + "default": "0", + "fieldname": "oauth_signup", + "fieldtype": "Check", + "label": "Oauth Signup" + }, + { + "fieldname": "saas_app", + "fieldtype": "Link", + "label": "Saas App", + "options": "Marketplace App" + }, + { + "fieldname": "product_trial", + "fieldtype": "Link", + "label": "Product Trial (New)", + "options": "Product Trial" + }, + { + "fieldname": "jcloud_roles", + "fieldtype": "Table MultiSelect", + "label": "Jcloud Roles", + "options": "Account Request Jcloud Role" + }, + { + "fieldname": "otp", + "fieldtype": "Data", + "label": "OTP" + }, + { + "fieldname": "column_break_eaxp", + "fieldtype": "Column Break" + }, + { + "fieldname": "otp_generated_at", + "fieldtype": "Datetime", + "label": "OTP Generated at" + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2025-02-03 16:12:50.184724", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Account Request", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + }, + { + "create": 1, + "role": "Jcloud Admin" + }, + { + "create": 1, + "role": "Jcloud Member" + } + 
], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "title_field": "email", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/account_request/account_request.py b/jcloud/jcloud/pagetype/account_request/account_request.py new file mode 100644 index 0000000..3cf3c38 --- /dev/null +++ b/jcloud/jcloud/pagetype/account_request/account_request.py @@ -0,0 +1,246 @@ +# Copyright (c) 2019, JINGROW +# For license information, please see license.txt + +from __future__ import annotations + +import json + +import jingrow +from jingrow.model.document import Document +from jingrow.utils import get_url, random_string + +from jcloud.utils import get_country_info, is_valid_email_address +from jcloud.utils.otp import generate_otp +from jcloud.utils.telemetry import capture + + +class AccountRequest(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + from jcloud.jcloud.pagetype.account_request_jcloud_role.account_request_jcloud_role import ( + AccountRequestJcloudRole, + ) + + agreed_to_partner_consent: DF.Check + company: DF.Data | None + country: DF.Data | None + designation: DF.Data | None + email: DF.Data | None + jerp: DF.Check + first_name: DF.Data | None + geo_location: DF.Code | None + industry: DF.Data | None + invited_by: DF.Data | None + invited_by_parent_team: DF.Check + ip_address: DF.Data | None + is_us_eu: DF.Check + last_name: DF.Data | None + no_of_employees: DF.Data | None + no_of_users: DF.Int + oauth_signup: DF.Check + otp: DF.Data | None + otp_generated_at: DF.Datetime | None + phone_number: DF.Data | None + plan: DF.Link | None + jcloud_roles: DF.TableMultiSelect[AccountRequestJcloudRole] + product_trial: DF.Link | None + referral_source: DF.Data | None + referrer_id: DF.Data | None + request_key: DF.Data | None + role: DF.Data | None + saas: DF.Check + saas_app: DF.Link | None + send_email: DF.Check + state: DF.Data | None + subdomain: DF.Data | None + team: DF.Data | None + url_args: DF.Code | None + # end: auto-generated types + + def before_insert(self): + # This pre-verification is only beneficial for SaaS signup + # because, in general flow we already have e-mail link/otp based verification + if ( + not jingrow.conf.developer_mode + and jingrow.db.get_single_value("Jcloud Settings", "enable_email_pre_verification") + and self.saas + and not self.oauth_signup + and not is_valid_email_address(self.email) + ): + jingrow.throw(f"{self.email} is not a valid email address") + + if not self.team: + self.team = self.email + + if not self.request_key: + self.request_key = random_string(32) + + if not self.otp: + self.otp = generate_otp() + self.otp_generated_at = jingrow.utils.now_datetime() + if jingrow.conf.developer_mode and jingrow.local.dev_server: + self.otp = 111111 + + self.ip_address = jingrow.local.request_ip + geo_location = self.get_country_info() or {} + self.geo_location = json.dumps(geo_location, indent=1, sort_keys=True) + self.state = geo_location.get("regionName") + + # check for US and EU + if ( + geo_location.get("country") == "United States" + or geo_location.get("continent") == "Europe" + or self.country == "United States" + ): + self.is_us_eu = True + else: + self.is_us_eu = False + + def validate(self): + self.email = self.email.strip() + + def after_insert(self): + # Telemetry: Only capture if it's not a saas signup or invited by parent team. 
Also don't capture if user already have a team + if not ( + jingrow.db.exists("Team", {"user": self.email}) + or self.is_saas_signup() + or self.invited_by_parent_team + ): + # Telemetry: Account Request Created + capture("account_request_created", "fc_signup", self.email) + + if self.is_saas_signup() and self.is_using_new_saas_flow(): + # Telemetry: Account Request Created + capture("account_request_created", "fc_saas", self.email) + + if self.is_saas_signup() and not self.is_using_new_saas_flow(): + # If user used oauth, we don't need to verification email but to track the event in stat, send this dummy event + capture("verification_email_sent", "fc_signup", self.email) + capture("clicked_verify_link", "fc_signup", self.email) + + if self.send_email: + self.send_verification_email() + if self.oauth_signup: + # Telemetry: simulate verification email sent + capture("verification_email_sent", "fc_signup", self.email) + + def get_country_info(self): + return get_country_info() + + def too_many_requests_with_field(self, field_name, limits): + key = getattr(self, field_name) + for allowed_count, kwargs in limits: + count = jingrow.db.count( + self.pagetype, + {field_name: key, "creation": (">", jingrow.utils.add_to_date(None, **kwargs))}, + ) + if count > allowed_count: + return True + return False + + def reset_otp(self): + self.otp = generate_otp() + self.save(ignore_permissions=True) + + @jingrow.whitelist() + def send_verification_email(self): # noqa: C901 + url = self.get_verification_url() + + if jingrow.conf.developer_mode: + print(f"\nSetup account URL for {self.email}:") + print(url) + print(f"\nOTP for {self.email}:") + print(self.otp) + print() + return + + subject = f"{self.otp} - OTP for Jingrow Account Verification" + args = {} + sender = "" + + custom_template = self.saas_app and jingrow.db.get_value( + "Marketplace App", self.saas_app, "custom_verify_template" + ) + if self.is_saas_signup() or custom_template: + subject = "Verify your email for Jingrow" + template = "saas_verify_account" + # If product trial(new saas flow), get the product trial details + if self.product_trial: + template = "product_trial_verify_account" + product_trial = jingrow.get_pg("Product Trial", self.product_trial) + if product_trial.email_subject: + subject = product_trial.email_subject.format(otp=self.otp) + if product_trial.email_account: + sender = jingrow.get_value("Email Account", product_trial.email_account, "email_id") + if product_trial.email_full_logo: + args.update({"image_path": get_url(product_trial.email_full_logo, True)}) + args.update({"header_content": product_trial.email_header_content or ""}) + # If saas_app is set, check for email account in saas settings of that app + elif self.saas_app: + email_account = jingrow.get_value("Saas Settings", self.saas_app, "email_account") + if email_account: + sender = jingrow.get_value("Email Account", email_account, "email_id") + else: + template = "verify_account" + + if self.invited_by and self.role != "Jcloud Admin": + subject = f"You are invited by {self.invited_by} to join Jingrow" + template = "invite_team_member" + + args.update( + { + "invited_by": self.invited_by, + "link": url, + "read_pixel_path": get_url( + f"/api/method/jcloud.utils.telemetry.capture_read_event?email={self.email}" + ), + "otp": self.otp, + } + ) + if not args.get("image_path"): + args.update( + { + "image_path": "http://git.jingrow.com:3000/jingrow/gameplan/assets/9355208/447035d0-0686-41d2-910a-a3d21928ab94" + } + ) + # Telemetry: Verification Email Sent + # Only 
capture if it's not a saas signup or invited by parent team + if not (self.is_saas_signup() or self.invited_by_parent_team): + # Telemetry: Verification Mail Sent + capture("verification_email_sent", "fc_signup", self.email) + jingrow.sendmail( + sender=sender, + recipients=self.email, + subject=subject, + template=template, + args=args, + now=True, + ) + + def get_verification_url(self): + if self.saas: + return get_url(f"/api/method/jcloud.api.saas.validate_account_request?key={self.request_key}") + if self.product_trial: + return get_url( + f"/dashboard/saas/{self.product_trial}/oauth?key={self.request_key}&email={self.email}" + ) + return get_url(f"/dashboard/setup-account/{self.request_key}") + + @property + def full_name(self): + return " ".join(filter(None, [self.first_name, self.last_name])) + + def get_site_name(self): + return self.subdomain + "." + jingrow.db.get_value("Saas Settings", self.saas_app, "domain") + + def is_using_new_saas_flow(self): + return bool(self.product_trial) + + def is_saas_signup(self): + return bool(self.saas_app or self.saas or self.jerp or self.product_trial) diff --git a/jcloud/jcloud/pagetype/account_request/test_account_request.py b/jcloud/jcloud/pagetype/account_request/test_account_request.py new file mode 100644 index 0000000..0481ca1 --- /dev/null +++ b/jcloud/jcloud/pagetype/account_request/test_account_request.py @@ -0,0 +1,43 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2021, JINGROW +# See license.txt + + +import unittest +from typing import Optional +from unittest.mock import patch + +import jingrow + +from jcloud.jcloud.pagetype.account_request.account_request import AccountRequest + + +def create_test_account_request( + subdomain: str, + email: str = None, + jerp: bool = True, + creation=None, + saas: bool = False, + saas_app: Optional[str] = None, +): + creation = creation or jingrow.utils.now_datetime() + email = email or jingrow.mock("email") + with patch.object(AccountRequest, "send_verification_email"): + account_request = jingrow.get_pg( + { + "pagetype": "Account Request", + "subdomain": subdomain, + "email": email, + "jerp": jerp, + "saas": saas, + "saas_app": saas_app, + "otp": "", + } + ).insert(ignore_if_duplicate=True) + account_request.db_set("creation", creation) + account_request.reload() + return account_request + + +class TestAccountRequest(unittest.TestCase): + pass diff --git a/jcloud/jcloud/pagetype/account_request_jcloud_role/__init__.py b/jcloud/jcloud/pagetype/account_request_jcloud_role/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/account_request_jcloud_role/account_request_jcloud_role.json b/jcloud/jcloud/pagetype/account_request_jcloud_role/account_request_jcloud_role.json new file mode 100644 index 0000000..0fc91c8 --- /dev/null +++ b/jcloud/jcloud/pagetype/account_request_jcloud_role/account_request_jcloud_role.json @@ -0,0 +1,31 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2024-07-06 23:39:13.802962", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "jcloud_role" + ], + "fields": [ + { + "fieldname": "jcloud_role", + "fieldtype": "Link", + "label": "Jcloud Role", + "options": "Jcloud Role" + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2024-07-07 11:29:10.685626", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Account Request Jcloud Role", + "owner": "Administrator", + "permissions": [], + "sort_field": "creation", + "sort_order": "DESC", + 
"states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/account_request_jcloud_role/account_request_jcloud_role.py b/jcloud/jcloud/pagetype/account_request_jcloud_role/account_request_jcloud_role.py new file mode 100644 index 0000000..ff4397a --- /dev/null +++ b/jcloud/jcloud/pagetype/account_request_jcloud_role/account_request_jcloud_role.py @@ -0,0 +1,23 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class AccountRequestJcloudRole(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + jcloud_role: DF.Link | None + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/add_on_settings/__init__.py b/jcloud/jcloud/pagetype/add_on_settings/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/add_on_settings/add_on_settings.js b/jcloud/jcloud/pagetype/add_on_settings/add_on_settings.js new file mode 100644 index 0000000..fda1972 --- /dev/null +++ b/jcloud/jcloud/pagetype/add_on_settings/add_on_settings.js @@ -0,0 +1,28 @@ +// Copyright (c) 2022, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Add On Settings', { + refresh: function (frm) { + create_custom_button(frm, 'Reset etcd Admin Data', 'init_etcd_data'); + }, +}); + +function create_custom_button(frm, title, method) { + return frm.add_custom_button( + __(title), + () => { + jingrow.prompt( + { + fieldtype: 'Data', + label: 'Proxy Server Name', + fieldname: 'proxy_server', + reqd: 1, + }, + ({ proxy_server }) => { + frm.call(method, { proxy_server: proxy_server }); + }, + ); + }, + 'Actions', + ); +} diff --git a/jcloud/jcloud/pagetype/add_on_settings/add_on_settings.json b/jcloud/jcloud/pagetype/add_on_settings/add_on_settings.json new file mode 100644 index 0000000..ffff203 --- /dev/null +++ b/jcloud/jcloud/pagetype/add_on_settings/add_on_settings.json @@ -0,0 +1,54 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2022-02-14 19:25:06.365019", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "s3_credentials_section", + "aws_access_key", + "aws_secret_key" + ], + "fields": [ + { + "collapsible": 1, + "fieldname": "s3_credentials_section", + "fieldtype": "Section Break", + "label": "S3 Credentials" + }, + { + "fieldname": "aws_access_key", + "fieldtype": "Data", + "label": "aws access key" + }, + { + "fieldname": "aws_secret_key", + "fieldtype": "Password", + "label": "aws secret key" + } + ], + "index_web_pages_for_search": 1, + "issingle": 1, + "links": [], + "modified": "2022-02-14 19:25:23.215420", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Add On Settings", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "print": 1, + "read": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/add_on_settings/add_on_settings.py b/jcloud/jcloud/pagetype/add_on_settings/add_on_settings.py new file mode 100644 index 0000000..5000b16 --- /dev/null +++ b/jcloud/jcloud/pagetype/add_on_settings/add_on_settings.py @@ -0,0 +1,36 @@ +# Copyright (c) 2022, 
JINGROW +# For license information, please see license.txt + +import jingrow +from jingrow.model.document import Document + + +class AddOnSettings(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + aws_access_key: DF.Data | None + aws_secret_key: DF.Password | None + # end: auto-generated types + + @jingrow.whitelist() + def init_etcd_data(self, proxy_server): + # TODO: Add a separate agent job for this, instead of doing it recursively here do it on server + subs = jingrow.get_all( + "Storage Integration Subscription", + fields=["name", "enabled"], + filters={"minio_server_on": proxy_server}, + ) + + for sub in subs: + pg = jingrow.get_pg("Storage Integration Subscription", sub["name"]) + pg.create_user() + if pg.enabled == 0: + pg.update_user("disable") + + jingrow.db.commit() diff --git a/jcloud/jcloud/pagetype/add_on_settings/test_add_on_settings.py b/jcloud/jcloud/pagetype/add_on_settings/test_add_on_settings.py new file mode 100644 index 0000000..ec4c30f --- /dev/null +++ b/jcloud/jcloud/pagetype/add_on_settings/test_add_on_settings.py @@ -0,0 +1,9 @@ +# Copyright (c) 2022, JINGROW +# See license.txt + +# import jingrow +import unittest + + +class TestAddOnSettings(unittest.TestCase): + pass diff --git a/jcloud/jcloud/pagetype/agent_job/__init__.py b/jcloud/jcloud/pagetype/agent_job/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/agent_job/agent_job.js b/jcloud/jcloud/pagetype/agent_job/agent_job.js new file mode 100644 index 0000000..7c8039b --- /dev/null +++ b/jcloud/jcloud/pagetype/agent_job/agent_job.js @@ -0,0 +1,88 @@ +// Copyright (c) 2020, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Agent Job', { + refresh: function (frm) { + frm.add_web_link( + `https://${frm.pg.server}/agent/jobs/${frm.pg.job_id}`, + __('Visit Agent Endpoint'), + ); + frm.add_web_link( + frm.pg.site + ? 
`/dashboard/sites/${frm.pg.site}/insights/jobs/${frm.pg.name}` + : `/dashboard/servers/${frm.pg.server}/jobs/${frm.pg.name}`, + __('Visit Dashboard'), + ); + + if (!['Success', 'Failure', 'Delivery Failure'].includes(frm.pg.status)) { + frm.add_custom_button( + __('Get Status'), + () => { + frm.call('get_status').then(() => frm.refresh()); + }, + __('Actions'), + ); + } + + frm.add_custom_button( + __('Retry'), + () => { + jingrow.confirm(`Are you sure you want to retry this job?`, () => + frm + .call('retry') + .then((result) => + jingrow.msgprint( + jingrow.utils.get_form_link( + 'Agent Job', + result.message.name, + true, + ), + ), + ), + ); + }, + __('Actions'), + ); + + [ + [__('Retry In-Place'), 'retry_in_place'], + [__('Process Job Updates'), 'process_job_updates'], + [__('Fail and Process Job Updates'), 'fail_and_process_job_updates'], + [ + __('Succeed and Process Job Updates'), + 'succeed_and_process_job_updates', + ], + [__('Cancel Job'), 'cancel_job', ['Pending', "Running"].includes(frm.pg.status)], + ].forEach(([label, method, condition]) => { + frm.add_custom_button( + label, + () => { + jingrow.confirm( + `Are you sure you want to ${label.toLowerCase()}?`, + () => frm.call(method).then(() => frm.refresh()), + ); + }, + __('Actions'), + ); + }); + if (['Update Site Migrate', 'Migrate Site'].includes(frm.pg.job_type)) { + frm.add_custom_button( + 'Run by Skipping Failing Patches', + () => { + frm + .call('retry_skip_failing_patches') + .then((result) => + jingrow.msgprint( + jingrow.utils.get_form_link( + 'Agent Job', + result.message.name, + true, + ), + ), + ); + }, + __('Actions'), + ); + } + }, +}); diff --git a/jcloud/jcloud/pagetype/agent_job/agent_job.json b/jcloud/jcloud/pagetype/agent_job/agent_job.json new file mode 100644 index 0000000..aeac6e0 --- /dev/null +++ b/jcloud/jcloud/pagetype/agent_job/agent_job.json @@ -0,0 +1,298 @@ +{ + "actions": [], + "creation": "2020-01-13 17:59:25.842963", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "job_type", + "status", + "server_type", + "server", + "column_break_5", + "bench", + "site", + "code_server", + "upstream", + "host", + "request_section", + "job_id", + "request_path", + "request_method", + "retry_count", + "next_retry_at", + "column_break_10", + "start", + "end", + "duration", + "section_break_zqbm", + "reference_pagetype", + "column_break_ayyq", + "reference_name", + "data_19", + "output", + "data", + "traceback", + "request_data", + "request_files", + "callback_section", + "callback_failure_count" + ], + "fields": [ + { + "fieldname": "server", + "fieldtype": "Dynamic Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Server", + "options": "server_type", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "status", + "fieldtype": "Select", + "in_list_view": 1, + "label": "Status", + "options": "Undelivered\nPending\nRunning\nSuccess\nFailure\nDelivery Failure", + "read_only": 1, + "reqd": 1, + "search_index": 1 + }, + { + "fieldname": "request_method", + "fieldtype": "Select", + "label": "Request Method", + "options": "GET\nPOST\nDELETE", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "request_data", + "fieldtype": "Code", + "label": "Request Data", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "job_id", + "fieldtype": "Int", + "label": "Job ID", + "read_only": 1, + "search_index": 1 + }, + { + "fieldname": "request_path", + "fieldtype": "Data", + "label": "Request Path", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "job_type", 
+ "fieldtype": "Link", + "label": "Job Type", + "options": "Agent Job Type", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "output", + "fieldtype": "Code", + "label": "Output", + "read_only": 1 + }, + { + "fieldname": "traceback", + "fieldtype": "Code", + "label": "Traceback", + "read_only": 1 + }, + { + "fieldname": "data", + "fieldtype": "Code", + "label": "Data", + "read_only": 1 + }, + { + "fieldname": "server_type", + "fieldtype": "Link", + "label": "Server Type", + "options": "PageType", + "read_only": 1, + "reqd": 1, + "search_index": 1 + }, + { + "fieldname": "bench", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Bench", + "options": "Bench", + "read_only": 1 + }, + { + "fieldname": "site", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Site", + "options": "Site", + "read_only": 1 + }, + { + "fieldname": "upstream", + "fieldtype": "Link", + "label": "Upstream", + "options": "Server", + "read_only": 1 + }, + { + "fieldname": "start", + "fieldtype": "Datetime", + "label": "Start", + "read_only": 1 + }, + { + "fieldname": "end", + "fieldtype": "Datetime", + "label": "End", + "read_only": 1 + }, + { + "fieldname": "duration", + "fieldtype": "Time", + "label": "Duration", + "read_only": 1 + }, + { + "fieldname": "request_section", + "fieldtype": "Section Break" + }, + { + "fieldname": "column_break_10", + "fieldtype": "Column Break" + }, + { + "fieldname": "column_break_5", + "fieldtype": "Column Break" + }, + { + "fieldname": "data_19", + "fieldtype": "Section Break" + }, + { + "fieldname": "host", + "fieldtype": "Link", + "label": "Host", + "options": "Site Domain", + "read_only": 1, + "search_index": 1 + }, + { + "fieldname": "request_files", + "fieldtype": "Code", + "label": "Request Files", + "read_only": 1 + }, + { + "fieldname": "code_server", + "fieldtype": "Link", + "label": "Code Server", + "options": "Code Server", + "read_only": 1 + }, + { + "fieldname": "retry_count", + "fieldtype": "Int", + "label": "Retry Count", + "read_only": 1 + }, + { + "fieldname": "next_retry_at", + "fieldtype": "Datetime", + "label": "Next Retry At", + "read_only": 1 + }, + { + "collapsible": 1, + "fieldname": "callback_section", + "fieldtype": "Section Break", + "label": "Callback" + }, + { + "default": "0", + "fieldname": "callback_failure_count", + "fieldtype": "Int", + "label": "Callback Failure Count" + }, + { + "fieldname": "section_break_zqbm", + "fieldtype": "Section Break" + }, + { + "depends_on": "eval: pg.reference_pagetype", + "fieldname": "reference_pagetype", + "fieldtype": "Link", + "label": "Reference Pagetype", + "options": "PageType", + "read_only": 1 + }, + { + "depends_on": "eval: pg.reference_name", + "fieldname": "reference_name", + "fieldtype": "Dynamic Link", + "label": "Reference Name", + "options": "reference_pagetype", + "read_only": 1 + }, + { + "fieldname": "column_break_ayyq", + "fieldtype": "Column Break" + } + ], + "in_create": 1, + "links": [], + "modified": "2024-08-23 16:32:27.447230", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Agent Job", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + }, + { + "create": 1, + "if_owner": 1, + "role": "Jcloud Admin", + "write": 1 + }, + { + "create": 1, + "if_owner": 1, + "role": "Jcloud Member", + "write": 1 + }, + { + "read": 1, + "role": "Site Manager" + } + ], + 
"quick_entry": 1, + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "title_field": "job_type", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/agent_job/agent_job.py b/jcloud/jcloud/pagetype/agent_job/agent_job.py new file mode 100644 index 0000000..c40b2b0 --- /dev/null +++ b/jcloud/jcloud/pagetype/agent_job/agent_job.py @@ -0,0 +1,1227 @@ +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt +from __future__ import annotations + +import json +import os +import random +import traceback + +import jingrow +from jingrow.core.utils import find +from jingrow.model.document import Document +from jingrow.monitor import add_data_to_monitor +from jingrow.utils import ( + add_days, + cint, + convert_utc_to_system_timezone, + cstr, + get_datetime, + now_datetime, +) + +from jcloud.agent import Agent, AgentCallbackException, AgentRequestSkippedException +from jcloud.api.client import is_owned_by_team +from jcloud.jcloud.pagetype.agent_job_type.agent_job_type import ( + get_retryable_job_types_and_max_retry_count, +) +from jcloud.jcloud.pagetype.site_database_user.site_database_user import SiteDatabaseUser +from jcloud.jcloud.pagetype.site_migration.site_migration import ( + get_ongoing_migration, + job_matches_site_migration, + process_site_migration_job_update, +) +from jcloud.utils import has_role, log_error, timer + +AGENT_LOG_KEY = "agent-jobs" + + +class AgentJob(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + bench: DF.Link | None + callback_failure_count: DF.Int + code_server: DF.Link | None + data: DF.Code | None + duration: DF.Time | None + end: DF.Datetime | None + host: DF.Link | None + job_id: DF.Int + job_type: DF.Link + next_retry_at: DF.Datetime | None + output: DF.Code | None + reference_pagetype: DF.Link | None + reference_name: DF.DynamicLink | None + request_data: DF.Code + request_files: DF.Code | None + request_method: DF.Literal["GET", "POST", "DELETE"] + request_path: DF.Data + retry_count: DF.Int + server: DF.DynamicLink + server_type: DF.Link + site: DF.Link | None + start: DF.Datetime | None + status: DF.Literal["Undelivered", "Pending", "Running", "Success", "Failure", "Delivery Failure"] + traceback: DF.Code | None + upstream: DF.Link | None + # end: auto-generated types + + dashboard_fields = ( + "name", + "job_type", + "creation", + "status", + "start", + "end", + "duration", + "bench", + "site", + "server", + "job_id", + "output", + ) + + @staticmethod + def get_list_query(query, filters=None, **list_args): + site = cstr(filters.get("site", "")) + group = cstr(filters.get("group", "")) + server = cstr(filters.get("server", "")) + bench = cstr(filters.get("bench", "")) + + if not (site or group or server or bench): + jingrow.throw("不允许", jingrow.PermissionError) + + if site and not has_role("Jcloud Support Agent"): + is_owned_by_team("Site", site, raise_exception=True) + + if group: + if not has_role("Jcloud Support Agent"): + is_owned_by_team("Release Group", group, raise_exception=True) + + AgentJob = jingrow.qb.PageType("Agent Job") + Bench = jingrow.qb.PageType("Bench") + benches = jingrow.qb.from_(Bench).select(Bench.name).where(Bench.group == filters.group) + query = query.where(AgentJob.bench.isin(benches)) + + if server: + is_owned_by_team("Server", server, raise_exception=True) + + results = query.run(as_dict=1) + 
update_query_result_status_timestamps(results) + return results + + def get_pg(self, pg): + if pg.status == "Undelivered": + pg.status = "Pending" + + pg["steps"] = jingrow.get_all( + "Agent Job Step", + filters={"agent_job": self.name}, + fields=[ + "name", + "step_name", + "status", + "start", + "end", + "duration", + "output", + ], + order_by="creation", + ) + # agent job start and end are in utc + if pg.start: + pg.start = convert_utc_to_system_timezone(pg.start).replace(tzinfo=None) + if pg.end: + pg.end = convert_utc_to_system_timezone(pg.end).replace(tzinfo=None) + + for step in pg["steps"]: + if step.status == "Running": + step.output = jingrow.cache.hget("agent_job_step_output", step.name) + + return pg + + def after_insert(self): + self.create_agent_job_steps() + self.log_creation() + self.enqueue_http_request() + + def enqueue_http_request(self): + jingrow.enqueue_pg( + self.pagetype, + self.name, + "create_http_request", + timeout=600, + queue="short", + enqueue_after_commit=True, + ) + + def create_http_request(self): + try: + agent = Agent(self.server, server_type=self.server_type) + if agent.should_skip_requests(): + self.retry_count = 0 + self.set_status_and_next_retry_at() + return + + data = json.loads(self.request_data) + files = json.loads(self.request_files) + + self.job_id = agent.request(self.request_method, self.request_path, data, files, agent_job=self)[ + "job" + ] + + self.status = "Pending" + self.save() + except AgentRequestSkippedException: + self.retry_count = 0 + self.set_status_and_next_retry_at() + + except Exception: + if 400 <= cint(self.flags.get("status_code", 0)) <= 499: + self.status = "Failure" + self.save() + jingrow.db.commit() + + process_job_updates(self.name) + + else: + self.set_status_and_next_retry_at() + + def log_creation(self): + try: + if hasattr(jingrow.local, "monitor"): + monitor = jingrow.local.monitor.data + else: + monitor = None + + data = { + "monitor": monitor, + "timestamp": jingrow.utils.now(), + "job": self.as_dict(), + } + serialized = json.dumps(data, sort_keys=True, default=str, separators=(",", ":")) + jingrow.cache().rpush(AGENT_LOG_KEY, serialized) + except Exception: + traceback.print_exc() + + def set_status_and_next_retry_at(self): + try: + next_retry_at = get_next_retry_at(self.retry_count) + self._update_retry_fields(next_retry_at) + + except jingrow.TimestampMismatchError: + self.reload() + self._update_retry_fields(next_retry_at) + + except Exception: + log_error("Agent Job Set Next Retry Timing", job=self) + + def _update_retry_fields(self, next_retry_at): + if not self.retry_count: + self.retry_count = 1 + + self.status = "Undelivered" + self.next_retry_at = next_retry_at + + self.save() + jingrow.db.commit() + + def create_agent_job_steps(self): + job_type = jingrow.get_pg("Agent Job Type", self.job_type) + for step in job_type.steps: + pg = jingrow.get_pg( + { + "pagetype": "Agent Job Step", + "agent_job": self.name, + "status": "Pending", + "step_name": step.step_name, + "duration": "00:00:00", + } + ) + pg.insert() + + @jingrow.whitelist() + def retry(self): + return jingrow.get_pg( + { + "pagetype": "Agent Job", + "status": "Undelivered", + "job_type": self.job_type, + "server_type": self.server_type, + "server": self.server, + "bench": self.bench, + "site": self.site, + "upstream": self.upstream, + "host": self.host, + "request_path": self.request_path, + "request_data": self.request_data, + "request_files": self.request_files, + "request_method": self.request_method, + } + ).insert() + + 
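+    # Note: retry() above inserts a fresh copy of this job with status "Undelivered";
+    # its after_insert hook recreates the job steps and enqueues a new HTTP request
+    # to the agent. retry_in_place() below re-enqueues the HTTP request for this same
+    # document instead of creating a copy.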
@jingrow.whitelist() + def retry_in_place(self): + self.enqueue_http_request() + jingrow.db.commit() + + @jingrow.whitelist() + def get_status(self): + agent = Agent(self.server, server_type=self.server_type) + + if not self.job_id: + job = agent.get_jobs_id(self.name) + if job and len(job) > 0: + self.db_set("job_id", job[0]["id"]) + if self.job_id: + polled_job = agent.get_job_status(self.job_id) + update_job(self.name, polled_job) + update_steps(self.name, polled_job) + + @jingrow.whitelist() + def retry_skip_failing_patches(self): + # Add the skip flag and update request data + updated_request_data = json.loads(self.request_data) if self.request_data else {} + updated_request_data["skip_failing_patches"] = True + self.request_data = json.dumps(updated_request_data, indent=4, sort_keys=True) + + return self.retry() + + @jingrow.whitelist() + def succeed_and_process_job_updates(self): + self.status = "Success" + self.save() + self.process_job_updates() + + @jingrow.whitelist() + def fail_and_process_job_updates(self): + self.status = "Failure" + self.save() + self.process_job_updates() + + @jingrow.whitelist() + def process_job_updates(self): + process_job_updates(self.name) + + @jingrow.whitelist() + def cancel_job(self): + agent = Agent(self.server, server_type=self.server_type) + agent.cancel_job(self.job_id) + + def on_trash(self): + steps = jingrow.get_all("Agent Job Step", filters={"agent_job": self.name}) + for step in steps: + jingrow.delete_pg("Agent Job Step", step.name) + + jingrow.db.delete( + "Jcloud Notification", + {"document_type": self.pagetype, "document_name": self.name}, + ) + + def get_step_status(self, step_name: str): + if statuses := jingrow.get_all( + "Agent Job Step", + fields=["status"], + filters={"agent_job": self.name, "step_name": step_name}, + pluck="status", + limit=1, + ): + return statuses[0] + + return None + + @property + def failed_because_of_agent_update(self) -> bool: + if "BrokenPipeError" in str(self.traceback) and jingrow.db.exists( + "Ansible Play", + { + "play": "Update Agent", + "server": self.server, + "creation": (">", jingrow.utils.add_to_date(None, minutes=-15)), + }, + ): + return True + return False + + @property + def on_public_server(self): + return bool(jingrow.db.get_value(self.server_type, self.server, "public")) + + +def job_detail(job): + job = jingrow.get_pg("Agent Job", job) + steps = [] + current = {} + for index, job_step in enumerate( + jingrow.get_all( + "Agent Job Step", + filters={"agent_job": job.name}, + fields=[ + "name", + "step_name", + "status", + "start", + "end", + "duration", + "output", + ], + order_by="creation", + ) + ): + step = {"name": job_step.step_name, "index": index, **job_step} + if job_step.status == "Running": + step["output"] = jingrow.cache.hget("agent_job_step_output", job_step.name) + current = step + steps.append(step) + + if job.status == "Pending": + current = {"name": job.job_type, "status": "Waiting", "index": -1} + elif job.status in ("Success", "Failure"): + current = {"name": job.job_type, "status": job.status, "index": len(steps)} + + current["total"] = len(steps) + + return { + "id": job.name, + "name": job.job_type, + "server": job.server, + "bench": job.bench, + "site": job.site, + "status": job.status, + "steps": steps, + "current": current, + } + + +def publish_update(job): + message = job_detail(job) + jingrow.publish_realtime(event="agent_job_update", pagetype="Agent Job", docname=job, message=message) + + # publish event for agent job list to update in dashboard + # we are doing 
this since process agent job doesn't emit list_update for job due to set_value + jingrow.publish_realtime(event="list_update", message={"pagetype": "Agent Job", "name": job}) + + # publish event for site to show job running on dashboard and update site + # we are doing this since process agent job doesn't emit pg_update for site due to set_value + if message["site"]: + jingrow.publish_realtime( + event="pg_update", + pagetype="Site", + docname=message["site"], + message={ + "pagetype": "Site", + "name": message["site"], + "status": message["status"], + "id": message["id"], + "site": message["site"], + }, + ) + + +def suspend_sites(): + """Suspend sites if they have exceeded database or disk limits""" + + if not jingrow.db.get_single_value("Jcloud Settings", "enforce_storage_limits"): + return + + free_teams = jingrow.get_all("Team", filters={"free_account": True, "enabled": True}, pluck="name") + active_sites = jingrow.get_all( + "Site", + filters={"status": "Active", "free": False, "team": ("not in", free_teams)}, + fields=["name", "team", "current_database_usage", "current_disk_usage"], + ) + + issue_reload = False + for site in active_sites: + if site.current_database_usage > 100 or site.current_disk_usage > 100: + jingrow.get_pg("Site", site.name).suspend( + reason="Site Usage Exceeds Plan limits", skip_reload=True + ) + issue_reload = True + + if issue_reload: + proxies = jingrow.get_all("Proxy Server", {"status": "Active"}, pluck="name") + for proxy_name in proxies: + agent = Agent(proxy_name, server_type="Proxy Server") + agent.reload_nginx() + + +@timer +def poll_random_jobs(agent, pending_ids): + random_pending_ids = random.sample(pending_ids, k=min(100, len(pending_ids))) + return agent.get_jobs_status(random_pending_ids) + + +@timer +def handle_polled_jobs(polled_jobs, pending_jobs): + for polled_job in polled_jobs: + if not polled_job: + continue + handle_polled_job(pending_jobs, polled_job) + + +def add_timer_data_to_monitor(server): + if not hasattr(jingrow.local, "timers"): + jingrow.local.timers = {} + + add_data_to_monitor(server=server, timing=jingrow.local.timers) + + +def poll_pending_jobs_server(server): + if jingrow.db.get_value(server.server_type, server.server, "status") != "Active": + return + + agent = Agent(server.server, server_type=server.server_type) + if agent.should_skip_requests(): + return + + pending_jobs = jingrow.get_all( + "Agent Job", + fields=["name", "job_id", "status", "callback_failure_count"], + filters={ + "status": ("in", ["Pending", "Running"]), + "job_id": ("!=", 0), + "server": server.server, + }, + order_by="job_id", + ignore_ifnull=True, + ) + + if not pending_jobs: + retry_undelivered_jobs(server) + add_timer_data_to_monitor(server.server) + return + + pending_ids = [j.job_id for j in pending_jobs] + polled_jobs = poll_random_jobs(agent, pending_ids) + + if not polled_jobs: + retry_undelivered_jobs(server) + add_timer_data_to_monitor(server.server) + return + + handle_polled_jobs(polled_jobs, pending_jobs) + + retry_undelivered_jobs(server) + add_timer_data_to_monitor(server.server) + + +def handle_polled_job(pending_jobs, polled_job): + job = find(pending_jobs, lambda x: x.job_id == polled_job["id"]) + try: + # Update Job Status + # If it is worthy of an update + if job.status != polled_job["status"]: + lock_pg_updated_by_job(job.name) + update_job(job.name, polled_job) + + # Update Steps' Status + update_steps(job.name, polled_job) + populate_output_cache(polled_job, job) + + # Some callbacks rely on step statuses, e.g. 
archive_site + # so update step status before callbacks are processed + if polled_job["status"] in ("Success", "Failure", "Undelivered"): + skip_pending_steps(job.name) + process_job_updates(job.name, polled_job) + + jingrow.db.commit() + publish_update(job.name) + except AgentCallbackException: + # Don't log error for AgentCallbackException + # it's already logged + # Rollback all other changes and increment the failure count + jingrow.db.rollback() + jingrow.db.set_value( + "Agent Job", + job.name, + "callback_failure_count", + job.callback_failure_count + 1, + ) + jingrow.db.commit() + except Exception: + log_error( + "Agent Job Poll Exception", + job=job, + polled=polled_job, + reference_pagetype="Agent Job", + reference_name=job.name, + ) + jingrow.db.rollback() + + +def populate_output_cache(polled_job, job): + if not cint(jingrow.get_cached_value("Jcloud Settings", None, "realtime_job_updates")): + return + steps = jingrow.get_all( + "Agent Job Step", + filters={"agent_job": job.name, "status": "Running"}, + fields=["name", "step_name"], + ) + for step in steps: + polled_step = find(polled_job["steps"], lambda x: x["name"] == step.step_name) + if polled_step: + lines = [] + for command in polled_step.get("commands", []): + output = command.get("output", "").strip() + if output: + lines.append(output) + jingrow.cache.hset("agent_job_step_output", step.name, "\n".join(lines)) + + +def filter_active_servers(servers): + # Prepare list of all_active_servers for each server_type + # Return servers that are in all_active_servers + server_types = [server.server_type for server in servers] + all_active_servers = {} + for server_type in server_types: + all_active_servers[server_type] = set(jingrow.get_all(server_type, {"status": "Active"}, pluck="name")) + + active_servers = [] + for server in servers: + if server.server in all_active_servers[server.server_type]: + active_servers.append(server) + + return active_servers + + +def filter_request_failures(servers): + request_failures = set(jingrow.get_all("Agent Request Failure", pluck="server")) + + alive_servers = [] + for server in servers: + if server.server not in request_failures: + alive_servers.append(server) + + return alive_servers + + +def poll_pending_jobs(): + filters = {"status": ("in", ["Pending", "Running", "Undelivered"])} + if random.random() > 0.1: + # Experimenting with fewer polls (only for backup jobs) + # Reduce poll frequency for Backup Site jobs + # TODO: Replace this with something deterministic + filters["job_type"] = ("!=", "Backup Site") + servers = jingrow.get_all( + "Agent Job", + fields=["server", "server_type"], + filters=filters, + group_by="server", + order_by="", + ignore_ifnull=True, + ) + + active_servers = filter_active_servers(servers) + alive_servers = filter_request_failures(active_servers) + + for server in alive_servers: + jingrow.enqueue( + "jcloud.jcloud.pagetype.agent_job.agent_job.poll_pending_jobs_server", + queue="short", + server=server, + job_id=f"poll_pending_jobs:{server.server}", + deduplicate=True, + ) + + +def fail_old_jobs(): + def update_status(jobs: list[str], status: str): + for job in jobs: + update_job_and_step_status(job, status) + process_job_updates(job) + jingrow.db.commit() + + failed_jobs = jingrow.db.get_values( + "Agent Job", + { + "status": ("in", ["Pending", "Running"]), + "job_id": ("!=", 0), + "creation": ("<", add_days(None, -2)), + }, + "name", + limit=100, + pluck=True, + ) + update_status(failed_jobs, "Failure") + + delivery_failed_jobs = jingrow.db.get_values( + 
"Agent Job", + { + "job_id": 0, + "creation": ("<", add_days(None, -2)), + "status": ("!=", "Delivery Failure"), + }, + "name", + limit=100, + pluck=True, + ) + + update_status(delivery_failed_jobs, "Delivery Failure") + + +def get_pair_jobs() -> tuple[str]: + """Return list of jobs who's callback depend on another""" + return ( + "New Site", + "New Site from Backup", + "Add Site to Upstream", + "Archive Site", + "Remove Site from Upstream", + "Rename Site", + "Rename Site on Upstream", + "Add User to ProxySQL", + "Remove User from ProxySQL", + ) + + +def lock_pg_updated_by_job(job_name): + """ + Ensure serializability of callback of jobs associated with the same document + + All select queries in this transaction should have for_update True for this to work correctly + """ + field_values = jingrow.db.get_values( + "Agent Job", + job_name, + ["site", "bench", "server", "server_type", "job_type"], + as_dict=True, + )[0] # relies on order of values to be site, bench.. + + if field_values["job_type"] not in get_pair_jobs(): + return None + + for field, value in field_values.items(): + pagetype = field.capitalize() + if field == "server": + pagetype = field_values["server_type"] + elif field in ( + "server_type", + "job_type", + ): # ideally will never happen, but for sanity + return None + if value: + jingrow.db.get_value(pagetype, value, "modified", for_update=True) + return value + + return None + + +def update_job(job_name, job): + job_data = json.dumps(job["data"], indent=4, sort_keys=True) + jingrow.db.set_value( + "Agent Job", + job_name, + { + "start": job["start"], + "end": job["end"], + "duration": job["duration"], + "status": job["status"], + "data": job_data, + "output": job["data"].get("output"), + "traceback": job["data"].get("traceback"), + }, + ) + + +def update_steps(job_name, job): + step_names = [polled_step["name"] for polled_step in job["steps"]] + steps = jingrow.db.get_all( + "Agent Job Step", + fields=["name", "status", "step_name"], + filters={ + "agent_job": job_name, + "status": ("in", ["Pending", "Running"]), + "step_name": ("in", step_names), + }, + ) + for polled_step in job["steps"]: + step = find(steps, lambda x: x.step_name == polled_step["name"]) + if not step: + continue + + if step.status == polled_step["status"]: + continue + + lock_pg_updated_by_job(job_name) + update_step(step.name, polled_step) + + +def update_step(step_name, step): + step_data = json.dumps(step["data"], indent=4, sort_keys=True) + + output = None + traceback = None + if isinstance(step["data"], dict): + traceback = to_str(step["data"].get("traceback", "")) + output = to_str(step["data"].get("output", "")) + + jingrow.db.set_value( + "Agent Job Step", + step_name, + { + "start": step["start"], + "end": step["end"], + "duration": step["duration"], + "status": step["status"], + "data": step_data, + "output": output, + "traceback": traceback, + }, + ) + + +def skip_pending_steps(job_name): + jingrow.db.sql( + """UPDATE `tabAgent Job Step` SET status = 'Skipped' + WHERE status = 'Pending' AND agent_job = %s""", + job_name, + ) + + +def get_next_retry_at(job_retry_count): + from jingrow.utils import add_to_date, now_datetime + + backoff_in_seconds = 5 + retry_in_seconds = job_retry_count**backoff_in_seconds + + return add_to_date(now_datetime(), seconds=retry_in_seconds) + + +@timer +def retry_undelivered_jobs(server): + """Retry undelivered jobs and update job status if max retry count is reached""" + + if is_auto_retry_disabled(server): + return + + job_types, max_retry_per_job_type = 
get_retryable_job_types_and_max_retry_count() + server_jobs = get_undelivered_jobs_for_server(server, job_types) + nowtime = now_datetime() + + for server in server_jobs: + delivered_jobs = get_jobs_delivered_to_server(server, server_jobs[server]) + + if delivered_jobs: + update_job_ids_for_delivered_jobs(delivered_jobs) + + undelivered_jobs = list(set(server_jobs[server]) - set(delivered_jobs)) + + for job in undelivered_jobs: + job_pg = jingrow.get_pg("Agent Job", job) + max_retry_count = max_retry_per_job_type[job_pg.job_type] or 0 + + if not job_pg.next_retry_at and job_pg.name not in queued_jobs(): + job_pg.set_status_and_next_retry_at() + continue + + if get_datetime(job_pg.next_retry_at) > nowtime: + continue + + if job_pg.retry_count <= max_retry_count: + retry = job_pg.retry_count + 1 + jingrow.db.set_value("Agent Job", job, "retry_count", retry, update_modified=False) + job_pg.retry_in_place() + else: + update_job_and_step_status(job, "Delivery Failure") + process_job_updates(job) + + +def queued_jobs(): + from jingrow.utils.background_jobs import get_jobs + + return get_jobs(site=jingrow.local.site, queue="default", key="name")[jingrow.local.site] + + +def is_auto_retry_disabled(server): + """Check if auto retry is disabled for the server""" + _auto_retry_disabled = False + + # Global Config + _auto_retry_disabled = jingrow.db.get_single_value("Jcloud Settings", "disable_auto_retry", cache=True) + if _auto_retry_disabled: + return True + + # Server Config + try: + _auto_retry_disabled = jingrow.db.get_value( + server.server_type, + server.server, + "disable_agent_job_auto_retry", + cache=True, + ) + except Exception: + _auto_retry_disabled = False + + return _auto_retry_disabled + + +def update_job_and_step_status(job: str, status: str): + agent_job = jingrow.qb.PageType("Agent Job") + jingrow.qb.update(agent_job).set(agent_job.status, status).where(agent_job.name == job).run() + + agent_job_step = jingrow.qb.PageType("Agent Job Step") + jingrow.qb.update(agent_job_step).set(agent_job_step.status, status).where( + agent_job_step.agent_job == job + ).run() + + +def get_undelivered_jobs_for_server(server, job_types): + jobs = jingrow._dict() + + if not job_types: + return jobs + + for job in jingrow.get_all( + "Agent Job", + { + "status": "Undelivered", + "job_id": 0, + "server": server.server, + "server_type": server.server_type, + "retry_count": (">", 0), + "job_type": ("in", job_types), + }, + ["name", "job_type"], + ignore_ifnull=True, # job type is mandatory and next_retry_at has to be set for retry + ): + jobs.setdefault((server.server, server.server_type), []).append(job["name"]) + + return jobs + + +def get_server_wise_undelivered_jobs(job_types): + jobs = jingrow._dict() + + if not job_types: + return jobs + + for job in jingrow.get_all( + "Agent Job", + { + "status": "Undelivered", + "job_id": 0, + "retry_count": [">=", 1], + "next_retry_at": ("<=", jingrow.utils.now_datetime()), + "job_type": ("in", job_types), + }, + ["name", "server", "server_type"], + ignore_ifnull=True, # job type is mandatory and next_retry_at has to be set for retry + ): + jobs.setdefault((job.server, job.server_type), []).append(job["name"]) + + return jobs + + +def get_jobs_delivered_to_server(server, jobs): + agent = Agent(server[0], server_type=server[1]) + + random_undelivered_ids = random.sample(jobs, k=min(100, len(jobs))) + delivered_jobs = agent.get_jobs_id(random_undelivered_ids) + + return delivered_jobs or [] + + +def update_job_ids_for_delivered_jobs(delivered_jobs): + for job in 
delivered_jobs: + jingrow.db.set_value( + "Agent Job", + job["agent_job_id"], + { + "job_id": job["id"], + "status": "Pending", + "next_retry_at": None, + "retry_count": 0, + }, + update_modified=False, + ) + + +def process_job_updates(job_name: str, response_data: dict | None = None): # noqa: C901 + job: "AgentJob" = jingrow.get_pg("Agent Job", job_name) + start = now_datetime() + + try: + from jcloud.jcloud.pagetype.agent_job.agent_job_notifications import ( + send_job_failure_notification, + ) + from jcloud.jcloud.pagetype.app_patch.app_patch import AppPatch + from jcloud.jcloud.pagetype.bench.bench import ( + Bench, + process_add_ssh_user_job_update, + process_archive_bench_job_update, + process_new_bench_job_update, + process_remove_ssh_user_job_update, + ) + from jcloud.jcloud.pagetype.code_server.code_server import ( + process_archive_code_server_job_update, + process_new_code_server_job_update, + process_start_code_server_job_update, + process_stop_code_server_job_update, + ) + from jcloud.jcloud.pagetype.deploy_candidate.deploy_candidate import DeployCandidate + from jcloud.jcloud.pagetype.physical_backup_restoration.physical_backup_restoration import ( + process_job_update as process_physical_backup_restoration_job_update, + ) + from jcloud.jcloud.pagetype.proxy_server.proxy_server import ( + process_update_nginx_job_update, + ) + from jcloud.jcloud.pagetype.server.server import process_new_server_job_update + from jcloud.jcloud.pagetype.site.jerp_site import ( + process_setup_jerp_site_job_update, + ) + from jcloud.jcloud.pagetype.site.site import ( + process_add_domain_job_update, + process_archive_site_job_update, + process_complete_setup_wizard_job_update, + process_create_user_job_update, + process_fetch_database_table_schema_job_update, + process_install_app_site_job_update, + process_migrate_site_job_update, + process_move_site_to_bench_job_update, + process_new_site_job_update, + process_reinstall_site_job_update, + process_rename_site_job_update, + process_restore_job_update, + process_restore_tables_job_update, + process_uninstall_app_site_job_update, + ) + from jcloud.jcloud.pagetype.site_backup.site_backup import process_backup_site_job_update + from jcloud.jcloud.pagetype.site_domain.site_domain import ( + process_add_domain_to_upstream_job_update, + process_new_host_job_update, + ) + from jcloud.jcloud.pagetype.site_update.site_update import ( + process_activate_site_job_update, + process_deactivate_site_job_update, + process_update_site_job_update, + process_update_site_recover_job_update, + ) + + site_migration = get_ongoing_migration(job.site) + if site_migration and job_matches_site_migration(job, site_migration): + process_site_migration_job_update(job, site_migration) + elif job.job_type == "Add Upstream to Proxy": + process_new_server_job_update(job) + elif job.job_type == "New Bench": + process_new_bench_job_update(job) + elif job.job_type == "Archive Bench": + process_archive_bench_job_update(job) + elif job.job_type == "New Site": + process_new_site_job_update(job) + elif job.job_type == "New Site from Backup": + process_new_site_job_update(job) + process_restore_job_update(job, force=True) + elif job.job_type == "Restore Site": + process_restore_job_update(job) + elif job.job_type == "Reinstall Site": + process_reinstall_site_job_update(job) + elif job.job_type == "Migrate Site": + process_migrate_site_job_update(job) + elif job.job_type == "Install App on Site": + process_install_app_site_job_update(job) + elif job.job_type == "Uninstall App from Site": 
+ process_uninstall_app_site_job_update(job) + elif job.job_type == "Add Site to Upstream": + process_new_site_job_update(job) + elif job.job_type == "Add Code Server to Upstream" or job.job_type == "Setup Code Server": + process_new_code_server_job_update(job) + elif job.job_type == "Start Code Server": + process_start_code_server_job_update(job) + elif job.job_type == "Stop Code Server": + process_stop_code_server_job_update(job) + elif job.job_type == "Archive Code Server" or job.job_type == "Remove Code Server from Upstream": + process_archive_code_server_job_update(job) + elif job.job_type in ["Backup Site", "Physical Backup Database"]: + process_backup_site_job_update(job) + elif job.job_type == "Archive Site" or job.job_type == "Remove Site from Upstream": + process_archive_site_job_update(job) + elif job.job_type == "Add Host to Proxy": + process_new_host_job_update(job) + elif job.job_type == "Add Domain to Upstream": + process_add_domain_to_upstream_job_update(job) + elif job.job_type == "Update Site Migrate" or job.job_type == "Update Site Pull": + process_update_site_job_update(job) + elif ( + job.job_type == "Recover Failed Site Migrate" + or job.job_type == "Recover Failed Site Pull" + or job.job_type == "Recover Failed Site Update" + ): + process_update_site_recover_job_update(job) + elif job.job_type == "Rename Site" or job.job_type == "Rename Site on Upstream": + process_rename_site_job_update(job) + elif job.job_type == "Setup JERP": + process_setup_jerp_site_job_update(job) + elif job.job_type == "Restore Site Tables": + process_restore_tables_job_update(job) + elif job.job_type == "Add User to Proxy": + process_add_ssh_user_job_update(job) + elif job.job_type == "Remove User from Proxy": + process_remove_ssh_user_job_update(job) + elif job.job_type == "Add User to ProxySQL" or job.job_type == "Remove User from ProxySQL": + if job.reference_pagetype == "Site Database User": + SiteDatabaseUser.process_job_update(job) + elif job.job_type == "Reload NGINX": + process_update_nginx_job_update(job) + elif job.job_type == "Move Site to Bench": + process_move_site_to_bench_job_update(job) + elif job.job_type == "Patch App": + AppPatch.process_patch_app(job) + elif job.job_type == "Run Remote Builder": + DeployCandidate.process_run_build(job, response_data) + elif job.job_type == "Create User": + process_create_user_job_update(job) + elif job.job_type == "Complete Setup Wizard": + process_complete_setup_wizard_job_update(job) + elif job.job_type == "Update Bench In Place": + Bench.process_update_inplace(job) + elif job.job_type == "Recover Update In Place": + Bench.process_recover_update_inplace(job) + elif job.job_type == "Fetch Database Table Schema": + process_fetch_database_table_schema_job_update(job) + elif job.job_type in [ + "Create Database User", + "Remove Database User", + "Modify Database User Permissions", + ]: + SiteDatabaseUser.process_job_update(job) + elif job.job_type == "Physical Restore Database": + process_physical_backup_restoration_job_update(job) + elif job.job_type == "Deactivate Site" and job.reference_pagetype == "Site Update": + process_deactivate_site_job_update(job) + elif job.job_type == "Activate Site" and job.reference_pagetype == "Site Update": + process_activate_site_job_update(job) + elif job.job_type == "Add Domain": + process_add_domain_job_update(job) + + # send failure notification if job failed + if job.status == "Failure": + send_job_failure_notification(job) + + log_update(job, start) + except Exception as e: + failure_count = 
job.callback_failure_count + 1 + if failure_count in set([10, 100]) or failure_count % 1000 == 0: + log_error( + "Agent Job Callback Exception", + job=job.as_dict(), + reference_pagetype="Agent Job", + reference_name=job_name, + ) + log_update(job, start, e) + raise AgentCallbackException from e + + +def log_update(job, start, exception=None): + try: + data = { + "timestamp": start, + "duration": (now_datetime() - start).total_seconds(), + "name": job.name, + "job_type": job.job_type, + "status": job.status, + "server": job.server, + "site": job.site, + "bench": job.bench, + } + if exception: + data["exception"] = exception + serialized = json.dumps(data, sort_keys=True, default=str, separators=(",", ":")) + jingrow.cache().rpush(AGENT_LOG_KEY, serialized) + except Exception: + traceback.print_exc() + + +def update_job_step_status(): + from jingrow.query_builder.custom import GROUP_CONCAT + + agent_job = jingrow.qb.PageType("Agent Job") + agent_job_step = jingrow.qb.PageType("Agent Job Step") + + steps_to_update = ( + jingrow.qb.from_(agent_job) + .join(agent_job_step) + .on(agent_job.name == agent_job_step.agent_job) + .select( + agent_job.name.as_("agent_job"), + agent_job.status.as_("job_status"), + GROUP_CONCAT(agent_job_step.name, alias="step_names"), + ) + .where( + (agent_job.status.isin(["Failure", "Delivery Failure"])) & (agent_job_step.status == "Pending") + ) + .groupby(agent_job.name) + .limit(100) + ).run(as_dict=True) + + for step in steps_to_update: + ( + jingrow.qb.update(agent_job_step) + .where( + (agent_job_step.agent_job == step.agent_job) + & (agent_job_step.name.isin(step.step_names.split(","))) + & (agent_job_step.status.isin(["Pending", "Running"])) + ) + .set(agent_job_step.status, step.job_status) + ).run() + + +def on_pagetype_update(): + jingrow.db.add_index("Agent Job", ["status", "server"]) + jingrow.db.add_index("Agent Job", ["reference_pagetype", "reference_name"]) + # We don't need modified index, it's harmful on constantly updating tables + jingrow.db.sql_ddl("drop index if exists modified on `tabAgent Job`") + jingrow.db.add_index("Agent Job", ["creation"]) + + +def to_str(data) -> str: + if isinstance(data, str): + return data + + try: + return json.dumps(data, default=str) + except Exception: + pass + + try: + return str(data) + except Exception: + return "" + + +def flush(): + log_file = os.path.join(jingrow.utils.get_bench_path(), "logs", f"{AGENT_LOG_KEY}.json.log") + try: + # Fetch all entries without removing from cache + logs = jingrow.cache().lrange(AGENT_LOG_KEY, 0, -1) + print("LOGS", logs) + if logs: + logs = list(map(jingrow.safe_decode, logs)) + with open(log_file, "a", os.O_NONBLOCK) as f: + f.write("\n".join(logs)) + f.write("\n") + # Remove fetched entries from cache + jingrow.cache().ltrim(AGENT_LOG_KEY, len(logs) - 1, -1) + except Exception: + traceback.print_exc() + + +def update_query_result_status_timestamps(results): + for result in results: + if result.status == "Undelivered": + result.status = "Pending" + elif result.status == "Delivery Failure": + result.status = "Failure" + + # agent job start and end are in utc + if result.start: + result.start = convert_utc_to_system_timezone(result.start).replace(tzinfo=None) + + if result.end: + result.end = convert_utc_to_system_timezone(result.end).replace(tzinfo=None) diff --git a/jcloud/jcloud/pagetype/agent_job/agent_job_dashboard.py b/jcloud/jcloud/pagetype/agent_job/agent_job_dashboard.py new file mode 100644 index 0000000..86b93b3 --- /dev/null +++ 
b/jcloud/jcloud/pagetype/agent_job/agent_job_dashboard.py @@ -0,0 +1,12 @@ +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +from jingrow import _ + + +def get_data(): + return { + "fieldname": "agent_job", + "transactions": [{"label": _("Related Documents"), "items": ["Agent Job Step"]}], + } diff --git a/jcloud/jcloud/pagetype/agent_job/agent_job_notifications.py b/jcloud/jcloud/pagetype/agent_job/agent_job_notifications.py new file mode 100644 index 0000000..f524d82 --- /dev/null +++ b/jcloud/jcloud/pagetype/agent_job/agent_job_notifications.py @@ -0,0 +1,351 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt +from __future__ import annotations + +import typing +from enum import Enum, auto +from typing import Protocol, TypedDict + +import jingrow + +""" +Used to create notifications if the Agent Job error is something that can +be handled by the user. + +Based on http://git.jingrow.com:3000/jingrow/jcloud/pull/1544 + +To handle an error: +1. Create a pg page that helps the user get out of it under: jingrow.com/docs/common-issues +2. Check if the error is the known/expected one in `get_details`. +3. Update the details object with the correct values. +""" + + +class Details(TypedDict): + title: str | None + message: str + traceback: str | None + is_actionable: bool + assistance_url: str | None + + +# These strings are checked against the traceback or output of the job +MatchStrings = str | list[str] + +if typing.TYPE_CHECKING: + # TYPE_CHECKING guard for code below cause DeployCandidate + # might cause circular import. + + from jcloud.jcloud.pagetype.agent_job.agent_job import AgentJob + + class UserAddressableHandler(Protocol): + def __call__( + self, + details: Details, + job: AgentJob, + ) -> bool: # Return True if is_actionable + ... + + UserAddressableHandlerTuple = tuple[ + MatchStrings, + UserAddressableHandler, + ] + + +class JobErr(Enum): + OOM = auto() + ROW_SIZE_TOO_LARGE = auto() + DATA_TRUNCATED_FOR_COLUMN = auto() + BROKEN_PIPE_ERR = auto() + CANT_CONNECT_TO_MYSQL = auto() + GZIP_TAR_ERR = auto() + UNKNOWN_COMMAND_HYPHEN = auto() + + +DOC_URLS = { + JobErr.OOM: "https://jingrow.com/docs/common-issues/oom-issues", + JobErr.ROW_SIZE_TOO_LARGE: "https://jingrow.com/docs/faq/site#row-size-too-large-error-on-migrate", + JobErr.DATA_TRUNCATED_FOR_COLUMN: "https://jingrow.com/docs/faq/site#data-truncated-for-column", + JobErr.BROKEN_PIPE_ERR: None, + JobErr.CANT_CONNECT_TO_MYSQL: "https://jingrow.com/docs/cant-connect-to-mysql-server", + JobErr.GZIP_TAR_ERR: "https://jingrow.com/docs/sites/migrate-an-existing-site#tar-gzip-command-fails-with-unexpected-eof", + JobErr.UNKNOWN_COMMAND_HYPHEN: "https://jingrow.com/docs/unknown-command-", +} + + +def handlers() -> list[UserAddressableHandlerTuple]: + """ + Before adding anything here, view the type: + `UserAddressableHandlerTuple` + + The first value of the tuple is `MatchStrings` which + a list of strings (or a single string) which if they + are present in the `traceback` or the `output` + then then second value i.e. `UserAddressableHandler` + is called. + + `UserAddressableHandler` is used to update the details + used to create the Jcloud Notification + + `UserAddressableHandler` can return False if it isn't + user addressable, in this case the remaining handler + tuple will be checked. + + Due to this order of the tuples matter. 
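    As a purely illustrative sketch (the match string and the
    `update_with_disk_full_err` name are hypothetical and not part of the
    list below), a new entry would be registered as:

        ("No space left on device", update_with_disk_full_err),

    where `update_with_disk_full_err(details, job)` fills in
    `details["title"]`, `details["message"]` and `details["assistance_url"]`,
    and returns True only when the failure is something the user can
    resolve on their own.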
+ """ + return [ + ("returned non-zero exit status 137", update_with_oom_err), + ("returned non-zero exit status 143", update_with_oom_err), + ("Row size too large", update_with_row_size_too_large_err), + ("Data truncated for column", update_with_data_truncated_for_column_err), + ("BrokenPipeError", update_with_broken_pipe_err), + ("ERROR 2002 (HY000)", update_with_cant_connect_to_mysql_err), + ("gzip: stdin: unexpected end of file", update_with_gzip_tar_err), + ("tar: Unexpected EOF in archive", update_with_gzip_tar_err), + ("Unknown command '\\-'.", update_with_unknown_command_hyphen_err), + ] + + +def create_job_failed_notification( + job: AgentJob, + team: str, + notification_type: str = "Agent Job Failure", + title: str = "", + message: str = "", +) -> bool: + """ + Used to create jcloud notifications on Job failures. If the notification + is actionable then it will be displayed on the dashboard. + + Returns True if job failure is_actionable + """ + + details = get_details(job, title, message) + pg_dict = { + "pagetype": "Jcloud Notification", + "team": team, + "type": notification_type, + "document_type": job.pagetype, + "document_name": job.name, + "class": "Error", + **details, + } + pg = jingrow.get_pg(pg_dict) + pg.insert() + jingrow.db.commit() + + jingrow.publish_realtime("jcloud_notification", pagetype="Jcloud Notification", message={"team": team}) + + return details["is_actionable"] + + +def get_details(job: AgentJob, title: str, message: str) -> Details: + tb = job.traceback or "" + output = job.output or "" + title = title or get_default_title(job) + message = message or get_default_message(job) + + details = Details( + title=title, + message=message, + traceback=tb, + is_actionable=False, + assistance_url=None, + ) + + for strs, handler in handlers(): + if isinstance(strs, str): + strs = [strs] + + if not (is_match := all(s in tb for s in strs)): + is_match = all(s in output for s in strs) + + if not is_match: + continue + + if handler(details, job): + details["is_actionable"] = True + break + details["title"] = title + details["message"] = message + details["traceback"] = tb + details["is_actionable"] = False + details["assistance_url"] = None + + return details + + +def update_with_oom_err( + details: Details, + job: AgentJob, +): + details["title"] = "Server out of memory error" + + details[ + "message" + ] = f"""

The server ran out of memory while {job.job_type} job was running and was killed by the system.

It is recommended to increase the memory available for the server {job.server}.

To rectify this issue, please follow the steps mentioned in Help.

+ """ + + details["assistance_url"] = DOC_URLS[JobErr.OOM] + + # user addressable if the server is a dedicated server + if not jingrow.db.get_value(job.server_type, job.server, "public"): + return True + return False + + +def update_with_row_size_too_large_err(details: Details, job: AgentJob): + details["title"] = "Row size too large error" + + details[ + "message" + ] = f"""

The server encountered a row size too large error while migrating the site {job.site}.

This tends to happen on doctypes with many custom fields.

To rectify this issue, please follow the steps mentioned in Help.

+ """ + + details["assistance_url"] = DOC_URLS[JobErr.ROW_SIZE_TOO_LARGE] + + return True + + +def update_with_data_truncated_for_column_err(details: Details, job: AgentJob): + details["title"] = "Data truncated for column error" + + details[ + "message" + ] = f"""

The server encountered a data truncated for column error while migrating the site {job.site}.

This tends to happen when the datatype of a field changes but there is existing data in the pagetype that doesn't fit the new datatype.

To rectify this issue, please follow the steps mentioned in Help.

+ """ + + details["assistance_url"] = DOC_URLS[JobErr.DATA_TRUNCATED_FOR_COLUMN] + + return True + + +def update_with_broken_pipe_err(details: Details, job: AgentJob): + if not job.failed_because_of_agent_update: + return False + + details["title"] = "Job failed due to maintenance activity on the server" + + details[ + "message" + ] = f"""

The ongoing job coincided with a maintenance activity on the server {job.server} and hence failed.

Please try again in a few minutes.

+ """ + + return True + + +def update_with_cant_connect_to_mysql_err(details: Details, job: AgentJob): + details["title"] = "Can't connect to MySQL server" + + suggestion = "To rectify this issue, please follow the steps mentioned in Help." + if job.on_public_server: + suggestion = "Please raise a support ticket if the issue persists." + + details[ + "message" + ] = f"""

The server couldn't connect to the MySQL server during the job. This likely happened because the MySQL server restarted after running out of memory during the operation.

{suggestion}

+ """ + + details["assistance_url"] = DOC_URLS[JobErr.CANT_CONNECT_TO_MYSQL] + + return True + + +def update_with_gzip_tar_err(details: Details, job: AgentJob): + details["title"] = "Corrupt backup file" + + details["message"] = f"""

An error occurred when extracting the backup to {job.site}.

To rectify this issue, please follow the steps mentioned in Help.

+ """ + + details["assistance_url"] = DOC_URLS[JobErr.GZIP_TAR_ERR] + + return True + + +def update_with_unknown_command_hyphen_err(details: Details, job: AgentJob): + details["title"] = "Incompatible site backup" + + details["message"] = f"""

An error occurred when extracting the backup to {job.site}.

This happens when the backup is taken from a later version of MariaDB and restored on an older version.

To rectify this issue, please follow the steps mentioned in Help.

+ """ + + details["assistance_url"] = DOC_URLS[JobErr.UNKNOWN_COMMAND_HYPHEN] + + return True + + +def get_default_title(job: AgentJob) -> str: + if job.job_type == "Update Site Migrate": + return "Site Migrate" + if job.job_type == "Update Site Pull": + return "Site Update" + if job.job_type.startswith("Recover Failed"): + return "Site Recovery" + return "Job Failure" + + +def get_default_message(job: AgentJob) -> str: + if job.job_type == "Update Site Migrate": + return f"Site {job.site} failed to migrate" + if job.job_type == "Update Site Pull": + return f"Site {job.site} failed to update" + if job.job_type.startswith("Recover Failed"): + return f"Site {job.site} failed to recover after a failed update/migration" + if job.site: + return f"{job.job_type} job failed on site {job.site}." + return f"{job.job_type} job failed on server {job.server}." + + +def send_job_failure_notification(job: AgentJob): + from jcloud.jcloud.pagetype.site_migration.site_migration import ( + get_ongoing_migration, + job_matches_site_migration, + ) + + # site migration has its own notification handling + site_migration = get_ongoing_migration(job.site) if job.site else None + if site_migration and job_matches_site_migration(job, site_migration): + return + + notification_type = get_notification_type(job) + team = None + + if job.reference_pagetype == "Site Database User": + return + + if job.site: + team = jingrow.get_value("Site", job.site, "team") + else: + if job.server_type not in ["Server", "Database Server"]: + return + + server = jingrow.db.get_value(job.server_type, job.server, ["team", "public"], as_dict=True) + if server["public"]: + return + + team = server["team"] + + if not team: + return + + create_job_failed_notification(job, team, notification_type) + + +def get_notification_type(job: AgentJob) -> str: + if job.job_type == "Update Site Migrate": + return "Site Migrate" + if job.job_type == "Update Site Pull": + return "Site Update" + if job.job_type.startswith("Recover Failed"): + return "Site Recovery" + return "Agent Job Failure" diff --git a/jcloud/jcloud/pagetype/agent_job/patches/__init__.py b/jcloud/jcloud/pagetype/agent_job/patches/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/agent_job/patches/update_status_for_undelivered_jobs.py b/jcloud/jcloud/pagetype/agent_job/patches/update_status_for_undelivered_jobs.py new file mode 100644 index 0000000..e0efa21 --- /dev/null +++ b/jcloud/jcloud/pagetype/agent_job/patches/update_status_for_undelivered_jobs.py @@ -0,0 +1,15 @@ +import jingrow +from jingrow.utils import add_to_date, now_datetime + + +def execute(): + five_minute_ago = add_to_date(now_datetime(), minutes=-5) + + jingrow.db.sql( + """ + UPDATE `tabAgent Job` + SET status = 'Delivery Failure' + WHERE job_id = 0 and status = 'Undelivered' and creation <= %s + """, + five_minute_ago, + ) diff --git a/jcloud/jcloud/pagetype/agent_job/test_agent_job.py b/jcloud/jcloud/pagetype/agent_job/test_agent_job.py new file mode 100644 index 0000000..9cc535f --- /dev/null +++ b/jcloud/jcloud/pagetype/agent_job/test_agent_job.py @@ -0,0 +1,276 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# See license.txt + + +import json +import unittest +from contextlib import contextmanager +from typing import Callable, Literal +from unittest.mock import Mock, patch + +import jingrow +import responses +from jingrow.model.naming import make_autoname + +from jcloud.agent import Agent +from jcloud.jcloud.pagetype.agent_job.agent_job import AgentJob, 
lock_pg_updated_by_job +from jcloud.jcloud.pagetype.site.test_site import create_test_bench, create_test_site +from jcloud.jcloud.pagetype.team.test_team import create_test_jcloud_admin_team +from jcloud.utils.test import foreground_enqueue, foreground_enqueue_pg + + +def fn_appender(before_insert: Callable, prepare_agent_responses: Callable): + def new_before_insert(self): + before_insert(self) + prepare_agent_responses(self) + + return new_before_insert + + +def before_insert(self): + return None + + +def fake_agent_job_req( + job_type: str, + status: Literal["Success", "Pending", "Running", "Failure"], + data: dict, + steps: list[dict], +) -> Callable: + data = data or {} + steps = steps or [] + + def prepare_agent_responses(self): + """ + Fake successful delivery with fake job id + + Also return fake result on polling + steps: list of {"name": "Step name", "status": "status"} dictionaries + """ + nonlocal status + nonlocal job_type + if self.job_type != job_type: # only fake the job we want to fake + return + job_id = int(make_autoname(".#")) + if steps: + needed_steps = jingrow.get_all( + "Agent Job Type Step", {"parent": job_type}, pluck="step_name" + ) + for step in needed_steps: + if not any(step == s["name"] for s in steps): + steps.append( + { + "name": step, + "status": "Success", + "data": {}, + } + ) + + for step in steps: + step["start"] = "2023-08-20 18:24:28.024885" + step["data"] = {} + if step["status"] in ["Success", "Failure"]: + step["duration"] = "00:00:13.464445" + step["end"] = "2023-08-20 18:24:41.489330" + if step["status"] in ["Success", "Failure", "Running"]: + step["start"] = "2023-08-20 18:24:28.024885" + step["end"] = None + step["duration"] = None + + # TODO: auto add response corresponding to request type # + responses.post( + f"https://{self.server}:443/agent/{self.request_path}", + json={"job": job_id}, + status=200, + ) + responses.delete( + f"https://{self.server}:443/agent/{self.request_path}", + json={"job": job_id}, + status=200, + ) + # TODO: make the next url regex for multiple job ids # + responses.add( + responses.GET, + f"https://{self.server}:443/agent/jobs/{str(job_id)}", + # TODO: populate steps with data from agent job type # + json={ + "data": data, + # TODO: uncomment lines as needed and make new parameters # + "duration": "00:00:13.496281", + "end": "2023-08-20 18:24:41.506067", + # "enqueue": "2023-08-20 18:24:27.907340", + "id": job_id, + # "name": "Install App on Site", + "start": "2023-08-20 18:24:28.009786", + "status": status, + "steps": steps + or [ + { + "data": { + # "command": "docker exec -w /home/jingrow/jingrow-bench bench-0001-000025-f1 bench --site fdesk-old.local.jingrow.dev install-app helpdesk", + # "directory": "/home/jingrow/benches/bench-0001-000025-f1", + # "duration": "0:00:13.447104", + # "end": "2023-08-20 18:24:41.482031", + # "output": "Installing helpdesk...\nUpdating PageTypes for helpdesk\t : [========================================] 100%\nUpdating Dashboard for helpdesk", + # "start": "2023-08-20 18:24:28.034927", + }, + "duration": "00:00:13.464445", + "end": "2023-08-20 18:24:41.489330", + # "id": 1350, + "name": job_type, + "start": "2023-08-20 18:24:28.024885", + "status": status, + } + ], + }, + status=200, + ) + + global before_insert + before_insert = fn_appender(before_insert, prepare_agent_responses) + return before_insert + + +@contextmanager +def fake_agent_job( + job_type: str, + status: Literal["Success", "Pending", "Running", "Failure"] = "Success", + data: dict = None, + steps: list[dict] = 
None, +): + """Fakes agent job request and response. Also polls the job. + + HEADS UP: Don't use this when you're mocking enqueue_http_request in your test context + """ + with responses.mock, patch.object( + AgentJob, + "before_insert", + fake_agent_job_req(job_type, status, data, steps), + create=True, + ), patch( + "jcloud.jcloud.pagetype.agent_job.agent_job.jingrow.enqueue_pg", + new=foreground_enqueue_pg, + ), patch( + "jcloud.jcloud.pagetype.agent_job.agent_job.jingrow.enqueue", + new=foreground_enqueue, + ), patch( + "jcloud.jcloud.pagetype.agent_job.agent_job.jingrow.db.commit", new=Mock() + ), patch( + "jcloud.jcloud.pagetype.agent_job.agent_job.jingrow.db.rollback", new=Mock() + ): + jingrow.local.role_permissions = ( + {} + ) # due to bug in FF related to only_if_creator docperm + yield + global before_insert + before_insert = lambda self: None # noqa + + +@patch.object(AgentJob, "enqueue_http_request", new=Mock()) +class TestAgentJob(unittest.TestCase): + def setUp(self): + self.team = create_test_jcloud_admin_team() + self.team.allocate_credit_amount(1000, source="Prepaid Credits", remark="Test") + self.team.payment_mode = "Prepaid Credits" + self.team.save() + + def tearDown(self): + jingrow.db.rollback() + jingrow.set_user("Administrator") + + @patch.object(Agent, "reload_nginx") + def test_suspend_sites_issues_reload_in_bulk(self, mock_reload_nginx): + from .agent_job import suspend_sites + + bench1 = create_test_bench() + bench2 = create_test_bench() + bench3 = create_test_bench() + + jingrow.set_user(self.team.user) + site1 = create_test_site(bench=bench1) + site2 = create_test_site(bench=bench2) + create_test_site(bench=bench3) # control; no suspend + + site1.db_set("current_database_usage", 101) + site2.db_set("current_disk_usage", 101) + jingrow.db.set_single_value("Jcloud Settings", "enforce_storage_limits", True) + suspend_sites() + suspend_jobs = jingrow.get_all( + "Agent Job", {"job_type": "Update Site Status"}, ["request_data"] + ) + for job in suspend_jobs: + self.assertTrue(json.loads(job.request_data).get("skip_reload")) + + self.assertEqual(len(suspend_jobs), 2) + self.assertEqual( + mock_reload_nginx.call_count, + jingrow.db.count("Proxy Server", {"status": "Active"}), + ) + + def test_lock_pg_updated_by_job_respects_hierarchy(self): + """ + Site > Bench > Server + """ + site = create_test_site() # creates job + site.update_site_config({"maintenance_mode": "1"}) + job = jingrow.get_last_pg("Agent Job", {"job_type": "Update Site Configuration"}) + pg_name = lock_pg_updated_by_job(job.name) + self.assertIsNone(pg_name) + job = jingrow.get_last_pg("Agent Job", {"job_type": "New Site"}) + pg_name = lock_pg_updated_by_job(job.name) + self.assertEqual(site.name, pg_name) + job.db_set("site", None) + pg_name = lock_pg_updated_by_job(job.name) + self.assertEqual(site.bench, pg_name) + job.db_set("bench", None) + pg_name = lock_pg_updated_by_job(job.name) + self.assertEqual(site.server, pg_name) + job.db_set("server", None) # will realistically never happen + pg_name = lock_pg_updated_by_job(job.name) + self.assertIsNone(pg_name) + + @patch("jcloud.jcloud.pagetype.site.site.create_dns_record", new=Mock()) + @patch("jcloud.jcloud.pagetype.site.site._change_dns_record", new=Mock()) + def test_lock_pg_updated_by_job_locks_on_site_rename(self): + site = create_test_site() + site.subdomain = "renamed-domain" + site.save() + job = jingrow.get_last_pg("Agent Job", {"job_type": "Rename Site"}) + pg_name = lock_pg_updated_by_job(job.name) + self.assertEqual(site.name, pg_name) 
+ job = jingrow.get_last_pg("Agent Job", {"job_type": "Rename Site on Upstream"}) + pg_name = lock_pg_updated_by_job(job.name) + self.assertEqual(site.name, pg_name) + + def test_no_duplicate_undelivered_job(self): + site = create_test_site() + site.update_site_config({"maintenance_mode": "1"}) + job = jingrow.get_last_pg("Agent Job", {"job_type": "Update Site Configuration"}) + + jingrow.db.set_single_value("Jcloud Settings", "disable_agent_job_deduplication", False) + + # create a new job with same type and site + job_name = site.update_site_config({"host_name": f"https://{site.host_name}"}) + + self.assertEqual(job_name.name, job.name) + + def test_get_similar_in_execution_job(self): + site = create_test_site() + site.update_site_config({"maintenance_mode": "1"}) + job = jingrow.get_last_pg("Agent Job", {"job_type": "Update Site Configuration"}) + + jingrow.db.set_single_value("Jcloud Settings", "disable_agent_job_deduplication", False) + + # check if similar job exists + agent = Agent(site.server) + in_execution_job = agent.get_similar_in_execution_job( + job_type="Update Site Configuration", + path=f"benches/{site.bench}/sites/{site.name}/config", + bench=site.bench, + site=site.name, + ) + + self.assertEqual(in_execution_job.name, job.name) + + jingrow.db.set_single_value("Jcloud Settings", "disable_agent_job_deduplication", True) diff --git a/jcloud/jcloud/pagetype/agent_job_step/__init__.py b/jcloud/jcloud/pagetype/agent_job_step/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/agent_job_step/agent_job_step.js b/jcloud/jcloud/pagetype/agent_job_step/agent_job_step.js new file mode 100644 index 0000000..12fb2ce --- /dev/null +++ b/jcloud/jcloud/pagetype/agent_job_step/agent_job_step.js @@ -0,0 +1,7 @@ +// Copyright (c) 2020, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Agent Job Step', { + // refresh: function(frm) { + // } +}); diff --git a/jcloud/jcloud/pagetype/agent_job_step/agent_job_step.json b/jcloud/jcloud/pagetype/agent_job_step/agent_job_step.json new file mode 100644 index 0000000..4f6d806 --- /dev/null +++ b/jcloud/jcloud/pagetype/agent_job_step/agent_job_step.json @@ -0,0 +1,129 @@ +{ + "actions": [], + "creation": "2020-01-14 10:47:46.916471", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "step_name", + "status", + "agent_job", + "column_break_4", + "start", + "end", + "duration", + "section_break_8", + "data", + "output", + "traceback" + ], + "fields": [ + { + "fieldname": "agent_job", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Agent Job", + "options": "Agent Job", + "read_only": 1, + "reqd": 1, + "search_index": 1 + }, + { + "fieldname": "step_name", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Step Name", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "status", + "fieldtype": "Select", + "in_list_view": 1, + "label": "Status", + "options": "Pending\nRunning\nSuccess\nFailure\nSkipped\nDelivery Failure", + "read_only": 1, + "reqd": 1, + "search_index": 1 + }, + { + "fieldname": "output", + "fieldtype": "Code", + "label": "Output", + "read_only": 1 + }, + { + "fieldname": "traceback", + "fieldtype": "Code", + "label": "Traceback", + "read_only": 1 + }, + { + "fieldname": "data", + "fieldtype": "Code", + "label": "Data", + "read_only": 1 + }, + { + "fieldname": "start", + "fieldtype": "Datetime", + "label": "Start", + "read_only": 1 + }, + { + "fieldname": "end", + "fieldtype": "Datetime", + "label": "End", + 
"read_only": 1 + }, + { + "fieldname": "duration", + "fieldtype": "Time", + "in_list_view": 1, + "label": "Duration", + "read_only": 1 + }, + { + "fieldname": "section_break_8", + "fieldtype": "Section Break" + }, + { + "fieldname": "column_break_4", + "fieldtype": "Column Break" + } + ], + "in_create": 1, + "links": [], + "modified": "2023-10-20 11:27:22.573494", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Agent Job Step", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + }, + { + "create": 1, + "role": "Jcloud Admin" + }, + { + "create": 1, + "role": "Jcloud Member" + } + ], + "sort_field": "creation", + "sort_order": "DESC", + "states": [], + "title_field": "step_name", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/agent_job_step/agent_job_step.py b/jcloud/jcloud/pagetype/agent_job_step/agent_job_step.py new file mode 100644 index 0000000..3033d9c --- /dev/null +++ b/jcloud/jcloud/pagetype/agent_job_step/agent_job_step.py @@ -0,0 +1,35 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + +import jingrow +from jingrow.model.document import Document + + +class AgentJobStep(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + agent_job: DF.Link + data: DF.Code | None + duration: DF.Time | None + end: DF.Datetime | None + output: DF.Code | None + start: DF.Datetime | None + status: DF.Literal[ + "Pending", "Running", "Success", "Failure", "Skipped", "Delivery Failure" + ] + step_name: DF.Data + traceback: DF.Code | None + # end: auto-generated types + + +def on_pagetype_update(): + # We don't need modified index, it's harmful on constantly updating tables + jingrow.db.sql_ddl("drop index if exists modified on `tabAgent Job Step`") + jingrow.db.add_index("Agent Job Step", ["creation"]) diff --git a/jcloud/jcloud/pagetype/agent_job_step/test_agent_job_step.py b/jcloud/jcloud/pagetype/agent_job_step/test_agent_job_step.py new file mode 100644 index 0000000..72d493d --- /dev/null +++ b/jcloud/jcloud/pagetype/agent_job_step/test_agent_job_step.py @@ -0,0 +1,11 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# See license.txt + + +# import jingrow +import unittest + + +class TestAgentJobStep(unittest.TestCase): + pass diff --git a/jcloud/jcloud/pagetype/agent_job_type/__init__.py b/jcloud/jcloud/pagetype/agent_job_type/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/agent_job_type/agent_job_type.js b/jcloud/jcloud/pagetype/agent_job_type/agent_job_type.js new file mode 100644 index 0000000..2bd700c --- /dev/null +++ b/jcloud/jcloud/pagetype/agent_job_type/agent_job_type.js @@ -0,0 +1,7 @@ +// Copyright (c) 2020, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Agent Job Type', { + // refresh: function(frm) { + // } +}); diff --git a/jcloud/jcloud/pagetype/agent_job_type/agent_job_type.json b/jcloud/jcloud/pagetype/agent_job_type/agent_job_type.json new file mode 100644 index 0000000..0426c46 --- /dev/null +++ b/jcloud/jcloud/pagetype/agent_job_type/agent_job_type.json @@ -0,0 +1,86 @@ +{ + "actions": [], + "autoname": "Prompt", + "creation": "2020-01-14 10:43:38.943661", + 
"pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "steps", + "agent_request_section", + "request_path", + "request_method", + "job_retry_section", + "disabled_auto_retry", + "max_retry_count" + ], + "fields": [ + { + "fieldname": "steps", + "fieldtype": "Table", + "label": "Steps", + "options": "Agent Job Type Step", + "reqd": 1 + }, + { + "fieldname": "agent_request_section", + "fieldtype": "Section Break", + "label": "Agent Request" + }, + { + "fieldname": "request_path", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Request Path" + }, + { + "fieldname": "request_method", + "fieldtype": "Select", + "in_list_view": 1, + "label": "Request Method", + "options": "POST\nDELETE" + }, + { + "default": "3", + "fieldname": "max_retry_count", + "fieldtype": "Int", + "label": "Max Retry Count" + }, + { + "default": "1", + "fieldname": "disabled_auto_retry", + "fieldtype": "Check", + "label": "Disabled" + }, + { + "fieldname": "job_retry_section", + "fieldtype": "Section Break", + "label": "Job Retry" + } + ], + "links": [], + "modified": "2023-11-16 12:19:35.365980", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Agent Job Type", + "naming_rule": "Set by user", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/agent_job_type/agent_job_type.py b/jcloud/jcloud/pagetype/agent_job_type/agent_job_type.py new file mode 100644 index 0000000..088647f --- /dev/null +++ b/jcloud/jcloud/pagetype/agent_job_type/agent_job_type.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + +import jingrow +from jingrow.model.document import Document + +from jcloud.utils import ttl_cache + + +class AgentJobType(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + from jcloud.jcloud.pagetype.agent_job_type_step.agent_job_type_step import ( + AgentJobTypeStep, + ) + + disabled_auto_retry: DF.Check + max_retry_count: DF.Int + request_method: DF.Literal["POST", "DELETE"] + request_path: DF.Data | None + steps: DF.Table[AgentJobTypeStep] + # end: auto-generated types + + def on_update(self): + get_retryable_job_types_and_max_retry_count.cache.invalidate() + + +@ttl_cache() +def get_retryable_job_types_and_max_retry_count(): + job_types, max_retry_per_job_type = [], {} + for job_type in jingrow.get_all( + "Agent Job Type", + filters={"disabled_auto_retry": 0, "max_retry_count": [">", 0]}, + fields=["name", "max_retry_count"], + ): + job_types.append(job_type["name"]) + max_retry_per_job_type[job_type["name"]] = job_type["max_retry_count"] + + return job_types, max_retry_per_job_type diff --git a/jcloud/jcloud/pagetype/agent_job_type/test_agent_job_type.py b/jcloud/jcloud/pagetype/agent_job_type/test_agent_job_type.py new file mode 100644 index 0000000..49f00ca --- /dev/null +++ b/jcloud/jcloud/pagetype/agent_job_type/test_agent_job_type.py @@ -0,0 +1,11 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# See license.txt + + +# import jingrow +import unittest + + +class TestAgentJobType(unittest.TestCase): + pass diff --git a/jcloud/jcloud/pagetype/agent_job_type_step/__init__.py b/jcloud/jcloud/pagetype/agent_job_type_step/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/agent_job_type_step/agent_job_type_step.json b/jcloud/jcloud/pagetype/agent_job_type_step/agent_job_type_step.json new file mode 100644 index 0000000..0e2b92f --- /dev/null +++ b/jcloud/jcloud/pagetype/agent_job_type_step/agent_job_type_step.json @@ -0,0 +1,31 @@ +{ + "actions": [], + "creation": "2020-01-14 10:43:26.284328", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "step_name" + ], + "fields": [ + { + "fieldname": "step_name", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Step Name", + "reqd": 1 + } + ], + "istable": 1, + "links": [], + "modified": "2020-01-14 10:43:26.284328", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Agent Job Type Step", + "owner": "Administrator", + "permissions": [], + "quick_entry": 1, + "sort_field": "modified", + "sort_order": "DESC", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/agent_job_type_step/agent_job_type_step.py b/jcloud/jcloud/pagetype/agent_job_type_step/agent_job_type_step.py new file mode 100644 index 0000000..b41146b --- /dev/null +++ b/jcloud/jcloud/pagetype/agent_job_type_step/agent_job_type_step.py @@ -0,0 +1,25 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +# import jingrow +from jingrow.model.document import Document + + +class AgentJobTypeStep(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + step_name: DF.Data + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/agent_request_failure/__init__.py b/jcloud/jcloud/pagetype/agent_request_failure/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/agent_request_failure/agent_request_failure.js b/jcloud/jcloud/pagetype/agent_request_failure/agent_request_failure.js new file mode 100644 index 0000000..fc6f3ba --- /dev/null +++ b/jcloud/jcloud/pagetype/agent_request_failure/agent_request_failure.js @@ -0,0 +1,8 @@ +// Copyright (c) 2024, JINGROW +// For license information, please see license.txt + +// jingrow.ui.form.on("Agent Request Failure", { +// refresh(frm) { + +// }, +// }); diff --git a/jcloud/jcloud/pagetype/agent_request_failure/agent_request_failure.json b/jcloud/jcloud/pagetype/agent_request_failure/agent_request_failure.json new file mode 100644 index 0000000..37029b5 --- /dev/null +++ b/jcloud/jcloud/pagetype/agent_request_failure/agent_request_failure.json @@ -0,0 +1,98 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2024-06-10 15:55:14.514080", + "pagetype": "PageType", + "engine": "MyISAM", + "field_order": [ + "server_type", + "server", + "column_break_bxet", + "failure_count", + "section_break_xror", + "error", + "traceback" + ], + "fields": [ + { + "fieldname": "server_type", + "fieldtype": "Link", + "in_filter": 1, + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Server Type", + "options": "PageType", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "server", + "fieldtype": "Dynamic Link", + "in_filter": 1, + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Server", + "options": "server_type", + "read_only": 1, + "reqd": 1, + "search_index": 1 + }, + { + "fieldname": "column_break_bxet", + "fieldtype": "Column Break" + }, + { + "fieldname": "section_break_xror", + "fieldtype": "Section Break" + }, + { + "fieldname": "error", + "fieldtype": "Code", + "label": "Error", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "traceback", + "fieldtype": "Code", + "label": "Traceback", + "read_only": 1, + "reqd": 1 + }, + { + "default": "1", + "fieldname": "failure_count", + "fieldtype": "Int", + "in_list_view": 1, + "label": "Failure Count", + "read_only": 1, + "reqd": 1 + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2024-06-12 15:05:52.948905", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Agent Request Failure", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "show_title_field_in_link": 1, + "sort_field": "creation", + "sort_order": "DESC", + "states": [], + "title_field": "server" +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/agent_request_failure/agent_request_failure.py b/jcloud/jcloud/pagetype/agent_request_failure/agent_request_failure.py new file mode 100644 index 0000000..4dfb3b6 --- /dev/null +++ b/jcloud/jcloud/pagetype/agent_request_failure/agent_request_failure.py @@ -0,0 +1,81 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt + +import jingrow +import requests +from jingrow.model.document import Document + +from jcloud.agent import Agent +from jcloud.utils import log_error + + +class 
AgentRequestFailure(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + error: DF.Code + failure_count: DF.Int + server: DF.DynamicLink + server_type: DF.Link + traceback: DF.Code + # end: auto-generated types + + pass + + def before_insert(self): + # TODO: Remove once tests pass + if jingrow.flags.in_test: + print(jingrow.get_traceback(with_context=True)) + + +def is_server_archived(failure): + # Server was archived more than an hour ago + server = jingrow.db.get_value(failure.server_type, failure.server, ["status", "modified"], as_dict=True) + if (server.status == "Archived") and (server.modified < jingrow.utils.add_to_date(None, hours=-1)): + return True + return False + + +def remove_old_failures(): + failures = jingrow.get_all( + "Agent Request Failure", + ["name", "server_type", "server", "failure_count"], + order_by="creation ASC", + ) + for failure in failures: + delta = 0 + try: + agent = Agent(failure.server, failure.server_type) + agent.raw_request("GET", "ping", raises=True, timeout=(1, 5)) + except (requests.ConnectTimeout, requests.ReadTimeout, requests.ConnectionError): + # Server is still down, either because + # 1. Couldn't connect + # 2. Couldn't respond in time, + # increment the failure count + delta = 1 + except requests.RequestException: + # Something still wrong with the connection, ignore for now. + pass + except Exception: + # Something weird happened, probably not related to requests + log_error("Agent Status Check Failure", failure=failure) + else: + # Server responded, aggressively decrement the failure count + # Aggressively + delta = -100 + + if delta: + if failure.failure_count + delta <= 0 or is_server_archived(failure): + jingrow.delete_pg("Agent Request Failure", failure.name) + else: + jingrow.db.set_value( + "Agent Request Failure", + failure.name, + "failure_count", + failure.failure_count + delta, + ) diff --git a/jcloud/jcloud/pagetype/agent_request_failure/test_agent_request_failure.py b/jcloud/jcloud/pagetype/agent_request_failure/test_agent_request_failure.py new file mode 100644 index 0000000..e9935fd --- /dev/null +++ b/jcloud/jcloud/pagetype/agent_request_failure/test_agent_request_failure.py @@ -0,0 +1,9 @@ +# Copyright (c) 2024, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests.utils import JingrowTestCase + + +class TestAgentRequestFailure(JingrowTestCase): + pass diff --git a/jcloud/jcloud/pagetype/alertmanager_webhook_log/__init__.py b/jcloud/jcloud/pagetype/alertmanager_webhook_log/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/alertmanager_webhook_log/alertmanager_webhook_log.js b/jcloud/jcloud/pagetype/alertmanager_webhook_log/alertmanager_webhook_log.js new file mode 100644 index 0000000..5b6a8f6 --- /dev/null +++ b/jcloud/jcloud/pagetype/alertmanager_webhook_log/alertmanager_webhook_log.js @@ -0,0 +1,7 @@ +// Copyright (c) 2021, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Alertmanager Webhook Log', { + // refresh: function(frm) { + // } +}); diff --git a/jcloud/jcloud/pagetype/alertmanager_webhook_log/alertmanager_webhook_log.json b/jcloud/jcloud/pagetype/alertmanager_webhook_log/alertmanager_webhook_log.json new file mode 100644 index 0000000..9d44918 --- /dev/null +++ b/jcloud/jcloud/pagetype/alertmanager_webhook_log/alertmanager_webhook_log.json @@ -0,0 +1,161 @@ +{ + "actions": 
[], + "creation": "2021-05-25 12:20:51.312501", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "status", + "alert", + "severity", + "column_break_3", + "external_url", + "truncated_alerts", + "combined_alerts", + "section_break_6", + "group_labels", + "group_key", + "column_break_8", + "common_labels", + "section_break_10", + "payload", + "reactions_tab", + "reaction_jobs" + ], + "fields": [ + { + "fieldname": "payload", + "fieldtype": "Code", + "label": "Payload", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "status", + "fieldtype": "Select", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Status", + "options": "Firing\nResolved", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "group_key", + "fieldtype": "Code", + "label": "Group Key", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "column_break_3", + "fieldtype": "Column Break" + }, + { + "fieldname": "external_url", + "fieldtype": "Data", + "label": "External URL", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "truncated_alerts", + "fieldtype": "Int", + "label": "Truncated Alerts", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "section_break_6", + "fieldtype": "Section Break" + }, + { + "fieldname": "group_labels", + "fieldtype": "Code", + "label": "Group Labels", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "column_break_8", + "fieldtype": "Column Break" + }, + { + "fieldname": "common_labels", + "fieldtype": "Code", + "label": "Common Labels", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "section_break_10", + "fieldtype": "Section Break" + }, + { + "fieldname": "combined_alerts", + "fieldtype": "Int", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Combined Alerts", + "read_only": 1, + "reqd": 1 + }, + { + "default": "Critical", + "fieldname": "severity", + "fieldtype": "Select", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Severity", + "options": "Critical\nWarning\nInformation", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "alert", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Alert", + "options": "Prometheus Alert Rule", + "read_only": 1, + "reqd": 1, + "search_index": 1 + }, + { + "fieldname": "reactions_tab", + "fieldtype": "Tab Break", + "label": "Reactions" + }, + { + "fieldname": "reaction_jobs", + "fieldtype": "Table", + "label": "Reaction Jobs", + "options": "Alertmanager Webhook Log Reaction Job" + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2024-04-30 15:39:21.126637", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Alertmanager Webhook Log", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/alertmanager_webhook_log/alertmanager_webhook_log.py b/jcloud/jcloud/pagetype/alertmanager_webhook_log/alertmanager_webhook_log.py new file mode 100644 index 0000000..717eed6 --- /dev/null +++ b/jcloud/jcloud/pagetype/alertmanager_webhook_log/alertmanager_webhook_log.py @@ -0,0 +1,292 @@ +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt +from __future__ import annotations + +import json +from functools import cached_property +from typing import TYPE_CHECKING + +import jingrow +from 
jingrow.core.utils import find +from jingrow.model.document import Document +from jingrow.utils import get_url_to_form +from jingrow.utils.background_jobs import enqueue_pg +from jingrow.utils.data import add_to_date + +from jcloud.exceptions import AlertRuleNotEnabled +from jcloud.jcloud.pagetype.incident.incident import INCIDENT_ALERT, INCIDENT_SCOPE +from jcloud.jcloud.pagetype.telegram_message.telegram_message import TelegramMessage +from jcloud.utils import log_error + +if TYPE_CHECKING: + from jcloud.jcloud.pagetype.prometheus_alert_rule.prometheus_alert_rule import ( + PrometheusAlertRule, + ) + +TELEGRAM_NOTIFICATION_TEMPLATE = """ +*{{ status }}* - *{{ severity }}*: {{ rule.name }} on {{ combined_alerts }} instances + +{{ rule.description }} + +Instances: +{%- for instance in instances %} + - [{{ instance["name"] }}]({{ instance["link"] }}) [→]({{ instance["name"] }}) +{%- endfor %} + +{% if labels -%} +Labels: +{%- for key, value in labels.items() %} + - {{ key }}: {{ value }} +{%- endfor %} +{%- endif %} + +""" + + +class AlertmanagerWebhookLog(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + from jcloud.jcloud.pagetype.alertmanager_webhook_log_reaction_job.alertmanager_webhook_log_reaction_job import ( + AlertmanagerWebhookLogReactionJob, + ) + + alert: DF.Link + combined_alerts: DF.Int + common_labels: DF.Code + external_url: DF.Data + group_key: DF.Code + group_labels: DF.Code + payload: DF.Code + reaction_jobs: DF.Table[AlertmanagerWebhookLogReactionJob] + severity: DF.Literal["Critical", "Warning", "Information"] + status: DF.Literal["Firing", "Resolved"] + truncated_alerts: DF.Int + # end: auto-generated types + + @staticmethod + def clear_old_logs(days=10): + from jingrow.query_builder import Interval + from jingrow.query_builder.functions import Now + + table = jingrow.qb.PageType("Alertmanager Webhook Log") + jingrow.db.delete(table, filters=(table.modified < (Now() - Interval(days=days)))) + + def validate(self): + self.parsed = json.loads(self.payload) + self.alert = self.parsed["groupLabels"].get("alertname") + if not self.alert: + raise AlertRuleNotEnabled("No alertname found in groupLabels") + self.status = self.parsed["status"].capitalize() + self.severity = self.parsed["commonLabels"]["severity"].capitalize() + self.group_key = self.parsed["groupKey"] + self.external_url = self.parsed["externalURL"] + self.truncated_alerts = self.parsed["truncatedAlerts"] + self.combined_alerts = len(self.parsed["alerts"]) + self.common_labels = json.dumps(self.parsed["commonLabels"], indent=2, sort_keys=True) + self.common_annotations = json.dumps(self.parsed["commonAnnotations"], indent=2, sort_keys=True) + self.group_labels = json.dumps(self.parsed["groupLabels"], indent=2, sort_keys=True) + self.common_labels = json.dumps(self.parsed["commonLabels"], indent=2, sort_keys=True) + + self.payload = json.dumps(self.parsed, indent=2, sort_keys=True) + + @property + def incident_scope(self): + return self.parsed_group_labels.get(INCIDENT_SCOPE) + + def after_insert(self): + if self.alert == INCIDENT_ALERT: + enqueue_pg( + self.pagetype, + self.name, + "validate_and_create_incident", + enqueue_after_commit=True, + job_id=f"validate_and_create_incident:{self.incident_scope}:{self.alert}", + deduplicate=True, + ) + if not jingrow.get_cached_value("Prometheus Alert Rule", self.alert, "silent"): + enqueue_pg(self.pagetype, 
self.name, "send_telegram_notification", enqueue_after_commit=True) + if self.status == "Firing" and jingrow.get_cached_value( + "Prometheus Alert Rule", self.alert, "jcloud_job_type" + ): + enqueue_pg( + self.pagetype, + self.name, + "react", + enqueue_after_commit=True, + job_id=f"react:{self.alert}:{self.server}", + deduplicate=True, + ) + + def react_for_instance(self, instance) -> dict: + instance_type = self.guess_pagetype(instance) + if not instance_type: + # Prometheus is monitoring instances we don't know about + return {} + rule: "PrometheusAlertRule" = jingrow.get_pg("Prometheus Alert Rule", self.alert) + labels = self.get_labels_for_instance(instance) + job = rule.react(instance_type, instance, labels) + if job: + return {"jcloud_job_type": job.job_type, "jcloud_job": job.name} + return {} + + def react(self): + for instance in self.get_instances_from_alerts_payload(self.payload): + reaction_job = self.react_for_instance(instance) + if reaction_job: + self.append("reaction_jobs", reaction_job) + self.save() + + def get_instances_from_alerts_payload(self, payload: str) -> set[str]: + instances = [] + payload = json.loads(payload) + instances.extend([alert["labels"]["instance"] for alert in payload["alerts"]]) # sites + return set(instances) + + def get_labels_for_instance(self, instance: str) -> dict: + # Find first alert that matches the instance + payload = json.loads(self.payload) + alert = find(payload["alerts"], lambda x: x["labels"]["instance"] == instance) + if alert: + return alert["labels"] + return {} + + def get_past_alert_instances(self): + past_alerts = jingrow.get_all( + self.pagetype, + fields=["payload"], + filters={ + "alert": self.alert, + "severity": self.severity, + "status": self.status, + "group_key": ("like", f"%{self.incident_scope}%"), + "modified": [ + ">", + add_to_date(jingrow.utils.now(), hours=-self.get_repeat_interval()), + ], + }, + group_by="group_key", + ignore_ifnull=True, + ) # get site down alerts grouped by benches + + instances = [] + for alert in past_alerts: + instances.extend(self.get_instances_from_alerts_payload(alert["payload"])) + return set(instances) + + def total_instances(self) -> int: + return jingrow.db.count( + "Site", + {"status": "Active", INCIDENT_SCOPE: self.incident_scope}, + ) + + def validate_and_create_incident(self): + if not jingrow.db.get_single_value("Incident Settings", "enable_incident_detection"): + return + if not (self.alert == INCIDENT_ALERT and self.severity == "Critical" and self.status == "Firing"): + return + cluster = jingrow.get_value("Server", self.incident_scope, "cluster") + rule: "PrometheusAlertRule" = jingrow.get_pg("Prometheus Alert Rule", self.alert) + if find(rule.ignore_on_clusters, lambda x: x.cluster == cluster): + return + + instances = self.get_past_alert_instances() + if len(instances) > min(0.4 * self.total_instances(), 15): + self.create_incident() + + def get_repeat_interval(self): + repeat_interval = jingrow.db.get_value("Prometheus Alert Rule", self.alert, "repeat_interval") + hours = repeat_interval.split("h")[0] # assume hours + return int(hours) + + def generate_telegram_message(self): + context = self.as_dict() + rule = jingrow.get_pg("Prometheus Alert Rule", self.alert) + + self.parsed = json.loads(self.payload) + self.instances = [ + { + "name": alert["labels"]["instance"], + "pagetype": alert["labels"].get("pagetype", self.guess_pagetype(alert["labels"]["instance"])), + } + for alert in self.parsed["alerts"][:20] + ] + + labels = self.parsed["groupLabels"] + 
labels.pop("alertname", None) + + for instance in self.instances: + if instance["pagetype"]: + instance["link"] = get_url_to_form(instance["pagetype"], instance["name"]) + + context.update({"instances": self.instances, "labels": labels, "rule": rule}) + return jingrow.render_template(TELEGRAM_NOTIFICATION_TEMPLATE, context) + + def guess_pagetype(self, name): + doctypes = [ + "Site", + "Bench", + "Server", + "Proxy Server", + "Database Server", + "Monitor Server", + "Log Server", + "Registry Server", + "Analytics Server", + "Site Domain", + "Trace Server", + ] + for pagetype in doctypes: + if jingrow.db.exists(pagetype, name): + return pagetype + return None + + def send_telegram_notification(self): + message = self.generate_telegram_message() + TelegramMessage.enqueue(message=message, topic=self.severity) + + @property + def bench(self): + return self.parsed_group_labels.get("bench") + + @property + def cluster(self): + return self.parsed_group_labels.get("cluster") + + @property + def server(self): + return self.parsed_group_labels.get("server") + + @cached_property + def parsed_group_labels(self) -> dict: + return json.loads(self.group_labels) + + def ongoing_incident_exists(self) -> bool: + ongoing_incident_status = jingrow.db.get_value( + "Incident", + { + "alert": self.alert, + INCIDENT_SCOPE: self.incident_scope, + "status": ("in", ["Validating", "Confirmed", "Acknowledged"]), + }, + "status", + for_update=True, + ) + return bool(ongoing_incident_status) + + def create_incident(self): + try: + if self.ongoing_incident_exists(): + return + incident = jingrow.new_pg("Incident") + incident.alert = self.alert + incident.server = self.server + incident.cluster = self.cluster + incident.save() + except Exception: + log_error("Incident creation failed") diff --git a/jcloud/jcloud/pagetype/alertmanager_webhook_log/test_alertmanager_webhook_log.py b/jcloud/jcloud/pagetype/alertmanager_webhook_log/test_alertmanager_webhook_log.py new file mode 100644 index 0000000..9de8996 --- /dev/null +++ b/jcloud/jcloud/pagetype/alertmanager_webhook_log/test_alertmanager_webhook_log.py @@ -0,0 +1,90 @@ +# Copyright (c) 2021, JINGROW +# See license.txt + +import json +import typing +import unittest +from datetime import datetime + +import jingrow + +from jcloud.jcloud.pagetype.prometheus_alert_rule.test_prometheus_alert_rule import ( + create_test_prometheus_alert_rule, +) +from jcloud.jcloud.pagetype.site.test_site import create_test_site + +if typing.TYPE_CHECKING: + from jcloud.jcloud.pagetype.prometheus_alert_rule.prometheus_alert_rule import ( + PrometheusAlertRule, + ) + from jcloud.jcloud.pagetype.site.site import Site + + +def create_test_alertmanager_webhook_log( + alert: "PrometheusAlertRule" = None, + creation: datetime = None, + site: "Site" = None, + status: str = "firing", +): + alert = alert or create_test_prometheus_alert_rule() + site = site or create_test_site() + return jingrow.get_pg( + { + "pagetype": "Alertmanager Webhook Log", + "alert": alert.name, + "creation": creation or jingrow.utils.now_datetime(), + "payload": json.dumps( + { + "alerts": [ + { + "annotations": { + "description": alert.description, + }, + "endsAt": "0001-01-01T00:00:00Z", + "fingerprint": "343699f90f81ee7b", + "labels": { + "alertname": alert.name, + "bench": site.bench, + "cluster": site.cluster, + "group": "bench-0001", + "instance": site.name, + "job": "site", + "server": site.server, + "severity": alert.severity.lower(), + }, + "startsAt": "2023-12-15T01:02:56.363Z", + "status": status, + } + ], + 
"commonAnnotations": { + "description": alert.description, + }, + "commonLabels": { + "alertname": alert.name, + "severity": alert.severity.lower(), + "status": status, + "bench": site.bench, + "cluster": site.cluster, + "job": "site", + "server": site.server, + }, + "groupKey": f'{{}}/{{alertname="{alert.name}"}}:{{alertname="{alert.name}", bench="{site.bench}", cluster="{site.cluster}", server="{site.server}"}}', + "groupLabels": { + "alertname": alert.name, + "bench": site.bench, + "cluster": site.cluster, + "server": site.server, + }, + "receiver": "web\\.hook", + "status": status, + "truncatedAlerts": 0, + "version": "4", + "externalURL": "http://localhost:9093", + } + ), + }, + ).insert() + + +class TestAlertmanagerWebhookLog(unittest.TestCase): + pass diff --git a/jcloud/jcloud/pagetype/alertmanager_webhook_log_reaction_job/__init__.py b/jcloud/jcloud/pagetype/alertmanager_webhook_log_reaction_job/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/alertmanager_webhook_log_reaction_job/alertmanager_webhook_log_reaction_job.js b/jcloud/jcloud/pagetype/alertmanager_webhook_log_reaction_job/alertmanager_webhook_log_reaction_job.js new file mode 100644 index 0000000..e0e3f99 --- /dev/null +++ b/jcloud/jcloud/pagetype/alertmanager_webhook_log_reaction_job/alertmanager_webhook_log_reaction_job.js @@ -0,0 +1,8 @@ +// Copyright (c) 2024, JINGROW +// For license information, please see license.txt + +// jingrow.ui.form.on("Alertmanager Webhook Log Reaction Job", { +// refresh(frm) { + +// }, +// }); diff --git a/jcloud/jcloud/pagetype/alertmanager_webhook_log_reaction_job/alertmanager_webhook_log_reaction_job.json b/jcloud/jcloud/pagetype/alertmanager_webhook_log_reaction_job/alertmanager_webhook_log_reaction_job.json new file mode 100644 index 0000000..3f00d49 --- /dev/null +++ b/jcloud/jcloud/pagetype/alertmanager_webhook_log_reaction_job/alertmanager_webhook_log_reaction_job.json @@ -0,0 +1,41 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2024-04-30 15:36:25.358922", + "pagetype": "PageType", + "engine": "InnoDB", + "field_order": [ + "jcloud_job_type", + "jcloud_job" + ], + "fields": [ + { + "fieldname": "jcloud_job_type", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Jcloud Job Type", + "options": "Jcloud Job Type", + "read_only": 1 + }, + { + "fieldname": "jcloud_job", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Jcloud Job", + "options": "Jcloud Job", + "read_only": 1 + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2024-12-26 13:54:57.631772", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Alertmanager Webhook Log Reaction Job", + "owner": "Administrator", + "permissions": [], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/alertmanager_webhook_log_reaction_job/alertmanager_webhook_log_reaction_job.py b/jcloud/jcloud/pagetype/alertmanager_webhook_log_reaction_job/alertmanager_webhook_log_reaction_job.py new file mode 100644 index 0000000..15fac4b --- /dev/null +++ b/jcloud/jcloud/pagetype/alertmanager_webhook_log_reaction_job/alertmanager_webhook_log_reaction_job.py @@ -0,0 +1,24 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class AlertmanagerWebhookLogReactionJob(Document): + # begin: auto-generated types + # This code is auto-generated. 
Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + jcloud_job: DF.Link | None + jcloud_job_type: DF.Link | None + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/alertmanager_webhook_log_reaction_job/test_alertmanager_webhook_log_reaction_job.py b/jcloud/jcloud/pagetype/alertmanager_webhook_log_reaction_job/test_alertmanager_webhook_log_reaction_job.py new file mode 100644 index 0000000..67bc91b --- /dev/null +++ b/jcloud/jcloud/pagetype/alertmanager_webhook_log_reaction_job/test_alertmanager_webhook_log_reaction_job.py @@ -0,0 +1,9 @@ +# Copyright (c) 2024, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests.utils import JingrowTestCase + + +class TestAlertmanagerWebhookLogReactionJob(JingrowTestCase): + pass diff --git a/jcloud/jcloud/pagetype/alipay_payment_record/__init__.py b/jcloud/jcloud/pagetype/alipay_payment_record/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/alipay_payment_record/alipay_payment_record.js b/jcloud/jcloud/pagetype/alipay_payment_record/alipay_payment_record.js new file mode 100644 index 0000000..5686ec3 --- /dev/null +++ b/jcloud/jcloud/pagetype/alipay_payment_record/alipay_payment_record.js @@ -0,0 +1,8 @@ +// Copyright (c) 2025, JINGROW +// For license information, please see license.txt + +// jingrow.ui.form.on("Alipay Payment Record", { +// refresh(frm) { + +// }, +// }); diff --git a/jcloud/jcloud/pagetype/alipay_payment_record/alipay_payment_record.json b/jcloud/jcloud/pagetype/alipay_payment_record/alipay_payment_record.json new file mode 100644 index 0000000..8227e42 --- /dev/null +++ b/jcloud/jcloud/pagetype/alipay_payment_record/alipay_payment_record.json @@ -0,0 +1,79 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2025-03-18 21:38:31.742549", + "pagetype": "PageType", + "engine": "InnoDB", + "field_order": [ + "team", + "payment_id", + "order_id", + "amount", + "status" + ], + "fields": [ + { + "fieldname": "team", + "fieldtype": "Link", + "label": "Team", + "options": "Team" + }, + { + "fieldname": "payment_id", + "fieldtype": "Data", + "in_standard_filter": 1, + "label": "Payment ID", + "read_only": 1, + "unique": 1 + }, + { + "fieldname": "order_id", + "fieldtype": "Data", + "label": "Order ID", + "read_only": 1, + "unique": 1 + }, + { + "fieldname": "amount", + "fieldtype": "Float", + "in_list_view": 1, + "label": "Amount", + "precision": "2", + "read_only": 1 + }, + { + "fieldname": "status", + "fieldtype": "Select", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Status", + "options": "Pending\nSuccess\nFailed", + "read_only": 1 + } + ], + "grid_page_length": 50, + "index_web_pages_for_search": 1, + "links": [], + "modified": "2025-03-18 22:19:50.142714", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Alipay Payment Record", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/alipay_payment_record/alipay_payment_record.py b/jcloud/jcloud/pagetype/alipay_payment_record/alipay_payment_record.py new file mode 100644 index 0000000..0194b1d --- /dev/null +++ 
b/jcloud/jcloud/pagetype/alipay_payment_record/alipay_payment_record.py @@ -0,0 +1,23 @@ +# Copyright (c) 2025, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class AlipayPaymentRecord(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + amount: DF.Float + order_id: DF.Data | None + payment_id: DF.Data | None + status: DF.Literal["Pending", "Success", "Failed"] + team: DF.Link | None + # end: auto-generated types + pass diff --git a/jcloud/jcloud/pagetype/alipay_payment_record/test_alipay_payment_record.py b/jcloud/jcloud/pagetype/alipay_payment_record/test_alipay_payment_record.py new file mode 100644 index 0000000..e48f8f4 --- /dev/null +++ b/jcloud/jcloud/pagetype/alipay_payment_record/test_alipay_payment_record.py @@ -0,0 +1,9 @@ +# Copyright (c) 2025, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests.utils import JingrowTestCase + + +class TestAlipayPaymentRecord(JingrowTestCase): + pass diff --git a/jcloud/jcloud/pagetype/analytics_server/README.md b/jcloud/jcloud/pagetype/analytics_server/README.md new file mode 100644 index 0000000..be37470 --- /dev/null +++ b/jcloud/jcloud/pagetype/analytics_server/README.md @@ -0,0 +1,45 @@ +Added following locations to NGINX config on jingrow.com + +Some adblockers don't seem to like third party domains and "plausible.js" + +``` +location = /js/script.js { + proxy_pass https://analytics.jingrow.cloud/js/plausible.js; + proxy_buffering on; + + proxy_cache jscache; + proxy_cache_valid 200 6h; + proxy_cache_use_stale updating error timeout invalid_header http_500; + + proxy_set_header Host analytics.jingrow.cloud; + proxy_ssl_name analytics.jingrow.cloud; + proxy_ssl_server_name on; + proxy_ssl_session_reuse off; + + proxy_ssl_protocols TLSv1.3; + +} + +location = /api/event { + proxy_pass https://analytics.jingrow.cloud/api/event; + proxy_buffering on; + proxy_http_version 1.1; + + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Forwarded-Host $host; + + proxy_set_header Host analytics.jingrow.cloud; + proxy_ssl_name analytics.jingrow.cloud; + proxy_ssl_server_name on; + proxy_ssl_session_reuse off; + + proxy_ssl_protocols TLSv1.3; +} +``` + +Also added a cache zone for caching the plausible script + +``` +proxy_cache_path /var/cache/nginx/jscache levels=1:2 keys_zone=jscache:100m inactive=30d use_temp_path=off max_size=100m; +``` diff --git a/jcloud/jcloud/pagetype/analytics_server/__init__.py b/jcloud/jcloud/pagetype/analytics_server/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/analytics_server/analytics_server.js b/jcloud/jcloud/pagetype/analytics_server/analytics_server.js new file mode 100644 index 0000000..fcff193 --- /dev/null +++ b/jcloud/jcloud/pagetype/analytics_server/analytics_server.js @@ -0,0 +1,58 @@ +// Copyright (c) 2022, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Analytics Server', { + refresh: function (frm) { + [ + [__('Ping Agent'), 'ping_agent', false, frm.pg.is_server_setup], + [__('Ping Ansible'), 'ping_ansible', true], + [__('Ping Ansible Unprepared'), 'ping_ansible_unprepared', true], + [__('Update Agent'), 'update_agent', true, frm.pg.is_server_setup], + [__('Prepare Server'), 'prepare_server', true, 
!frm.pg.is_server_setup], + [__('Setup Server'), 'setup_server', true, !frm.pg.is_server_setup], + [ + __('Fetch Keys'), + 'fetch_keys', + false, + frm.pg.is_server_setup && + (!frm.pg.jingrow_public_key || !frm.pg.root_public_key), + ], + [ + __('Show Plausible Password'), + 'show_plausible_password', + false, + frm.pg.is_server_setup, + ], + ].forEach(([label, method, confirm, condition]) => { + if (typeof condition === 'undefined' || condition) { + frm.add_custom_button( + label, + () => { + if (confirm) { + jingrow.confirm( + `Are you sure you want to ${label.toLowerCase()}?`, + () => + frm.call(method).then((r) => { + if (r.message) { + jingrow.msgprint(r.message); + } else { + frm.refresh(); + } + }), + ); + } else { + frm.call(method).then((r) => { + if (r.message) { + jingrow.msgprint(r.message); + } else { + frm.refresh(); + } + }); + } + }, + __('Actions'), + ); + } + }); + }, +}); diff --git a/jcloud/jcloud/pagetype/analytics_server/analytics_server.json b/jcloud/jcloud/pagetype/analytics_server/analytics_server.json new file mode 100644 index 0000000..2de3629 --- /dev/null +++ b/jcloud/jcloud/pagetype/analytics_server/analytics_server.json @@ -0,0 +1,278 @@ +{ + "actions": [], + "creation": "2022-03-16 18:28:50.237675", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "status", + "hostname", + "domain", + "column_break_4", + "provider", + "virtual_machine", + "is_server_setup", + "networking_section", + "ip", + "column_break_10", + "private_ip", + "private_mac_address", + "private_vlan_id", + "agent_section", + "agent_password", + "ssh_section", + "jingrow_user_password", + "jingrow_public_key", + "column_break_19", + "root_public_key", + "monitoring_section", + "monitoring_password", + "plausible_section", + "plausible_password", + "plausible_mail_server", + "plausible_mail_port", + "column_break_27", + "plausible_mail_login", + "plausible_mail_password", + "google_search_console_section", + "google_client_id", + "column_break_32", + "google_client_secret" + ], + "fields": [ + { + "default": "Pending", + "fieldname": "status", + "fieldtype": "Select", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Status", + "options": "Pending\nInstalling\nActive\nBroken\nArchived", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "hostname", + "fieldtype": "Data", + "label": "Hostname", + "reqd": 1, + "set_only_once": 1 + }, + { + "fieldname": "domain", + "fieldtype": "Link", + "label": "Domain", + "options": "Root Domain", + "read_only": 1, + "set_only_once": 1 + }, + { + "fieldname": "column_break_4", + "fieldtype": "Column Break" + }, + { + "default": "Generic", + "fieldname": "provider", + "fieldtype": "Select", + "label": "Provider", + "options": "Generic\nScaleway\nAWS EC2\nOCI", + "set_only_once": 1 + }, + { + "depends_on": "eval:pg.provider === \"AWS EC2\"", + "fieldname": "virtual_machine", + "fieldtype": "Link", + "label": "Virtual Machine", + "mandatory_depends_on": "eval:pg.provider === \"AWS EC2\"", + "options": "Virtual Machine" + }, + { + "default": "0", + "fieldname": "is_server_setup", + "fieldtype": "Check", + "label": "Server Setup", + "read_only": 1 + }, + { + "fieldname": "networking_section", + "fieldtype": "Section Break", + "label": "Networking" + }, + { + "fetch_from": "virtual_machine.public_ip_address", + "fieldname": "ip", + "fieldtype": "Data", + "in_list_view": 1, + "label": "IP", + "reqd": 1, + "set_only_once": 1 + }, + { + "fieldname": "column_break_10", + "fieldtype": "Column Break" + }, + { + 
"fetch_from": "virtual_machine.private_ip_address", + "fieldname": "private_ip", + "fieldtype": "Data", + "label": "Private IP", + "reqd": 1, + "set_only_once": 1 + }, + { + "depends_on": "eval: pg.provider === \"Scaleway\"", + "fieldname": "private_mac_address", + "fieldtype": "Data", + "label": "Private Mac Address", + "mandatory_depends_on": "eval: pg.provider === \"Scaleway\"", + "set_only_once": 1 + }, + { + "depends_on": "eval: pg.provider === \"Scaleway\"", + "fieldname": "private_vlan_id", + "fieldtype": "Data", + "label": "Private VLAN ID", + "mandatory_depends_on": "eval: pg.provider === \"Scaleway\"", + "set_only_once": 1 + }, + { + "fieldname": "agent_section", + "fieldtype": "Section Break", + "label": "Agent" + }, + { + "fieldname": "agent_password", + "fieldtype": "Password", + "label": "Agent Password", + "set_only_once": 1 + }, + { + "fieldname": "ssh_section", + "fieldtype": "Section Break", + "label": "SSH" + }, + { + "fieldname": "jingrow_user_password", + "fieldtype": "Password", + "label": "Jingrow User Password", + "set_only_once": 1 + }, + { + "fieldname": "jingrow_public_key", + "fieldtype": "Code", + "label": "Jingrow Public Key", + "read_only": 1 + }, + { + "fieldname": "column_break_19", + "fieldtype": "Column Break" + }, + { + "fieldname": "root_public_key", + "fieldtype": "Code", + "label": "Root Public Key", + "read_only": 1 + }, + { + "fieldname": "monitoring_section", + "fieldtype": "Section Break", + "label": "Monitoring" + }, + { + "fieldname": "monitoring_password", + "fieldtype": "Password", + "label": "Monitoring Password", + "set_only_once": 1 + }, + { + "fieldname": "plausible_section", + "fieldtype": "Section Break", + "label": "Plausible" + }, + { + "fieldname": "plausible_password", + "fieldtype": "Password", + "label": "Plausible Password", + "set_only_once": 1 + }, + { + "fieldname": "plausible_mail_server", + "fieldtype": "Data", + "label": "Plausible Mail Server", + "set_only_once": 1 + }, + { + "default": "587", + "fieldname": "plausible_mail_port", + "fieldtype": "Int", + "label": "Plausible Mail Port", + "set_only_once": 1 + }, + { + "fieldname": "column_break_27", + "fieldtype": "Column Break" + }, + { + "fieldname": "plausible_mail_password", + "fieldtype": "Password", + "label": "Plausible Mail Password", + "set_only_once": 1 + }, + { + "fieldname": "plausible_mail_login", + "fieldtype": "Data", + "label": "Plausible Mail Login", + "set_only_once": 1 + }, + { + "fieldname": "google_search_console_section", + "fieldtype": "Section Break", + "label": "Google Search Console" + }, + { + "fieldname": "google_client_id", + "fieldtype": "Data", + "label": "Google Client ID", + "set_only_once": 1 + }, + { + "fieldname": "column_break_32", + "fieldtype": "Column Break" + }, + { + "fieldname": "google_client_secret", + "fieldtype": "Password", + "label": "Google Client Secret", + "set_only_once": 1 + } + ], + "links": [ + { + "link_pagetype": "Ansible Play", + "link_fieldname": "server" + } + ], + "modified": "2023-12-13 15:09:40.978998", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Analytics Server", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/analytics_server/analytics_server.py 
b/jcloud/jcloud/pagetype/analytics_server/analytics_server.py new file mode 100644 index 0000000..e2d5572 --- /dev/null +++ b/jcloud/jcloud/pagetype/analytics_server/analytics_server.py @@ -0,0 +1,116 @@ +# Copyright (c) 2022, JINGROW +# For license information, please see license.txt + +import base64 +import os + +import jingrow + +from jcloud.jcloud.pagetype.server.server import BaseServer +from jcloud.runner import Ansible +from jcloud.utils import log_error + + +class AnalyticsServer(BaseServer): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + agent_password: DF.Password | None + domain: DF.Link | None + jingrow_public_key: DF.Code | None + jingrow_user_password: DF.Password | None + google_client_id: DF.Data | None + google_client_secret: DF.Password | None + hostname: DF.Data + ip: DF.Data + is_server_setup: DF.Check + monitoring_password: DF.Password | None + plausible_mail_login: DF.Data | None + plausible_mail_password: DF.Password | None + plausible_mail_port: DF.Int + plausible_mail_server: DF.Data | None + plausible_password: DF.Password | None + private_ip: DF.Data + private_mac_address: DF.Data | None + private_vlan_id: DF.Data | None + provider: DF.Literal["Generic", "Scaleway", "AWS EC2", "OCI"] + root_public_key: DF.Code | None + status: DF.Literal["Pending", "Installing", "Active", "Broken", "Archived"] + virtual_machine: DF.Link | None + # end: auto-generated types + + def validate(self): + self.validate_agent_password() + self.validate_monitoring_password() + self.validate_plausible_password() + + def validate_monitoring_password(self): + if not self.monitoring_password: + self.monitoring_password = jingrow.generate_hash() + + def validate_plausible_password(self): + if not self.plausible_password: + self.plausible_password = jingrow.generate_hash() + + def _setup_server(self): + agent_repository_url = self.get_agent_repository_url() + certificate_name = jingrow.db.get_value( + "TLS Certificate", {"wildcard": True, "domain": self.domain}, "name" + ) + certificate = jingrow.get_pg("TLS Certificate", certificate_name) + + log_server = jingrow.db.get_single_value("Jcloud Settings", "log_server") + if log_server: + kibana_password = jingrow.get_pg("Log Server", log_server).get_password( + "kibana_password" + ) + else: + kibana_password = None + + try: + ansible = Ansible( + playbook="analytics.yml", + server=self, + variables={ + "server": self.name, + "workers": 1, + "domain": self.domain, + "log_server": log_server, + "agent_password": self.get_password("agent_password"), + "agent_repository_url": agent_repository_url, + "kibana_password": kibana_password, + "plausible_password": self.get_password("plausible_password"), + "plausible_secret": base64.b64encode(os.urandom(64)).decode(), + "plausible_mail_server": self.plausible_mail_server, + "plausible_mail_port": self.plausible_mail_port, + "plausible_mail_login": self.plausible_mail_login, + "plausible_mail_password": self.get_password("plausible_mail_password"), + "google_client_id": self.google_client_id, + "google_client_secret": self.get_password("google_client_secret"), + "monitoring_password": self.get_password("monitoring_password"), + "private_ip": self.private_ip, + "certificate_private_key": certificate.private_key, + "certificate_full_chain": certificate.full_chain, + "certificate_intermediate_chain": certificate.intermediate_chain, + }, + ) + play = ansible.run() 
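+ # The play result decides the server state below: a successful run marks the server
+ # Active and sets is_server_setup, while a failed run or an exception leaves it Broken.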
+ self.reload() + if play.status == "Success": + self.status = "Active" + self.is_server_setup = True + else: + self.status = "Broken" + except Exception: + self.status = "Broken" + log_error("Analytics Server Setup Exception", server=self.as_dict()) + self.save() + + @jingrow.whitelist() + def show_plausible_password(self): + return self.get_password("plausible_password") diff --git a/jcloud/jcloud/pagetype/analytics_server/test_analytics_server.py b/jcloud/jcloud/pagetype/analytics_server/test_analytics_server.py new file mode 100644 index 0000000..b079567 --- /dev/null +++ b/jcloud/jcloud/pagetype/analytics_server/test_analytics_server.py @@ -0,0 +1,9 @@ +# Copyright (c) 2022, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests.utils import JingrowTestCase + + +class TestAnalyticsServer(JingrowTestCase): + pass diff --git a/jcloud/jcloud/pagetype/ansible_console/__init__.py b/jcloud/jcloud/pagetype/ansible_console/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/ansible_console/ansible_console.js b/jcloud/jcloud/pagetype/ansible_console/ansible_console.js new file mode 100644 index 0000000..2113514 --- /dev/null +++ b/jcloud/jcloud/pagetype/ansible_console/ansible_console.js @@ -0,0 +1,47 @@ +// Copyright (c) 2023, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Ansible Console', { + onload: function (frm) { + jingrow.ui.keys.add_shortcut({ + shortcut: 'shift+enter', + action: () => frm.page.btn_primary.trigger('click'), + page: frm.page, + description: __('Execute Ansible Command'), + ignore_inputs: true, + }); + }, + refresh: function (frm) { + frm.fields_dict.output.grid.grid_pagination.page_length = 500; + frm.disable_save(); + frm.page.set_primary_action(__('Execute'), ($btn) => { + frm.set_value('nonce', Math.random()); + frm.clear_table('output'); + $btn.text(__('Executing...')); + return frm + .execute_action('Execute') + .finally(() => $btn.text(__('Execute'))); + }); + + if (window.localStorage.getItem('ansible_console_command')) { + frm.set_value( + 'inventory', + window.localStorage.getItem('ansible_console_inventory'), + ); + frm.set_value( + 'command', + window.localStorage.getItem('ansible_console_command'), + ); + + window.localStorage.removeItem('ansible_console_inventory'); + window.localStorage.removeItem('ansible_console_command'); + } + + jingrow.realtime.off('ansible_console_update'); + jingrow.realtime.on('ansible_console_update', (message) => { + if (message.nonce == frm.pg.nonce) { + frm.set_value('output', message.output); + } + }); + }, +}); diff --git a/jcloud/jcloud/pagetype/ansible_console/ansible_console.json b/jcloud/jcloud/pagetype/ansible_console/ansible_console.json new file mode 100644 index 0000000..f2ceb10 --- /dev/null +++ b/jcloud/jcloud/pagetype/ansible_console/ansible_console.json @@ -0,0 +1,91 @@ +{ + "actions": [ + { + "action": "/app/ansible-console-log", + "action_type": "Route", + "label": "Logs" + }, + { + "action": "jcloud.jcloud.pagetype.ansible_console.ansible_console.execute_command", + "action_type": "Server Action", + "hidden": 1, + "label": "Execute" + } + ], + "creation": "2023-11-08 14:10:51.358824", + "default_view": "List", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "inventory_section", + "inventory", + "nonce", + "command", + "output", + "error" + ], + "fields": [ + { + "fieldname": "inventory_section", + "fieldtype": "Section Break" + }, + { + "fieldname": "inventory", + "fieldtype": 
"Code", + "label": "Inventory", + "max_height": "40px" + }, + { + "fieldname": "command", + "fieldtype": "Code", + "label": "Command", + "max_height": "40px", + "options": "Shell" + }, + { + "fieldname": "nonce", + "fieldtype": "Data", + "hidden": 1, + "label": "Nonce" + }, + { + "fieldname": "output", + "fieldtype": "Table", + "label": "Output", + "options": "Ansible Console Output", + "read_only": 1 + }, + { + "fieldname": "error", + "fieldtype": "Code", + "label": "Error", + "options": "Shell", + "read_only": 1 + } + ], + "hide_toolbar": 1, + "index_web_pages_for_search": 1, + "issingle": 1, + "links": [], + "modified": "2023-11-24 14:46:36.102377", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Ansible Console", + "owner": "Administrator", + "permissions": [ + { + "email": 1, + "print": 1, + "read": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "quick_entry": 1, + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/ansible_console/ansible_console.py b/jcloud/jcloud/pagetype/ansible_console/ansible_console.py new file mode 100644 index 0000000..3d250d2 --- /dev/null +++ b/jcloud/jcloud/pagetype/ansible_console/ansible_console.py @@ -0,0 +1,180 @@ +# Copyright (c) 2023, JINGROW +# For license information, please see license.txt + +import json +import shutil + +import jingrow +from ansible import constants, context +from ansible.executor.task_queue_manager import TaskQueueManager +from ansible.inventory.manager import InventoryManager +from ansible.module_utils.common.collections import ImmutableDict +from ansible.parsing.dataloader import DataLoader +from ansible.playbook.play import Play +from ansible.plugins.callback import CallbackBase +from ansible.vars.manager import VariableManager +from jingrow.model.document import Document +from jingrow.utils import get_timedelta + +from jcloud.utils import reconnect_on_failure + + +class AnsibleConsole(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + from jcloud.jcloud.pagetype.ansible_console_output.ansible_console_output import ( + AnsibleConsoleOutput, + ) + + command: DF.Code | None + error: DF.Code | None + inventory: DF.Code | None + nonce: DF.Data | None + output: DF.Table[AnsibleConsoleOutput] + # end: auto-generated types + + def run(self): + jingrow.only_for("System Manager") + try: + ad_hoc = AnsibleAdHoc(sources=self.inventory) + for host in ad_hoc.run(self.command, self.nonce): + self.append("output", host) + except Exception: + self.error = jingrow.get_traceback() + import traceback + + traceback.print_exc() + log = self.as_dict() + log.update({"pagetype": "Ansible Console Log"}) + jingrow.get_pg(log).insert() + jingrow.db.commit() + + +@jingrow.whitelist() +def execute_command(pg): + jingrow.enqueue( + "jcloud.jcloud.pagetype.ansible_console.ansible_console._execute_command", + pg=pg, + timeout=7200, + ) + return pg + + +def _execute_command(pg): + console = jingrow.get_pg(json.loads(pg)) + console.run() + return console.as_dict() + + +class AnsibleCallback(CallbackBase): + def __init__(self, *args, **kwargs): + super(AnsibleCallback, self).__init__(*args, **kwargs) + self.hosts = {} + + def v2_runner_on_ok(self, result, *args, **kwargs): + self.update_task("Success", result) + + def v2_runner_on_failed(self, result, *args, **kwargs): + self.update_task("Failure", result) + + def v2_runner_on_unreachable(self, result): + self.update_task("Unreachable", result) + + @reconnect_on_failure() + def update_task(self, status, result): + host, result = self.parse_result(result) + result.update( + { + "host": host, + "status": status, + } + ) + self.hosts[host] = result + self.publish_update() + + def parse_result(self, result): + host = result._host.get_name() + _result = result._result + return host, jingrow._dict( + { + "output": _result.get("stdout"), + "error": _result.get("stderr"), + "exception": _result.get("msg"), + "exit_code": _result.get("rc"), + "duration": get_timedelta(_result.get("delta", "0:00:00.000000")), + } + ) + + def publish_update(self): + message = {"nonce": self.nonce, "output": list(self.hosts.values())} + jingrow.publish_realtime( + event="ansible_console_update", + pagetype="Ansible Console", + docname="Ansible Console", + user=jingrow.session.user, + message=message, + ) + + +class AnsibleAdHoc: + def __init__(self, sources): + constants.HOST_KEY_CHECKING = False + context.CLIARGS = ImmutableDict( + become_method="sudo", + check=False, + connection="ssh", + extra_vars=[], + remote_user="root", + start_at_task=None, + syntax=False, + verbosity=3, + ) + + self.loader = DataLoader() + self.passwords = dict({}) + + self.inventory = InventoryManager(loader=self.loader, sources=sources) + self.variable_manager = VariableManager(loader=self.loader, inventory=self.inventory) + + self.callback = AnsibleCallback() + + def run(self, command, nonce=None): + self.tasks = [dict(action=dict(module="shell", args=command))] + source = dict( + name="Ansible Play", + hosts="all", + gather_facts="no", + tasks=self.tasks, + ) + + self.play = Play().load( + source, variable_manager=self.variable_manager, loader=self.loader + ) + + self.callback.nonce = nonce + + tqm = TaskQueueManager( + inventory=self.inventory, + variable_manager=self.variable_manager, + loader=self.loader, + passwords=self.passwords, + stdout_callback=self.callback, + forks=16, + ) + + try: + tqm.run(self.play) + finally: + tqm.cleanup() + 
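+ # Clean up Ansible's temporary files and the local tmp directory, then publish the
+ # final realtime update and return the collected per-host results.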
self.loader.cleanup_all_tmp_files() + + shutil.rmtree(constants.DEFAULT_LOCAL_TMP, True) + + self.callback.publish_update() + return list(self.callback.hosts.values()) diff --git a/jcloud/jcloud/pagetype/ansible_console/test_ansible_console.py b/jcloud/jcloud/pagetype/ansible_console/test_ansible_console.py new file mode 100644 index 0000000..bf9dac7 --- /dev/null +++ b/jcloud/jcloud/pagetype/ansible_console/test_ansible_console.py @@ -0,0 +1,30 @@ +# Copyright (c) 2023, JINGROW +# See license.txt + +import jingrow +from jingrow.tests.utils import JingrowTestCase + + +class TestAnsibleConsole(JingrowTestCase): + def test_ansible_console_run(self): + console = jingrow.get_pg("Ansible Console") + console.inventory = "localhost," + console.command = "ls" + console.run() + + self.assertEqual(len(console.output), 1) + output = console.output[0] + + self.assertEqual(output.host, "localhost") + self.assertEqual(output.status, "Unreachable") + + def test_ansible_console_run_creates_console_log(self): + count_before = jingrow.db.count("Ansible Console Log") + + console = jingrow.get_pg("Ansible Console") + console.inventory = "localhost," + console.command = "ls" + console.run() + + count_after = jingrow.db.count("Ansible Console Log") + self.assertEqual(count_before + 1, count_after) diff --git a/jcloud/jcloud/pagetype/ansible_console_log/__init__.py b/jcloud/jcloud/pagetype/ansible_console_log/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/ansible_console_log/ansible_console_log.js b/jcloud/jcloud/pagetype/ansible_console_log/ansible_console_log.js new file mode 100644 index 0000000..3178fd9 --- /dev/null +++ b/jcloud/jcloud/pagetype/ansible_console_log/ansible_console_log.js @@ -0,0 +1,15 @@ +// Copyright (c) 2023, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Ansible Console Log', { + refresh(frm) { + frm.add_custom_button(__('Re-Run in Console'), () => { + window.localStorage.setItem( + 'ansible_console_inventory', + frm.pg.inventory, + ); + window.localStorage.setItem('ansible_console_command', frm.pg.command); + jingrow.set_route('Form', 'Ansible Console'); + }); + }, +}); diff --git a/jcloud/jcloud/pagetype/ansible_console_log/ansible_console_log.json b/jcloud/jcloud/pagetype/ansible_console_log/ansible_console_log.json new file mode 100644 index 0000000..3433b2a --- /dev/null +++ b/jcloud/jcloud/pagetype/ansible_console_log/ansible_console_log.json @@ -0,0 +1,71 @@ +{ + "actions": [], + "autoname": "format:Log on {timestamp}", + "creation": "2023-11-08 14:12:56.262141", + "default_view": "List", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "inventory", + "command", + "output", + "error" + ], + "fields": [ + { + "fieldname": "inventory", + "fieldtype": "Code", + "label": "Inventory", + "read_only": 1 + }, + { + "fieldname": "command", + "fieldtype": "Code", + "in_list_view": 1, + "label": "Command", + "read_only": 1 + }, + { + "fieldname": "error", + "fieldtype": "Code", + "label": "Error", + "read_only": 1 + }, + { + "fieldname": "output", + "fieldtype": "Table", + "label": "Output", + "options": "Ansible Console Output", + "read_only": 1 + } + ], + "in_create": 1, + "index_web_pages_for_search": 1, + "links": [], + "modified": "2023-11-24 11:52:08.811559", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Ansible Console Log", + "naming_rule": "Expression", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + 
"email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "quick_entry": 1, + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/ansible_console_log/ansible_console_log.py b/jcloud/jcloud/pagetype/ansible_console_log/ansible_console_log.py new file mode 100644 index 0000000..1f0746d --- /dev/null +++ b/jcloud/jcloud/pagetype/ansible_console_log/ansible_console_log.py @@ -0,0 +1,27 @@ +# Copyright (c) 2023, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class AnsibleConsoleLog(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + from jcloud.jcloud.pagetype.ansible_console_output.ansible_console_output import ( + AnsibleConsoleOutput, + ) + + command: DF.Code | None + error: DF.Code | None + inventory: DF.Code | None + output: DF.Table[AnsibleConsoleOutput] + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/ansible_console_log/test_ansible_console_log.py b/jcloud/jcloud/pagetype/ansible_console_log/test_ansible_console_log.py new file mode 100644 index 0000000..f22b24b --- /dev/null +++ b/jcloud/jcloud/pagetype/ansible_console_log/test_ansible_console_log.py @@ -0,0 +1,9 @@ +# Copyright (c) 2023, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests.utils import JingrowTestCase + + +class TestAnsibleConsoleLog(JingrowTestCase): + pass diff --git a/jcloud/jcloud/pagetype/ansible_console_output/__init__.py b/jcloud/jcloud/pagetype/ansible_console_output/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/ansible_console_output/ansible_console_output.json b/jcloud/jcloud/pagetype/ansible_console_output/ansible_console_output.json new file mode 100644 index 0000000..55047f1 --- /dev/null +++ b/jcloud/jcloud/pagetype/ansible_console_output/ansible_console_output.json @@ -0,0 +1,77 @@ +{ + "actions": [], + "autoname": "autoincrement", + "creation": "2023-11-24 11:04:34.401204", + "pagetype": "PageType", + "engine": "InnoDB", + "field_order": [ + "host", + "status", + "output", + "column_break_sigr", + "exit_code", + "duration", + "exception" + ], + "fields": [ + { + "columns": 2, + "fieldname": "host", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Host", + "read_only": 1 + }, + { + "columns": 6, + "fieldname": "output", + "fieldtype": "Code", + "in_list_view": 1, + "label": "Output", + "read_only": 1 + }, + { + "fieldname": "exit_code", + "fieldtype": "Int", + "label": "Exit Code", + "read_only": 1 + }, + { + "fieldname": "exception", + "fieldtype": "Code", + "label": "Exception", + "read_only": 1 + }, + { + "fieldname": "duration", + "fieldtype": "Time", + "label": "Duration", + "read_only": 1 + }, + { + "columns": 2, + "fieldname": "status", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Status", + "read_only": 1 + }, + { + "fieldname": "column_break_sigr", + "fieldtype": "Column Break" + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2023-11-24 13:52:32.105261", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Ansible Console Output", + "naming_rule": "Autoincrement", + "owner": "Administrator", + "permissions": [], + "sort_field": 
"modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/ansible_console_output/ansible_console_output.py b/jcloud/jcloud/pagetype/ansible_console_output/ansible_console_output.py new file mode 100644 index 0000000..5f8feba --- /dev/null +++ b/jcloud/jcloud/pagetype/ansible_console_output/ansible_console_output.py @@ -0,0 +1,29 @@ +# Copyright (c) 2023, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class AnsibleConsoleOutput(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + duration: DF.Time | None + exception: DF.Code | None + exit_code: DF.Int + host: DF.Data | None + name: DF.Int | None + output: DF.Code | None + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + status: DF.Data | None + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/ansible_play/__init__.py b/jcloud/jcloud/pagetype/ansible_play/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/ansible_play/ansible_play.js b/jcloud/jcloud/pagetype/ansible_play/ansible_play.js new file mode 100644 index 0000000..c5e0bf4 --- /dev/null +++ b/jcloud/jcloud/pagetype/ansible_play/ansible_play.js @@ -0,0 +1,20 @@ +// Copyright (c) 2020, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Ansible Play', { + refresh: function (frm) { + jingrow.realtime.on('ansible_play_progress', (data) => { + if (data.progress && data.play === frm.pg.name) { + const progress_title = __('Ansible Play Progress'); + frm.dashboard.show_progress( + progress_title, + (data.progress / data.total) * 100, + `Ansible Play Progress (${data.progress} tasks completed out of ${data.total})`, + ); + if (data.progress === data.total) { + frm.dashboard.hide_progress(progress_title); + } + } + }); + }, +}); diff --git a/jcloud/jcloud/pagetype/ansible_play/ansible_play.json b/jcloud/jcloud/pagetype/ansible_play/ansible_play.json new file mode 100644 index 0000000..70a20a0 --- /dev/null +++ b/jcloud/jcloud/pagetype/ansible_play/ansible_play.json @@ -0,0 +1,209 @@ +{ + "actions": [], + "creation": "2020-09-29 20:35:02.147164", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "status", + "play", + "playbook", + "column_break_3", + "server_type", + "server", + "section_break_7", + "start", + "end", + "column_break_10", + "duration", + "statistics_section", + "ok", + "failures", + "changed", + "unreachable", + "column_break_14", + "skipped", + "rescued", + "ignored", + "section_break_5", + "variables" + ], + "fields": [ + { + "fieldname": "server_type", + "fieldtype": "Link", + "label": "Server Type", + "options": "PageType", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "server", + "fieldtype": "Dynamic Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Server", + "options": "server_type", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "playbook", + "fieldtype": "Data", + "label": "Playbook", + "read_only": 1, + "reqd": 1 + }, + { + "default": "{}", + "fieldname": "variables", + "fieldtype": "Code", + "label": "Variables", + "options": "JSON", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "section_break_5", + "fieldtype": "Section Break" + }, + { + "fieldname": "column_break_3", + "fieldtype": 
"Column Break" + }, + { + "fieldname": "play", + "fieldtype": "Data", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Play", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "statistics_section", + "fieldtype": "Section Break", + "label": "Agreagate Statistics" + }, + { + "fieldname": "changed", + "fieldtype": "Int", + "label": "Changed", + "read_only": 1 + }, + { + "fieldname": "unreachable", + "fieldtype": "Int", + "label": "Unreachable", + "read_only": 1 + }, + { + "fieldname": "column_break_14", + "fieldtype": "Column Break" + }, + { + "fieldname": "skipped", + "fieldtype": "Int", + "label": "Skipped", + "read_only": 1 + }, + { + "fieldname": "rescued", + "fieldtype": "Int", + "label": "Rescued", + "read_only": 1 + }, + { + "fieldname": "ignored", + "fieldtype": "Int", + "label": "Ignored", + "read_only": 1 + }, + { + "fieldname": "ok", + "fieldtype": "Int", + "label": "OK", + "read_only": 1 + }, + { + "fieldname": "failures", + "fieldtype": "Int", + "label": "Failures", + "read_only": 1 + }, + { + "default": "Pending", + "fieldname": "status", + "fieldtype": "Select", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Status", + "options": "Pending\nRunning\nSuccess\nFailure", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "section_break_7", + "fieldtype": "Section Break" + }, + { + "fieldname": "start", + "fieldtype": "Datetime", + "label": "Start", + "read_only": 1 + }, + { + "fieldname": "end", + "fieldtype": "Datetime", + "label": "End", + "read_only": 1 + }, + { + "fieldname": "column_break_10", + "fieldtype": "Column Break" + }, + { + "fieldname": "duration", + "fieldtype": "Time", + "label": "Duration", + "read_only": 1 + } + ], + "in_create": 1, + "index_web_pages_for_search": 1, + "links": [], + "modified": "2022-11-01 18:41:41.302964", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Ansible Play", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + }, + { + "create": 1, + "role": "Jcloud Admin", + "write": 1 + }, + { + "create": 1, + "role": "Jcloud Member", + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "title_field": "play", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/ansible_play/ansible_play.py b/jcloud/jcloud/pagetype/ansible_play/ansible_play.py new file mode 100644 index 0000000..0373d79 --- /dev/null +++ b/jcloud/jcloud/pagetype/ansible_play/ansible_play.py @@ -0,0 +1,83 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +import jingrow +from jingrow.model.document import Document +from jingrow.utils import cstr + +from jcloud.api.client import is_owned_by_team +from jcloud.utils import poly_get_pagetype + + +class AnsiblePlay(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + changed: DF.Int + duration: DF.Time | None + end: DF.Datetime | None + failures: DF.Int + ignored: DF.Int + ok: DF.Int + play: DF.Data + playbook: DF.Data + rescued: DF.Int + server: DF.DynamicLink + server_type: DF.Link + skipped: DF.Int + start: DF.Datetime | None + status: DF.Literal["Pending", "Running", "Success", "Failure"] + unreachable: DF.Int + variables: DF.Code + # end: auto-generated types + + dashboard_fields = [ + "name", + "creation", + "status", + "start", + "end", + "duration", + "server", + "play", + ] + + @staticmethod + def get_list_query(query, filters=None, **list_args): + server = cstr(filters.get("server", "")) + + if not server: + jingrow.throw("不允许", jingrow.PermissionError) + + if server.startswith("["): + servers = jingrow.parse_json(server.replace("'", '"'))[1] + + for server in servers: + pagetype = poly_get_pagetype(["Server", "Database Server"], server) + is_owned_by_team(pagetype, server, raise_exception=True) + else: + pagetype = poly_get_pagetype(["Server", "Database Server"], server) + is_owned_by_team(pagetype, server, raise_exception=True) + + results = query.run(as_dict=1) + return results + + def get_pg(self, pg): + pg["tasks"] = jingrow.get_all( + "Ansible Task", + filters={"play": self.name}, + fields=["task", "status", "start", "end", "duration"], + order_by="creation", + ) + + return pg + + def on_trash(self): + jingrow.db.delete("Ansible Task", {"play": self.name}) diff --git a/jcloud/jcloud/pagetype/ansible_play/ansible_play_dashboard.py b/jcloud/jcloud/pagetype/ansible_play/ansible_play_dashboard.py new file mode 100644 index 0000000..719343e --- /dev/null +++ b/jcloud/jcloud/pagetype/ansible_play/ansible_play_dashboard.py @@ -0,0 +1,12 @@ +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +from jingrow import _ + + +def get_data(): + return { + "fieldname": "play", + "transactions": [{"label": _("Related Documents"), "items": ["Ansible Task"]}], + } diff --git a/jcloud/jcloud/pagetype/ansible_play/test_ansible_play.py b/jcloud/jcloud/pagetype/ansible_play/test_ansible_play.py new file mode 100644 index 0000000..361a2fa --- /dev/null +++ b/jcloud/jcloud/pagetype/ansible_play/test_ansible_play.py @@ -0,0 +1,41 @@ +# Copyright (c) 2020, JINGROW +# See license.txt + +from __future__ import annotations + +import unittest +from typing import TYPE_CHECKING + +import jingrow + +if TYPE_CHECKING: + from jingrow.types.DF import Data + + +def create_test_ansible_play( + play: str = "", + playbook: str = "", + server_type: str = "Server", + server: Data | None = "", + vars: dict | None = None, + status: str = "Success", +): + if vars is None: + vars = {} + play = jingrow.get_pg( + { + "pagetype": "Ansible Play", + "play": play, + "playbook": playbook, + "server_type": server_type, + "server": server, + "variable": vars, + } + ).insert() + play.db_set("status", status) + play.reload() + return play + + +class TestAnsiblePlay(unittest.TestCase): + pass diff --git a/jcloud/jcloud/pagetype/ansible_task/__init__.py b/jcloud/jcloud/pagetype/ansible_task/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/ansible_task/ansible_task.js b/jcloud/jcloud/pagetype/ansible_task/ansible_task.js new file mode 100644 index 0000000..01f7610 --- /dev/null +++ b/jcloud/jcloud/pagetype/ansible_task/ansible_task.js @@ -0,0 +1,7 @@ +// Copyright (c) 2020, JINGROW +// For license information, please see 
license.txt + +jingrow.ui.form.on('Ansible Task', { + // refresh: function(frm) { + // } +}); diff --git a/jcloud/jcloud/pagetype/ansible_task/ansible_task.json b/jcloud/jcloud/pagetype/ansible_task/ansible_task.json new file mode 100644 index 0000000..8066c4c --- /dev/null +++ b/jcloud/jcloud/pagetype/ansible_task/ansible_task.json @@ -0,0 +1,167 @@ +{ + "actions": [], + "creation": "2020-09-29 20:45:05.363968", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "task", + "status", + "column_break_5", + "role", + "play", + "job_id", + "section_break_7", + "start", + "end", + "column_break_10", + "duration", + "section_break_9", + "output", + "error", + "exception", + "result" + ], + "fields": [ + { + "fieldname": "task", + "fieldtype": "Data", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Task", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "role", + "fieldtype": "Data", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Role", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "play", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Ansible Play", + "options": "Ansible Play", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "column_break_5", + "fieldtype": "Column Break" + }, + { + "fieldname": "exception", + "fieldtype": "Code", + "label": "Exception", + "read_only": 1 + }, + { + "fieldname": "error", + "fieldtype": "Code", + "label": "Error", + "read_only": 1 + }, + { + "fieldname": "output", + "fieldtype": "Code", + "label": "Output", + "read_only": 1 + }, + { + "default": "Pending", + "fieldname": "status", + "fieldtype": "Select", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Status", + "options": "Pending\nRunning\nSuccess\nFailure\nSkipped\nUnreachable", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "section_break_9", + "fieldtype": "Section Break" + }, + { + "fieldname": "result", + "fieldtype": "Code", + "label": "Result", + "read_only": 1 + }, + { + "fieldname": "job_id", + "fieldtype": "Data", + "label": "Job ID", + "read_only": 1 + }, + { + "fieldname": "section_break_7", + "fieldtype": "Section Break" + }, + { + "fieldname": "start", + "fieldtype": "Datetime", + "label": "Start", + "read_only": 1 + }, + { + "fieldname": "end", + "fieldtype": "Datetime", + "label": "End", + "read_only": 1 + }, + { + "fieldname": "column_break_10", + "fieldtype": "Column Break" + }, + { + "fieldname": "duration", + "fieldtype": "Time", + "label": "Duration", + "read_only": 1 + } + ], + "in_create": 1, + "index_web_pages_for_search": 1, + "links": [], + "modified": "2022-11-01 18:42:20.359594", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Ansible Task", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + }, + { + "create": 1, + "role": "Jcloud Admin", + "write": 1 + }, + { + "create": 1, + "role": "Jcloud Member", + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "title_field": "task", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/ansible_task/ansible_task.py b/jcloud/jcloud/pagetype/ansible_task/ansible_task.py new file mode 100644 index 0000000..293b9db --- /dev/null +++ b/jcloud/jcloud/pagetype/ansible_task/ansible_task.py @@ -0,0 +1,41 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# For 
license information, please see license.txt + + +import jingrow +from jingrow.model.document import Document + + +class AnsibleTask(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + duration: DF.Time | None + end: DF.Datetime | None + error: DF.Code | None + exception: DF.Code | None + job_id: DF.Data | None + output: DF.Code | None + play: DF.Link + result: DF.Code | None + role: DF.Data + start: DF.Datetime | None + status: DF.Literal[ + "Pending", "Running", "Success", "Failure", "Skipped", "Unreachable" + ] + task: DF.Data + # end: auto-generated types + + def on_update(self): + jingrow.publish_realtime( + "ansible_play_update", + pagetype="Ansible Play", + docname=self.play, + message={"id": self.play}, + ) diff --git a/jcloud/jcloud/pagetype/ansible_task/test_ansible_task.py b/jcloud/jcloud/pagetype/ansible_task/test_ansible_task.py new file mode 100644 index 0000000..4d95309 --- /dev/null +++ b/jcloud/jcloud/pagetype/ansible_task/test_ansible_task.py @@ -0,0 +1,34 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# See license.txt + + +import unittest + +import jingrow + + +def create_test_ansible_play_task( + play: str = "", + role: str = "", + task: str = "", + status: str = "Success", + output: str = "", +): + play = jingrow.get_pg( + { + "pagetype": "Ansible Task", + "play": play, + "role": role, + "task": task, + "status": status, + "output": output, + } + ).insert() + # play.db_set("status", status) + play.reload() + return play + + +class TestAnsibleTask(unittest.TestCase): + pass diff --git a/jcloud/jcloud/pagetype/app/__init__.py b/jcloud/jcloud/pagetype/app/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/app/app.js b/jcloud/jcloud/pagetype/app/app.js new file mode 100644 index 0000000..b0d283c --- /dev/null +++ b/jcloud/jcloud/pagetype/app/app.js @@ -0,0 +1,7 @@ +// Copyright (c) 2019, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('App', { + // refresh: function(frm) { + // } +}); diff --git a/jcloud/jcloud/pagetype/app/app.json b/jcloud/jcloud/pagetype/app/app.json new file mode 100644 index 0000000..da7f023 --- /dev/null +++ b/jcloud/jcloud/pagetype/app/app.json @@ -0,0 +1,162 @@ +{ + "actions": [], + "allow_rename": 1, + "autoname": "Prompt", + "creation": "2019-12-09 14:45:21.955245", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "title", + "enabled", + "column_break_5", + "jingrow", + "public", + "section_break_9", + "url", + "scrubbed", + "branch", + "repo_owner", + "repo", + "installation", + "team", + "enable_auto_deploy", + "skip_review" + ], + "fields": [ + { + "fieldname": "branch", + "fieldtype": "Data", + "hidden": 1, + "label": "Branch", + "set_only_once": 1 + }, + { + "fieldname": "scrubbed", + "fieldtype": "Data", + "hidden": 1, + "label": "Scrubbed Name", + "set_only_once": 1 + }, + { + "fieldname": "url", + "fieldtype": "Data", + "hidden": 1, + "label": "Repository URL", + "set_only_once": 1 + }, + { + "fieldname": "repo_owner", + "fieldtype": "Data", + "hidden": 1, + "label": "Repository Owner", + "set_only_once": 1 + }, + { + "default": "0", + "fieldname": "jingrow", + "fieldtype": "Check", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Jingrow", + "set_only_once": 1 + }, + { + "fieldname": "team", + "fieldtype": "Link", + "hidden": 1, + "label": 
"Team", + "options": "Team" + }, + { + "fieldname": "installation", + "fieldtype": "Data", + "hidden": 1, + "label": "Installation" + }, + { + "default": "0", + "fieldname": "public", + "fieldtype": "Check", + "hidden": 1, + "label": "Public" + }, + { + "fieldname": "repo", + "fieldtype": "Data", + "hidden": 1, + "label": "Repository" + }, + { + "fieldname": "column_break_5", + "fieldtype": "Column Break" + }, + { + "fieldname": "section_break_9", + "fieldtype": "Section Break" + }, + { + "default": "0", + "fieldname": "enable_auto_deploy", + "fieldtype": "Check", + "hidden": 1, + "label": "Enable Auto Deploy" + }, + { + "default": "0", + "fieldname": "skip_review", + "fieldtype": "Check", + "hidden": 1, + "label": "Skip Review" + }, + { + "default": "1", + "fieldname": "enabled", + "fieldtype": "Check", + "hidden": 1, + "label": "Enabled" + }, + { + "fieldname": "title", + "fieldtype": "Data", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Title", + "reqd": 1 + } + ], + "links": [], + "modified": "2021-02-15 10:11:04.324983", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "App", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + }, + { + "create": 1, + "read": 1, + "role": "Jcloud Admin" + }, + { + "create": 1, + "read": 1, + "role": "Jcloud Member" + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "title_field": "title", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/app/app.py b/jcloud/jcloud/pagetype/app/app.py new file mode 100644 index 0000000..4a6526c --- /dev/null +++ b/jcloud/jcloud/pagetype/app/app.py @@ -0,0 +1,105 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2019, JINGROW +# For license information, please see license.txt + + +import typing + +import rq +import jingrow +from jingrow.model.document import Document +from jcloud.utils.jobs import has_job_timeout_exceeded + +if typing.TYPE_CHECKING: + from jcloud.jcloud.pagetype.app_source.app_source import AppSource + + +class App(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + branch: DF.Data | None + enable_auto_deploy: DF.Check + enabled: DF.Check + jingrow: DF.Check + installation: DF.Data | None + public: DF.Check + repo: DF.Data | None + repo_owner: DF.Data | None + scrubbed: DF.Data | None + skip_review: DF.Check + team: DF.Link | None + title: DF.Data + url: DF.Data | None + # end: auto-generated types + + dashboard_fields = ["title"] + + def add_source( + self, + version, + repository_url, + branch, + team=None, + github_installation_id=None, + public=False, + repository_owner=None, + ) -> "AppSource": + existing_source = jingrow.get_all( + "App Source", + {"app": self.name, "repository_url": repository_url, "branch": branch, "team": team}, + limit=1, + ) + if existing_source: + source = jingrow.get_pg("App Source", existing_source[0].name) + versions = set(version.version for version in source.versions) + if version not in versions: + source.add_version(version) + else: + # Add new App Source + source = jingrow.get_pg( + { + "pagetype": "App Source", + "app": self.name, + "versions": [{"version": version}], + "repository_url": repository_url, + "branch": branch, + "team": team, + "github_installation_id": github_installation_id, + "public": public, + "repository_owner": repository_owner, + } + ).insert() + return source + + def before_save(self): + self.jingrow = self.name == "jingrow" + + +def new_app(name, title): + app: "App" = jingrow.get_pg({"pagetype": "App", "name": name, "title": title}).insert() + return app + + +def poll_new_releases(): + for source in jingrow.get_all( + "App Source", + {"enabled": True, "last_github_poll_failed": False}, + order_by="last_synced", + ): + if has_job_timeout_exceeded(): + return + try: + source = jingrow.get_pg("App Source", source.name) + source.create_release() + jingrow.db.commit() + except rq.timeouts.JobTimeoutException: + jingrow.db.rollback() + return + except Exception: + jingrow.db.rollback() diff --git a/jcloud/jcloud/pagetype/app/app_dashboard.py b/jcloud/jcloud/pagetype/app/app_dashboard.py new file mode 100644 index 0000000..cd2d73b --- /dev/null +++ b/jcloud/jcloud/pagetype/app/app_dashboard.py @@ -0,0 +1,12 @@ +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +def get_data(): + return { + "fieldname": "app", + "transactions": [ + {"items": ["Bench", "Site"]}, + {"items": ["App Source", "Release Group", "App Release"]}, + ], + } diff --git a/jcloud/jcloud/pagetype/app/test_app.py b/jcloud/jcloud/pagetype/app/test_app.py new file mode 100644 index 0000000..db58ee2 --- /dev/null +++ b/jcloud/jcloud/pagetype/app/test_app.py @@ -0,0 +1,132 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2019, JINGROW +# See license.txt + + +import unittest +from typing import TYPE_CHECKING + +import jingrow + +from jcloud.jcloud.pagetype.team.test_team import create_test_team + +if TYPE_CHECKING: + from jcloud.jcloud.pagetype.app.app import App + + +def create_test_app(name: str = "jingrow", title: str = "Jingrow Framework") -> "App": + return jingrow.get_pg({"pagetype": "App", "name": name, "title": title}).insert( + ignore_if_duplicate=True + ) + + +class TestApp(unittest.TestCase): + def tearDown(self): + jingrow.db.rollback() + + def test_create_jingrow_app(self): + app = create_test_app("jingrow", "Jingrow Framework") + self.assertEqual(app.jingrow, True) + + source = app.add_source( + "Version 12", + "http://git.jingrow.com:3000/jingrow/jingrow", + "version-12", + create_test_team().name, 
+ ) + self.assertEqual(source.repository, "jingrow") + self.assertEqual(source.repository_owner, "jingrow") + + self.assertEqual(len(source.versions), 1) + self.assertEqual(source.versions[0].version, "Version 12") + + def test_create_non_jingrow_app(self): + app = create_test_app("jerp", "JERP") + self.assertEqual(app.jingrow, False) + + source = app.add_source( + "Version 12", + "http://git.jingrow.com:3000/jingrow/jerp", + "version-12", + create_test_team().name, + ) + self.assertEqual(source.repository, "jerp") + self.assertEqual(source.repository_owner, "jingrow") + + self.assertEqual(len(source.versions), 1) + self.assertEqual(source.versions[0].version, "Version 12") + + def test_create_app_with_multiple_sources(self): + app = create_test_app("jingrow", "Jingrow Framework") + + source_1 = app.add_source( + "Version 12", + "http://git.jingrow.com:3000/jingrow/jingrow", + "version-12", + create_test_team().name, + ) + source_2 = app.add_source( + "Version 13", + "http://git.jingrow.com:3000/jingrow/jingrow", + "version-13", + create_test_team().name, + ) + self.assertEqual(source_1.branch, "version-12") + self.assertEqual(len(source_1.versions), 1) + self.assertEqual(source_1.versions[0].version, "Version 12") + + self.assertEqual(source_2.branch, "version-13") + self.assertEqual(len(source_2.versions), 1) + self.assertEqual(source_2.versions[0].version, "Version 13") + + def test_create_app_with_one_source_multiple_versions(self): + app = create_test_app("jerp_documentation", "JERP Documentation") + team_name = create_test_team().name + + source_1 = app.add_source( + "Version 12", + "http://git.jingrow.com:3000/jingrow/jerp_documentation", + "master", + team_name, + ) + self.assertEqual(source_1.branch, "master") + self.assertEqual(len(source_1.versions), 1) + self.assertEqual(source_1.versions[0].version, "Version 12") + + source_2 = app.add_source( + "Version 13", + "http://git.jingrow.com:3000/jingrow/jerp_documentation", + "master", + team_name, + ) + + self.assertEqual(source_1.name, source_2.name) + self.assertEqual(len(source_2.versions), 2) + self.assertEqual(source_2.versions[0].version, "Version 12") + self.assertEqual(source_2.versions[1].version, "Version 13") + + def test_create_app_add_second_source_after_insert(self): + app = create_test_app("jingrow", "Jingrow Framework") + source_1 = app.add_source( + "Version 12", + "http://git.jingrow.com:3000/jingrow/jingrow", + "version-12", + create_test_team().name, + ) + self.assertEqual(source_1.branch, "version-12") + self.assertEqual(len(source_1.versions), 1) + self.assertEqual(source_1.versions[0].version, "Version 12") + + source_2 = app.add_source( + "Version 13", + "http://git.jingrow.com:3000/jingrow/jingrow", + "version-13", + create_test_team().name, + ) + self.assertEqual(source_1.branch, "version-12") + self.assertEqual(len(source_1.versions), 1) + self.assertEqual(source_1.versions[0].version, "Version 12") + + self.assertEqual(source_2.branch, "version-13") + self.assertEqual(len(source_2.versions), 1) + self.assertEqual(source_2.versions[0].version, "Version 13") diff --git a/jcloud/jcloud/pagetype/app_group/__init__.py b/jcloud/jcloud/pagetype/app_group/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/app_group/app_group.json b/jcloud/jcloud/pagetype/app_group/app_group.json new file mode 100644 index 0000000..1435328 --- /dev/null +++ b/jcloud/jcloud/pagetype/app_group/app_group.json @@ -0,0 +1,46 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2024-09-03 
15:07:30.256352", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "app", + "column_break_jvou", + "app_title" + ], + "fields": [ + { + "fieldname": "app", + "fieldtype": "Link", + "in_list_view": 1, + "label": "App", + "options": "App", + "reqd": 1 + }, + { + "fieldname": "column_break_jvou", + "fieldtype": "Column Break" + }, + { + "fetch_from": "app.title", + "fieldname": "app_title", + "fieldtype": "Read Only", + "in_list_view": 1, + "label": "App Title" + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2024-09-04 12:02:25.463128", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "App Group", + "owner": "Administrator", + "permissions": [], + "sort_field": "creation", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/app_group/app_group.py b/jcloud/jcloud/pagetype/app_group/app_group.py new file mode 100644 index 0000000..d22238b --- /dev/null +++ b/jcloud/jcloud/pagetype/app_group/app_group.py @@ -0,0 +1,26 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt + +from __future__ import annotations + +# import jingrow +from jingrow.model.document import Document + + +class AppGroup(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + app: DF.Link + app_title: DF.ReadOnly | None + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/app_patch/__init__.py b/jcloud/jcloud/pagetype/app_patch/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/app_patch/app_patch.js b/jcloud/jcloud/pagetype/app_patch/app_patch.js new file mode 100644 index 0000000..ed58ac7 --- /dev/null +++ b/jcloud/jcloud/pagetype/app_patch/app_patch.js @@ -0,0 +1,25 @@ +// Copyright (c) 2024, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('App Patch', { + refresh(frm) { + const custom_buttons = [ + [__('Apply Patch'), 'apply_patch', frm.pg.status !== 'In Process'], + [__('Revert Patch'), 'revert_patch', frm.pg.status !== 'In Process'], + [__('Delete Patch'), 'delete_patch', frm.pg.status !== 'Not Applied'], + ]; + + for (const [label, method, show] of custom_buttons) { + if (!show) { + continue; + } + + const handler = () => + frm + .call(method) + .then((_) => frm.refresh()) + .catch((_) => frm.refresh()); + frm.add_custom_button(label, handler, __('Actions')); + } + }, +}); diff --git a/jcloud/jcloud/pagetype/app_patch/app_patch.json b/jcloud/jcloud/pagetype/app_patch/app_patch.json new file mode 100644 index 0000000..82b4a95 --- /dev/null +++ b/jcloud/jcloud/pagetype/app_patch/app_patch.json @@ -0,0 +1,167 @@ +{ + "actions": [], + "autoname": "autoincrement", + "beta": 1, + "creation": "2024-02-22 10:31:03.366179", + "pagetype": "PageType", + "engine": "InnoDB", + "field_order": [ + "app", + "filename", + "status", + "column_break_vedh", + "app_release", + "url", + "build_assets", + "meta_section", + "bench", + "team", + "column_break_llye", + "group", + "section_break_fqnr", + "patch" + ], + "fields": [ + { + "fieldname": "bench", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Bench", + "options": "Bench", + "reqd": 1 + }, + { + "default": "0", + "fieldname": "build_assets", + "fieldtype": "Check", + "label": "Build 
assets after applying patch" + }, + { + "fieldname": "column_break_vedh", + "fieldtype": "Column Break" + }, + { + "fieldname": "app_release", + "fieldtype": "Link", + "in_list_view": 1, + "label": "App Release", + "options": "App Release", + "reqd": 1 + }, + { + "fieldname": "section_break_fqnr", + "fieldtype": "Section Break", + "label": "Patch" + }, + { + "fieldname": "patch", + "fieldtype": "Code", + "in_list_view": 1, + "label": "Patch", + "reqd": 1 + }, + { + "fetch_from": "bench.group", + "fieldname": "group", + "fieldtype": "Link", + "label": "Release Group", + "options": "Release Group", + "read_only": 1, + "reqd": 1 + }, + { + "depends_on": "eval: pg.url", + "fieldname": "url", + "fieldtype": "Data", + "label": "Patch URL" + }, + { + "fieldname": "filename", + "fieldtype": "Data", + "in_list_view": 1, + "label": "File Name", + "reqd": 1 + }, + { + "fieldname": "app", + "fieldtype": "Link", + "label": "App", + "options": "App", + "reqd": 1 + }, + { + "default": "Not Applied", + "fieldname": "status", + "fieldtype": "Select", + "label": "Status", + "options": "Not Applied\nIn Process\nFailed\nApplied" + }, + { + "fieldname": "meta_section", + "fieldtype": "Section Break", + "label": "Meta" + }, + { + "fieldname": "team", + "fieldtype": "Link", + "label": "Team", + "options": "Team", + "reqd": 1, + "search_index": 1 + }, + { + "fieldname": "column_break_llye", + "fieldtype": "Column Break" + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2024-07-29 13:42:12.525421", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "App Patch", + "naming_rule": "Autoincrement", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + }, + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "Jcloud Admin", + "share": 1, + "write": 1 + }, + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "Jcloud Member", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "title_field": "filename" +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/app_patch/app_patch.py b/jcloud/jcloud/pagetype/app_patch/app_patch.py new file mode 100644 index 0000000..74bedb5 --- /dev/null +++ b/jcloud/jcloud/pagetype/app_patch/app_patch.py @@ -0,0 +1,213 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt + +import json +import typing +from typing import Optional, TypedDict + +import jingrow +import requests +from jingrow.model.document import Document + +from jcloud.agent import Agent +from jcloud.api.client import dashboard_whitelist + +PatchConfig = TypedDict( + "PatchConfig", + { + "patch": Optional[str], + "filename": str, + "patch_url": str, + "build_assets": bool, + "patch_bench": Optional[str], + "patch_all_benches": bool, + }, +) + +AgentPatchConfig = TypedDict( + "AgentPatchConfig", + { + "patch": str, + "filename": str, + "build_assets": bool, + "revert": bool, + }, +) + +if typing.TYPE_CHECKING: + from jcloud.jcloud.pagetype.agent_job.agent_job import AgentJob + + +class AppPatch(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
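For context, a minimal sketch of how a `PatchConfig` payload (the TypedDict declared just above) might be assembled and passed to `create_app_patch`, which is defined further down in this file. This is a hedged sketch that assumes a working jingrow site context; the release group, bench, app, and team values are invented placeholders.

```
from jcloud.jcloud.pagetype.app_patch.app_patch import PatchConfig, create_app_patch

# Illustrative values only; none of these records are assumed to exist.
patch_config: PatchConfig = {
    "patch": None,                                   # inline patch text wins when set
    "patch_url": "https://example.com/fix.patch",    # fetched with requests when "patch" is empty
    "filename": "fix.patch",
    "build_assets": False,                           # rebuild assets after applying the patch
    "patch_bench": "bench-0001",                     # used only when patch_all_benches is False
    "patch_all_benches": False,                      # True targets every Active bench in the group
}

# One App Patch document is inserted per target bench; after_insert applies it.
patch_names = create_app_patch(
    release_group="group-0001",
    app="jerp",
    team="team-0001",
    patch_config=patch_config,
)
```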
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + app: DF.Link + app_release: DF.Link + bench: DF.Link + build_assets: DF.Check + filename: DF.Data + group: DF.Link + name: DF.Int | None + patch: DF.Code + status: DF.Literal["Not Applied", "In Process", "Failed", "Applied"] + team: DF.Link + url: DF.Data | None + # end: auto-generated types + + dashboard_fields = [ + "name", + "app", + "app_release", + "patch", + "filename", + "bench", + "group", + "build_assets", + "url", + "status", + ] + + def validate(self): + self.validate_bench() + + def validate_bench(self): + if jingrow.get_value("Bench", self.bench, "status") == "Active": + return + jingrow.throw(f"Bench {self.bench} is not Active, patch cannot be applied") + + def before_insert(self): + patches = jingrow.get_all( + "App Patch", + fields=["name", "filename"], + filters={"bench": self.bench, "patch": self.patch}, + ) + if not len(patches): + return + + filename = patches[0].get("filename") + jingrow.throw(f"Patch already exists for {self.bench} by the filename {filename}") + + def after_insert(self): + self.apply_patch() + + @dashboard_whitelist() + def delete(self): + super().delete() + + @dashboard_whitelist() + def apply_patch(self): + self.patch_app(revert=False) + + @dashboard_whitelist() + def revert_patch(self): + self.patch_app(revert=True) + + @jingrow.whitelist() + def delete_patch(self): + if self.status != "Not Applied": + jingrow.throw( + f"Cannot delete patch if status is not 'Not Applied'. Current status is '{self.status}'" + ) + + self.delete() + + def patch_app(self, revert: bool): + server = jingrow.db.get_value("Bench", self.bench, "server") + data = dict( + patch=self.patch, + filename=self.filename, + build_assets=self.build_assets, + revert=revert, + ) + Agent(server).patch_app(self, data) + self.status = "In Process" + self.save() + + @staticmethod + def process_patch_app(agent_job: "AgentJob"): + request_data = json.loads(agent_job.request_data) + app_patch = jingrow.get_pg("App Patch", agent_job.reference_name, for_update=True) + + revert = request_data.get("revert") + if agent_job.status == "Failure" and revert: + app_patch.status = "Applied" + elif agent_job.status == "Failure" and not revert: + app_patch.status = "Failed" + elif agent_job.status == "Success" and revert: + app_patch.status = "Not Applied" + elif agent_job.status == "Success" and not revert: + app_patch.status = "Applied" + else: + app_patch.status = "In Process" + + app_patch.save() + + @jingrow.whitelist() + def revert_all_patches(self): + # TODO: Agent job: git reset RELEASE_COMMIT --hard + pass + + +def create_app_patch( + release_group: str, + app: str, + team: str, + patch_config: PatchConfig, +) -> list[str]: + patch = get_patch(patch_config) + benches = get_benches(release_group, patch_config) + patches = [] + + for bench in benches: + pg_dict = dict( + pagetype="App Patch", + patch=patch, + bench=bench, + group=release_group, + app=app, + team=team, + app_release=get_app_release(bench, app), + url=patch_config.get("patch_url"), + filename=patch_config.get("filename"), + build_assets=patch_config.get("build_assets"), + ) + + app_patch: AppPatch = jingrow.get_pg(pg_dict) + app_patch.insert() + patches.append(app_patch.name) + + return patches + + +def get_patch(patch_config: PatchConfig) -> str: + if patch := patch_config.get("patch"): + return patch + + patch_url = patch_config.get("patch_url") + return requests.get(patch_url).text + + +def get_benches(release_group: str, patch_config: 
PatchConfig) -> list[str]: + if not patch_config.get("patch_all_benches"): + return [patch_config["patch_bench"]] + + return jingrow.get_all( + "Bench", + filters={"status": "Active", "group": release_group}, + pluck="name", + ) + + +def get_app_release(bench: str, app: str) -> str: + return jingrow.get_all( + "Bench App", + fields=["release"], + filters={"parent": bench, "app": app}, + pluck="release", + )[0] diff --git a/jcloud/jcloud/pagetype/app_patch/test_app_patch.py b/jcloud/jcloud/pagetype/app_patch/test_app_patch.py new file mode 100644 index 0000000..bb12bae --- /dev/null +++ b/jcloud/jcloud/pagetype/app_patch/test_app_patch.py @@ -0,0 +1,9 @@ +# Copyright (c) 2024, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests.utils import JingrowTestCase + + +class TestAppPatch(JingrowTestCase): + pass diff --git a/jcloud/jcloud/pagetype/app_release/__init__.py b/jcloud/jcloud/pagetype/app_release/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/app_release/app_release.js b/jcloud/jcloud/pagetype/app_release/app_release.js new file mode 100644 index 0000000..679ea3c --- /dev/null +++ b/jcloud/jcloud/pagetype/app_release/app_release.js @@ -0,0 +1,44 @@ +// Copyright (c) 2020, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('App Release', { + refresh: function (frm) { + [ + [__('Clone'), 'clone', true, !frm.pg.cloned], + [__('Cleanup'), 'cleanup', true, frm.pg.cloned], + ].forEach(([label, method, confirm, condition]) => { + if (typeof condition === 'undefined' || condition) { + frm.add_custom_button( + label, + () => { + if (confirm) { + jingrow.confirm( + `Are you sure you want to ${label.toLowerCase()}?`, + () => + frm.call(method).then((r) => { + if (r.message) { + jingrow.msgprint(r.message); + } else { + frm.refresh(); + } + }), + ); + } else { + frm.call(method).then((r) => { + if (r.message) { + jingrow.msgprint(r.message); + } else { + frm.refresh(); + } + }); + } + }, + __('Actions'), + ); + } + }); + frm.add_custom_button(__('View'), () => { + window.open(frm.pg.code_server_url); + }); + }, +}); diff --git a/jcloud/jcloud/pagetype/app_release/app_release.json b/jcloud/jcloud/pagetype/app_release/app_release.json new file mode 100644 index 0000000..492193a --- /dev/null +++ b/jcloud/jcloud/pagetype/app_release/app_release.json @@ -0,0 +1,236 @@ +{ + "actions": [], + "creation": "2020-01-13 15:39:20.221775", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "app", + "source", + "team", + "public", + "cloned", + "column_break_3", + "hash", + "author", + "timestamp", + "message", + "validation_section", + "invalid_release", + "invalidation_reason", + "clone_section", + "clone_directory", + "column_break_9", + "code_server_url", + "section_break_26", + "output", + "status" + ], + "fields": [ + { + "fieldname": "hash", + "fieldtype": "Data", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Hash", + "read_only": 1, + "reqd": 1, + "search_index": 1 + }, + { + "fieldname": "message", + "fieldtype": "Code", + "label": "Message", + "read_only": 1 + }, + { + "fieldname": "author", + "fieldtype": "Data", + "label": "Author", + "read_only": 1 + }, + { + "fieldname": "column_break_3", + "fieldtype": "Column Break" + }, + { + "fieldname": "code_server_url", + "fieldtype": "Text", + "label": "Code Server URL", + "read_only": 1 + }, + { + "fieldname": "output", + "fieldtype": "Code", + "label": "Output", + "read_only": 1 + }, + { + "fieldname": 
"section_break_26", + "fieldtype": "Section Break" + }, + { + "fieldname": "source", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Source", + "options": "App Source", + "read_only": 1, + "reqd": 1, + "search_index": 1 + }, + { + "fieldname": "column_break_9", + "fieldtype": "Column Break" + }, + { + "default": "0", + "fieldname": "cloned", + "fieldtype": "Check", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Cloned", + "read_only": 1 + }, + { + "fieldname": "clone_directory", + "fieldtype": "Text", + "label": "Clone Directory", + "read_only": 1 + }, + { + "fieldname": "app", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "App", + "options": "App", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "clone_section", + "fieldtype": "Section Break", + "label": "Clone" + }, + { + "fetch_from": "source.team", + "fieldname": "team", + "fieldtype": "Link", + "label": "Team", + "options": "Team", + "read_only": 1, + "reqd": 1 + }, + { + "default": "0", + "fetch_from": "source.public", + "fieldname": "public", + "fieldtype": "Check", + "label": "Public", + "read_only": 1 + }, + { + "default": "Draft", + "fieldname": "status", + "fieldtype": "Select", + "in_list_view": 1, + "label": "Status", + "options": "Draft\nApproved\nAwaiting Approval\nRejected" + }, + { + "fieldname": "timestamp", + "fieldtype": "Datetime", + "label": "Timestamp", + "read_only": 1 + }, + { + "fieldname": "validation_section", + "fieldtype": "Section Break", + "label": "Validation" + }, + { + "default": "0", + "depends_on": "eval:pg.invalid_release", + "description": "A release is invalid if it fails validations checks. For instance if it has syntax errors.", + "fieldname": "invalid_release", + "fieldtype": "Check", + "label": "Invalid Release" + }, + { + "depends_on": "eval:pg.invalid_release", + "fieldname": "invalidation_reason", + "fieldtype": "Code", + "label": "Invalidation Reason" + } + ], + "in_create": 1, + "links": [ + { + "group": "Connections", + "link_pagetype": "Bench", + "link_fieldname": "release" + }, + { + "group": "Connections", + "link_pagetype": "Deploy Candidate", + "link_fieldname": "release" + }, + { + "group": "Sources", + "link_pagetype": "App Release Difference", + "link_fieldname": "source_release" + }, + { + "group": "Destinations", + "link_pagetype": "App Release Difference", + "link_fieldname": "destination_release" + }, + { + "group": "Sources", + "link_pagetype": "Deploy Candidate Difference", + "link_fieldname": "source_release" + }, + { + "group": "Destinations", + "link_pagetype": "Deploy Candidate Difference", + "link_fieldname": "destination_release" + } + ], + "modified": "2024-05-09 11:30:20.907001", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "App Release", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + }, + { + "create": 1, + "read": 1, + "role": "Jcloud Admin", + "write": 1 + }, + { + "create": 1, + "read": 1, + "role": "Jcloud Member", + "write": 1 + } + ], + "sort_field": "creation", + "sort_order": "DESC", + "states": [], + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/app_release/app_release.py b/jcloud/jcloud/pagetype/app_release/app_release.py new file mode 100644 index 0000000..69d42cb --- /dev/null +++ b/jcloud/jcloud/pagetype/app_release/app_release.py @@ -0,0 +1,541 @@ 
+# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + +import os +import shlex +import shutil +import subprocess +from datetime import datetime +from typing import Optional, TypedDict + +import jingrow +from jingrow.model.document import Document + +from jcloud.api.github import get_access_token +from jcloud.jcloud.pagetype.app_source.app_source import AppSource +from jcloud.utils import log_error + + +class AppReleaseDict(TypedDict): + name: str + source: str + hash: str + cloned: int + clone_directory: str + timestamp: Optional[datetime] # noqa + creation: datetime + + +class AppReleasePair(TypedDict): + old: AppReleaseDict + new: AppReleaseDict + + +class AppRelease(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + app: DF.Link + author: DF.Data | None # noqa + clone_directory: DF.Text | None # noqa + cloned: DF.Check + code_server_url: DF.Text | None # noqa + hash: DF.Data + invalid_release: DF.Check + invalidation_reason: DF.Code | None # noqa + message: DF.Code | None # noqa + output: DF.Code | None # noqa + public: DF.Check + source: DF.Link + status: DF.Literal["Draft", "Approved", "Awaiting Approval", "Rejected"] + team: DF.Link + timestamp: DF.Datetime | None # noqa + # end: auto-generated types + + dashboard_fields = ["app", "source", "message", "hash", "author", "status"] # noqa + + @staticmethod + def get_list_query(query, filters=None, **list_args): + app_release = jingrow.qb.PageType("App Release") + release_approve_request = jingrow.qb.PageType("App Release Approval Request") + + # Subquery to get the latest screening_status for each app_release + latest_approval_request = ( + jingrow.qb.from_(release_approve_request) + .select(release_approve_request.screening_status) + .where(release_approve_request.app_release == app_release.name) + .orderby(release_approve_request.creation, order=jingrow.qb.terms.Order.desc) + .limit(1) + ) + + # Subquery to get the latest name for each app_release + approval_request_name = ( + jingrow.qb.from_(release_approve_request) + .select(release_approve_request.name) + .where(release_approve_request.app_release == app_release.name) + .orderby(release_approve_request.creation, order=jingrow.qb.terms.Order.desc) + .limit(1) + ) + + # Main query that selects app_release fields and the latest screening_status and name + query = query.select( + app_release.name, + latest_approval_request.as_("screening_status"), + approval_request_name.as_("approval_request_name"), + ) + + return query # noqa + + def validate(self): + if not self.clone_directory: + self.set_clone_directory() + + def before_save(self): + apps = jingrow.get_all("Featured App", {"parent": "Marketplace Settings"}, pluck="app") + teams = jingrow.get_all("Auto Release Team", {"parent": "Marketplace Settings"}, pluck="team") + if self.team in teams or self.app in apps: + self.status = "Approved" + + def after_insert(self): + self.create_release_differences() + self.auto_deploy() + + def get_source(self) -> AppSource: + """Return the `App Source` associated with this `App Release`""" + return jingrow.get_pg("App Source", self.source) + + def get_commit_link(self) -> str: + """Return the commit URL for this app release""" + return f"{self.get_source().repository_url}/commit/{self.hash}" + + @jingrow.whitelist() + def clone(self): + jingrow.enqueue_pg(self.pagetype, self.name, "_clone") + + def _clone(self, 
force: bool = False): + if self.cloned and not force: + return + + self._set_prepared_clone_directory(self.cloned and force) + self._set_code_server_url() + self._clone_repo() + self.cloned = True + self.validate_repo() + self.save(ignore_permissions=True) + + def validate_repo(self): + if self.invalid_release or not self.clone_directory or not os.path.isdir(self.clone_directory): + return + + if (syntax_error := check_python_syntax(self.clone_directory)) or ( + syntax_error := check_pyproject_syntax(self.clone_directory) + ): + self.set_invalid(syntax_error) + + def set_invalid(self, reason: str): + self.invalid_release = True + self.invalidation_reason = reason + + def run(self, command): + try: + return run(command, self.clone_directory) + except Exception as e: + self.cleanup() + log_error( + "App Release Command Exception", + command=command, + output=e.output.decode(), + pg=self, + ) + raise e + + def set_clone_directory(self): + clone_directory = jingrow.db.get_single_value("Jcloud Settings", "clone_directory") + self.clone_directory = os.path.join(clone_directory, self.app, self.source, self.hash[:10]) + + def _set_prepared_clone_directory(self, delete_if_exists: bool = False): + self.clone_directory = get_prepared_clone_directory( + self.app, + self.source, + self.hash, + delete_if_exists, + ) + + def _set_code_server_url(self) -> None: + code_server = jingrow.db.get_single_value("Jcloud Settings", "code_server") + code_server_url = ( + f"{code_server}/?folder=/home/coder/project/{self.app}/{self.source}/{self.hash[:10]}" + ) + self.code_server_url = code_server_url + + def _clone_repo(self): + source: "AppSource" = jingrow.get_pg("App Source", self.source) + url = source.get_repo_url() + + self.output = "" + self.output += self.run("git init") + self.output += self.run(f"git checkout -B {source.branch}") + origin_exists = self.run("git remote").strip() == "origin" + if origin_exists: + self.output += self.run(f"git remote set-url origin {url}") + else: + self.output += self.run(f"git remote add origin {url}") + self.output += self.run("git config credential.helper ''") + + try: + self.output += self.run(f"git fetch --depth 1 origin {self.hash}") + except subprocess.CalledProcessError as e: + stdout = e.stdout.decode("utf-8") + + if not ( + "fatal: could not read Username for 'https://github.com'" in stdout + or "Repository not found." 
in stdout + ): + raise e + + """ + Do not edit without updating deploy_notifications.py + + If this is thrown, and the linked App Source has github_installation_id + set, manual attention might be required, because: + - Installation Id is set + - Installation Id is used to fetch token + - If token cannot be fetched, GitHub responds with an error + - If token is not received _get_repo_url throws + - Hence token was received, but app still cannot be cloned + """ + raise Exception("Repository could not be fetched", self.app) # noqa + + self.output += self.run(f"git checkout {self.hash}") + self.output += self.run(f"git reset --hard {self.hash}") + + def _get_repo_url(self, source: "AppSource") -> str: + if not source.github_installation_id: + return source.repository_url + + token = get_access_token(source.github_installation_id) + if token is None: + # Do not edit without updating deploy_notifications.py + raise Exception("App installation token could not be fetched", self.app) + + return f"https://x-access-token:{token}@git.jingrow.com:3000/{source.repository_owner}/{source.repository}" + + def on_trash(self): + if self.clone_directory and os.path.exists(self.clone_directory): + shutil.rmtree(self.clone_directory) + + @jingrow.whitelist() + def cleanup(self): + self.on_trash() + self.cloned = False + self.save(ignore_permissions=True) + + def create_release_differences(self): + releases = jingrow.db.sql( + """ + SELECT + DISTINCT(app.release) + FROM + `tabBench` bench + LEFT JOIN + `tabBench App` app + ON + bench.name = app.parent + WHERE + bench.status != "Archived" AND + app.source = %s AND + app.release != %s + """, + (self.source, self.name), + as_dict=True, + ) + for release in releases: + difference = jingrow.get_pg( + { + "pagetype": "App Release Difference", + "app": self.app, + "source": self.source, + "source_release": release.release, + "destination_release": self.name, + } + ) + difference.insert() + + def auto_deploy(self): + groups = jingrow.get_all( + "Release Group App", + ["parent"], + {"source": self.source, "enable_auto_deploy": True}, + ) + for group in groups: + if jingrow.get_all( + "Deploy Candidate", + {"status": ("in", ("Pending", "Running")), "group": group.parent}, + ): + continue + group = jingrow.get_pg("Release Group", group.parent) + apps = [app.as_dict() for app in group.apps if app.enable_auto_deploy] + candidate = group.create_deploy_candidate(apps) + if candidate: + candidate.schedule_build_and_deploy() + + +def cleanup_unused_releases(): + sources = jingrow.get_all( + "App Release", + fields=["source as name", "count(*) as count"], + filters={"cloned": True}, + order_by="count desc", + group_by="source", + ) + active_releases = set( + release.release + for release in jingrow.get_all( + "Bench", + fields=["`tabBench App`.release"], + filters={"status": ("!=", "Archived")}, + ) + ) + + deleted = 0 + for source in sources: + releases = jingrow.get_all( + "App Release", + {"source": source.name, "cloned": True}, + pluck="name", + order_by="creation ASC", + ) + for index, release in enumerate(releases): + if deleted > 2000: + return + + # Skip the most recent release + if index >= len(releases) - 1: + break + + # Skip already deployed releases + if release in active_releases: + continue + + try: + jingrow.get_pg("App Release", release, for_update=True).cleanup() + deleted += 1 + jingrow.db.commit() + except Exception: + log_error( + "App Release Cleanup Error", + release=release, + reference_pagetype="App Release", + reference_name=release, + ) + 
jingrow.db.rollback() + + +def get_permission_query_conditions(user): + from jcloud.utils import get_current_team + + if not user: + user = jingrow.session.user + + user_type = jingrow.db.get_value("User", user, "user_type", cache=True) + if user_type == "System User": + return "" + + team = get_current_team() + + return f"(`tabApp Release`.`team` = {jingrow.db.escape(team)} or `tabApp" " Release`.`public` = 1)" + + +def has_permission(pg, ptype, user): + from jcloud.utils import get_current_team + + if not user: + user = jingrow.session.user + + user_type = jingrow.db.get_value("User", user, "user_type", cache=True) + if user_type == "System User": + return True + + team = get_current_team() + if pg.public or pg.team == team: + return True + + return False + + +def get_prepared_clone_directory( + app: str, + source: str, + hash: str, + delete_if_exists: bool = False, +) -> str: + clone_directory: str = jingrow.db.get_single_value("Jcloud Settings", "clone_directory") + if not os.path.exists(clone_directory): + os.mkdir(clone_directory) + + app_directory = os.path.join(clone_directory, app) + if not os.path.exists(app_directory): + os.mkdir(app_directory) + + source_directory = os.path.join(app_directory, source) + if not os.path.exists(source_directory): + os.mkdir(source_directory) + + hash_directory = os.path.join(clone_directory, app, source, hash[:10]) + exists = os.path.exists(hash_directory) + + if exists and delete_if_exists: + shutil.rmtree(hash_directory) + exists = False + + if not exists: + os.mkdir(hash_directory) + + return hash_directory + + +def get_changed_files_between_hashes( + source: str, deployed_hash: str, update_hash: str +) -> Optional[tuple[list[str], AppReleasePair]]: # noqa + """ + Checks diff between two App Releases, if they have not been cloned + the App Releases are cloned this is because the commit needs to be + fetched to diff since it happens locally. + + Note: order of passed hashes do not matter. + """ + deployed_release = get_release_by_source_and_hash(source, deployed_hash) + update_release = get_release_by_source_and_hash(source, update_hash) + is_valid = is_update_after_deployed(update_release, deployed_release) + if not is_valid: + return None + + for release in [deployed_release, update_release]: + if release["cloned"]: + continue + + release_pg: AppRelease = jingrow.get_pg("App Release", release["name"]) + release_pg._clone() + + cwd = deployed_release["clone_directory"] + + """ + Setting remote and fetching alters .git contents, hence it has to be + restored to before the commands had been run. Without this layer will + be rebuilt. 
+ """ + + # Save repo state + run("cp -r .git .git.bak", cwd) + + # Calculate diff against local remote + run(f"git remote add -f diff_temp {update_release['clone_directory']}", cwd) + run(f"git fetch --depth 1 diff_temp {update_hash}", cwd) + diff = run(f"git diff --name-only {deployed_hash} {update_hash}", cwd) + + # Restore repo state + run("rm -rf .git", cwd) + run("mv .git.bak .git", cwd) + + return diff.splitlines(), dict(old=deployed_release, new=update_release) + + +def get_release_by_source_and_hash(source: str, hash: str) -> AppReleaseDict: + releases: list[AppReleaseDict] = jingrow.get_all( + "App Release", + filters={"hash": hash, "source": source}, + fields=[ + "name", + "source", + "hash", + "cloned", + "clone_directory", + "timestamp", + "creation", + ], + limit=1, + ) + + if not releases: + jingrow.throw(f"App Release not found with source: {source} and hash: {hash}") + + return releases[0] + + +def is_update_after_deployed(update_release: AppReleaseDict, deployed_release: AppReleaseDict) -> bool: + update_timestamp = update_release["timestamp"] + deployed_timestamp = deployed_release["timestamp"] + if update_timestamp and deployed_timestamp: + return update_timestamp > deployed_timestamp + + return update_release["creation"] > deployed_release["creation"] + + +def run(command, cwd): + return subprocess.check_output(shlex.split(command), stderr=subprocess.STDOUT, cwd=cwd).decode() + + +def check_python_syntax(dirpath: str) -> str: + """ + Script `compileall` will compile all the Python files + in the given directory. + + If there are errors then return code will be non-zero. + + Flags: + - -q: quiet, only print errors (stdout) + - -o: optimize level, 0 is no optimization + """ + _python = _get_python_path() + command = f"{_python} -m compileall -q -o 0 {dirpath}" + proc = subprocess.run( + shlex.split(command), + text=True, + capture_output=True, + ) + if proc.returncode == 0: + return "" + + if not proc.stdout: + return proc.stderr + + return proc.stdout + + +def _get_python_path() -> str: + try: + from jingrow.utils import get_bench_path + + bench_path = get_bench_path() + _python_path = f"{bench_path}/env/bin/python3" + + if not os.path.exists(_python_path): + _python_path = "python3" + + except ImportError: + _python_path = "python3" + + return _python_path + + +def check_pyproject_syntax(dirpath: str) -> str: + # tomllib does not report errors as expected + # instead returns empty dict + from tomli import TOMLDecodeError, load + + pyproject_path = os.path.join(dirpath, "pyproject.toml") + if not os.path.isfile(pyproject_path): + return "" + + with open(pyproject_path, "rb") as f: + try: + load(f) + except TOMLDecodeError as err: + return "Invalid pyproject.toml at project root\n" + "\n".join(err.args) + + return "" diff --git a/jcloud/jcloud/pagetype/app_release/code.md b/jcloud/jcloud/pagetype/app_release/code.md new file mode 100644 index 0000000..779116b --- /dev/null +++ b/jcloud/jcloud/pagetype/app_release/code.md @@ -0,0 +1,25 @@ +``` +docker run -it -p 127.0.0.1:8021:8080 \ + -v "/home/jingrow/repos:/home/coder/project:ro" \ + --env PASSWORD=3ZRoh4XT7MhscBAn2dcdDMQWt8HoWpZF \ + -d --name codeserver --restart=always \ + codercom/code-server:latest \ + --disable-telemetry --proxy-domain code.staging.jingrow.cloud --verbose /home/coder/project +``` + +``` +server { + listen 80; + server_name code.staging.jingrow.cloud; + location / { + proxy_pass http://127.0.0.1:8021; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection 
'Upgrade'; + proxy_set_header Host $http_host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + } +} +``` \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/app_release/patches/set_clone_directory.py b/jcloud/jcloud/pagetype/app_release/patches/set_clone_directory.py new file mode 100644 index 0000000..f49dc7a --- /dev/null +++ b/jcloud/jcloud/pagetype/app_release/patches/set_clone_directory.py @@ -0,0 +1,23 @@ +# Copyright (c) 2023, JINGROW +# For license information, please see license.txt +import os + +import jingrow + + +def execute(): + jingrow.reload_pagetype("App Release") + clone_directory = jingrow.db.get_single_value("Jcloud Settings", "clone_directory") + releases = jingrow.get_all( + "App Release", + {"clone_directory": ("is", "not set")}, + ["name", "app", "source", "hash"], + ) + for release in releases: + jingrow.db.set_value( + "App Release", + release.name, + "clone_directory", + os.path.join(clone_directory, release.app, release.source, release.hash[:10]), + update_modified=False, + ) diff --git a/jcloud/jcloud/pagetype/app_release/patches/set_status_to_draft.py b/jcloud/jcloud/pagetype/app_release/patches/set_status_to_draft.py new file mode 100644 index 0000000..0fa41e9 --- /dev/null +++ b/jcloud/jcloud/pagetype/app_release/patches/set_status_to_draft.py @@ -0,0 +1,19 @@ +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + +import jingrow + + +def execute(): + jingrow.reload_pagetype("App Release") + + jingrow.db.sql( + """ + UPDATE + `tabApp Release` + SET + status = 'Draft' + WHERE + IFNULL(status, '') = '' + """ + ) diff --git a/jcloud/jcloud/pagetype/app_release/test_app_release.py b/jcloud/jcloud/pagetype/app_release/test_app_release.py new file mode 100644 index 0000000..94ab011 --- /dev/null +++ b/jcloud/jcloud/pagetype/app_release/test_app_release.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# See license.txt + +import typing +import unittest + +import jingrow + +from jcloud.jcloud.pagetype.app_source.app_source import AppSource + +if typing.TYPE_CHECKING: + from jcloud.jcloud.pagetype.app_release.app_release import AppRelease + + +def create_test_app_release(app_source: AppSource, hash: str = None) -> "AppRelease": + """Create test app release given App source.""" + hash = hash or jingrow.mock("sha1") + app_release = jingrow.get_pg( + { + "pagetype": "App Release", + "app": app_source.app, + "source": app_source.name, + "hash": hash, + "message": "Test Msg", + "author": "Test Author", + "deployable": True, + "status": "Approved", + } + ).insert(ignore_if_duplicate=True) + app_release.reload() + return app_release + + +class TestAppRelease(unittest.TestCase): + pass diff --git a/jcloud/jcloud/pagetype/app_release_approval_request/__init__.py b/jcloud/jcloud/pagetype/app_release_approval_request/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/app_release_approval_request/app_release_approval_request.html b/jcloud/jcloud/pagetype/app_release_approval_request/app_release_approval_request.html new file mode 100644 index 0000000..2339e42 --- /dev/null +++ b/jcloud/jcloud/pagetype/app_release_approval_request/app_release_approval_request.html @@ -0,0 +1,44 @@ + +{{ styles }} + +
+  {% for file in result %}
+    {% for line in file.lines %}
+      {% for issue in line.issues %}
+        {{ issue.severity }} - {{ issue.violation }} - {{ issue.match }}
+      {% endfor %}
+      {{ line.highlighted_context }}
+    {% endfor %}
+  {% endfor %}
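The `result` list this template walks is built by `_screen_python_files` and `_render_html` in app_release_approval_request.py further below; a hedged sketch of its shape, with invented file names, line numbers, and matches:

```
# Illustrative structure only; values are made up.
result = [
    {
        "name": "jerp/api.py",        # path relative to the release clone directory
        "id": "jerp_api_py",          # added by _render_html ("/" and "." replaced with "_")
        "score": 1,                   # number of flagged lines, used for sorting
        "lines": [
            {
                "issues": [
                    {
                        "severity": "Critical",
                        "violation": "Arbitrary Command Injection",
                        "match": "subprocess",
                    }
                ],
                "context": {          # built by get_context(lines, index)
                    "line_number": 12,
                    "line_range": [10, 11, 12, 13, 14],
                    "lines": ["...", "...", "import subprocess", "...", "..."],
                },
                "highlighted_context": "<table>...</table>",  # pygments HTML from highlight_context
            }
        ],
    }
]
```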
diff --git a/jcloud/jcloud/pagetype/app_release_approval_request/app_release_approval_request.js b/jcloud/jcloud/pagetype/app_release_approval_request/app_release_approval_request.js new file mode 100644 index 0000000..125a138 --- /dev/null +++ b/jcloud/jcloud/pagetype/app_release_approval_request/app_release_approval_request.js @@ -0,0 +1,28 @@ +// Copyright (c) 2021, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('App Release Approval Request', { + refresh(frm) { + if (['Open', 'Rejected'].includes(frm.pg.status)) { + frm.add_custom_button('Approve Request', () => { + frm.set_value('status', 'Approved'); + frm.save(); + }); + } + + if (!frm.pg.result && frm.pg.screening_status === 'Not Started') { + let btn = frm.add_custom_button('Screen Release', () => { + frm.call('start_screening'); + jingrow.msgprint('Started Screening'); + }); + } + + if (frm.pg.result_html) { + let wrapper = frm.get_field('result_html_rendered').$wrapper; + wrapper.html(frm.pg.result_html); + } + }, + status(frm) { + frm.set_value('reviewed_by', jingrow.session.user); + }, +}); diff --git a/jcloud/jcloud/pagetype/app_release_approval_request/app_release_approval_request.json b/jcloud/jcloud/pagetype/app_release_approval_request/app_release_approval_request.json new file mode 100644 index 0000000..c2b9c79 --- /dev/null +++ b/jcloud/jcloud/pagetype/app_release_approval_request/app_release_approval_request.json @@ -0,0 +1,196 @@ +{ + "actions": [], + "creation": "2021-07-12 16:17:54.427902", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "marketplace_app", + "team", + "status", + "column_break_4", + "app_release", + "app", + "reviewed_by", + "section_break_8", + "reason_for_rejection", + "code_screening_tab", + "screening_status", + "baseline_result_section", + "baseline_request", + "baseline_result", + "baseline_requirements", + "result_section", + "result", + "result_html", + "result_html_rendered", + "requirements", + "comments_tab", + "code_comments" + ], + "fields": [ + { + "fieldname": "marketplace_app", + "fieldtype": "Link", + "label": "Marketplace App", + "options": "Marketplace App", + "reqd": 1, + "set_only_once": 1 + }, + { + "allow_in_quick_entry": 1, + "fieldname": "status", + "fieldtype": "Select", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Status", + "options": "Open\nCancelled\nApproved\nRejected", + "reqd": 1 + }, + { + "fieldname": "app_release", + "fieldtype": "Link", + "label": "App Release", + "options": "App Release", + "reqd": 1, + "set_only_once": 1 + }, + { + "fetch_from": "marketplace_app.team", + "fieldname": "team", + "fieldtype": "Link", + "label": "Team", + "options": "Team", + "read_only": 1 + }, + { + "fieldname": "column_break_4", + "fieldtype": "Column Break" + }, + { + "fetch_from": "marketplace_app.app", + "fieldname": "app", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "App", + "options": "App", + "read_only": 1 + }, + { + "depends_on": "eval:pg.status==\"Rejected\"", + "fieldname": "reason_for_rejection", + "fieldtype": "Text Editor", + "label": "Reason For Rejection", + "mandatory_depends_on": "eval:pg.status==\"Rejected\"" + }, + { + "fieldname": "section_break_8", + "fieldtype": "Section Break" + }, + { + "fieldname": "reviewed_by", + "fieldtype": "Link", + "label": "Reviewed By", + "options": "User" + }, + { + "fieldname": "result", + "fieldtype": "Code", + "label": "Result", + "read_only": 1 + }, + { + "fieldname": "result_html", + 
"fieldtype": "Code", + "label": "Result Html", + "read_only": 1 + }, + { + "fieldname": "code_screening_tab", + "fieldtype": "Tab Break", + "label": "Code Screening" + }, + { + "fieldname": "baseline_result_section", + "fieldtype": "Section Break", + "label": "Baseline Result" + }, + { + "fieldname": "baseline_result", + "fieldtype": "Code", + "label": "Baseline Result", + "read_only": 1 + }, + { + "fieldname": "baseline_requirements", + "fieldtype": "Code", + "label": "Baseline Requirements", + "read_only": 1 + }, + { + "fieldname": "result_section", + "fieldtype": "Section Break", + "label": "Result" + }, + { + "fieldname": "result_html_rendered", + "fieldtype": "HTML", + "label": "Result HTML Rendered" + }, + { + "fieldname": "requirements", + "fieldtype": "Code", + "label": "Requirements" + }, + { + "fieldname": "baseline_request", + "fieldtype": "Data", + "label": "Baseline Request" + }, + { + "default": "Not Started", + "fieldname": "screening_status", + "fieldtype": "Select", + "label": "Screening Status", + "options": "Not Started\nScreening\nComplete" + }, + { + "fieldname": "comments_tab", + "fieldtype": "Tab Break", + "label": "Comments" + }, + { + "fieldname": "code_comments", + "fieldtype": "Table", + "label": "Code Comments", + "options": "App Release Approval Code Comments" + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2024-08-20 14:52:29.180004", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "App Release Approval Request", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "track_changes": 1, + "track_seen": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/app_release_approval_request/app_release_approval_request.py b/jcloud/jcloud/pagetype/app_release_approval_request/app_release_approval_request.py new file mode 100644 index 0000000..3a3047e --- /dev/null +++ b/jcloud/jcloud/pagetype/app_release_approval_request/app_release_approval_request.py @@ -0,0 +1,360 @@ +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + +import glob +import json +import re + +import jingrow +from jingrow.model.document import Document +from jingrow.model.naming import make_autoname +from pygments import highlight +from pygments.formatters import HtmlFormatter as HF +from pygments.lexers import PythonLexer as PL + +from jcloud.jcloud.pagetype.app_release.app_release import AppRelease + + +class AppReleaseApprovalRequest(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + from jcloud.marketplace.pagetype.app_release_approval_code_comments.app_release_approval_code_comments import ( + AppReleaseApprovalCodeComments, + ) + + app: DF.Link | None + app_release: DF.Link + baseline_request: DF.Data | None + baseline_requirements: DF.Code | None + baseline_result: DF.Code | None + code_comments: DF.Table[AppReleaseApprovalCodeComments] + marketplace_app: DF.Link + reason_for_rejection: DF.TextEditor | None + requirements: DF.Code | None + result: DF.Code | None + result_html: DF.Code | None + reviewed_by: DF.Link | None + screening_status: DF.Literal["Not Started", "Screening", "Complete"] + status: DF.Literal["Open", "Cancelled", "Approved", "Rejected"] + team: DF.Link | None + # end: auto-generated types + + dashboard_fields = [ + "name", + "marketplace_app", + "screening_status", + "app_release", + "status", + "result", + "code_comments", + ] + + def before_save(self): + apps = jingrow.get_all("Featured App", {"parent": "Marketplace Settings"}, pluck="app") + teams = jingrow.get_all( + "Auto Release Team", {"parent": "Marketplace Settings"}, pluck="team" + ) + if self.team in teams or self.marketplace_app in apps: + self.status = "Approved" + + @staticmethod + def create(marketplace_app: str, app_release: str): + """Create a new `App Release Approval Request`""" + request = jingrow.new_pg("App Release Approval Request") + request.marketplace_app = marketplace_app + request.app_release = app_release + request.save(ignore_permissions=True) + + def cancel(self): + self.status = "Cancelled" + self.save(ignore_permissions=True) + + def autoname(self): + app = self.marketplace_app + series = f"REQ-{app}-.#####" + self.name = make_autoname(series) + + def before_insert(self): + self.request_already_exists() + self.another_request_awaiting_approval() + self.update_release_status() + + def request_already_exists(self): + requests = jingrow.get_all( + "App Release Approval Request", + filters={"app_release": self.app_release, "status": ("!=", "Cancelled")}, + ) + + if len(requests) > 0: + jingrow.throw("An active request for this app release already exists!") + + def another_request_awaiting_approval(self): + request_source = jingrow.db.get_value("App Release", self.app_release, "source") + + releases_awaiting_approval = jingrow.get_all( + "App Release Approval Request", + filters={"marketplace_app": self.marketplace_app, "status": "Open"}, + pluck="app_release", + ) + sources_awaiting_approval = [ + jingrow.db.get_value("App Release", r, "source") for r in releases_awaiting_approval + ] + + # A request for this source is already open + if request_source in sources_awaiting_approval: + jingrow.throw("A previous release is already awaiting approval!") + + def update_release_status(self): + release: AppRelease = jingrow.get_pg("App Release", self.app_release) + release.status = "Awaiting Approval" + release.save(ignore_permissions=True) + + def on_update(self): + old_pg = self.get_pg_before_save() + + if old_pg is None: + return + + status_updated = old_pg.status != self.status + release = jingrow.get_pg("App Release", self.app_release) + + if status_updated and self.status == "Rejected": + release.status = "Rejected" + self.notify_publisher() + elif status_updated and self.status == "Approved": + release.status = "Approved" + self.notify_publisher() + elif status_updated and self.status == "Cancelled": + release.status = "Draft" + + release.save(ignore_permissions=True) + 
jingrow.db.commit() + + def notify_publisher(self): + marketplace_app = jingrow.get_pg("Marketplace App", self.marketplace_app) + app_release: AppRelease = jingrow.get_pg("App Release", self.app_release) + publisher_email = jingrow.get_pg("Team", marketplace_app.team).user + + jingrow.sendmail( + [publisher_email], + subject=f"Jingrow Marketplace: {marketplace_app.title}", + args={ + "subject": "Update on your app release publish request", + "status": self.status, + "rejection_reason": self.reason_for_rejection, + "commit_message": app_release.message, + "releases_link": f"{jingrow.local.site}/dashboard/marketplace/apps/{self.marketplace_app}/releases", + }, + template="app_approval_request_update", + ) + + @jingrow.whitelist() + def start_screening(self): + self.release = jingrow.get_pg("App Release", self.app_release, for_update=True) + self._set_baseline() + + # Clone the release, if not already + self.release._clone() + + self._screen_python_files() + self._filter_results() + self._render_html() + + self.screening_status = "Complete" + self.save() + + def _set_baseline(self): + approved_releases = jingrow.get_all( + "App Release Approval Request", + fields=["name", "result", "requirements"], + filters={"status": "Approved", "app": self.app, "name": ("!=", self.name)}, + order_by="creation desc", + limit=1, + ) + + if approved_releases: + baseline = approved_releases[0] + self.baseline_request = baseline.name + self.baseline_result = baseline.result + self.baseline_requirements = baseline.requirements + + def _screen_python_files(self): + files = glob.glob(self.release.clone_directory + "/**/*.py", recursive=True) + result = [] + for file in files: + lines = self._screen_python_file(file) + if lines: + name = file.replace(self.release.clone_directory, "", 1)[1:] + f = { + "name": name, + "lines": lines, + "score": len(lines), + } + result.append(f) + result = sorted(result, key=lambda x: x["score"], reverse=True) + self.result = json.dumps(result, indent=2) + + def _screen_python_file(self, filename): + def is_commented_line(line): + stripped_line = line.strip() + return stripped_line.startswith("#") + + with open(filename, "r") as ff: + lines = ff.read().splitlines() + lines_with_issues = [] + for index, line in enumerate(lines): + if is_commented_line(line): + continue + issues = [] + configuration = get_configuration() + for severity, violations in configuration.items(): + for violation, keywords in violations.items(): + pattern = r"(?:^|\W)({})(?:\W|$)".format("|".join(keywords)) + regex = re.compile(pattern) + search = regex.search(line) + if search: + issues.append( + { + "severity": severity, + "violation": violation, + "match": search.group(1), + } + ) + if issues: + context = get_context(lines, index) + lines_with_issues.append({"issues": issues, "context": context}) + return lines_with_issues + + def _filter_results(self): + result = json.loads(self.result) + if self.baseline_request and self.baseline_result: + baseline_result = json.loads(self.baseline_result) + diff_result = [] + for file in result: + if file not in baseline_result: + diff_result.append(file) + else: + diff_result = result + self.diff_result = json.dumps(diff_result, indent=2) + + def _render_html(self): + diff_result = json.loads(self.diff_result) + formatter = HF() + styles = f"" + for file in diff_result: + file["id"] = file["name"].replace("/", "_").replace(".", "_") + for line in file["lines"]: + line["highlighted_context"] = highlight_context(line["context"]) + html = jingrow.render_template( + 
"jcloud/jcloud/pagetype/app_release_approval_request/app_release_approval_request.html", + {"result": diff_result, "styles": styles}, + ) + self.result_html = html + self.result_html_rendered = html + + +def get_context(lines, index, size=2): + length = len(lines) + start = max(0, index - size) + end = min(index + size, length) + lines = lines[start : end + 1] # noqa + return { + "line_number": index + 1, + "line_range": list(range(start + 1, end + 2)), + "lines": lines, + } + + +def highlight_context(context): + line_number = context["line_number"] + line_range = context["line_range"] + lines = context["lines"] + code = "\n".join(lines) + formatter = HF( + linenos="table", + linenostart=line_range[0], + hl_lines=[line_number - line_range[0] + 1], + ) + lexer = PL(stripnl=False, tabsize=4) + highlighted = highlight(code, lexer, formatter) + return highlighted + + +def get_configuration(): + return { + "Critical": { + "Arbitrary Command Injection": ["os", "sys", "subprocess", "sysconfig"], + "Arbitrary Command Injection - Jingrow": ["popen", "execute_in_shell"], + "Arbitrary Code Execution": [ + "exec", + "eval", + "safe_eval", + "safe_exec", + "compile", + "codeop", + ], + "Runtime Imports": [ + "__import__", + "importlib", + "zipimport", + "runpy", + "pkgutil", + "modulefinder", + ], + "Runtime Imports - Jingrow": ["get_attr", "get_module"], + "Unsafe Serialization": ["pickle", "marshal"], + "Template Rendering": ["jinja", "jinja2"], + "Foreign Functions Library": ["ctypes"], + "Arbitrary Code Injection - Posix": [ + "signal", + "syslog", + "pipes", + "fcntl", + "pty", + "tty", + "posix", + "pwd", + "grp", + "spwd", + ], + }, + "Major": { + "File Manipulation": [ + "open", + "io", + "shutil", + "pathlib", + "fileinput", + "sqlite3", + "gzip", + "bz2", + "lzma", + "zipfile", + ], + "File Manipulation - Jingrow": ["touch_file", "get_file_json", "read_file"], + "Site Access": ["get_site_config", "get_sites"], + }, + "Moderate": { + "Potential Screening Bypass": [ + "globals", + "builtins", + "__globals__", + "__builtins__", + "__module__", + "__file__", + "__func__", + "__class__", + "__dict__", + "__self__", + ], + }, + "Low": { + "Debugging": ["inspect", "breakpoint"], + "Multiprocessing": ["multiprocessing", "threading"], + }, + } diff --git a/jcloud/jcloud/pagetype/app_release_approval_request/test_app_release_approval_request.py b/jcloud/jcloud/pagetype/app_release_approval_request/test_app_release_approval_request.py new file mode 100644 index 0000000..facdfaa --- /dev/null +++ b/jcloud/jcloud/pagetype/app_release_approval_request/test_app_release_approval_request.py @@ -0,0 +1,9 @@ +# Copyright (c) 2021, JINGROW +# See license.txt + +# import jingrow +import unittest + + +class TestAppReleaseApprovalRequest(unittest.TestCase): + pass diff --git a/jcloud/jcloud/pagetype/app_release_difference/__init__.py b/jcloud/jcloud/pagetype/app_release_difference/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/app_release_difference/app_release_difference.js b/jcloud/jcloud/pagetype/app_release_difference/app_release_difference.js new file mode 100644 index 0000000..7bd1f4b --- /dev/null +++ b/jcloud/jcloud/pagetype/app_release_difference/app_release_difference.js @@ -0,0 +1,7 @@ +// Copyright (c) 2020, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('App Release Difference', { + // refresh: function(frm) { + // } +}); diff --git a/jcloud/jcloud/pagetype/app_release_difference/app_release_difference.json 
b/jcloud/jcloud/pagetype/app_release_difference/app_release_difference.json new file mode 100644 index 0000000..c8f628b --- /dev/null +++ b/jcloud/jcloud/pagetype/app_release_difference/app_release_difference.json @@ -0,0 +1,153 @@ +{ + "actions": [], + "creation": "2020-12-14 20:15:05.386124", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "app", + "deploy_type", + "column_break_2", + "source", + "section_break_4", + "source_release", + "source_hash", + "column_break_7", + "destination_release", + "destination_hash", + "section_break_10", + "github_diff_url", + "files" + ], + "fields": [ + { + "fieldname": "column_break_2", + "fieldtype": "Column Break" + }, + { + "fetch_from": "source_release.source", + "fieldname": "source", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "App Source", + "options": "App Source", + "reqd": 1, + "search_index": 1, + "set_only_once": 1 + }, + { + "fieldname": "section_break_4", + "fieldtype": "Section Break" + }, + { + "fieldname": "source_release", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Source Release", + "options": "App Release", + "reqd": 1, + "set_only_once": 1 + }, + { + "fetch_from": "source_release.hash", + "fieldname": "source_hash", + "fieldtype": "Data", + "label": "Source Hash", + "read_only": 1 + }, + { + "fieldname": "column_break_7", + "fieldtype": "Column Break" + }, + { + "fieldname": "destination_release", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Destination Release", + "options": "App Release", + "reqd": 1, + "set_only_once": 1 + }, + { + "fetch_from": "destination_release.hash", + "fieldname": "destination_hash", + "fieldtype": "Data", + "label": "Destination Hash", + "read_only": 1 + }, + { + "fieldname": "section_break_10", + "fieldtype": "Section Break" + }, + { + "fieldname": "github_diff_url", + "fieldtype": "Code", + "label": "GitHub Diff URL", + "read_only": 1 + }, + { + "default": "Pending", + "fieldname": "deploy_type", + "fieldtype": "Select", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Deploy Type", + "options": "Pull\nMigrate\nPending", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "files", + "fieldtype": "Code", + "label": "Files", + "read_only": 1 + }, + { + "fieldname": "app", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "App", + "options": "App", + "reqd": 1, + "search_index": 1, + "set_only_once": 1 + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2021-02-16 11:05:32.813726", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "App Release Difference", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + }, + { + "create": 1, + "role": "Jcloud Admin" + }, + { + "create": 1, + "role": "Jcloud Member" + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "title_field": "app", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/app_release_difference/app_release_difference.py b/jcloud/jcloud/pagetype/app_release_difference/app_release_difference.py new file mode 100644 index 0000000..3f7fab2 --- /dev/null +++ b/jcloud/jcloud/pagetype/app_release_difference/app_release_difference.py @@ -0,0 +1,108 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# For 
license information, please see license.txt + + +import json +import re + +import jingrow +from jingrow.model.document import Document +from github import Github + +from jcloud.api.github import get_access_token + + +class AppReleaseDifference(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + app: DF.Link + deploy_type: DF.Literal["Pull", "Migrate", "Pending"] + destination_hash: DF.Data | None + destination_release: DF.Link + files: DF.Code | None + github_diff_url: DF.Code | None + source: DF.Link + source_hash: DF.Data | None + source_release: DF.Link + # end: auto-generated types + + dashboard_fields = ["github_diff_url", "source_hash", "destination_hash"] + + def validate(self): + if self.source_release == self.destination_release: + jingrow.throw( + "Destination Release must be different from Source Release", jingrow.ValidationError + ) + + def set_deploy_type(self): + if self.deploy_type != "Pending": + return + self.deploy_type = "Pull" + + source = jingrow.get_pg("App Source", self.source) + if source.github_installation_id: + try: + github_access_token = get_access_token(source.github_installation_id) + except KeyError: + jingrow.throw("Could not get access token for app source {0}".format(source.name)) + else: + github_access_token = jingrow.get_value("Jcloud Settings", None, "github_access_token") + + client = Github(github_access_token) + try: + repo = client.get_repo(f"{source.repository_owner}/{source.repository}") + except Exception: + self.add_comment( + "Info", + "Could not get repository {0}, so assuming migrate required".format( + source.repository + ), + ) + self.deploy_type = "Migrate" # fallback to migrate + self.save() + return + try: + diff = repo.compare(self.source_hash, self.destination_hash) + self.github_diff_url = diff.html_url + + files = [f.filename for f in diff.files] + except Exception: + files = ["jingrow/geo/languages.json"] + + if is_migrate_needed(files): + self.deploy_type = "Migrate" + + self.files = json.dumps(files, indent=4) + self.save() + + +def is_migrate_needed(files): + patches_file_regex = re.compile(r"\w+/patches\.txt") + if any(map(patches_file_regex.match, files)): + return True + + hooks_regex = re.compile(r"\w+/hooks\.py") + if any(map(hooks_regex.match, files)): + return True + + fixtures_regex = re.compile(r"\w+/fixtures/") + if any(map(fixtures_regex.match, files)): + return True + + custom_regex = re.compile(r"\w+/\w+/custom/") + if any(map(custom_regex.match, files)): + return True + + languages_json = re.compile(r"jingrow/geo/languages.json") + if any(map(languages_json.match, files)): + return True + + json_regex = re.compile(r"\w+/\w+/\w+/(.+)/\1\.json") + return any(map(json_regex.match, files)) diff --git a/jcloud/jcloud/pagetype/app_release_difference/test_app_release_difference.py b/jcloud/jcloud/pagetype/app_release_difference/test_app_release_difference.py new file mode 100644 index 0000000..e87c5df --- /dev/null +++ b/jcloud/jcloud/pagetype/app_release_difference/test_app_release_difference.py @@ -0,0 +1,11 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# See license.txt + + +# import jingrow +import unittest + + +class TestAppReleaseDifference(unittest.TestCase): + pass diff --git a/jcloud/jcloud/pagetype/app_rename/__init__.py b/jcloud/jcloud/pagetype/app_rename/__init__.py new file mode 100644 index 0000000..e69de29 diff --git 
a/jcloud/jcloud/pagetype/app_rename/app_rename.js b/jcloud/jcloud/pagetype/app_rename/app_rename.js new file mode 100644 index 0000000..c167c65 --- /dev/null +++ b/jcloud/jcloud/pagetype/app_rename/app_rename.js @@ -0,0 +1,8 @@ +// Copyright (c) 2023, JINGROW +// For license information, please see license.txt + +// jingrow.ui.form.on("App Rename", { +// refresh(frm) { + +// }, +// }); diff --git a/jcloud/jcloud/pagetype/app_rename/app_rename.json b/jcloud/jcloud/pagetype/app_rename/app_rename.json new file mode 100644 index 0000000..b432d99 --- /dev/null +++ b/jcloud/jcloud/pagetype/app_rename/app_rename.json @@ -0,0 +1,79 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2023-04-20 14:10:57.268298", + "default_view": "List", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "old_name", + "column_break_ccmj", + "new_name", + "section_break_pgse", + "before_migrate_script", + "rollback_script" + ], + "fields": [ + { + "fieldname": "old_name", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Old name", + "reqd": 1 + }, + { + "fieldname": "column_break_ccmj", + "fieldtype": "Column Break" + }, + { + "fieldname": "new_name", + "fieldtype": "Link", + "label": "New name", + "options": "App", + "reqd": 1 + }, + { + "fieldname": "section_break_pgse", + "fieldtype": "Section Break" + }, + { + "fieldname": "rollback_script", + "fieldtype": "Code", + "label": "Rollback Script", + "options": "Python" + }, + { + "fieldname": "before_migrate_script", + "fieldtype": "Code", + "in_list_view": 1, + "label": "Before Migrate Script", + "options": "Python", + "reqd": 1 + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2023-05-04 14:11:07.271889", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "App Rename", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/app_rename/app_rename.py b/jcloud/jcloud/pagetype/app_rename/app_rename.py new file mode 100644 index 0000000..9eb478e --- /dev/null +++ b/jcloud/jcloud/pagetype/app_rename/app_rename.py @@ -0,0 +1,23 @@ +# Copyright (c) 2023, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class AppRename(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
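# A minimal sketch of creating an App Rename record from code for the pagetype defined
# above; the app names and one-line scripts are placeholders, not values used anywhere
# in this repository.
import jingrow

rename = jingrow.get_pg(
    {
        "pagetype": "App Rename",
        "old_name": "old_app_name",                        # free-form Data field
        "new_name": "new_app_name",                        # must link to an existing App
        "before_migrate_script": "# runs before migrate",  # required Code field
        "rollback_script": "# optional rollback script",
    }
).insert()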
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + before_migrate_script: DF.Code + new_name: DF.Link + old_name: DF.Data + rollback_script: DF.Code | None + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/app_rename/test_app_rename.py b/jcloud/jcloud/pagetype/app_rename/test_app_rename.py new file mode 100644 index 0000000..73070ce --- /dev/null +++ b/jcloud/jcloud/pagetype/app_rename/test_app_rename.py @@ -0,0 +1,9 @@ +# Copyright (c) 2023, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests.utils import JingrowTestCase + + +class TestAppRename(JingrowTestCase): + pass diff --git a/jcloud/jcloud/pagetype/app_source/__init__.py b/jcloud/jcloud/pagetype/app_source/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/app_source/app_source.js b/jcloud/jcloud/pagetype/app_source/app_source.js new file mode 100644 index 0000000..6d6d000 --- /dev/null +++ b/jcloud/jcloud/pagetype/app_source/app_source.js @@ -0,0 +1,12 @@ +// Copyright (c) 2020, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('App Source', { + refresh(frm) { + async function create_release() { + await frm.call('create_release', { force: true }); + frm.refresh(); + } + frm.add_custom_button(__('Create Release'), create_release, __('Actions')); + }, +}); diff --git a/jcloud/jcloud/pagetype/app_source/app_source.json b/jcloud/jcloud/pagetype/app_source/app_source.json new file mode 100644 index 0000000..2811e4e --- /dev/null +++ b/jcloud/jcloud/pagetype/app_source/app_source.json @@ -0,0 +1,209 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2022-01-28 20:07:40.451028", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "app", + "app_title", + "jingrow", + "enabled", + "repository_url", + "data_3", + "repository", + "repository_owner", + "branch", + "section_break_8", + "team", + "public", + "column_break_11", + "github_installation_id", + "uninstalled", + "section_break_12", + "versions", + "github_section", + "last_github_poll_failed", + "last_github_response", + "last_synced" + ], + "fields": [ + { + "fieldname": "repository_url", + "fieldtype": "Data", + "label": "Repository URL", + "reqd": 1 + }, + { + "fieldname": "repository", + "fieldtype": "Data", + "label": "Repository", + "read_only": 1 + }, + { + "fieldname": "repository_owner", + "fieldtype": "Data", + "label": "Repository Owner", + "read_only": 1 + }, + { + "fieldname": "branch", + "fieldtype": "Data", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Branch", + "reqd": 1 + }, + { + "fieldname": "team", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Team", + "options": "Team", + "reqd": 1 + }, + { + "default": "0", + "fieldname": "public", + "fieldtype": "Check", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Public" + }, + { + "fieldname": "github_installation_id", + "fieldtype": "Data", + "label": "GitHub Installation ID" + }, + { + "fieldname": "data_3", + "fieldtype": "Column Break" + }, + { + "fieldname": "section_break_8", + "fieldtype": "Section Break" + }, + { + "fieldname": "column_break_11", + "fieldtype": "Column Break" + }, + { + "fieldname": "versions", + "fieldtype": "Table", + "label": "Versions", + "options": "App Source Version", + "reqd": 1 + }, + { + "fieldname": "section_break_12", + "fieldtype": "Section Break" + }, + { + "fieldname": "app", + "fieldtype": "Link", + "in_list_view": 1, + 
"label": "App", + "options": "App", + "reqd": 1 + }, + { + "fetch_from": "app.title", + "fieldname": "app_title", + "fieldtype": "Data", + "label": "App Title", + "reqd": 1 + }, + { + "default": "0", + "fetch_from": "app.jingrow", + "fetch_if_empty": 1, + "fieldname": "jingrow", + "fieldtype": "Check", + "label": "Jingrow" + }, + { + "default": "1", + "fieldname": "enabled", + "fieldtype": "Check", + "label": "Enabled" + }, + { + "fieldname": "github_section", + "fieldtype": "Section Break", + "label": "GitHub" + }, + { + "default": "0", + "fieldname": "last_github_poll_failed", + "fieldtype": "Check", + "label": "Last GitHub Poll Failed", + "read_only": 1 + }, + { + "fieldname": "last_github_response", + "fieldtype": "Code", + "label": "Last GitHub Response", + "read_only": 1 + }, + { + "fieldname": "last_synced", + "fieldtype": "Datetime", + "label": "Last Synced", + "read_only": 1 + }, + { + "default": "0", + "depends_on": "eval:pg.uninstalled", + "description": "If uninstalled it means Jingrow App was removed by user from GitHub. GitHub Installation ID has been invalidated, Create Release or Clone (from a linked App Release) will not work.", + "fieldname": "uninstalled", + "fieldtype": "Check", + "label": "Uninstalled", + "read_only": 1 + } + ], + "index_web_pages_for_search": 1, + "links": [ + { + "link_pagetype": "Error Log", + "link_fieldname": "reference_name" + } + ], + "modified": "2024-04-05 10:12:54.374115", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "App Source", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + }, + { + "create": 1, + "read": 1, + "role": "Jcloud Admin", + "write": 1 + }, + { + "create": 1, + "read": 1, + "role": "Jcloud Member", + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "title_field": "app", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/app_source/app_source.py b/jcloud/jcloud/pagetype/app_source/app_source.py new file mode 100644 index 0000000..7f11049 --- /dev/null +++ b/jcloud/jcloud/pagetype/app_source/app_source.py @@ -0,0 +1,391 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + +from datetime import datetime +from typing import TYPE_CHECKING, List, Optional + +import jingrow +import requests +from jingrow.model.document import Document +from jingrow.model.naming import make_autoname +from jcloud.api.github import get_access_token, get_auth_headers +from jcloud.overrides import get_permission_query_conditions_for_pagetype +from jcloud.utils import get_current_team, log_error + +if TYPE_CHECKING: + from jcloud.jcloud.pagetype.app_release.app_release import AppRelease + + +class AppSource(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + from jcloud.jcloud.pagetype.app_source_version.app_source_version import AppSourceVersion + + app: DF.Link + app_title: DF.Data + branch: DF.Data + enabled: DF.Check + jingrow: DF.Check + github_installation_id: DF.Data | None + last_github_poll_failed: DF.Check + last_github_response: DF.Code | None + last_synced: DF.Datetime | None + public: DF.Check + repository: DF.Data | None + repository_owner: DF.Data | None + repository_url: DF.Data + team: DF.Link + uninstalled: DF.Check + versions: DF.Table[AppSourceVersion] + # end: auto-generated types + + dashboard_fields = ["repository_owner", "repository", "branch"] + + def autoname(self): + series = f"SRC-{self.app}-.###" + self.name = make_autoname(series) + + def after_insert(self): + self.create_release() + + def on_update(self): + self.create_release() + + def validate(self): + self.validate_source_signature() + self.validate_duplicate_versions() + + def add_version(self, version): + self.append("versions", {"version": version}) + self.save() + + def validate_source_signature(self): + # Don't allow multiple sources with same signature + if jingrow.db.exists( + "App Source", + { + "name": ("!=", self.name), + "app": self.app, + "repository_url": self.repository_url, + "branch": self.branch, + "team": self.team, + }, + ): + jingrow.throw( + f"Already added {(self.repository_url, self.branch)} for {self.app}", + jingrow.ValidationError, + ) + + def validate_duplicate_versions(self): + # Don't allow versions to be added multiple times + versions = set() + for row in self.versions: + if row.version in versions: + jingrow.throw( + f"Version {row.version} can be added only once", jingrow.ValidationError + ) + versions.add(row.version) + + def before_save(self): + # Assumes repository_url looks like http://git.jingrow.com:3000/jingrow/jerp + self.repository_url = self.repository_url.removesuffix(".git") + + _, self.repository_owner, self.repository = self.repository_url.rsplit("/", 2) + # self.create_release() + + @jingrow.whitelist() + def create_release(self, force: bool = False, commit_hash: str | None = None): + if self.last_github_poll_failed and not force: + return + + # 将默认值改为"gitea" + git_service_type = jingrow.db.get_single_value("Jcloud Settings", "git_service_type") or "gitea" + + if git_service_type.lower() == "gitea": + # 使用Gitea专用函数 + _commit_hash, commit_info, ok = self.get_gitea_commit_info(commit_hash) + else: + # 使用原始GitHub函数 + _commit_hash, commit_info, ok = self.get_commit_info(commit_hash) + + if not ok: + return + + try: + return self._create_release_for_service(_commit_hash, commit_info, git_service_type) + except Exception: + log_error("Create Release Error", pg=self) + + def _create_release(self, commit_hash: str, commit_info: dict) -> str: + releases = jingrow.get_all( + "App Release", + { + "app": self.app, + "source": self.name, + "hash": commit_hash, + }, + pluck="name", + limit=1, + ) + if len(releases) > 0: + # No need to create a new release + return releases[0] + + return self.create_release_from_commit_info( + commit_hash, + commit_info, + ).name + + def create_release_from_commit_info( + self, + commit_hash: str, + commit_info: dict, + ): + app_release: "AppRelease" = jingrow.get_pg( + { + "pagetype": "App Release", + "app": self.app, + "source": self.name, + "hash": commit_hash, + "team": self.team, + "message": commit_info.get("message"), + "author": commit_info.get("author", {}).get("name"), + "timestamp": 
get_timestamp_from_commit_info(commit_info), + } + ).insert(ignore_permissions=True) + return app_release + + def get_commit_info(self, commit_hash: None | str = None) -> tuple[str, dict, bool]: + """ + If `commit_hash` is not provided, `commit_info` is of the latest commit + on the branch pointed to by `self.hash`. + """ + if (response := self.poll_github(commit_hash)).ok: + self.set_poll_succeeded() + else: + self.set_poll_failed(response) + return ("", {}, False) + + # Will cause recursion of db.save is used + self.db_update() + + data = response.json() + if commit_hash: + return (commit_hash, data.get("commit", {}), True) + + commit_hash = data.get("commit", {}).get("sha", "") + commit_info = data.get("commit", {}).get("commit", {}) + return (commit_hash, commit_info, True) + + def poll_github(self, commit_hash: None | str = None) -> requests.Response: + headers = self.get_auth_headers() + url = f"https://api.github.com/repos/{self.repository_owner}/{self.repository}" + + if commit_hash: + # page and per_page set to reduce unnecessary diff info + url = f"{url}/commits/{commit_hash}?page=1&per_page=1" + else: + url = f"{url}/branches/{self.branch}" + + return requests.get(url, headers=headers) + + def poll_gitea(self, commit_hash: None | str = None) -> requests.Response: + """专门用于Gitea API的请求函数""" + headers = self.get_auth_headers() + git_url = jingrow.db.get_single_value("Jcloud Settings", "git_url") or "http://git.jingrow.com:3000" + url = f"{git_url}/api/v1/repos/{self.repository_owner}/{self.repository}" + + if commit_hash: + url = f"{url}/commits/{commit_hash}" + else: + url = f"{url}/branches/{self.branch}" + + return requests.get(url, headers=headers) + + def set_poll_succeeded(self): + self.last_github_response = "" + self.last_github_poll_failed = False + self.last_synced = jingrow.utils.now() + self.uninstalled = False + + def set_poll_failed(self, response): + self.last_github_response = response.text or "" + self.last_github_poll_failed = True + self.last_synced = jingrow.utils.now() + + """ + If poll fails with 404 after updating the `github_installation_id` it + *probably* means that FC hasn't been granted access to this particular + app by the user. + + In this case the App Source is in an uninstalled state. 
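Note that the GitHub API also answers with 404 when the installation token simply
has no access to the repository (rather than revealing that it exists), so a 404
here cannot distinguish a deleted repository from revoked access; in both cases
the flag stays set until a later poll succeeds and set_poll_succeeded() clears it.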
+ """ + self.uninstalled = response.status_code == 404 + + if response.status_code != 404: + log_error( + "Create Release Error", + response_status_code=response.status_code, + response_text=response.text, + pg=self, + ) + + def get_auth_headers(self) -> dict: + return get_auth_headers(self.github_installation_id) + + def get_access_token(self) -> Optional[str]: + if self.github_installation_id: + return get_access_token(self.github_installation_id) + + return jingrow.get_value("Jcloud Settings", None, "github_access_token") + + def get_repo_url(self) -> str: + if not self.github_installation_id: + return self.repository_url + + token = get_access_token(self.github_installation_id) + if token is None: + # Do not edit without updating deploy_notifications.py + raise Exception("App installation token could not be fetched", self.app) + + return f"https://x-access-token:{token}@git.jingrow.com:3000/{self.repository_owner}/{self.repository}" + + def get_gitea_commit_info(self, commit_hash: None | str = None) -> tuple[str, dict, bool]: + """专门处理Gitea API响应的函数""" + try: + if (response := self.poll_gitea(commit_hash)).ok: + self.set_poll_succeeded() + else: + self.set_poll_failed(response) + return ("", {}, False) + + self.db_update() + + data = response.json() + + if commit_hash: + # 单个提交信息 + return (commit_hash, data, True) + + # 从分支信息中获取提交 + commit_data = data.get("commit", {}) + + # Gitea使用id字段存储哈希 + commit_hash = commit_data.get("id", "") + + # 确保commit_hash不为空 + if not commit_hash: + log_error("获取提交哈希失败", response_data=data, pg=self) + self.last_github_response = "无法从Gitea API获取有效的提交哈希" + self.last_github_poll_failed = True + self.db_update() + return ("", {}, False) + + return (commit_hash, commit_data, True) + + except requests.exceptions.RequestException as e: + log_error("获取Gitea提交信息失败", error=str(e), pg=self) + self.last_github_poll_failed = True + self.last_github_response = f"与Gitea服务器通信失败: {str(e)}" + self.db_update() + return ("", {}, False) + + def create_release_from_gitea_commit_info(self, commit_hash: str, commit_info: dict): + """专门从Gitea提交信息创建Release的函数""" + message = commit_info.get("message", "") + + # Gitea的作者信息结构 + author_info = commit_info.get("author", {}) + author_name = author_info.get("name", "") or author_info.get("username", "") + + # Gitea的时间戳字段 + timestamp_str = commit_info.get("timestamp", "") + timestamp = None + + if timestamp_str: + try: + timestamp = datetime.fromisoformat(timestamp_str).strftime("%Y-%m-%d %H:%M:%S") + except (ValueError, TypeError): + pass + + app_release: "AppRelease" = jingrow.get_pg( + { + "pagetype": "App Release", + "app": self.app, + "source": self.name, + "hash": commit_hash, + "team": self.team, + "message": message, + "author": author_name, + "timestamp": timestamp, + } + ).insert(ignore_permissions=True) + return app_release + + def _create_release_for_service(self, commit_hash: str, commit_info: dict, service_type: str = "github") -> str: + """根据不同Git服务类型创建Release""" + releases = jingrow.get_all( + "App Release", + { + "app": self.app, + "source": self.name, + "hash": commit_hash, + }, + pluck="name", + limit=1, + ) + if len(releases) > 0: + # 已存在,无需创建 + return releases[0] + + if service_type.lower() == "gitea": + return self.create_release_from_gitea_commit_info( + commit_hash, + commit_info, + ).name + else: + return self.create_release_from_commit_info( + commit_hash, + commit_info, + ).name + + +def create_app_source( + app: str, repository_url: str, branch: str, versions: List[str] +) -> AppSource: + team = get_current_team() 
+ + app_source = jingrow.get_pg( + { + "pagetype": "App Source", + "app": app, + "repository_url": repository_url, + "branch": branch, + "team": team, + "versions": [{"version": version} for version in versions], + } + ) + + app_source.save() + + return app_source + + +get_permission_query_conditions = get_permission_query_conditions_for_pagetype( + "App Source" +) + + +def get_timestamp_from_commit_info(commit_info: dict) -> str | None: + timestamp_str = commit_info.get("author", {}).get("date") + if not timestamp_str: + return None + + timestamp_str = timestamp_str.replace("Z", "+00:00") + return datetime.fromisoformat(timestamp_str).strftime("%Y-%m-%d %H:%M:%S") diff --git a/jcloud/jcloud/pagetype/app_source/app_source_dashboard.py b/jcloud/jcloud/pagetype/app_source/app_source_dashboard.py new file mode 100644 index 0000000..48ad5b9 --- /dev/null +++ b/jcloud/jcloud/pagetype/app_source/app_source_dashboard.py @@ -0,0 +1,9 @@ +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +def get_data(): + return { + "fieldname": "source", + "transactions": [{"items": ["Release Group", "App Release"]}], + } diff --git a/jcloud/jcloud/pagetype/app_source/test_app_source.py b/jcloud/jcloud/pagetype/app_source/test_app_source.py new file mode 100644 index 0000000..5872faa --- /dev/null +++ b/jcloud/jcloud/pagetype/app_source/test_app_source.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# See license.txt + +import unittest +from unittest.mock import patch + +import jingrow + +from jcloud.jcloud.pagetype.app.app import App +from jcloud.jcloud.pagetype.app_release.test_app_release import create_test_app_release +from jcloud.jcloud.pagetype.app_source.app_source import AppSource +from jcloud.utils import get_current_team + + +@patch.object(AppSource, "create_release", create_test_app_release) +def create_test_app_source( + version: str, + app: App, + repository_url=None, + branch: str = "master", + team: str = None, +) -> AppSource: + """ + Create test app source for app with given version. + + Also creates app release without github api call. 
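A typical call from another test looks roughly like:

    source = create_test_app_source(version, app)

where `version` names an existing Jingrow Version record and `app` is a previously
created test App document; `repository_url` defaults to a mocked URL and `team`
to the current team.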
+ """ + if not repository_url: + repository_url = jingrow.mock("url") + team = team or get_current_team() + return app.add_source(version, repository_url, branch, team) + + +class TestAppSource(unittest.TestCase): + pass diff --git a/jcloud/jcloud/pagetype/app_source_version/__init__.py b/jcloud/jcloud/pagetype/app_source_version/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/app_source_version/app_source_version.json b/jcloud/jcloud/pagetype/app_source_version/app_source_version.json new file mode 100644 index 0000000..2e97488 --- /dev/null +++ b/jcloud/jcloud/pagetype/app_source_version/app_source_version.json @@ -0,0 +1,34 @@ +{ + "actions": [], + "creation": "2020-12-01 09:22:53.614702", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "version" + ], + "fields": [ + { + "fieldname": "version", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Version", + "options": "Jingrow Version", + "reqd": 1 + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2020-12-01 09:22:53.614702", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "App Source Version", + "owner": "Administrator", + "permissions": [], + "quick_entry": 1, + "sort_field": "modified", + "sort_order": "DESC", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/app_source_version/app_source_version.py b/jcloud/jcloud/pagetype/app_source_version/app_source_version.py new file mode 100644 index 0000000..f9f5b0a --- /dev/null +++ b/jcloud/jcloud/pagetype/app_source_version/app_source_version.py @@ -0,0 +1,25 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +# import jingrow +from jingrow.model.document import Document + + +class AppSourceVersion(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + version: DF.Link + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/app_tag/__init__.py b/jcloud/jcloud/pagetype/app_tag/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/app_tag/app_tag.js b/jcloud/jcloud/pagetype/app_tag/app_tag.js new file mode 100644 index 0000000..20c1325 --- /dev/null +++ b/jcloud/jcloud/pagetype/app_tag/app_tag.js @@ -0,0 +1,7 @@ +// Copyright (c) 2020, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('App Tag', { + // refresh: function(frm) { + // } +}); diff --git a/jcloud/jcloud/pagetype/app_tag/app_tag.json b/jcloud/jcloud/pagetype/app_tag/app_tag.json new file mode 100644 index 0000000..3623091 --- /dev/null +++ b/jcloud/jcloud/pagetype/app_tag/app_tag.json @@ -0,0 +1,92 @@ +{ + "actions": [], + "creation": "2020-06-04 19:57:08.666628", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "repository", + "repository_owner", + "github_installation_id", + "column_break_4", + "tag", + "hash", + "timestamp" + ], + "fields": [ + { + "fieldname": "hash", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Hash", + "read_only": 1 + }, + { + "fieldname": "tag", + "fieldtype": "Data", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Tag", + "read_only": 1 + }, + { + "fieldname": "repository", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Repository", + "read_only": 1, + "search_index": 1 + }, + { + "fieldname": "repository_owner", + "fieldtype": "Data", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Repository Owner", + "read_only": 1 + }, + { + "fieldname": "column_break_4", + "fieldtype": "Column Break" + }, + { + "fieldname": "github_installation_id", + "fieldtype": "Data", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "GitHub Installation ID", + "read_only": 1 + }, + { + "fieldname": "timestamp", + "fieldtype": "Data", + "label": "Timestamp" + } + ], + "in_create": 1, + "links": [], + "modified": "2022-12-08 14:04:14.096037", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "App Tag", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "title_field": "tag", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/app_tag/app_tag.py b/jcloud/jcloud/pagetype/app_tag/app_tag.py new file mode 100644 index 0000000..7b823dd --- /dev/null +++ b/jcloud/jcloud/pagetype/app_tag/app_tag.py @@ -0,0 +1,27 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +# import jingrow +from jingrow.model.document import Document + + +class AppTag(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
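# Every field on App Tag is read-only in the form and the pagetype is flagged in_create,
# so records are presumably created from code (for example by whatever syncs tags from
# the Git provider, which is not part of this file). A sketch with placeholder values:
import jingrow

tag = jingrow.get_pg(
    {
        "pagetype": "App Tag",
        "repository": "myapp",
        "repository_owner": "jingrow",
        "github_installation_id": "12345678",
        "tag": "v1.0.0",
        "hash": "0123456789abcdef0123456789abcdef01234567",
        "timestamp": "2024-01-01T00:00:00Z",  # plain Data field, any string works
    }
).insert(ignore_permissions=True)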
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + github_installation_id: DF.Data | None + hash: DF.Data | None + repository: DF.Data | None + repository_owner: DF.Data | None + tag: DF.Data | None + timestamp: DF.Data | None + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/app_tag/test_app_tag.py b/jcloud/jcloud/pagetype/app_tag/test_app_tag.py new file mode 100644 index 0000000..f91bf61 --- /dev/null +++ b/jcloud/jcloud/pagetype/app_tag/test_app_tag.py @@ -0,0 +1,11 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# See license.txt + + +# import jingrow +import unittest + + +class TestAppTag(unittest.TestCase): + pass diff --git a/jcloud/jcloud/pagetype/audit_log/__init__.py b/jcloud/jcloud/pagetype/audit_log/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/audit_log/audit_log.js b/jcloud/jcloud/pagetype/audit_log/audit_log.js new file mode 100644 index 0000000..6980d33 --- /dev/null +++ b/jcloud/jcloud/pagetype/audit_log/audit_log.js @@ -0,0 +1,7 @@ +// Copyright (c) 2021, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Audit Log', { + // refresh: function(frm) { + // } +}); diff --git a/jcloud/jcloud/pagetype/audit_log/audit_log.json b/jcloud/jcloud/pagetype/audit_log/audit_log.json new file mode 100644 index 0000000..91fcc4e --- /dev/null +++ b/jcloud/jcloud/pagetype/audit_log/audit_log.json @@ -0,0 +1,75 @@ +{ + "actions": [], + "creation": "2021-03-16 18:37:18.284146", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "audit_type", + "status", + "telegram_group", + "telegram_group_topic", + "log" + ], + "fields": [ + { + "fieldname": "status", + "fieldtype": "Select", + "label": "Status", + "options": "Success\nFailure", + "read_only": 1 + }, + { + "fieldname": "audit_type", + "fieldtype": "Select", + "in_list_view": 1, + "label": "Audit Type", + "options": "Bench Field Check\nBackup Record Check\nOffsite Backup Check\nRestore Offsite Backup of Site\nApp Server Replica Dirs Check\nUnbilled Subscription Check\nBilling Audit\nPartner Billing Audit", + "read_only": 1 + }, + { + "fieldname": "log", + "fieldtype": "Code", + "label": "Log", + "read_only": 1 + }, + { + "fieldname": "telegram_group", + "fieldtype": "Link", + "label": "Telegram Group", + "options": "Telegram Group", + "read_only": 1 + }, + { + "fieldname": "telegram_group_topic", + "fieldtype": "Data", + "label": "Telegram Group Topic", + "read_only": 1 + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2023-12-15 12:15:11.614291", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Audit Log", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/audit_log/audit_log.py b/jcloud/jcloud/pagetype/audit_log/audit_log.py new file mode 100644 index 0000000..04f9f3d --- /dev/null +++ b/jcloud/jcloud/pagetype/audit_log/audit_log.py @@ -0,0 +1,52 @@ +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt +from __future__ import annotations + +import jingrow +from jingrow.model.document import Document + +from 
jcloud.jcloud.pagetype.telegram_message.telegram_message import TelegramMessage + + +class AuditLog(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + audit_type: DF.Literal[ + "Bench Field Check", + "Backup Record Check", + "Offsite Backup Check", + "Restore Offsite Backup of Site", + "App Server Replica Dirs Check", + "Unbilled Subscription Check", + "Billing Audit", + "Partner Billing Audit", + ] + log: DF.Code | None + status: DF.Literal["Success", "Failure"] + telegram_group: DF.Link | None + telegram_group_topic: DF.Data | None + # end: auto-generated types + + def after_insert(self): + if self.status == "Failure": + self.notify() + + def notify(self): + domain = jingrow.get_value("Jcloud Settings", "Jcloud Settings", "domain") + message = f""" + *FAILED AUDIT* + [{self.audit_type}]({domain}{self.get_url()}) + ``` + {self.log[:3000]} + ``` + """ + + topic = self.telegram_topic or "Errors" + group = self.telegram_group + TelegramMessage.enqueue(message=message, topic=topic, group=group) diff --git a/jcloud/jcloud/pagetype/audit_log/test_audit_log.py b/jcloud/jcloud/pagetype/audit_log/test_audit_log.py new file mode 100644 index 0000000..765b52c --- /dev/null +++ b/jcloud/jcloud/pagetype/audit_log/test_audit_log.py @@ -0,0 +1,11 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2021, JINGROW +# See license.txt + + +# import jingrow +import unittest + + +class TestAuditLog(unittest.TestCase): + pass diff --git a/jcloud/jcloud/pagetype/aws_savings_plan_recommendation/__init__.py b/jcloud/jcloud/pagetype/aws_savings_plan_recommendation/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/aws_savings_plan_recommendation/aws_savings_plan_recommendation.js b/jcloud/jcloud/pagetype/aws_savings_plan_recommendation/aws_savings_plan_recommendation.js new file mode 100644 index 0000000..0524031 --- /dev/null +++ b/jcloud/jcloud/pagetype/aws_savings_plan_recommendation/aws_savings_plan_recommendation.js @@ -0,0 +1,8 @@ +// Copyright (c) 2024, JINGROW +// For license information, please see license.txt + +// jingrow.ui.form.on("AWS Savings Plan Recommendation", { +// refresh(frm) { + +// }, +// }); diff --git a/jcloud/jcloud/pagetype/aws_savings_plan_recommendation/aws_savings_plan_recommendation.json b/jcloud/jcloud/pagetype/aws_savings_plan_recommendation/aws_savings_plan_recommendation.json new file mode 100644 index 0000000..f130e1d --- /dev/null +++ b/jcloud/jcloud/pagetype/aws_savings_plan_recommendation/aws_savings_plan_recommendation.json @@ -0,0 +1,170 @@ +{ + "actions": [], + "autoname": "autoincrement", + "creation": "2024-09-12 13:04:04.454139", + "pagetype": "PageType", + "engine": "InnoDB", + "field_order": [ + "savings_plan_type", + "term", + "payment_option", + "column_break_ykwh", + "lookback_period", + "currency", + "generated_at", + "section_break_ammq", + "monthly_commitment", + "hourly_commitment", + "column_break_aklc", + "monthly_on_demand_spend", + "hourly_on_demand_spend", + "monthly_savings_amount", + "column_break_ntqu", + "savings_percentage", + "roi_percentage", + "upfront_cost" + ], + "fields": [ + { + "fieldname": "generated_at", + "fieldtype": "Datetime", + "label": "Generated At", + "read_only": 1 + }, + { + "fieldname": "lookback_period", + "fieldtype": "Data", + "label": "Lookback Period", + "read_only": 1 + }, + { + "fieldname": "payment_option", + "fieldtype": "Data", + 
"in_standard_filter": 1, + "label": "Payment Option", + "read_only": 1 + }, + { + "fieldname": "currency", + "fieldtype": "Link", + "label": "Currency", + "options": "Currency", + "read_only": 1 + }, + { + "fieldname": "upfront_cost", + "fieldtype": "Currency", + "label": "Upfront Cost", + "options": "currency", + "read_only": 1 + }, + { + "fieldname": "column_break_ykwh", + "fieldtype": "Column Break" + }, + { + "fieldname": "section_break_ammq", + "fieldtype": "Section Break" + }, + { + "fieldname": "column_break_ntqu", + "fieldtype": "Column Break" + }, + { + "fieldname": "term", + "fieldtype": "Data", + "in_standard_filter": 1, + "label": "Term", + "read_only": 1 + }, + { + "fieldname": "savings_plan_type", + "fieldtype": "Data", + "in_standard_filter": 1, + "label": "Savings Plan Type", + "read_only": 1 + }, + { + "fieldname": "hourly_commitment", + "fieldtype": "Currency", + "label": "Hourly Commitment", + "options": "currency", + "read_only": 1 + }, + { + "fieldname": "monthly_on_demand_spend", + "fieldtype": "Currency", + "label": "Monthly On Demand Spend", + "options": "currency", + "read_only": 1 + }, + { + "fieldname": "hourly_on_demand_spend", + "fieldtype": "Currency", + "label": "Hourly On Demand Spend", + "options": "currency", + "read_only": 1 + }, + { + "fieldname": "roi_percentage", + "fieldtype": "Float", + "label": "ROI Percentage", + "precision": "0", + "read_only": 1 + }, + { + "fieldname": "monthly_savings_amount", + "fieldtype": "Currency", + "in_list_view": 1, + "label": "Monthly Savings Amount", + "options": "currency", + "read_only": 1 + }, + { + "fieldname": "savings_percentage", + "fieldtype": "Float", + "in_list_view": 1, + "label": "Savings Percentage", + "precision": "0", + "read_only": 1 + }, + { + "fieldname": "monthly_commitment", + "fieldtype": "Currency", + "in_list_view": 1, + "label": "Monthly Commitment", + "options": "currency", + "read_only": 1 + }, + { + "fieldname": "column_break_aklc", + "fieldtype": "Column Break" + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2024-09-12 17:03:38.977549", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "AWS Savings Plan Recommendation", + "naming_rule": "Autoincrement", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "creation", + "sort_order": "DESC", + "states": [], + "title_field": "generated_at" +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/aws_savings_plan_recommendation/aws_savings_plan_recommendation.py b/jcloud/jcloud/pagetype/aws_savings_plan_recommendation/aws_savings_plan_recommendation.py new file mode 100644 index 0000000..fa76d3f --- /dev/null +++ b/jcloud/jcloud/pagetype/aws_savings_plan_recommendation/aws_savings_plan_recommendation.py @@ -0,0 +1,147 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt +from __future__ import annotations + +import boto3 +import jingrow +import jingrow.utils +from jingrow.model.document import Document +from jingrow.utils import cint, flt + +from jcloud.jcloud.pagetype.telegram_message.telegram_message import TelegramMessage +from jcloud.utils import log_error + +AWS_HOURS_IN_A_MONTH = 730 + + +class AWSSavingsPlanRecommendation(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + currency: DF.Link | None + generated_at: DF.Datetime | None + hourly_commitment: DF.Currency + hourly_on_demand_spend: DF.Currency + lookback_period: DF.Data | None + monthly_commitment: DF.Currency + monthly_on_demand_spend: DF.Currency + monthly_savings_amount: DF.Currency + name: DF.Int | None + payment_option: DF.Data | None + roi_percentage: DF.Float + savings_percentage: DF.Float + savings_plan_type: DF.Data | None + term: DF.Data | None + upfront_cost: DF.Currency + # end: auto-generated types + + pass + + def before_insert(self): + response = self.get_recommendation() + + self.generated_at = jingrow.utils.convert_utc_to_system_timezone( + jingrow.utils.get_datetime(response["Metadata"]["GenerationTimestamp"]) + ).replace(tzinfo=None) + + recommendation = response["SavingsPlansPurchaseRecommendation"] + if not recommendation: + return + + self.lookback_period = recommendation["LookbackPeriodInDays"] + self.payment_option = recommendation["PaymentOption"] + self.term = recommendation["TermInYears"] + self.savings_plan_type = recommendation["SavingsPlansType"] + + details = recommendation["SavingsPlansPurchaseRecommendationDetails"][0] + + self.currency = details["CurrencyCode"] + self.upfront_cost = details["UpfrontCost"] + + self.hourly_commitment = flt(details["HourlyCommitmentToPurchase"]) + self.monthly_commitment = self.hourly_commitment * AWS_HOURS_IN_A_MONTH + + self.savings_percentage = flt(details["EstimatedSavingsPercentage"]) + self.hourly_on_demand_spend = flt(details["CurrentAverageHourlyOnDemandSpend"]) + self.monthly_on_demand_spend = self.hourly_on_demand_spend * AWS_HOURS_IN_A_MONTH + self.monthly_savings_amount = self.monthly_on_demand_spend * self.savings_percentage / 100 + + self.roi_percentage = self.monthly_savings_amount / self.monthly_commitment * 100 + + def validate(self): + self.validate_duplicate() + + def after_insert(self): + self.send_telegram_message() + + def send_telegram_message(self): + currency = jingrow.get_value("Currency", self.currency, "symbol") + message = f"""*Savings Plan Recommendation* + +Monthly Savings Amount: *{currency} {cint(self.monthly_savings_amount)}* +Savings Percentage: *{cint(self.savings_percentage)} %* + +Upfront Cost: {currency} {cint(self.upfront_cost)} + +Generated At: `{self.generated_at}` +Lookback Period: `{self.lookback_period}` +Payment Option: `{self.payment_option}` +Term: `{self.term}` +Savings Plan Type: `{self.savings_plan_type}` + +Monthly On Demand Spend: {currency} {cint(self.monthly_on_demand_spend)} +Monthly Commitment: {currency} {cint(self.monthly_commitment)} +ROI Percentage: {cint(self.roi_percentage)} %""" + TelegramMessage.enqueue(message=message, topic="Reminders") + + @property + def client(self): + settings = jingrow.get_single("Jcloud Settings") + return boto3.client( + "ce", + region_name="us-east-1", + aws_access_key_id=settings.aws_access_key_id, + aws_secret_access_key=settings.get_password("aws_secret_access_key"), + ) + + def generate_recommendation(self): + self.client.start_savings_plans_purchase_recommendation_generation() + + def get_recommendation(self): + return self.client.get_savings_plans_purchase_recommendation( + SavingsPlansType="COMPUTE_SP", + TermInYears="THREE_YEARS", + PaymentOption="NO_UPFRONT", + LookbackPeriodInDays="SEVEN_DAYS", + ) + return response + + def validate_duplicate(self): + if jingrow.db.exists( + self.pagetype, + { + "generated_at": self.generated_at, + }, + ): + 
jingrow.throw( + "AWS Savings Plan Recommendation without this timestamp already exists", + jingrow.DuplicateEntryError, + ) + + +def refresh(): + jingrow.new_pg("AWS Savings Plan Recommendation").generate_recommendation() + + +def create(): + try: + jingrow.new_pg("AWS Savings Plan Recommendation").insert() + except jingrow.DuplicateEntryError: + pass + except Exception: + log_error("AWS Savings Plan Recommendation Error") diff --git a/jcloud/jcloud/pagetype/aws_savings_plan_recommendation/test_aws_savings_plan_recommendation.py b/jcloud/jcloud/pagetype/aws_savings_plan_recommendation/test_aws_savings_plan_recommendation.py new file mode 100644 index 0000000..9861847 --- /dev/null +++ b/jcloud/jcloud/pagetype/aws_savings_plan_recommendation/test_aws_savings_plan_recommendation.py @@ -0,0 +1,9 @@ +# Copyright (c) 2024, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests.utils import JingrowTestCase + + +class TestAWSSavingsPlanRecommendation(JingrowTestCase): + pass diff --git a/jcloud/jcloud/pagetype/backup_bucket/__init__.py b/jcloud/jcloud/pagetype/backup_bucket/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/backup_bucket/backup_bucket.js b/jcloud/jcloud/pagetype/backup_bucket/backup_bucket.js new file mode 100644 index 0000000..11b45c0 --- /dev/null +++ b/jcloud/jcloud/pagetype/backup_bucket/backup_bucket.js @@ -0,0 +1,7 @@ +// Copyright (c) 2022, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Backup Bucket', { + // refresh: function(frm) { + // } +}); diff --git a/jcloud/jcloud/pagetype/backup_bucket/backup_bucket.json b/jcloud/jcloud/pagetype/backup_bucket/backup_bucket.json new file mode 100644 index 0000000..bad1002 --- /dev/null +++ b/jcloud/jcloud/pagetype/backup_bucket/backup_bucket.json @@ -0,0 +1,65 @@ +{ + "actions": [], + "allow_rename": 1, + "autoname": "field:bucket_name", + "creation": "2022-11-21 22:28:38.259329", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "cluster", + "bucket_name", + "region", + "endpoint_url" + ], + "fields": [ + { + "fieldname": "cluster", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Cluster", + "options": "Cluster" + }, + { + "fieldname": "bucket_name", + "fieldtype": "Data", + "label": "Bucket Name", + "unique": 1 + }, + { + "fieldname": "region", + "fieldtype": "Data", + "label": "Region" + }, + { + "fieldname": "endpoint_url", + "fieldtype": "Data", + "label": "Endpoint URL" + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2023-03-12 15:34:52.023834", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Backup Bucket", + "naming_rule": "By fieldname", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/backup_bucket/backup_bucket.py b/jcloud/jcloud/pagetype/backup_bucket/backup_bucket.py new file mode 100644 index 0000000..4d8fe50 --- /dev/null +++ b/jcloud/jcloud/pagetype/backup_bucket/backup_bucket.py @@ -0,0 +1,23 @@ +# Copyright (c) 2022, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class BackupBucket(Document): + # begin: auto-generated types + # This code is auto-generated. 
Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + bucket_name: DF.Data | None + cluster: DF.Link | None + endpoint_url: DF.Data | None + region: DF.Data | None + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/backup_bucket/test_backup_bucket.py b/jcloud/jcloud/pagetype/backup_bucket/test_backup_bucket.py new file mode 100644 index 0000000..f2e215d --- /dev/null +++ b/jcloud/jcloud/pagetype/backup_bucket/test_backup_bucket.py @@ -0,0 +1,9 @@ +# Copyright (c) 2022, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests.utils import JingrowTestCase + + +class TestBackupBucket(JingrowTestCase): + pass diff --git a/jcloud/jcloud/pagetype/backup_restoration_test/__init__.py b/jcloud/jcloud/pagetype/backup_restoration_test/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/backup_restoration_test/backup_restoration_test.js b/jcloud/jcloud/pagetype/backup_restoration_test/backup_restoration_test.js new file mode 100644 index 0000000..eed788a --- /dev/null +++ b/jcloud/jcloud/pagetype/backup_restoration_test/backup_restoration_test.js @@ -0,0 +1,7 @@ +// Copyright (c) 2022, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Backup Restoration Test', { + // refresh: function(frm) { + // } +}); diff --git a/jcloud/jcloud/pagetype/backup_restoration_test/backup_restoration_test.json b/jcloud/jcloud/pagetype/backup_restoration_test/backup_restoration_test.json new file mode 100644 index 0000000..6e13d9d --- /dev/null +++ b/jcloud/jcloud/pagetype/backup_restoration_test/backup_restoration_test.json @@ -0,0 +1,80 @@ +{ + "actions": [], + "allow_rename": 1, + "autoname": "format:BRT-{DD}-{MM}-{YYYY}-{###}", + "creation": "2022-07-25 12:02:28.181011", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "date", + "status", + "column_break_3", + "site", + "test_site" + ], + "fields": [ + { + "default": "Today", + "fieldname": "date", + "fieldtype": "Date", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Date" + }, + { + "default": "Pending", + "fieldname": "status", + "fieldtype": "Select", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Status", + "options": "Pending\nRunning\nSuccess\nFailure\nUndelivered\nArchive Successful\nArchive Failed", + "read_only": 1 + }, + { + "fieldname": "column_break_3", + "fieldtype": "Column Break" + }, + { + "fieldname": "site", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Site", + "options": "Site" + }, + { + "fieldname": "test_site", + "fieldtype": "Link", + "label": "Test Site", + "options": "Site", + "read_only": 1 + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2022-07-27 10:51:47.263608", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Backup Restoration Test", + "naming_rule": "Expression (old style)", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/backup_restoration_test/backup_restoration_test.py b/jcloud/jcloud/pagetype/backup_restoration_test/backup_restoration_test.py new file mode 100644 index 0000000..7c1d27f --- /dev/null +++ 
b/jcloud/jcloud/pagetype/backup_restoration_test/backup_restoration_test.py @@ -0,0 +1,79 @@ +# Copyright (c) 2022, JINGROW +# For license information, please see license.txt + +import jingrow +from jingrow.model.document import Document + +from jcloud.api.site import _new +from jcloud.jcloud.pagetype.site.site import prepare_site + + +class BackupRestorationTest(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + date: DF.Date | None + site: DF.Link | None + status: DF.Literal[ + "Pending", + "Running", + "Success", + "Failure", + "Undelivered", + "Archive Successful", + "Archive Failed", + ] + test_site: DF.Link | None + # end: auto-generated types + + def before_insert(self): + self.new_sitename = "brt-" + str(self.site) + + def validate(self): + self.check_duplicate_test() + self.check_duplicate_active_site() + + def after_insert(self): + self.create_test_site() + + def check_duplicate_test(self): + # check if another backup restoration is already running + backups = jingrow.get_all( + "Backup Restoration Test", + dict(status="Running", site=self.site, name=("!=", self.name)), + pluck="name", + ) + if backups: + jingrow.throw(f"Backup Restoration Test for {self.site} is already running.") + + def check_duplicate_active_site(self): + # check if any active backup restoration test site is active + sites = jingrow.get_all( + "Site", + dict( + status=("in", ["Active", "Inactive", "Broken", "Suspended"]), name=self.test_site + ), + pluck="name", + ) + if sites: + jingrow.throw( + f"Site {self.test_site} is already active. Please archive the site first." + ) + + def create_test_site(self) -> None: + site_dict = prepare_site(self.site) + server = jingrow.get_value("Site", self.site, "server") + try: + site_job = _new(site_dict, server, True) + self.test_site = site_job.get("site") + self.status = "Running" + self.save() + jingrow.db.commit() + except Exception: + jingrow.db.rollback() + jingrow.log_error("Site Creation Error") diff --git a/jcloud/jcloud/pagetype/backup_restoration_test/backup_test.py b/jcloud/jcloud/pagetype/backup_restoration_test/backup_test.py new file mode 100644 index 0000000..35b80a4 --- /dev/null +++ b/jcloud/jcloud/pagetype/backup_restoration_test/backup_test.py @@ -0,0 +1,89 @@ +# Copyright (c) 2022, JINGROW +# For license information, please see license.txt + +from random import choice +from typing import List + +import jingrow + +from jcloud.jcloud.audit import get_benches_in_server +from jcloud.jcloud.pagetype.server.server import Server + + +class BackupTest: + def __init__(self) -> None: + self.trial_plans = jingrow.get_all( + "Site Plan", dict(enabled=1, is_trial_plan=1), pluck="name" + ) + self.sites = self.get_random_sites() + + def get_random_sites(self) -> List: + sites = [] + servers = Server.get_all_primary_prod() + for server in servers: + benches = self.get_benches(server) + for bench in benches: + site_list = jingrow.get_all( + "Site", + dict(status="Active", plan=("not in", self.trial_plans), bench=bench), + pluck="name", + ) + if not site_list: + continue + site = choice(site_list) + sites.append(site) + + return sites + + def start(self): + for site in self.sites: + try: + jingrow.get_pg( + { + "pagetype": "Backup Restoration Test", + "date": jingrow.utils.now_datetime(), + "site": site, + "status": "Running", + } + ).insert() + except Exception: + jingrow.log_error("Backup Restore Test insertion 
failed") + + def get_benches(self, server: str) -> List[str]: + benches = get_benches_in_server(server) + + # select all benches from central benches + # TODO: provision to run for all release groups + groups = jingrow.get_all( + "Release Group", dict(enabled=1, central_bench=1), pluck="name" + ) + + bench_list = [] + for group in groups: + group_benches = jingrow.get_all( + "Bench", dict(status="Active", group=group, server=server), pluck="name" + ) + + for bench in benches.keys(): + if bench in group_benches: + bench_list.append(bench) + + return bench_list + + +def archive_backup_test_sites(): + backup_tests = jingrow.get_all( + "Backup Restoration Test", + dict(status=("in", ("Archive Failed", "Success"))), + pluck="test_site", + ) + if backup_tests: + for test_site in backup_tests: + site = jingrow.get_pg("Site", test_site) + if site.status == "Active": + site.archive(reason="Backup Restoration Test") + + +def run_backup_restore_test(): + backup_restoration_test = BackupTest() + backup_restoration_test.start() diff --git a/jcloud/jcloud/pagetype/backup_restoration_test/test_backup_restoration_test.py b/jcloud/jcloud/pagetype/backup_restoration_test/test_backup_restoration_test.py new file mode 100644 index 0000000..0842c37 --- /dev/null +++ b/jcloud/jcloud/pagetype/backup_restoration_test/test_backup_restoration_test.py @@ -0,0 +1,9 @@ +# Copyright (c) 2022, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests.utils import JingrowTestCase + + +class TestBackupRestorationTest(JingrowTestCase): + pass diff --git a/jcloud/jcloud/pagetype/balance_transaction/__init__.py b/jcloud/jcloud/pagetype/balance_transaction/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/balance_transaction/balance_transaction.js b/jcloud/jcloud/pagetype/balance_transaction/balance_transaction.js new file mode 100644 index 0000000..8823fec --- /dev/null +++ b/jcloud/jcloud/pagetype/balance_transaction/balance_transaction.js @@ -0,0 +1,7 @@ +// Copyright (c) 2020, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Balance Transaction', { + // refresh: function(frm) { + // } +}); diff --git a/jcloud/jcloud/pagetype/balance_transaction/balance_transaction.json b/jcloud/jcloud/pagetype/balance_transaction/balance_transaction.json new file mode 100644 index 0000000..7327cc6 --- /dev/null +++ b/jcloud/jcloud/pagetype/balance_transaction/balance_transaction.json @@ -0,0 +1,153 @@ +{ + "actions": [], + "autoname": "BT-.YYYY.-.#####", + "creation": "2020-10-21 22:08:02.694042", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "team", + "type", + "source", + "currency", + "amount", + "amended_from", + "description", + "paid_via_local_pg", + "ending_balance", + "unallocated_amount", + "allocated_to", + "invoice" + ], + "fields": [ + { + "fieldname": "team", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Team", + "options": "Team", + "reqd": 1, + "search_index": 1 + }, + { + "default": "Adjustment", + "fieldname": "type", + "fieldtype": "Select", + "label": "Type", + "options": "Adjustment\nApplied To Invoice\nPartnership Fee" + }, + { + "fetch_from": "team.currency", + "fieldname": "currency", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Currency", + "options": "Currency" + }, + { + "fieldname": "amount", + "fieldtype": "Currency", + "in_list_view": 1, + "label": "Amount", + "options": "currency" + }, + { + "fieldname": "amended_from", + "fieldtype": "Link", + "label": 
"Amended From", + "no_copy": 1, + "options": "Balance Transaction", + "print_hide": 1, + "read_only": 1 + }, + { + "fieldname": "description", + "fieldtype": "Small Text", + "label": "Description" + }, + { + "fieldname": "ending_balance", + "fieldtype": "Currency", + "in_list_view": 1, + "label": "Ending Balance", + "options": "currency" + }, + { + "fieldname": "invoice", + "fieldtype": "Data", + "label": "Invoice" + }, + { + "fieldname": "source", + "fieldtype": "Select", + "label": "Source", + "options": "\nPrepaid Credits\nFree Credits\nTransferred Credits\nDiscount\nReferral Bonus\nMarketplace Consumption" + }, + { + "allow_on_submit": 1, + "depends_on": "eval:pg.type == 'Adjustment'", + "fieldname": "unallocated_amount", + "fieldtype": "Currency", + "label": "Unallocated Amount", + "options": "currency", + "read_only": 1 + }, + { + "allow_on_submit": 1, + "depends_on": "eval:pg.type == 'Adjustment'", + "fieldname": "allocated_to", + "fieldtype": "Table", + "label": "Allocated To", + "options": "Balance Transaction Allocation", + "read_only": 1 + }, + { + "default": "0", + "fieldname": "paid_via_local_pg", + "fieldtype": "Check", + "label": "Paid via Local Payment Gateway" + } + ], + "index_web_pages_for_search": 1, + "is_submittable": 1, + "links": [], + "modified": "2025-01-24 17:32:30.535457", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Balance Transaction", + "naming_rule": "Expression (old style)", + "owner": "Administrator", + "permissions": [ + { + "amend": 1, + "cancel": 1, + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "submit": 1, + "write": 1 + }, + { + "amend": 1, + "cancel": 1, + "create": 1, + "delete": 1, + "read": 1, + "role": "Jcloud Admin", + "submit": 1, + "write": 1 + } + ], + "quick_entry": 1, + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "title_field": "team", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/balance_transaction/balance_transaction.py b/jcloud/jcloud/pagetype/balance_transaction/balance_transaction.py new file mode 100644 index 0000000..315fa48 --- /dev/null +++ b/jcloud/jcloud/pagetype/balance_transaction/balance_transaction.py @@ -0,0 +1,140 @@ +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt +from __future__ import annotations + +import jingrow +from jingrow.model.document import Document + +from jcloud.overrides import get_permission_query_conditions_for_pagetype + + +class BalanceTransaction(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + from jcloud.jcloud.pagetype.balance_transaction_allocation.balance_transaction_allocation import ( + BalanceTransactionAllocation, + ) + + allocated_to: DF.Table[BalanceTransactionAllocation] + amended_from: DF.Link | None + amount: DF.Currency + currency: DF.Link | None + description: DF.SmallText | None + ending_balance: DF.Currency + invoice: DF.Data | None + paid_via_local_pg: DF.Check + source: DF.Literal[ + "", + "Prepaid Credits", + "Free Credits", + "Transferred Credits", + "Discount", + "Referral Bonus", + "Marketplace Consumption", + ] + team: DF.Link + type: DF.Literal["Adjustment", "Applied To Invoice", "Partnership Fee"] + unallocated_amount: DF.Currency + # end: auto-generated types + + dashboard_fields = ("type", "amount", "ending_balance", "invoice", "source", "description") + + def validate(self): + if self.amount == 0: + jingrow.throw("Amount cannot be 0") + + def before_submit(self): + if self.type == "Partnership Fee": + # don't update ending balance or unallocated amount for partnership fee + return + + last_balance = jingrow.db.get_all( + "Balance Transaction", + filters={"team": self.team, "docstatus": 1, "type": ("!=", "Partnership Fee")}, + fields=["sum(amount) as ending_balance"], + group_by="team", + pluck="ending_balance", + ) + last_balance = last_balance[0] if last_balance else 0 + if last_balance: + self.ending_balance = (last_balance or 0) + self.amount + else: + self.ending_balance = self.amount + + if self.type == "Adjustment": + self.unallocated_amount = self.amount + if self.unallocated_amount < 0: + # in case of credit transfer + self.consume_unallocated_amount() + self.unallocated_amount = 0 + elif last_balance < 0 and abs(last_balance) <= self.amount: + # previously the balance was negative + # settle the negative balance + self.unallocated_amount = self.amount - abs(last_balance) + self.add_comment(text=f"Settling negative balance of {abs(last_balance)}") + elif last_balance < 0 and abs(last_balance) > self.amount: + # doesn't make sense to throw because payment happens before creating BT + pass + + def before_update_after_submit(self): + total_allocated = sum([d.amount for d in self.allocated_to]) + self.unallocated_amount = self.amount - total_allocated + + def on_submit(self): + jingrow.publish_realtime("balance_updated", user=self.team) + + def consume_unallocated_amount(self): + self.validate_total_unallocated_amount() + + allocation_map = {} + remaining_amount = abs(self.amount) + transactions = jingrow.get_all( + "Balance Transaction", + filters={"docstatus": 1, "team": self.team, "unallocated_amount": (">", 0)}, + fields=["name", "unallocated_amount"], + order_by="creation asc", + ) + for transaction in transactions: + if remaining_amount <= 0: + break + allocated_amount = min(remaining_amount, transaction.unallocated_amount) + remaining_amount -= allocated_amount + allocation_map[transaction.name] = allocated_amount + + for transaction, amount in allocation_map.items(): + pg = jingrow.get_pg("Balance Transaction", transaction) + pg.append( + "allocated_to", + { + "amount": abs(amount), + "currency": self.currency, + "balance_transaction": self.name, + }, + ) + pg.save(ignore_permissions=True) + + def validate_total_unallocated_amount(self): + unallocated_amounts = ( + jingrow.get_all( + "Balance Transaction", + filters={"docstatus": 1, "team": self.team, "unallocated_amount": (">", 0)}, + fields=["unallocated_amount"], + pluck="unallocated_amount", 
+ ) + or [] + ) + if not unallocated_amounts: + jingrow.throw("Cannot create transaction as no unallocated amount found") + if sum(unallocated_amounts) < abs(self.amount): + jingrow.throw( + f"Cannot create transaction as unallocated amount {sum(unallocated_amounts)} is less than {self.amount}" + ) + + +get_permission_query_conditions = get_permission_query_conditions_for_pagetype("Balance Transaction") diff --git a/jcloud/jcloud/pagetype/balance_transaction/test_balance_transaction.py b/jcloud/jcloud/pagetype/balance_transaction/test_balance_transaction.py new file mode 100644 index 0000000..9b1be8c --- /dev/null +++ b/jcloud/jcloud/pagetype/balance_transaction/test_balance_transaction.py @@ -0,0 +1,29 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# See license.txt + + +import unittest + +import jingrow + +from jcloud.jcloud.pagetype.team.test_team import create_test_team + + +class TestBalanceTransaction(unittest.TestCase): + def tearDown(self): + jingrow.db.rollback() + + def test_team_balance(self): + team = create_test_team() + + team.allocate_credit_amount(50, source="") + self.assertEqual(team.get_balance(), 50) + + team.allocate_credit_amount(-10, source="") + self.assertEqual(team.get_balance(), 40) + + team.allocate_credit_amount(100, source="") + self.assertEqual(team.get_balance(), 140) + + self.assertEqual(jingrow.db.count("Balance Transaction", {"team": team.name}), 3) diff --git a/jcloud/jcloud/pagetype/balance_transaction_allocation/__init__.py b/jcloud/jcloud/pagetype/balance_transaction_allocation/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/balance_transaction_allocation/balance_transaction_allocation.json b/jcloud/jcloud/pagetype/balance_transaction_allocation/balance_transaction_allocation.json new file mode 100644 index 0000000..e7b810d --- /dev/null +++ b/jcloud/jcloud/pagetype/balance_transaction_allocation/balance_transaction_allocation.json @@ -0,0 +1,53 @@ +{ + "actions": [], + "creation": "2020-11-12 21:24:19.153533", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "invoice", + "balance_transaction", + "amount", + "currency" + ], + "fields": [ + { + "fieldname": "invoice", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Invoice" + }, + { + "fieldname": "amount", + "fieldtype": "Currency", + "in_list_view": 1, + "label": "Amount", + "options": "currency" + }, + { + "fieldname": "currency", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Currency", + "options": "Currency" + }, + { + "fieldname": "balance_transaction", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Balance Transaction" + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2023-11-18 23:53:33.424419", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Balance Transaction Allocation", + "owner": "Administrator", + "permissions": [], + "sort_field": "modified", + "sort_order": "DESC", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/balance_transaction_allocation/balance_transaction_allocation.py b/jcloud/jcloud/pagetype/balance_transaction_allocation/balance_transaction_allocation.py new file mode 100644 index 0000000..d3f0969 --- /dev/null +++ b/jcloud/jcloud/pagetype/balance_transaction_allocation/balance_transaction_allocation.py @@ -0,0 +1,28 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +# import jingrow +from 
jingrow.model.document import Document + + +class BalanceTransactionAllocation(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + amount: DF.Currency + balance_transaction: DF.Data | None + currency: DF.Link | None + invoice: DF.Data | None + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/bench/__init__.py b/jcloud/jcloud/pagetype/bench/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/bench/bench.js b/jcloud/jcloud/pagetype/bench/bench.js new file mode 100644 index 0000000..f2e8a56 --- /dev/null +++ b/jcloud/jcloud/pagetype/bench/bench.js @@ -0,0 +1,90 @@ +// Copyright (c) 2019, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Bench', { + onload: function (frm) { + frm.set_query('candidate', function () { + return { + filters: { + group: frm.pg.group, + }, + }; + }); + }, + + refresh: function (frm) { + frm.add_web_link( + `/dashboard/groups/${frm.pg.group}/versions/${frm.pg.name}`, + __('Visit Dashboard'), + ); + + [ + [__('Archive'), 'archive'], + [__('Sync Sites Info'), 'sync_info'], + [__('Sync Sites Analytics'), 'sync_analytics'], + [__('Update All Sites'), 'update_all_sites'], + [ + __('Generate NGINX Config'), + 'generate_nginx_config', + frm.pg.status === 'Active', + ], + [ + __('Remove SSH User from Proxy'), + 'remove_ssh_user', + frm.pg.is_ssh_proxy_setup, + ], + [ + __('Add SSH User to Proxy'), + 'add_ssh_user', + !frm.pg.is_ssh_proxy_setup, + ], + [__('Restart'), 'restart', frm.pg.status === 'Active'], + [__('Rebuild'), 'rebuild', frm.pg.status === 'Active'], + [__('Retry New Bench'), 'retry_bench', frm.pg.status === 'Broken'], + [__('Force Update Limits'), 'force_update_limits'], + ].forEach(([label, method, condition]) => { + if (typeof condition === 'undefined' || condition) { + frm.add_custom_button( + label, + () => { + jingrow.confirm( + `Are you sure you want to ${label.toLowerCase()} this bench?`, + () => frm.call(method).then((r) => frm.refresh()), + ); + }, + __('Actions'), + ); + } + }); + + frm.add_custom_button( + 'Move sites', + () => { + let d = new jingrow.ui.Dialog({ + title: 'Move sites', + fields: [ + { + fieldtype: 'Link', + fieldname: 'server', + label: 'Server', + options: 'Server', + reqd: 1, + }, + ], + primary_action({ server }) { + frm.call('move_sites', { server }).then((r) => { + if (!r.exc) { + jingrow.show_alert( + `Scheduled migrations for sites to ${server}`, + ); + } + d.hide(); + }); + }, + }); + d.show(); + }, + __('Actions'), + ); + }, +}); diff --git a/jcloud/jcloud/pagetype/bench/bench.json b/jcloud/jcloud/pagetype/bench/bench.json new file mode 100644 index 0000000..3dfef94 --- /dev/null +++ b/jcloud/jcloud/pagetype/bench/bench.json @@ -0,0 +1,406 @@ +{ + "actions": [], + "creation": "2022-02-08 15:13:45.929287", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "status", + "server", + "database_server", + "managed_database_service", + "column_break_3", + "staging", + "group", + "team", + "cluster", + "deploy_section", + "candidate", + "resetting_bench", + "last_inplace_update_failed", + "column_break_gxqm", + "docker_image", + "inplace_update_docker_image", + "section_break_6", + "apps", + "configuration_section", + "background_workers", + "gunicorn_workers", + 
"gunicorn_threads_per_worker", + "port_offset", + "auto_scale_workers", + "skip_memory_limits", + "memory_high", + "memory_max", + "memory_swap", + "vcpu", + "column_break_11", + "config", + "bench_config", + "ssh_section", + "is_ssh_proxy_setup", + "failures_section", + "last_archive_failure", + "feature_flags_section", + "merge_all_rq_queues", + "merge_default_and_short_rq_queues", + "use_rq_workerpool", + "is_code_server_enabled", + "column_break_mtyb", + "environment_variables", + "mounts_section", + "mounts" + ], + "fields": [ + { + "fieldname": "server", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Server", + "options": "Server", + "read_only": 1, + "reqd": 1 + }, + { + "fetch_from": "candidate.group", + "fetch_if_empty": 1, + "fieldname": "group", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Release Group", + "options": "Release Group", + "reqd": 1, + "set_only_once": 1 + }, + { + "fieldname": "candidate", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Deploy Candidate", + "options": "Deploy Candidate", + "reqd": 1, + "search_index": 1, + "set_only_once": 1 + }, + { + "default": "Pending", + "fieldname": "status", + "fieldtype": "Select", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Status", + "options": "Pending\nInstalling\nUpdating\nActive\nBroken\nArchived", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "port_offset", + "fieldtype": "Int", + "label": "Port Offset", + "read_only": 1 + }, + { + "fieldname": "column_break_3", + "fieldtype": "Column Break" + }, + { + "fieldname": "section_break_6", + "fieldtype": "Section Break", + "label": "Apps" + }, + { + "default": "2", + "fieldname": "gunicorn_workers", + "fieldtype": "Int", + "label": "Gunicorn Workers", + "reqd": 1 + }, + { + "fieldname": "column_break_11", + "fieldtype": "Column Break" + }, + { + "default": "1", + "fieldname": "auto_scale_workers", + "fieldtype": "Check", + "label": "Auto Scale Workers" + }, + { + "fetch_from": "server.database_server", + "fieldname": "database_server", + "fieldtype": "Link", + "label": "Database Server", + "options": "Database Server", + "read_only": 1 + }, + { + "default": "1", + "fieldname": "background_workers", + "fieldtype": "Int", + "label": "Background Workers", + "reqd": 1 + }, + { + "fieldname": "bench_config", + "fieldtype": "Code", + "label": "Bench Configuration", + "options": "JSON", + "read_only": 1 + }, + { + "fieldname": "config", + "fieldtype": "Code", + "label": "Common Site Configuration", + "options": "JSON", + "read_only": 1 + }, + { + "collapsible": 1, + "collapsible_depends_on": "true", + "fieldname": "configuration_section", + "fieldtype": "Section Break", + "label": "Configuration" + }, + { + "fieldname": "apps", + "fieldtype": "Table", + "label": "Apps", + "options": "Bench App", + "read_only": 1 + }, + { + "fieldname": "docker_image", + "fieldtype": "Data", + "label": "Docker Image", + "read_only": 1, + "reqd": 1 + }, + { + "fetch_from": "server.cluster", + "fieldname": "cluster", + "fieldtype": "Link", + "label": "Cluster", + "options": "Cluster", + "read_only": 1, + "reqd": 1 + }, + { + "fetch_from": "group.team", + "fieldname": "team", + "fieldtype": "Link", + "label": "Team", + "options": "Team", + "read_only": 1, + "reqd": 1 + }, + { + "default": "0", + "fieldname": "staging", + "fieldtype": "Check", + "in_standard_filter": 1, + "label": "Staging" + }, + { + "collapsible": 1, + "fieldname": "ssh_section", + "fieldtype": 
"Section Break", + "label": "SSH" + }, + { + "default": "0", + "fieldname": "is_ssh_proxy_setup", + "fieldtype": "Check", + "label": "Is SSH Proxy Setup", + "read_only": 1 + }, + { + "collapsible": 1, + "fieldname": "failures_section", + "fieldtype": "Section Break", + "label": "Failures" + }, + { + "fieldname": "last_archive_failure", + "fieldtype": "Datetime", + "label": "Last Archive Failure", + "read_only": 1 + }, + { + "collapsible": 1, + "fieldname": "feature_flags_section", + "fieldtype": "Section Break", + "label": "Feature Flags" + }, + { + "fieldname": "environment_variables", + "fieldtype": "Table", + "label": "Environment Variables", + "options": "Bench Variable" + }, + { + "default": "0", + "fetch_from": "candidate.merge_all_rq_queues", + "fieldname": "merge_all_rq_queues", + "fieldtype": "Check", + "label": "Merge All RQ Queues", + "read_only": 1 + }, + { + "default": "0", + "fetch_from": "candidate.merge_default_and_short_rq_queues", + "fieldname": "merge_default_and_short_rq_queues", + "fieldtype": "Check", + "label": "Merge Default and Short RQ Queues", + "read_only": 1 + }, + { + "fieldname": "column_break_mtyb", + "fieldtype": "Column Break" + }, + { + "default": "0", + "fieldname": "memory_high", + "fieldtype": "Int", + "label": "Memory High (MB)" + }, + { + "default": "0", + "fieldname": "memory_max", + "fieldtype": "Int", + "label": "Memory Max (MB)" + }, + { + "default": "0", + "fieldname": "vcpu", + "fieldtype": "Int", + "label": "vCPU" + }, + { + "fieldname": "memory_swap", + "fieldtype": "Int", + "label": "Memory Swap (MB)" + }, + { + "default": "0", + "fieldname": "skip_memory_limits", + "fieldtype": "Check", + "label": "Skip Memory Limits" + }, + { + "default": "0", + "description": "Setting this to non-zero value will set Gunicorn worker class to gthread.", + "fetch_from": "group.gunicorn_threads_per_worker", + "fieldname": "gunicorn_threads_per_worker", + "fieldtype": "Int", + "label": "Gunicorn Threads Per Worker", + "non_negative": 1 + }, + { + "default": "0", + "fetch_from": "team.is_code_server_user", + "fieldname": "is_code_server_enabled", + "fieldtype": "Check", + "label": "Is Code Server Enabled", + "read_only": 1 + }, + { + "collapsible": 1, + "fieldname": "mounts_section", + "fieldtype": "Section Break", + "label": "Mounts" + }, + { + "fieldname": "mounts", + "fieldtype": "Table", + "options": "Bench Mount" + }, + { + "default": "0", + "fetch_from": "candidate.use_rq_workerpool", + "fieldname": "use_rq_workerpool", + "fieldtype": "Check", + "label": "Use RQ WorkerPool" + }, + { + "fetch_from": "server.managed_database_service", + "fieldname": "managed_database_service", + "fieldtype": "Link", + "label": "Managed Database Service", + "options": "Managed Database Service" + }, + { + "fieldname": "deploy_section", + "fieldtype": "Section Break", + "label": "Deploy" + }, + { + "fieldname": "column_break_gxqm", + "fieldtype": "Column Break" + }, + { + "depends_on": "eval: pg.inplace_update_docker_image", + "description": "New image created when running an in place update.", + "fieldname": "inplace_update_docker_image", + "fieldtype": "Data", + "label": "In Place Update Docker Image", + "read_only": 1 + }, + { + "default": "0", + "depends_on": "eval: pg.resetting_bench", + "description": "Attempting to reset bench after failed In Place Update.", + "fieldname": "resetting_bench", + "fieldtype": "Check", + "label": "Resetting Bench", + "read_only": 1 + }, + { + "default": "0", + "depends_on": "eval: pg.last_inplace_update_failed", + "description": "Previous 
attempt at updating bench in place failed. Only regular deploy should be attempted.", + "fieldname": "last_inplace_update_failed", + "fieldtype": "Check", + "label": "Last In Place Update Failed", + "read_only": 1 + } + ], + "links": [], + "modified": "2024-10-07 11:05:42.340197", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Bench", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + }, + { + "create": 1, + "read": 1, + "role": "Jcloud Admin", + "write": 1 + }, + { + "create": 1, + "read": 1, + "role": "Jcloud Member", + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/bench/bench.py b/jcloud/jcloud/pagetype/bench/bench.py new file mode 100644 index 0000000..9522c39 --- /dev/null +++ b/jcloud/jcloud/pagetype/bench/bench.py @@ -0,0 +1,1318 @@ +# Copyright (c) 2019, JINGROW +# For license information, please see license.txt + +from __future__ import annotations + +import json +from collections import OrderedDict +from functools import cached_property +from itertools import groupby +from typing import TYPE_CHECKING, Generator, Iterable, Literal + +import jingrow +import pytz +from jingrow.exceptions import DoesNotExistError +from jingrow.model.document import Document +from jingrow.model.naming import append_number_if_name_exists, make_autoname +from jingrow.utils import get_system_timezone + +from jcloud.agent import Agent +from jcloud.api.client import dashboard_whitelist +from jcloud.overrides import get_permission_query_conditions_for_pagetype +from jcloud.jcloud.pagetype.bench_shell_log.bench_shell_log import ( + ExecuteResult, + create_bench_shell_log, +) +from jcloud.jcloud.pagetype.site.site import Site +from jcloud.utils import SupervisorProcess, flatten, log_error, parse_supervisor_status + +TRANSITORY_STATES = ["Pending", "Installing"] +FINAL_STATES = ["Active", "Broken", "Archived"] + +MAX_GUNICORN_WORKERS = 36 +MIN_GUNICORN_WORKERS = 2 +MAX_BACKGROUND_WORKERS = 8 +MIN_BACKGROUND_WORKERS = 1 + +if TYPE_CHECKING: + from jcloud.jcloud.pagetype.agent_job.agent_job import AgentJob + from jcloud.jcloud.pagetype.app_source.app_source import AppSource + from jcloud.jcloud.pagetype.bench_update.bench_update import BenchUpdate + from jcloud.jcloud.pagetype.bench_update_app.bench_update_app import BenchUpdateApp + from jcloud.jcloud.pagetype.deploy_candidate.deploy_candidate import DeployCandidate + + SupervisorctlActions = Literal[ + "start", + "stop", + "restart", + "clear", + "update", + "remove", + ] + + +class Bench(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + from jcloud.jcloud.pagetype.bench_app.bench_app import BenchApp + from jcloud.jcloud.pagetype.bench_mount.bench_mount import BenchMount + from jcloud.jcloud.pagetype.bench_variable.bench_variable import BenchVariable + + apps: DF.Table[BenchApp] + auto_scale_workers: DF.Check + background_workers: DF.Int + bench_config: DF.Code | None + candidate: DF.Link + cluster: DF.Link + config: DF.Code | None + database_server: DF.Link | None + docker_image: DF.Data + environment_variables: DF.Table[BenchVariable] + group: DF.Link + gunicorn_threads_per_worker: DF.Int + gunicorn_workers: DF.Int + inplace_update_docker_image: DF.Data | None + is_code_server_enabled: DF.Check + is_ssh_proxy_setup: DF.Check + last_archive_failure: DF.Datetime | None + last_inplace_update_failed: DF.Check + managed_database_service: DF.Link | None + memory_high: DF.Int + memory_max: DF.Int + memory_swap: DF.Int + merge_all_rq_queues: DF.Check + merge_default_and_short_rq_queues: DF.Check + mounts: DF.Table[BenchMount] + port_offset: DF.Int + resetting_bench: DF.Check + server: DF.Link + skip_memory_limits: DF.Check + staging: DF.Check + status: DF.Literal["Pending", "Installing", "Updating", "Active", "Broken", "Archived"] + team: DF.Link + use_rq_workerpool: DF.Check + vcpu: DF.Int + # end: auto-generated types + + PAGETYPE = "Bench" + dashboard_fields = ( + "apps", + "name", + "group", + "status", + "cluster", + "is_ssh_proxy_setup", + "inplace_update_docker_image", + ) + + @staticmethod + def get_list_query(query): + Bench = jingrow.qb.PageType("Bench") + + Site = jingrow.qb.PageType("Site") + site_count = ( + jingrow.qb.from_(Site) + .select(jingrow.query_builder.functions.Count("*")) + .where(Site.bench == Bench.name) + .where(Site.status != "Archived") + ) + + benches = ( + query.select( + Bench.is_ssh_proxy_setup, Bench.inplace_update_docker_image, site_count.as_("site_count") + ) + .where(Bench.status != "Archived") + .run(as_dict=1) + ) + bench_names = [d.name for d in benches] + benches_with_patches = jingrow.get_all( + "App Patch", + fields=["bench"], + filters={"bench": ["in", bench_names], "status": "Applied"}, + pluck="bench", + ) + for bench in benches: + bench.has_app_patch_applied = bench.name in benches_with_patches + bench.has_updated_inplace = bool(bench.inplace_update_docker_image) + return benches + + def get_pg(self, pg): + user_ssh_key = jingrow.db.get_all( + "User SSH Key", {"user": jingrow.session.user, "is_default": 1}, limit=1 + ) + pg.user_ssh_key = bool(user_ssh_key) + pg.proxy_server = jingrow.db.get_value("Server", self.server, "proxy_server") + + group = jingrow.db.get_value( + "Release Group", + self.group, + ["title", "public", "team", "central_bench"], + as_dict=1, + ) + pg.group_title = group.title + pg.group_team = group.team + pg.group_public = group.public or group.central_bench + + @staticmethod + def with_sites(name: str): + bench = jingrow.get_pg("Bench", name) + sites = jingrow.get_all("Site", filters={"bench": name}, pluck="name") + bench.sites = [jingrow.get_pg("Site", s) for s in sites] + + return bench + + @staticmethod + def all_with_sites(fields=None, filters=None): + benches = jingrow.get_all("Bench", filters=filters, fields=fields, pluck="name") + return [Bench.with_sites(b) for b in benches] + + def autoname(self): + server_name_abbreviation, server_name = jingrow.db.get_value( + "Server", self.server, ["hostname_abbreviation", "hostname"] + ) + candidate_name = 
self.candidate[7:] + + self.name = self.get_bench_name(candidate_name, server_name, server_name_abbreviation) + + def get_bench_name(self, candidate_name, server_name, server_name_abbreviation): + bench_name = f"bench-{candidate_name}-{server_name}" + + if len(bench_name) > 32: + bench_name = f"bench-{candidate_name}-{server_name_abbreviation}" + + return append_number_if_name_exists("Bench", bench_name, separator="-") + + def update_config_with_rg_config(self, config: dict): + release_group_common_site_config = jingrow.db.get_value( + "Release Group", self.group, "common_site_config" + ) + if release_group_common_site_config: + config.update(json.loads(release_group_common_site_config)) + + self.config = json.dumps(config, indent=4) + + def update_bench_config_with_rg_config(self, bench_config: dict): + release_group_bench_config = jingrow.db.get_value("Release Group", self.group, "bench_config") + if release_group_bench_config: + bench_config.update(json.loads(release_group_bench_config)) + + self.bench_config = json.dumps(bench_config, indent=4) + + def set_apps(self, candidate: "DeployCandidate"): + if self.apps: + return + + for release in candidate.apps: + app_release = release.release + app_hash = release.hash + + if release.pullable_release and release.pullable_hash: + app_release = release.pullable_release + app_hash = release.pullable_hash + + self.append( + "apps", + { + "release": app_release, + "source": release.source, + "app": release.app, + "hash": app_hash, + }, + ) + + def validate(self): + if not self.candidate: + candidate = jingrow.get_all("Deploy Candidate", filters={"group": self.group})[0] + self.candidate = candidate.name + candidate = jingrow.get_pg("Deploy Candidate", self.candidate) + self.docker_image = candidate.docker_image + + self.set_apps(candidate) + + if self.is_new(): + self.port_offset = self.get_unused_port_offset() + + config = { + "monitor": True, + "redis_cache": "redis://localhost:13000", + "redis_queue": "redis://localhost:11000", + "redis_socketio": "redis://localhost:13000", + "socketio_port": 9000, + "webserver_port": 8000, + "restart_supervisor_on_update": True, + } + + db_host = jingrow.db.get_value("Database Server", self.database_server, "private_ip") + + if db_host: + config["db_host"] = db_host + config["db_port"] = 3306 + + if self.managed_database_service: + config["rds_db"] = 1 + config["db_host"] = self.managed_database_service + config["db_port"] = jingrow.db.get_value( + "Managed Database Service", self.managed_database_service, "port" + ) + + jcloud_settings_common_site_config = jingrow.db.get_single_value( + "Jcloud Settings", "bench_configuration" + ) + if jcloud_settings_common_site_config: + config.update(json.loads(jcloud_settings_common_site_config)) + + self.update_config_with_rg_config(config) + + server_private_ip = jingrow.db.get_value("Server", self.server, "private_ip") + bench_config = { + "docker_image": self.docker_image, + "web_port": 18000 + self.port_offset, + "socketio_port": 19000 + self.port_offset, + "private_ip": server_private_ip, + "ssh_port": 22000 + self.port_offset, + "codeserver_port": 16000 + self.port_offset, + "is_ssh_enabled": True, + "gunicorn_workers": self.gunicorn_workers, + "background_workers": self.background_workers, + "http_timeout": 120, + "statsd_host": f"{server_private_ip}:9125", + "merge_all_rq_queues": bool(self.merge_all_rq_queues), + "merge_default_and_short_rq_queues": bool(self.merge_default_and_short_rq_queues), + "environment_variables": self.get_environment_variables(), + 
"single_container": True, + "gunicorn_threads_per_worker": self.gunicorn_threads_per_worker, + "is_code_server_enabled": self.is_code_server_enabled, + "use_rq_workerpool": self.use_rq_workerpool, + } + + self.update_bench_config_with_rq_port(bench_config) + self.add_limits(bench_config) + self.update_bench_config_with_rg_config(bench_config) + + def update_bench_config_with_rq_port(self, bench_config): + if self.is_new(): + bench_config["rq_port"] = 11000 + self.port_offset + elif old := self.get_pg_before_save(): + config = json.loads(old.bench_config) + if config.get("rq_port"): + bench_config["rq_port"] = config["rq_port"] + + def add_limits(self, bench_config): + if any([self.memory_high, self.memory_max, self.memory_swap]): + if not all([self.memory_high, self.memory_max, self.memory_swap]): + jingrow.throw("All memory limits need to be set") + + if self.memory_swap != -1 and (self.memory_max > self.memory_swap): + jingrow.throw("Memory Swap needs to be greater than Memory Max") + + if self.memory_high > self.memory_max: + jingrow.throw("Memory Max needs to be greater than Memory High") + + bench_config.update(self.get_limits()) + + def get_limits(self) -> dict: + return { + "memory_high": self.memory_high, + "memory_max": self.memory_max, + "memory_swap": self.memory_swap, + "vcpu": self.vcpu, + } + + @jingrow.whitelist() + def force_update_limits(self): + agent = Agent(self.server) + agent.force_update_bench_limits(self.name, self.get_limits()) + + def get_unused_port_offset(self): + benches = jingrow.get_all( + "Bench", + fields=["port_offset"], + filters={"server": self.server, "status": ("!=", "Archived")}, + ) + all_offsets = range(0, 1000) + used_offsets = map(lambda x: x.port_offset, benches) + available_offsets = set(all_offsets) - set(used_offsets) + return min(available_offsets) + + def on_update(self): + self.update_bench_config() + + def update_bench_config(self, force=False): + if force: + bench_config = json.loads(self.bench_config) + config = json.loads(self.config) + self.update_config_with_rg_config(config) + self.update_bench_config_with_rg_config(bench_config) + self.save() + return + old = self.get_pg_before_save() + if old and (old.config != self.config or old.bench_config != self.bench_config): + agent = Agent(self.server) + agent.update_bench_config(self) + + def after_insert(self): + self.create_agent_request() + + def create_agent_request(self): + agent = Agent(self.server) + agent.new_bench(self) + + @dashboard_whitelist() + def archive(self): + self.status = "Pending" + self.save() # lock 1 + unarchived_sites = jingrow.db.get_value( + "Site", + {"bench": self.name, "status": ("!=", "Archived")}, + "name", + for_update=True, # lock 2 + ) + if unarchived_sites: + jingrow.throw("Cannot archive bench with active sites.") + self.check_ongoing_job() + agent = Agent(self.server) + agent.archive_bench(self) + + def check_ongoing_job(self): + ongoing_jobs = jingrow.db.exists( + "Agent Job", {"bench": self.name, "status": ("in", ["Running", "Pending"])} + ) + if ongoing_jobs: + jingrow.throw("Cannot archive bench with ongoing jobs.") + + @jingrow.whitelist() + def sync_info(self): + """Initiates a Job to update Site Usage, site.config.encryption_key and timezone details for all sites on Bench.""" + try: + sites = jingrow.get_all( + "Site", filters={"bench": self.name, "status": ("!=", "Archived")}, pluck="name" + ) + last_synced_time = round( + convert_user_timezone_to_utc( + jingrow.get_all( + "Site Usage", + filters=[["site", "in", sites]], + limit_page_length=1, 
+ order_by="creation desc", + pluck="creation", + ignore_ifnull=True, + )[0] + ).timestamp() + ) + except IndexError: + last_synced_time = None + + agent = Agent(self.server) + if agent.should_skip_requests(): + return + data = agent.get_sites_info(self, since=last_synced_time) + if data: + for site, info in data.items(): + if not jingrow.db.exists("Site", site): + continue + try: + jingrow.get_pg("Site", site, for_update=True).sync_info(info) + jingrow.db.commit() + except jingrow.DoesNotExistError: + # Ignore: Site got renamed or deleted + pass + except Exception: + log_error( + "Site Sync Error", + site=site, + info=info, + reference_pagetype="Bench", + reference_name=self.name, + ) + jingrow.db.rollback() + + @jingrow.whitelist() + def sync_analytics(self): + agent = Agent(self.server) + if agent.should_skip_requests(): + return + data = agent.get_sites_analytics(self) + if not data: + return + for site, analytics in data.items(): + if not jingrow.db.exists("Site", site): + return + try: + jingrow.get_pg("Site", site).sync_analytics(analytics) + jingrow.db.commit() + except Exception: + log_error( + "Site Analytics Sync Error", + site=site, + analytics=analytics, + reference_pagetype="Bench", + reference_name=self.name, + ) + jingrow.db.rollback() + + def sync_product_site_users(self): + agent = Agent(self.server) + if agent.should_skip_requests(): + return + data = agent.get_sites_analytics(self) + if not data: + return + for site, analytics in data.items(): + if not jingrow.db.exists("Site", site): + return + try: + jingrow.get_pg("Site", site).sync_users_to_product_site(analytics) + jingrow.db.commit() + except Exception: + log_error( + "Site Users Sync Error", + site=site, + analytics=analytics, + reference_pagetype="Bench", + reference_name=self.name, + ) + jingrow.db.rollback() + + @dashboard_whitelist() + def update_all_sites(self): + sites = jingrow.get_all( + "Site", + { + "bench": self.name, + "status": ("in", ("Active", "Inactive", "Suspended")), + }, + pluck="name", + ) + for site in sites: + try: + site = jingrow.get_pg("Site", site) + site.schedule_update() + jingrow.db.commit() + except Exception: + import traceback + + traceback.print_exc() + jingrow.db.rollback() + + @jingrow.whitelist() + def add_ssh_user(self): + proxy_server = jingrow.db.get_value("Server", self.server, "proxy_server") + agent = Agent(proxy_server, server_type="Proxy Server") + agent.add_ssh_user(self) + + @jingrow.whitelist() + def remove_ssh_user(self): + proxy_server = jingrow.db.get_value("Server", self.server, "proxy_server") + agent = Agent(proxy_server, server_type="Proxy Server") + agent.remove_ssh_user(self) + + @jingrow.whitelist() + def generate_nginx_config(self): + agent = Agent(self.server) + agent.update_bench_config(self) + + @cached_property + def workload(self) -> float: + """ + Score representing load on the bench put on by sites. 
+ + = sum of cpu time per day + """ + return ( + jingrow.db.sql_list( + # minimum plan is taken as 10 + f""" + SELECT SUM(plan.cpu_time_per_day) + FROM tabSite site + + JOIN tabSubscription subscription + ON site.name = subscription.document_name + + JOIN `tabSite Plan` plan + ON subscription.plan = plan.name + + WHERE site.bench = "{self.name}" + AND site.status in ("Active", "Pending", "Updating") + """ + )[0] + or 0 + ) + + @property + def server_logs(self): + return Agent(self.server).get(f"benches/{self.name}/logs") + + def get_server_log(self, log): + return Agent(self.server).get(f"benches/{self.name}/logs/{log}") + + def get_server_log_for_log_browser(self, log): + return Agent(self.server).get(f"benches/{self.name}/logs_v2/{log}") + + @jingrow.whitelist() + def move_sites(self, server: str): + try: + destination_bench = jingrow.get_last_pg( + "Bench", + { + "status": "Active", + "candidate": self.candidate, + "server": server, + }, + ) + except DoesNotExistError: + jingrow.throw("Bench of corresponding Deploy Candidate not found in server") + return + sites = jingrow.get_all("Site", {"bench": self.name, "status": "Active"}, pluck="name") + for idx, site in enumerate(sites): + jingrow.get_pg( + { + "pagetype": "Site Migration", + "site": site, + "destination_bench": destination_bench.name, + "scheduled_time": jingrow.utils.add_to_date(None, minutes=5 * idx), + } + ).insert() + + @jingrow.whitelist() + def retry_bench(self): + if jingrow.get_value("Deploy Candidate", self.candidate, "status") != "Success": + jingrow.throw(f"Deploy Candidate {self.candidate} is not Active") + + candidate = jingrow.get_pg("Deploy Candidate", self.candidate) + candidate._create_deploy([self.server]) + + @dashboard_whitelist() + def rebuild(self): + return Agent(self.server).rebuild_bench(self) + + @dashboard_whitelist() + def restart(self, web_only=False): + agent = Agent(self.server) + agent.restart_bench(self, web_only=web_only) + + def get_environment_variables(self): + return {v.key: v.value for v in self.environment_variables} + + def allocate_workers( + self, + server_workload, + max_gunicorn_workers, + max_bg_workers, + set_memory_limits=False, + gunicorn_memory=150, + bg_memory=3 * 80, + ): + """ + Mostly makes sense when called from Server's auto_scale_workers + + Allocates workers and memory if required + """ + try: + max_gn, min_gn, max_bg, min_bg = jingrow.db.get_values( + "Release Group", + self.group, + ( + "max_gunicorn_workers", + "min_gunicorn_workers", + "max_background_workers", + "min_background_workers", + ), + )[0] + self.gunicorn_workers = min( + max_gn or MAX_GUNICORN_WORKERS, + max( + min_gn or MIN_GUNICORN_WORKERS, + round(self.workload / server_workload * max_gunicorn_workers), + ), # min 2 max 36 + ) + if self.gunicorn_threads_per_worker: + # Allocate fewer workers if threaded workers are used + # Roughly workers / threads_per_worker = total number of workers + # 1. At least one worker + # 2. 
Slightly more workers than required + self.gunicorn_workers = jingrow.utils.ceil( + self.gunicorn_workers / self.gunicorn_threads_per_worker + ) + self.background_workers = min( + max_bg or MAX_BACKGROUND_WORKERS, + max( + min_bg or MIN_BACKGROUND_WORKERS, round(self.workload / server_workload * max_bg_workers) + ), # min 1 max 8 + ) + except ZeroDivisionError: # when total_workload is 0 + self.gunicorn_workers = MIN_GUNICORN_WORKERS + self.background_workers = MIN_BACKGROUND_WORKERS + if set_memory_limits: + if self.skip_memory_limits: + self.memory_max = jingrow.db.get_value("Server", self.server, "ram") + self.memory_high = self.memory_max - 1024 + else: + self.memory_high = 512 + ( + self.gunicorn_workers * gunicorn_memory + self.background_workers * bg_memory + ) + self.memory_max = self.memory_high + gunicorn_memory + bg_memory + self.memory_swap = self.memory_max * 2 + else: + self.memory_high = 0 + self.memory_max = 0 + self.memory_swap = 0 + self.save(ignore_permissions=True) + return self.gunicorn_workers, self.background_workers + + def docker_execute( + self, + cmd: str, + subdir: str | None = None, + save_output: bool = True, + create_log: bool = True, + ) -> ExecuteResult: + if self.status not in ["Active", "Broken"]: + raise Exception(f"Bench {self.name} has status {self.status}, docker_execute cannot be run") + + data = {"command": cmd} + if subdir: + data["subdir"] = subdir + + result: ExecuteResult = Agent(self.server).post(f"benches/{self.name}/docker_execute", data) + + if create_log: + create_bench_shell_log(result, self.name, cmd, subdir, save_output) + return result + + def supervisorctl( + self, + action: "SupervisorctlActions", + programs: str | list[str] = "all", + ) -> None: + """ + If programs list is empty then all programs are selected + For reference check: http://supervisord.org/running.html#supervisorctl-actions + """ + if type(programs) is str: + programs = [programs] + + return Agent(self.server).call_supervisorctl( + self.name, + action, + programs, + ) + + def supervisorctl_status(self): + result = self.docker_execute("supervisorctl status") + if result["status"] != "Success" or not result["output"]: + # Check Bench Shell Log for traceback if present + raise Exception("Could not fetch supervisorctl status") + + output = result["output"] + processes = parse_supervisor_status(output) + return sort_supervisor_processes(processes) + + def update_inplace(self, apps: "list[BenchUpdateApp]", sites: "list[str]") -> str: + self.set_self_and_site_status(sites, status="Updating", site_status="Updating") + self.save() + job = Agent(self.server).create_agent_job( + "Update Bench In Place", + path=f"benches/{self.name}/update_inplace", + bench=self.name, + data={ + "sites": sites, + "apps": self.get_inplace_update_apps(apps), + "image": self.get_next_inplace_update_docker_image(), + }, + ) + return job.name + + def get_inplace_update_apps(self, apps: "list[BenchUpdateApp]"): + inplace_update_apps = [] + for app in apps: + source: "AppSource" = jingrow.get_pg("App Source", app.source) + inplace_update_apps.append( + { + "app": app.app, + "url": source.get_repo_url(), + "hash": app.hash, + } + ) + return inplace_update_apps + + def get_next_inplace_update_docker_image(self): + sep = "-inplace-" + default = self.docker_image + sep + "01" + if not self.inplace_update_docker_image: + return default + + splits = self.inplace_update_docker_image.split(sep) + if len(splits) != 2: + return default + + try: + count = int(splits[1]) + 1 + except ValueError: + return default + + 
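+        # Bump the two-digit suffix of the current in-place image, e.g. <image>-inplace-01 -> <image>-inplace-02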
return self.docker_image + f"{sep}{count:02}" + + @staticmethod + def process_update_inplace(job: "AgentJob"): + bench: "Bench" = jingrow.get_pg("Bench", job.bench) + bench._process_update_inplace(job) + bench.save() + + def _process_update_inplace(self, job: "AgentJob"): + req_data = json.loads(job.request_data) or {} + if job.status in ["Undelivered", "Delivery Failure"]: + self.set_self_and_site_status( + req_data.get("sites", []), + status="Active", + site_status="Active", + ) + + elif job.status in ["Pending", "Running"]: + self.set_self_and_site_status( + req_data.get("sites", []), + status="Updating", + site_status="Updating", + ) + + elif job.status == "Failure": + self._handle_inplace_update_failure(req_data) + + elif job.status == "Success": + self._handle_inplace_update_success(req_data, job) + + else: + # no-op + raise NotImplementedError("Unexpected case reached") + + def _handle_inplace_update_failure(self, req_data: dict): + sites = req_data.get("sites", []) + self.set_self_and_site_status( + sites=sites, + status="Broken", + site_status="Broken", + ) + self.last_inplace_update_failed = True + self.recover_update_inplace(sites) + + def recover_update_inplace(self, sites: list[str]): + """Used to attempt recovery if `update_inplace` fails""" + self.resetting_bench = True + self.save() + + # `inplace_update_docker_image` is the last working inplace update image + docker_image = self.inplace_update_docker_image or self.docker_image + + Agent(self.server).create_agent_job( + "Recover Update In Place", + path=f"benches/{self.name}/recover_update_inplace", + bench=self.name, + data={ + "sites": sites, + "image": docker_image, + }, + ) + + @staticmethod + def process_recover_update_inplace(job: "AgentJob"): + bench: "Bench" = jingrow.get_pg("Bench", job.bench) + bench._process_recover_update_inplace(job) + bench.save() + + def _process_recover_update_inplace(self, job: "AgentJob"): + self.resetting_bench = job.status not in ["Running", "Pending"] + if job.status != "Success" and job.status != "Failure": + return + + req_data = json.loads(job.request_data) or {} + status = "Active" if job.status == "Success" else "Broken" + + self.set_self_and_site_status( + req_data.get("sites", []), + status=status, + site_status=status, + ) + + def _handle_inplace_update_success(self, req_data: dict, job: "AgentJob"): + if job.get_step_status("Bench Restart") == "Success": + docker_image = req_data.get("image") + self.inplace_update_docker_image = docker_image + + bench_config = json.loads(self.bench_config) + bench_config.update({"docker_image": docker_image}) + self.bench_config = json.dumps(bench_config, indent=4) + + self.update_apps_after_inplace_update( + update_apps=req_data.get("apps", []), + ) + + self.set_self_and_site_status( + req_data.get("sites", []), + status="Active", + site_status="Active", + ) + self.last_inplace_update_failed = False + + def set_self_and_site_status( + self, + sites: list[str], + status: str, + site_status: str, + ): + self.status = status + for site in sites: + jingrow.set_value("Site", site, "status", site_status) + + def update_apps_after_inplace_update( + self, + update_apps: list[dict], + ): + apps_map = {a.app: a for a in self.apps} + for ua in update_apps: + name = ua.get("app") or "" + if not (bench_app := apps_map.get(name)): + continue + + bench_app.hash = ua.get("hash") + + # Update release by creating one + source: "AppSource" = jingrow.get_pg("App Source", bench_app.source) + if release := source.create_release(True, commit_hash=bench_app.hash): + 
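+                # Link the bench app to the App Release just created for the updated commit hash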
bench_app.release = release + + @classmethod + def get_workloads(cls, sites: list[str]) -> Generator[tuple[str, float, str], None, None]: + benches = jingrow.get_all("Site", filters={"name": ["in", sites]}, pluck="bench", order_by="bench") + for bench_name in benches: + bench = cls(cls.PAGETYPE, bench_name) + yield bench.name, bench.workload, bench.server + + +class StagingSite(Site): + def __init__(self, bench: Bench): + plan = jingrow.db.get_value("Jcloud Settings", None, "staging_plan") + if not plan: + jingrow.throw("Staging plan not set in settings") + log_error(title="Staging plan not set in settings") + super().__init__( + { + "pagetype": "Site", + "subdomain": make_autoname("staging-.########"), + "staging": True, + "bench": bench.name, + "apps": [{"app": app.app} for app in bench.apps], + "team": jingrow.db.get_value("Team", {"user": "Administrator"}, "name"), + "subscription_plan": plan, + } + ) + + @classmethod + def archive_expired(cls): + expiry = jingrow.db.get_single_value("Jcloud Settings", "staging_expiry") or 24 + sites = jingrow.get_all( + "Site", + {"staging": True, "creation": ("<", jingrow.utils.add_to_date(None, hours=-expiry))}, + ) + for site_name in sites: + site = jingrow.get_pg("Site", site_name) + site.archive() + + @classmethod + def create_if_needed(cls, bench: Bench): + if not bench.staging: + return + try: + cls(bench).insert() + except Exception as e: + log_error("Staging Site creation error", exception=e) + + +def archive_staging_sites(): + StagingSite.archive_expired() + + +def process_new_bench_job_update(job): + bench = jingrow.get_pg("Bench", job.bench) + + updated_status = { + "Pending": "Pending", + "Running": "Installing", + "Success": "Active", + "Failure": "Broken", + "Delivery Failure": "Broken", + }[job.status] + if updated_status == bench.status: + return + + jingrow.db.set_value("Bench", job.bench, "status", updated_status) + + # check if new bench related to a site group deploy + site_group_deploy = jingrow.db.get_value( + "Site Group Deploy", + { + "release_group": bench.group, + "site": ("is", "not set"), + "bench": ("is", "not set"), + }, + ) + if site_group_deploy: + jingrow.get_pg("Site Group Deploy", site_group_deploy).update_site_group_deploy_on_process_job(job) + + if updated_status != "Active": + return + + StagingSite.create_if_needed(bench) + bench = jingrow.get_pg("Bench", job.bench) + jingrow.enqueue( + "jcloud.jcloud.pagetype.bench.bench.archive_obsolete_benches", + enqueue_after_commit=True, + group=bench.group, + server=bench.server, + ) + bench.add_ssh_user() + + dc_status = jingrow.get_value("Deploy Candidate", bench.candidate, "status") + if dc_status != "Success": + return + + bench_updates = jingrow.get_all( + "Bench Update", + {"candidate": bench.candidate}, + pluck="name", + limit=1, + ) + if len(bench_updates) != 0: + bench_update: "BenchUpdate" = jingrow.get_pg( + "Bench Update", + bench_updates[0], + ) + bench_update.update_sites_on_server(job.bench, bench.server) + + +def process_archive_bench_job_update(job): + bench_status = jingrow.get_value("Bench", job.bench, "status") + + updated_status = { + "Pending": "Pending", + "Running": "Pending", + "Success": "Archived", + "Failure": "Broken", + "Delivery Failure": "Active", + }[job.status] + + if job.status == "Failure": + if job.traceback and "Bench has sites" in job.traceback: # custom exception hardcoded in agent + updated_status = "Active" + jingrow.db.set_value("Bench", job.bench, "last_archive_failure", jingrow.utils.now_datetime()) + + if updated_status != 
bench_status: + jingrow.db.set_value("Bench", job.bench, "status", updated_status) + is_ssh_proxy_setup = jingrow.db.get_value("Bench", job.bench, "is_ssh_proxy_setup") + if updated_status == "Archived" and is_ssh_proxy_setup: + jingrow.get_pg("Bench", job.bench).remove_ssh_user() + + +def process_add_ssh_user_job_update(job): + if job.status == "Success": + jingrow.db.set_value("Bench", job.bench, "is_ssh_proxy_setup", True, update_modified=False) + + +def process_remove_ssh_user_job_update(job): + if job.status == "Success": + jingrow.db.set_value("Bench", job.bench, "is_ssh_proxy_setup", False, update_modified=False) + + +def get_archive_jobs(bench: str): + jingrow.db.commit() + return jingrow.get_all( + "Agent Job", + { + "job_type": "Archive Bench", + "bench": bench, + "status": ("in", ("Pending", "Running", "Success")), + "creation": (">", jingrow.utils.add_to_date(None, hours=-6)), + }, + limit=1, + ignore_ifnull=True, + order_by="job_type", + ) + + +def get_ongoing_jobs(bench: str): + jingrow.db.commit() + return jingrow.db.exists("Agent Job", {"bench": bench, "status": ("in", ["Running", "Pending"])}) + + +def get_active_site_updates(bench: str): + jingrow.db.commit() + return jingrow.get_all( + "Site Update", + { + "status": ("in", ["Pending", "Running", "Failure", "Scheduled"]), + }, + or_filters={ + "source_bench": bench, + "destination_bench": bench, + }, + limit=1, + ignore_ifnull=True, + order_by="destination_bench", + ) + + +def get_unfinished_site_migrations(bench: str): + jingrow.db.commit() + return jingrow.db.exists( + "Site Migration", + {"status": ("in", ["Scheduled", "Pending", "Running"]), "destination_bench": bench}, + ) + + +def get_unarchived_sites(bench: str): + jingrow.db.commit() + return jingrow.db.exists("Site", {"bench": bench, "status": ("!=", "Archived")}) + + +def get_scheduled_version_upgrades(bench: dict): + jingrow.db.commit() + sites = jingrow.qb.PageType("Site") + version_upgrades = jingrow.qb.PageType("Version Upgrade") + return ( + jingrow.qb.from_(sites) + .join(version_upgrades) + .on(sites.name == version_upgrades.site) + .select("name") + .where(sites.server == bench.server) + .where(version_upgrades.destination_group == bench.group) + .where(version_upgrades.status.isin(["Scheduled", "Pending", "Running"])) + .run() + ) + + +def try_archive(bench: str): + try: + jingrow.get_pg("Bench", bench).archive() + jingrow.db.commit() + return True + except Exception: + log_error( + "Bench Archival Error", + bench=bench, + reference_pagetype="Bench", + reference_name=bench, + ) + jingrow.db.rollback() + return False + + +def archive_obsolete_benches(group: str | None = None, server: str | None = None): + query_substr = "" + if group and server: + query_substr = f"AND bench.group = '{group}' AND bench.server = '{server}'" + benches = jingrow.db.sql( + f""" + SELECT + bench.name, bench.server, bench.group, bench.candidate, bench.creation, bench.last_archive_failure, bench.resetting_bench, g.public, g.central_bench + FROM + tabBench bench + LEFT JOIN + `tabRelease Group` g + ON + bench.group = g.name + WHERE + bench.status = "Active" {query_substr} + ORDER BY + bench.server + """, + as_dict=True, + ) + benches_by_server = groupby(benches, lambda x: x.server) + for server_benches in benches_by_server: + jingrow.enqueue( + "jcloud.jcloud.pagetype.bench.bench.archive_obsolete_benches_for_server", + queue="long", + job_id=f"archive_obsolete_benches:{server_benches[0]}", + deduplicate=True, + benches=list(server_benches[1]), + ) + + +def 
archive_obsolete_benches_for_server(benches: Iterable[dict]): + for bench in benches: + # Bench is Broken but a reset to a working state is being attempted + if ( + bench.resetting_bench + or ( + bench.last_archive_failure + and bench.last_archive_failure > jingrow.utils.add_to_date(None, hours=-24) + ) + or get_archive_jobs(bench.name) # already being archived + or get_ongoing_jobs(bench.name) + or get_active_site_updates(bench.name) + or get_unfinished_site_migrations(bench.name) + or get_unarchived_sites(bench.name) + ): + continue + + if ( + not (bench.public or bench.central_bench) + and bench.creation < jingrow.utils.add_days(None, -3) + and not get_scheduled_version_upgrades(bench) + ): + try_archive(bench.name) + continue + + # If there isn't a Deploy Candidate Difference with this bench's candidate as source + # That means this is the most recent bench and should be skipped. + + differences = jingrow.db.get_all( + "Deploy Candidate Difference", ["destination"], {"source": bench.candidate} + ) + if not differences: + continue + + # This bench isn't most recent. + # But if none of the recent versions of this bench are yet active then this bench is still useful. + + # If any of the recent versions are active then, this bench can be safely archived. + for difference in differences: + if jingrow.db.exists( + "Bench", {"candidate": difference.destination, "status": "Active"} + ) and try_archive(bench.name): + break + + +def sync_benches(): + benches = jingrow.get_all("Bench", {"status": "Active"}, pluck="name") + for bench in benches: + jingrow.enqueue( + "jcloud.jcloud.pagetype.bench.bench.sync_bench", + queue="sync", + name=bench, + job_id=f"sync_bench:{bench}", + deduplicate=True, + enqueue_after_commit=True, + ) + jingrow.db.commit() + + +def sync_bench(name): + bench = jingrow.get_pg("Bench", name) + try: + active_archival_jobs = jingrow.get_all( + "Agent Job", + { + "job_type": "Archive Bench", + "bench": bench.name, + "status": ("in", ("Pending", "Running", "Success")), + }, + limit=1, + ignore_ifnull=True, + order_by="job_type", + ) + if active_archival_jobs: + return + bench.sync_info() + jingrow.db.commit() + except Exception: + log_error( + "Bench Sync Error", + bench=bench.name, + reference_pagetype="Bench", + reference_name=bench.name, + ) + jingrow.db.rollback() + + +def sync_analytics(): + benches = jingrow.get_all("Bench", {"status": "Active"}, pluck="name") + for bench in benches: + jingrow.enqueue( + "jcloud.jcloud.pagetype.bench.bench.sync_bench_analytics", + queue="sync", + name=bench, + job_id=f"sync_bench_analytics:{bench}", + deduplicate=True, + enqueue_after_commit=True, + ) + jingrow.db.commit() + + +def sync_bench_analytics(name): + bench = jingrow.get_pg("Bench", name) + # Skip syncing analytics for benches that have been archived (after the job was enqueued) + if bench.status != "Active": + return + try: + bench.sync_analytics() + jingrow.db.commit() + except Exception: + log_error( + "Bench Analytics Sync Error", + bench=bench.name, + reference_pagetype="Bench", + reference_name=bench.name, + ) + jingrow.db.rollback() + + +def convert_user_timezone_to_utc(datetime): + timezone = pytz.timezone(get_system_timezone()) + return timezone.localize(datetime).astimezone(pytz.utc) + + +def sort_supervisor_processes(processes: "list[SupervisorProcess]"): + """ + Sorts supervisor processes according to `status_order` and groups them + by process group. 
+ """ + + status_order = [ + "Starting", + "Backoff", + "Running", + "Stopping", + "Stopped", + "Exited", + "Fatal", + "Unknown", + ] + status_grouped = group_supervisor_processes(processes) + sorted_process_groups: "list[list[SupervisorProcess]]" = [] + for status in status_order: + if not (group_grouped := status_grouped.get(status)): + continue + + sorted_process_groups.extend(group_grouped.values()) + del status_grouped[status] + + # Incase not all statuses have been accounted for + for group_grouped in status_grouped.values(): + sorted_process_groups.extend(group_grouped.values()) + + return flatten(sorted_process_groups) + + +def group_supervisor_processes(processes: "list[SupervisorProcess]"): + status_grouped: "OrderedDict[str, OrderedDict[str, list[SupervisorProcess]]]" = OrderedDict() + for p in processes: + status = p.get("status") + group = p.get("group", "NONE") + + if status not in status_grouped: + status_grouped[status] = OrderedDict() + + group_grouped = status_grouped[status] + if group not in group_grouped: + group_grouped[group] = [] + + group_grouped[group].append(p) + return status_grouped + + +get_permission_query_conditions = get_permission_query_conditions_for_pagetype("Bench") diff --git a/jcloud/jcloud/pagetype/bench/bench_dashboard.py b/jcloud/jcloud/pagetype/bench/bench_dashboard.py new file mode 100644 index 0000000..e09a28c --- /dev/null +++ b/jcloud/jcloud/pagetype/bench/bench_dashboard.py @@ -0,0 +1,15 @@ +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +from jingrow import _ + + +def get_data(): + return { + "fieldname": "bench", + "transactions": [ + {"label": _("Related Documents"), "items": ["Site"]}, + {"label": _("Logs"), "items": ["Agent Job"]}, + ], + } diff --git a/jcloud/jcloud/pagetype/bench/test_bench.py b/jcloud/jcloud/pagetype/bench/test_bench.py new file mode 100644 index 0000000..da911a1 --- /dev/null +++ b/jcloud/jcloud/pagetype/bench/test_bench.py @@ -0,0 +1,393 @@ +# Copyright (c) 2019, JINGROW +# See license.txt +from __future__ import annotations + +import unittest +from typing import TYPE_CHECKING +from unittest.mock import MagicMock, Mock, patch + +import jingrow +from jingrow.tests.utils import JingrowTestCase + +from jcloud.jcloud.pagetype.agent_job.agent_job import AgentJob, poll_pending_jobs +from jcloud.jcloud.pagetype.agent_job.test_agent_job import fake_agent_job +from jcloud.jcloud.pagetype.app.test_app import create_test_app +from jcloud.jcloud.pagetype.bench.bench import ( + MAX_BACKGROUND_WORKERS, + MAX_GUNICORN_WORKERS, + Bench, + StagingSite, + archive_obsolete_benches, + archive_obsolete_benches_for_server, +) +from jcloud.jcloud.pagetype.deploy_candidate_difference.test_deploy_candidate_difference import ( + create_test_deploy_candidate_differences, +) +from jcloud.jcloud.pagetype.release_group.release_group import ReleaseGroup +from jcloud.jcloud.pagetype.release_group.test_release_group import ( + create_test_release_group, +) +from jcloud.jcloud.pagetype.server.server import Server, scale_workers +from jcloud.jcloud.pagetype.site.test_site import create_test_bench, create_test_site +from jcloud.jcloud.pagetype.site_plan.test_site_plan import create_test_plan +from jcloud.jcloud.pagetype.subscription.test_subscription import create_test_subscription +from jcloud.jcloud.pagetype.version_upgrade.test_version_upgrade import ( + create_test_version_upgrade, +) +from jcloud.utils import get_current_team +from jcloud.utils.test import foreground_enqueue, foreground_enqueue_pg + +if 
TYPE_CHECKING: + from jcloud.jcloud.pagetype.team.team import Team + + +@patch.object(AgentJob, "enqueue_http_request", new=Mock()) +class TestStagingSite(unittest.TestCase): + def tearDown(self): + jingrow.db.rollback() + + def test_create_staging_site(self): + bench = create_test_bench() # also creates jcloud settings + jingrow.db.set_single_value("Jcloud Settings", "staging_plan", create_test_plan("Site").name) + count_before = jingrow.db.count("Site") + + site = StagingSite(bench).insert() + + self.assertTrue(site.staging) + count_after = jingrow.db.count("Site") + self.assertEqual(count_after - count_before, 1) + + +@patch.object(AgentJob, "after_insert", new=Mock()) +@patch("jcloud.jcloud.pagetype.server.server.jingrow.enqueue_pg", new=foreground_enqueue_pg) +@patch("jcloud.jcloud.pagetype.server.server.jingrow.db.commit", new=MagicMock) +class TestBench(JingrowTestCase): + def tearDown(self): + jingrow.db.rollback() + + def _create_bench_with_n_sites_with_cpu_time(self, n: int, x: float, bench: str | None = None) -> Bench: + """Creates new bench if None given.""" + plan = create_test_plan("Site", cpu_time=x) + + if not bench: + site = create_test_site() + create_test_subscription(site.name, plan.name, site.team) # map site with plan + bench = site.bench + n -= 1 + for _i in range(n): + site = create_test_site(bench=bench) + create_test_subscription(site.name, plan.name, site.team) + return Bench("Bench", bench) + + def test_workload_is_calculated_correctly(self): + bench = self._create_bench_with_n_sites_with_cpu_time(3, 5) + self.assertEqual(bench.workload, 15) + bench = self._create_bench_with_n_sites_with_cpu_time(3, 10, bench.name) + self.assertEqual(bench.workload, 45) + + def test_workload_gives_reasonable_numbers(self): + bench1 = self._create_bench_with_n_sites_with_cpu_time(3, 5) + bench2 = self._create_bench_with_n_sites_with_cpu_time(3, 10) + bench3 = self._create_bench_with_n_sites_with_cpu_time(6, 5) + bench4 = self._create_bench_with_n_sites_with_cpu_time(6, 10) + self.assertGreater(bench2.workload, bench1.workload) + self.assertGreater(bench4.workload, bench3.workload) + self.assertGreater(bench4.workload, bench2.workload) + + def test_workers_get_allocated(self): + bench = self._create_bench_with_n_sites_with_cpu_time(3, 5) + workers_before = (bench.background_workers, bench.gunicorn_workers) # 1, 2 + scale_workers() + bench.reload() + workers_after = (bench.background_workers, bench.gunicorn_workers) + self.assertGreater(workers_after[1], workers_before[1]) + self.assertGreater(workers_after[0], workers_before[0]) + + def test_auto_scale_uses_release_groups_max_workers_when_set(self): + bench = self._create_bench_with_n_sites_with_cpu_time(3, 5) + self.assertEqual(bench.gunicorn_workers, 2) + self.assertEqual(bench.background_workers, 1) + group = jingrow.get_pg("Release Group", bench.group) + scale_workers() + bench.reload() + self.assertEqual(bench.gunicorn_workers, MAX_GUNICORN_WORKERS) + self.assertEqual(bench.background_workers, MAX_BACKGROUND_WORKERS) + group.db_set("max_gunicorn_workers", 8) + group.db_set("max_background_workers", 4) + scale_workers() + bench.reload() + self.assertEqual(bench.gunicorn_workers, 8) + self.assertEqual(bench.background_workers, 4) + + def test_auto_scale_uses_release_groups_max_workers_respecting_ram_available_on_server( + self, + ): + bench = self._create_bench_with_n_sites_with_cpu_time(3, 5) + group = jingrow.get_pg("Release Group", bench.group) + group.db_set("max_gunicorn_workers", 48) + 
group.db_set("max_background_workers", 8) + scale_workers() + bench.reload() + self.assertEqual(bench.gunicorn_workers, 48) + bench2 = create_test_bench(group=jingrow.get_pg("Release Group", bench.group), server=bench.server) + self._create_bench_with_n_sites_with_cpu_time(3, 5, bench2.name) + scale_workers() + bench.reload() + bench2.reload() + # assuming max gunicorn workers for default server (16gb RAM) is 52 + self.assertLess(bench.gunicorn_workers, 48) + self.assertLess(bench2.gunicorn_workers, 48) + + def test_auto_scale_uses_release_groups_min_workers_when_set(self): + bench = self._create_bench_with_n_sites_with_cpu_time(3, 5) + self.assertEqual(bench.gunicorn_workers, 2) + self.assertEqual(bench.background_workers, 1) + jingrow.db.set_value("Server", bench.server, "ram", 1600) + scale_workers() + bench.reload() + self.assertEqual(bench.gunicorn_workers, 5) # for for such low ram + self.assertEqual(bench.background_workers, 2) + group = jingrow.get_pg("Release Group", bench.group) + group.db_set("min_gunicorn_workers", 8) + group.db_set("min_background_workers", 4) + scale_workers() + bench.reload() + self.assertEqual(bench.gunicorn_workers, 8) + self.assertEqual(bench.background_workers, 4) + jingrow.db.set_value("Server", bench.server, "ram", 16000) + scale_workers() + bench.reload() + self.assertGreater(bench.gunicorn_workers, 8) + self.assertGreater(bench.background_workers, 4) + + def test_auto_scale_uses_release_groups_min_workers_respecting_ram_available_on_server( + self, + ): + bench = self._create_bench_with_n_sites_with_cpu_time(3, 5) + jingrow.db.set_value("Server", bench.server, "ram", 1600) + scale_workers() + bench.reload() + self.assertEqual(bench.gunicorn_workers, 5) + self.assertEqual(bench.background_workers, 2) + group = jingrow.get_pg("Release Group", bench.group) + group.db_set("min_gunicorn_workers", 12) + group.db_set("min_background_workers", 6) + scale_workers() + bench.reload() + self.assertEqual(bench.gunicorn_workers, 12) + self.assertEqual(bench.background_workers, 6) + bench2 = create_test_bench(group=ReleaseGroup("Release Group", bench.group), server=bench.server) + self._create_bench_with_n_sites_with_cpu_time(3, 5, bench2.name) + scale_workers() + bench.reload() + bench2.reload() + # assuming max gunicorn workers for default server (16gb RAM) is 52 + self.assertGreaterEqual(bench.gunicorn_workers, 12) + self.assertGreaterEqual(bench2.gunicorn_workers, 12) + + def test_auto_scale_uses_release_groups_max_and_min_workers_when_set(self): + bench = self._create_bench_with_n_sites_with_cpu_time(3, 5) + self.assertEqual(bench.gunicorn_workers, 2) + self.assertEqual(bench.background_workers, 1) + group = jingrow.get_pg("Release Group", bench.group) + group.db_set("max_gunicorn_workers", 10) + group.db_set("max_background_workers", 5) + scale_workers() + bench.reload() + self.assertEqual(bench.gunicorn_workers, 10) + self.assertEqual(bench.background_workers, 5) + jingrow.db.set_value("Server", bench.server, "ram", 1600) + scale_workers() + bench.reload() + self.assertEqual(bench.gunicorn_workers, 5) # autoscaled for for such low ram + self.assertEqual(bench.background_workers, 2) + group.db_set("min_gunicorn_workers", 8) + group.db_set("min_background_workers", 4) + scale_workers() + bench.reload() + self.assertEqual(bench.gunicorn_workers, 8) + self.assertEqual(bench.background_workers, 4) + jingrow.db.set_value("Server", bench.server, "ram", 16000) + scale_workers() + bench.reload() + self.assertEqual(bench.gunicorn_workers, 10) + 
self.assertEqual(bench.background_workers, 5) + + def test_set_bench_memory_limits_on_server_adds_memory_limit_on_bench_on_auto_scale( + self, + ): + bench1 = self._create_bench_with_n_sites_with_cpu_time(3, 5) + bench2 = self._create_bench_with_n_sites_with_cpu_time(3, 5) + + jingrow.db.set_value("Server", bench1.server, "set_bench_memory_limits", False) + jingrow.db.set_value("Server", bench2.server, "set_bench_memory_limits", False) + + scale_workers() + + bench1.reload() + bench2.reload() + self.assertEqual(bench1.memory_high, 0) + self.assertEqual(bench1.memory_max, 0) + self.assertEqual(bench2.memory_high, 0) + self.assertEqual(bench2.memory_max, 0) + jingrow.db.set_value("Server", bench1.server, "set_bench_memory_limits", True) + server = Server("Server", bench1.server) + + scale_workers() + + bench1.reload() + bench2.reload() + self.assertTrue(bench1.memory_high) + self.assertTrue(bench1.memory_max) + self.assertTrue(bench1.memory_swap) + self.assertEqual( + bench1.memory_high, + 512 + + bench1.gunicorn_workers * server.GUNICORN_MEMORY + + bench1.background_workers * server.BACKGROUND_JOB_MEMORY, + ) + self.assertEqual( + bench1.memory_max, + 512 + + bench1.gunicorn_workers * server.GUNICORN_MEMORY + + bench1.background_workers * server.BACKGROUND_JOB_MEMORY + + server.GUNICORN_MEMORY + + server.BACKGROUND_JOB_MEMORY, + ) + self.assertEqual(bench1.memory_swap, bench1.memory_max * 2) + + self.assertFalse(bench2.memory_high) + self.assertFalse(bench2.memory_max) + self.assertFalse(bench2.memory_swap) + + def test_memory_limits_set_to_server_ram_when_skip_memory_limits_is_set(self): + bench1 = self._create_bench_with_n_sites_with_cpu_time(3, 5) + bench2 = self._create_bench_with_n_sites_with_cpu_time(3, 5) + + bench1.reload() + bench2.reload() + self.assertEqual(bench1.memory_high, 0) + self.assertEqual(bench1.memory_max, 0) + self.assertEqual(bench2.memory_high, 0) + self.assertEqual(bench2.memory_max, 0) + jingrow.db.set_value("Server", bench1.server, "set_bench_memory_limits", True) + jingrow.db.set_value("Bench", bench1.name, "skip_memory_limits", True) + + # Server.set_bench_memory_limits now defaults to True + # Unset bench2.server set_bench_memory_limits to test the unset case + jingrow.db.set_value("Server", bench2.server, "set_bench_memory_limits", False) + server = Server("Server", bench1.server) + + scale_workers() + + bench1.reload() + bench2.reload() + self.assertTrue(bench1.memory_high) + self.assertEqual(bench1.memory_high, server.ram - 1024) + self.assertEqual(bench1.memory_max, server.ram) + self.assertEqual(bench1.memory_swap, server.ram * 2) + + self.assertFalse(bench2.memory_high) + self.assertFalse(bench2.memory_max) + self.assertFalse(bench2.memory_swap) + + @patch("jcloud.jcloud.pagetype.team.team.jingrow.enqueue_pg", new=foreground_enqueue_pg) + def test_workers_reallocated_on_site_unsuspend(self): + bench1 = self._create_bench_with_n_sites_with_cpu_time(3, 5) # current team + bench2 = self._create_bench_with_n_sites_with_cpu_time(3, 5) + + jingrow.db.set_value("Site", {"name": ("is", "set")}, "status", "Suspended") + jingrow.db.set_value("Server", bench1.server, "ram", 32000) + + scale_workers() + + self.assertEqual(bench1.workload, 0) + self.assertEqual(bench2.workload, 0) + self.assertEqual(bench1.gunicorn_workers, 2) + self.assertEqual(bench2.gunicorn_workers, 2) + + team: Team = get_current_team(get_pg=True) + team.unsuspend_sites() + + del bench1.workload # cached properties + del bench2.workload + bench1.reload() + bench2.reload() + + 
self.assertEqual(bench1.workload, 15) + self.assertEqual(bench2.workload, 15) + self.assertGreater(bench1.gunicorn_workers, 2) + self.assertGreater(bench2.gunicorn_workers, 2) + + +@patch("jcloud.jcloud.pagetype.bench.bench.jingrow.db.commit", new=MagicMock) +class TestArchiveObsoleteBenches(unittest.TestCase): + def tearDown(self): + jingrow.db.rollback() + + def test_private_obsolete_benches_archived(self): + priv_group = create_test_release_group(apps=[create_test_app()], public=False) + + create_test_bench(group=priv_group, creation=jingrow.utils.add_days(None, -10)) + benches_before = jingrow.db.count("Bench", {"status": "Active"}) + with fake_agent_job("Archive Bench"): + archive_obsolete_benches() + poll_pending_jobs() + benches_after = jingrow.db.count("Bench", {"status": "Active"}) + self.assertEqual( + benches_before - benches_after, + 1, + ) + + def test_old_public_benches_without_sites_archived(self): + pub_group = create_test_release_group(apps=[create_test_app()], public=True) + + bench1 = create_test_bench(group=pub_group, creation=jingrow.utils.add_days(None, -10)) + benches_before = jingrow.db.count("Bench", {"status": "Active"}) + with fake_agent_job("Archive Bench"): + archive_obsolete_benches() + poll_pending_jobs() + benches_after = jingrow.db.count("Bench", {"status": "Active"}) + self.assertEqual(benches_after, benches_before) # nothing got archived + bench2 = create_test_bench(group=pub_group, server=bench1.server) + create_test_deploy_candidate_differences(bench2.candidate) + with fake_agent_job("Archive Bench"): + archive_obsolete_benches() + poll_pending_jobs() + benches_after = jingrow.db.count("Bench", {"status": "Active"}) + self.assertEqual(benches_after, benches_before) # older bench got archived + + def test_private_benches_where_version_upgrade_scheduled_is_not_archived(self): + priv_group = create_test_release_group(apps=[create_test_app()], public=False) + bench = create_test_bench(group=priv_group, creation=jingrow.utils.add_days(None, -10)) + + bench2 = create_test_bench(server=bench.server) # same server, different group + site = create_test_site(bench=bench2.name) + + priv_group.add_server(bench.server, deploy=False) # version upgrade validation + create_test_version_upgrade(site.name, priv_group.name) + benches_before = jingrow.db.count("Bench", {"status": "Active"}) + with fake_agent_job("Archive Bench"): + archive_obsolete_benches() + poll_pending_jobs() + benches_after = jingrow.db.count("Bench", {"status": "Active"}) + self.assertEqual(benches_after, benches_before) + + @patch( + "jcloud.jcloud.pagetype.bench.bench.archive_obsolete_benches_for_server", + wraps=archive_obsolete_benches_for_server, + ) + @patch("jcloud.jcloud.pagetype.bench.bench.jingrow.enqueue", new=foreground_enqueue) + def test_benches_archived_for_multiple_servers_via_multiple_jobs(self, mock_archive_by_server: MagicMock): + priv_group = create_test_release_group(apps=[create_test_app()], public=False) + create_test_bench(group=priv_group, creation=jingrow.utils.add_days(None, -10)) + priv_group2 = create_test_release_group(apps=[create_test_app()], public=False) + create_test_bench(group=priv_group2, creation=jingrow.utils.add_days(None, -10)) + + benches_before = jingrow.db.count("Bench", {"status": "Active"}) + with fake_agent_job("Archive Bench"): + archive_obsolete_benches() + poll_pending_jobs() + benches_after = jingrow.db.count("Bench", {"status": "Active"}) + self.assertEqual(benches_before - benches_after, 2) + self.assertEqual(mock_archive_by_server.call_count, 2) 
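# A minimal, hypothetical usage sketch (the sample process dicts and the helper
# function name below are made up; only `sort_supervisor_processes` from
# bench.py above and the "status"/"group" keys it reads come from the commit).
# It illustrates the ordering contract described in that function's docstring:
# processes are bucketed by status, kept together by process group, and emitted
# in `status_order`.
from jcloud.jcloud.pagetype.bench.bench import sort_supervisor_processes


def _example_supervisor_process_ordering():
    processes = [
        {"name": "web-1", "group": "web", "status": "Stopped"},
        {"name": "worker-1", "group": "workers", "status": "Running"},
        {"name": "web-2", "group": "web", "status": "Running"},
        {"name": "worker-2", "group": "workers", "status": "Backoff"},
    ]
    ordered = sort_supervisor_processes(processes)
    # Backoff sorts before Running, and Running before Stopped; within the
    # "Running" bucket the "workers" group comes first because worker-1 was
    # encountered before web-2.
    assert [p["name"] for p in ordered] == ["worker-2", "worker-1", "web-2", "web-1"]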
diff --git a/jcloud/jcloud/pagetype/bench_app/__init__.py b/jcloud/jcloud/pagetype/bench_app/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/bench_app/bench_app.js b/jcloud/jcloud/pagetype/bench_app/bench_app.js new file mode 100644 index 0000000..a76c455 --- /dev/null +++ b/jcloud/jcloud/pagetype/bench_app/bench_app.js @@ -0,0 +1,7 @@ +// Copyright (c) 2019, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Bench App', { + // refresh: function(frm) { + // } +}); diff --git a/jcloud/jcloud/pagetype/bench_app/bench_app.json b/jcloud/jcloud/pagetype/bench_app/bench_app.json new file mode 100644 index 0000000..9e39709 --- /dev/null +++ b/jcloud/jcloud/pagetype/bench_app/bench_app.json @@ -0,0 +1,66 @@ +{ + "actions": [], + "creation": "2019-12-09 15:15:36.057080", + "pagetype": "PageType", + "engine": "InnoDB", + "field_order": [ + "app", + "hash", + "column_break_3", + "source", + "release" + ], + "fields": [ + { + "fieldname": "hash", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Hash", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "column_break_3", + "fieldtype": "Column Break" + }, + { + "fieldname": "app", + "fieldtype": "Link", + "in_list_view": 1, + "label": "App", + "options": "App", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "release", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Release", + "options": "App Release", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "source", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Source", + "options": "App Source", + "read_only": 1, + "reqd": 1 + } + ], + "istable": 1, + "links": [], + "modified": "2020-12-22 21:13:18.733825", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Bench App", + "owner": "Administrator", + "permissions": [], + "quick_entry": 1, + "sort_field": "modified", + "sort_order": "DESC", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/bench_app/bench_app.py b/jcloud/jcloud/pagetype/bench_app/bench_app.py new file mode 100644 index 0000000..2326fb1 --- /dev/null +++ b/jcloud/jcloud/pagetype/bench_app/bench_app.py @@ -0,0 +1,80 @@ +# Copyright (c) 2019, JINGROW +# For license information, please see license.txt + + +import jingrow +from jingrow.model.document import Document + + +class BenchApp(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + app: DF.Link + hash: DF.Data + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + release: DF.Link + source: DF.Link + # end: auto-generated types + + @staticmethod + def get_list_query(query, filters=None, **list_args): + if not filters or not (parent := filters.get("parent")): + return query + + AppSource = jingrow.qb.PageType("App Source") + AppRelease = jingrow.qb.PageType("App Release") + Bench = jingrow.qb.PageType("Bench") + BenchApp = jingrow.qb.PageType("Bench App") + + q = ( + jingrow.qb.from_(BenchApp) + .join(Bench) + .on(Bench.name == BenchApp.parent) + .join(AppRelease) + .on(BenchApp.release == AppRelease.name) + .join(AppSource) + .on(BenchApp.source == AppSource.name) + .select( + BenchApp.app, + BenchApp.hash, + BenchApp.release, + AppRelease.message.as_("commit_message"), + AppSource.app_title.as_("title"), + AppSource.branch, + AppSource.repository_url, + ) + .where(BenchApp.parent == parent) + ) + + if owner := filters.get("repository_owner"): + q = q.where(AppSource.repository_owner == owner) + + if branch := filters.get("branch"): + q = q.where(AppSource.branch == branch) + + apps = q.run(as_dict=True) + + # Apply is_app_patched flag to installed_apps + app_names = [a["app"] for a in apps] + patched_apps = jingrow.get_all( + "App Patch", + fields=["app"], + filters={ + "bench": parent, + "app": ["in", app_names], + }, + pluck="app", + ) + + for app in apps: + if app["app"] in patched_apps: + app["is_app_patched"] = True + return apps diff --git a/jcloud/jcloud/pagetype/bench_app/test_bench_app.py b/jcloud/jcloud/pagetype/bench_app/test_bench_app.py new file mode 100644 index 0000000..ee5b291 --- /dev/null +++ b/jcloud/jcloud/pagetype/bench_app/test_bench_app.py @@ -0,0 +1,11 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2019, JINGROW +# See license.txt + + +# import jingrow +import unittest + + +class TestBenchApp(unittest.TestCase): + pass diff --git a/jcloud/jcloud/pagetype/bench_dependency/__init__.py b/jcloud/jcloud/pagetype/bench_dependency/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/bench_dependency/bench_dependency.js b/jcloud/jcloud/pagetype/bench_dependency/bench_dependency.js new file mode 100644 index 0000000..a82b828 --- /dev/null +++ b/jcloud/jcloud/pagetype/bench_dependency/bench_dependency.js @@ -0,0 +1,8 @@ +// Copyright (c) 2023, JINGROW +// For license information, please see license.txt + +// jingrow.ui.form.on("Bench Dependency", { +// refresh(frm) { + +// }, +// }); diff --git a/jcloud/jcloud/pagetype/bench_dependency/bench_dependency.json b/jcloud/jcloud/pagetype/bench_dependency/bench_dependency.json new file mode 100644 index 0000000..2fe60e6 --- /dev/null +++ b/jcloud/jcloud/pagetype/bench_dependency/bench_dependency.json @@ -0,0 +1,61 @@ +{ + "actions": [], + "allow_rename": 1, + "autoname": "prompt", + "creation": "2023-09-26 12:24:30.572783", + "default_view": "List", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "title", + "internal", + "supported_versions" + ], + "fields": [ + { + "fieldname": "supported_versions", + "fieldtype": "Table", + "label": "Supported Versions", + "options": "Bench Dependency Version" + }, + { + "fieldname": "title", + "fieldtype": "Data", + "label": "Title", + "unique": 1 + }, + { + "default": "0", + "description": "Internal dependencies are not editable by the user from the dashboard.", + "fieldname": "internal", + 
"fieldtype": "Check", + "label": "Internal" + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2024-07-18 16:19:44.928250", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Bench Dependency", + "naming_rule": "Set by user", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/bench_dependency/bench_dependency.py b/jcloud/jcloud/pagetype/bench_dependency/bench_dependency.py new file mode 100644 index 0000000..9bafa4a --- /dev/null +++ b/jcloud/jcloud/pagetype/bench_dependency/bench_dependency.py @@ -0,0 +1,26 @@ +# Copyright (c) 2023, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class BenchDependency(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + from jcloud.jcloud.pagetype.bench_dependency_version.bench_dependency_version import ( + BenchDependencyVersion, + ) + + internal: DF.Check + supported_versions: DF.Table[BenchDependencyVersion] + title: DF.Data | None + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/bench_dependency/test_bench_dependency.py b/jcloud/jcloud/pagetype/bench_dependency/test_bench_dependency.py new file mode 100644 index 0000000..55347a6 --- /dev/null +++ b/jcloud/jcloud/pagetype/bench_dependency/test_bench_dependency.py @@ -0,0 +1,9 @@ +# Copyright (c) 2023, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests.utils import JingrowTestCase + + +class TestBenchDependency(JingrowTestCase): + pass diff --git a/jcloud/jcloud/pagetype/bench_dependency_version/__init__.py b/jcloud/jcloud/pagetype/bench_dependency_version/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/bench_dependency_version/bench_dependency_version.json b/jcloud/jcloud/pagetype/bench_dependency_version/bench_dependency_version.json new file mode 100644 index 0000000..a3549fe --- /dev/null +++ b/jcloud/jcloud/pagetype/bench_dependency_version/bench_dependency_version.json @@ -0,0 +1,39 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2023-09-26 12:22:19.339438", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "version", + "supported_jingrow_version" + ], + "fields": [ + { + "fieldname": "supported_jingrow_version", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Supported Jingrow Version", + "options": "Jingrow Version" + }, + { + "fieldname": "version", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Version" + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2023-10-06 15:44:52.752799", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Bench Dependency Version", + "owner": "Administrator", + "permissions": [], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/bench_dependency_version/bench_dependency_version.py b/jcloud/jcloud/pagetype/bench_dependency_version/bench_dependency_version.py new file mode 100644 index 0000000..60116ac --- /dev/null 
+++ b/jcloud/jcloud/pagetype/bench_dependency_version/bench_dependency_version.py @@ -0,0 +1,24 @@ +# Copyright (c) 2023, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class BenchDependencyVersion(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + supported_jingrow_version: DF.Link | None + version: DF.Data | None + # end: auto-generated types + + dashboard_fields = ["version", "supported_jingrow_version"] diff --git a/jcloud/jcloud/pagetype/bench_get_app_cache/__init__.py b/jcloud/jcloud/pagetype/bench_get_app_cache/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/bench_get_app_cache/bench_get_app_cache.js b/jcloud/jcloud/pagetype/bench_get_app_cache/bench_get_app_cache.js new file mode 100644 index 0000000..cd64dcc --- /dev/null +++ b/jcloud/jcloud/pagetype/bench_get_app_cache/bench_get_app_cache.js @@ -0,0 +1,8 @@ +// Copyright (c) 2024, JINGROW +// For license information, please see license.txt + +// jingrow.ui.form.on("Bench Get App Cache", { +// refresh(frm) { + +// }, +// }); diff --git a/jcloud/jcloud/pagetype/bench_get_app_cache/bench_get_app_cache.json b/jcloud/jcloud/pagetype/bench_get_app_cache/bench_get_app_cache.json new file mode 100644 index 0000000..df8e65d --- /dev/null +++ b/jcloud/jcloud/pagetype/bench_get_app_cache/bench_get_app_cache.json @@ -0,0 +1,102 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2024-02-01 16:42:30.143539", + "default_view": "List", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "tab_2_tab", + "file_name", + "size", + "accessed", + "column_break_uoai", + "app", + "is_compressed", + "section_break_fbua", + "raw" + ], + "fields": [ + { + "fieldname": "tab_2_tab", + "fieldtype": "Tab Break", + "label": "Tab 2" + }, + { + "fieldname": "file_name", + "fieldtype": "Data", + "label": "File Name", + "read_only": 1 + }, + { + "fieldname": "size", + "fieldtype": "Float", + "in_list_view": 1, + "label": "Size (MB)", + "read_only": 1 + }, + { + "fieldname": "accessed", + "fieldtype": "Datetime", + "in_list_view": 1, + "label": "Accessed", + "read_only": 1 + }, + { + "fieldname": "column_break_uoai", + "fieldtype": "Column Break" + }, + { + "fieldname": "app", + "fieldtype": "Data", + "in_list_view": 1, + "label": "App", + "read_only": 1 + }, + { + "default": "0", + "fieldname": "is_compressed", + "fieldtype": "Check", + "in_list_view": 1, + "label": "Is Compressed", + "read_only": 1 + }, + { + "fieldname": "section_break_fbua", + "fieldtype": "Section Break" + }, + { + "fieldname": "raw", + "fieldtype": "Code", + "label": "Raw", + "read_only": 1 + } + ], + "in_create": 1, + "index_web_pages_for_search": 1, + "is_virtual": 1, + "links": [], + "modified": "2024-02-02 18:54:26.533734", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Bench Get App Cache", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "accessed", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/bench_get_app_cache/bench_get_app_cache.py 
b/jcloud/jcloud/pagetype/bench_get_app_cache/bench_get_app_cache.py new file mode 100644 index 0000000..f28c40f --- /dev/null +++ b/jcloud/jcloud/pagetype/bench_get_app_cache/bench_get_app_cache.py @@ -0,0 +1,135 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt + +from datetime import datetime + +import jingrow +from jingrow.model.document import Document + +from jcloud.jcloud.pagetype.deploy_candidate.cache_utils import run_command_in_docker_cache +from jcloud.utils import ttl_cache + + +class BenchGetAppCache(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + accessed: DF.Datetime | None + app: DF.Data | None + file_name: DF.Data | None + is_compressed: DF.Check + raw: DF.Code | None + size: DF.Float + # end: auto-generated types + + @staticmethod + def get_data(): + data = get_app_cache_items() + return data + + def load_from_db(self): + db = {v.name: v for v in BenchGetAppCache.get_data()} + return super(Document, self).__init__(db[self.name]) + + def delete(self): + run_command_in_docker_cache(f"rm bench/apps/{self.file_name}") + get_app_cache_items.cache.invalidate() + + @staticmethod + def clear_app_cache() -> None: + run_command_in_docker_cache("rm bench/apps/*.tar bench/apps/*.tgz") + get_app_cache_items.cache.invalidate() + + @staticmethod + def clear_app_cache_by_app(app: str) -> None: + run_command_in_docker_cache(f"rm bench/apps/{app}-*.tar bench/apps/{app}-*.tgz") + get_app_cache_items.cache.invalidate() + + @staticmethod + def get_list(_): + return BenchGetAppCache.get_data() + + @staticmethod + def get_count(_): + data = BenchGetAppCache.get_data() + return len(data) + + """ + The methods below are not applicable hence no-op. + """ + + def db_update(self): + pass + + def db_insert(self, *args, **kwargs): + pass + + @staticmethod + def get_stats(args): + return {} + + +""" +ttl_cache used cause checking app cache involves +building an image to execute `ls` during build time +this takes a few of seconds (mostly a minute). +""" + + +@ttl_cache(ttl=20) +def get_app_cache_items(): + result = run_command_in_docker_cache("ls -luAt --time-style=full-iso bench/apps") + if result["returncode"]: + return [] + + output = result["output"] + values = [] + + """ + # Example Output : + total 587164 + -rw-r--r-- 1 1000 1000 251607040 2024-02-01 10:03:33.972950013 +0000 builder-13a6ece9dd.tar + -rw-r--r-- 1 1000 1000 321587200 2024-02-01 10:01:04.109586013 +0000 hrms-84aced29ec.tar + -rw-r--r-- 1 1000 1000 28057600 2024-02-01 10:00:11.669851002 +0000 wiki-8b369c63dd.tar + """ + + for line in output.splitlines(): + pg = get_dict_from_ls_line(line) + if pg is not None: + values.append(pg) + return values + + +def get_dict_from_ls_line(line: str): + parts = [p for p in line.split(" ") if p] + if len(parts) != 9: + return None + + size = 0 + accessed = datetime.fromtimestamp(0) + datestring = " ".join(parts[5:8]) + try: + size = int(parts[4]) / 1_000_000 + accessed = datetime.fromisoformat(datestring) + except ValueError: + """ + Invalid values passed above ∵ format not as expected. Use field `raw` + to debug and fix. Erroring out will prevent clearing of cache. 
+ """ + pass + + file_name = parts[-1] + return jingrow._dict( + name=file_name.split(".", 1)[0], + file_name=file_name, + app=file_name.split("-")[0], + is_compressed=file_name.endswith(".tgz"), + size=size, + accessed=accessed, + raw=line, + ) diff --git a/jcloud/jcloud/pagetype/bench_get_app_cache/test_bench_get_app_cache.py b/jcloud/jcloud/pagetype/bench_get_app_cache/test_bench_get_app_cache.py new file mode 100644 index 0000000..5b97ad0 --- /dev/null +++ b/jcloud/jcloud/pagetype/bench_get_app_cache/test_bench_get_app_cache.py @@ -0,0 +1,9 @@ +# Copyright (c) 2024, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests.utils import JingrowTestCase + + +class TestBenchGetAppCache(JingrowTestCase): + pass diff --git a/jcloud/jcloud/pagetype/bench_mount/__init__.py b/jcloud/jcloud/pagetype/bench_mount/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/bench_mount/bench_mount.json b/jcloud/jcloud/pagetype/bench_mount/bench_mount.json new file mode 100644 index 0000000..6411acc --- /dev/null +++ b/jcloud/jcloud/pagetype/bench_mount/bench_mount.json @@ -0,0 +1,56 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2023-12-13 15:05:23.962806", + "pagetype": "PageType", + "engine": "InnoDB", + "field_order": [ + "is_absolute_path", + "section_break_nuqc", + "source", + "column_break_qdzk", + "destination" + ], + "fields": [ + { + "fieldname": "source", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Source (Host)", + "reqd": 1 + }, + { + "fieldname": "column_break_qdzk", + "fieldtype": "Column Break" + }, + { + "fieldname": "destination", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Destination (Container)", + "reqd": 1 + }, + { + "default": "0", + "fieldname": "is_absolute_path", + "fieldtype": "Check", + "label": "Is Absolute Path" + }, + { + "fieldname": "section_break_nuqc", + "fieldtype": "Section Break" + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2023-12-14 11:01:53.689356", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Bench Mount", + "owner": "Administrator", + "permissions": [], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/bench_mount/bench_mount.py b/jcloud/jcloud/pagetype/bench_mount/bench_mount.py new file mode 100644 index 0000000..5c93220 --- /dev/null +++ b/jcloud/jcloud/pagetype/bench_mount/bench_mount.py @@ -0,0 +1,25 @@ +# Copyright (c) 2023, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class BenchMount(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + destination: DF.Data + is_absolute_path: DF.Check + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + source: DF.Data + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/bench_shell/__init__.py b/jcloud/jcloud/pagetype/bench_shell/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/bench_shell/bench_shell.js b/jcloud/jcloud/pagetype/bench_shell/bench_shell.js new file mode 100644 index 0000000..986b6d2 --- /dev/null +++ b/jcloud/jcloud/pagetype/bench_shell/bench_shell.js @@ -0,0 +1,40 @@ +// Copyright (c) 2024, JINGROW +// For license information, please see license.txt + +// Most of the code here imitates the code from +// jingrow/desk/pagetype/system_console/system_console.js +jingrow.ui.form.on('Bench Shell', { + onload(frm) { + jingrow.ui.keys.add_shortcut({ + shortcut: 'shift+enter', + action: () => frm.page.btn_primary.trigger('click'), + page: frm.page, + description: __('Run Bench Shell command'), + ignore_inputs: true, + }); + }, + + refresh(frm) { + frm.disable_save(); + frm.page.set_primary_action(__('Run'), async ($btn) => { + $btn.text(__('Running Command...')); + return frm.execute_action('Run').finally(() => $btn.text(__('Run'))); + }); + + const bench = localStorage.getItem('bench_shell_bench'); + const command = localStorage.getItem('bench_shell_command'); + + if (!bench || !command) { + return; + } + + frm.set_value('bench', bench); + frm.set_value('command', command); + + ['output', 'traceback', 'directory'].forEach((f) => frm.set_value(f, null)); + ['returncode', 'duration'].forEach((f) => frm.set_value(f, 0)); + + localStorage.removeItem('bench_shell_bench'); + localStorage.removeItem('bench_shell_command'); + }, +}); diff --git a/jcloud/jcloud/pagetype/bench_shell/bench_shell.json b/jcloud/jcloud/pagetype/bench_shell/bench_shell.json new file mode 100644 index 0000000..750bbf1 --- /dev/null +++ b/jcloud/jcloud/pagetype/bench_shell/bench_shell.json @@ -0,0 +1,151 @@ +{ + "actions": [ + { + "action": "/app/bench-shell-log", + "action_type": "Route", + "label": "Bench Shell Logs" + }, + { + "action": "jcloud.jcloud.pagetype.bench_shell.bench_shell.run_command", + "action_type": "Server Action", + "hidden": 1, + "label": "Run" + } + ], + "creation": "2024-03-21 10:17:17.616121", + "pagetype": "PageType", + "engine": "InnoDB", + "field_order": [ + "section_break_grqb", + "bench", + "column_break_lqmy", + "save_output", + "section_break_vhzg", + "command", + "subdir", + "output_section", + "output", + "traceback", + "returncode", + "meta_section", + "duration", + "column_break_gvty", + "directory" + ], + "fields": [ + { + "fieldname": "bench", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Bench", + "link_filters": "[\n [\n {\n \"fieldname\": \"bench\",\n \"field_option\": \"Bench\"\n },\n \"status\",\n \"=\",\n \"Active\"\n ]\n]", + "options": "Bench" + }, + { + "description": "Note: this is meant to be used for short running, read-only commands like ls -lAh, or supervisorctl status, etc", + "fieldname": "command", + "fieldtype": "Code", + "label": "Command" + }, + { + "default": "0", + "description": "If checked output will be saved in the Bench Shell Log.\n
\nDo not save the output unless you need it for future reference.", + "fieldname": "save_output", + "fieldtype": "Check", + "label": "Save Output" + }, + { + "fieldname": "section_break_vhzg", + "fieldtype": "Section Break" + }, + { + "description": "Command is run in the bench root directory. To change the directory use Subdir for example: apps, sites, etc", + "fieldname": "subdir", + "fieldtype": "Data", + "label": "Subdir" + }, + { + "fieldname": "output_section", + "fieldtype": "Section Break", + "label": "Output", + "read_only": 1 + }, + { + "depends_on": "eval:pg.output", + "fieldname": "output", + "fieldtype": "Code", + "label": "Output", + "read_only": 1 + }, + { + "depends_on": "eval:pg.output || pg.traceback || pg.directory", + "fieldname": "returncode", + "fieldtype": "Int", + "label": "Return Code", + "read_only": 1 + }, + { + "depends_on": "eval:pg.traceback", + "fieldname": "traceback", + "fieldtype": "Code", + "label": "Traceback", + "read_only": 1 + }, + { + "fieldname": "meta_section", + "fieldtype": "Section Break", + "label": "Meta" + }, + { + "depends_on": "eval:pg.output || pg.traceback || pg.directory", + "fieldname": "duration", + "fieldtype": "Float", + "label": "Duration (seconds)", + "read_only": 1 + }, + { + "depends_on": "eval:typeof pg.directory", + "fieldname": "directory", + "fieldtype": "Data", + "label": "Directory", + "read_only": 1 + }, + { + "fieldname": "column_break_gvty", + "fieldtype": "Column Break" + }, + { + "fieldname": "section_break_grqb", + "fieldtype": "Section Break" + }, + { + "fieldname": "column_break_lqmy", + "fieldtype": "Column Break" + } + ], + "hide_toolbar": 1, + "index_web_pages_for_search": 1, + "issingle": 1, + "links": [], + "modified": "2024-03-22 12:26:17.147623", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Bench Shell", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "print": 1, + "read": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/bench_shell/bench_shell.py b/jcloud/jcloud/pagetype/bench_shell/bench_shell.py new file mode 100644 index 0000000..2ab6a37 --- /dev/null +++ b/jcloud/jcloud/pagetype/bench_shell/bench_shell.py @@ -0,0 +1,62 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt + +import json +from typing import TYPE_CHECKING + +import jingrow +from jingrow.model.document import Document + +if TYPE_CHECKING: + from jcloud.jcloud.pagetype.bench.bench import Bench + + +class BenchShell(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + bench: DF.Link | None + command: DF.Code | None + directory: DF.Data | None + duration: DF.Float + output: DF.Code | None + returncode: DF.Int + save_output: DF.Check + subdir: DF.Data | None + traceback: DF.Code | None + # end: auto-generated types + + def run_command(self): + jingrow.only_for("System Manager") + + bench: "Bench" = jingrow.get_pg("Bench", self.bench) + try: + result = bench.docker_execute( + self.command, + self.subdir, + self.save_output, + ) + except Exception: + self.save_output = False + self.output = None + self.traceback = jingrow.get_traceback() + return + + self.output = result.get("output") + self.directory = result.get("directory") + self.traceback = result.get("traceback") + self.returncode = result.get("returncode") + self.duration = result.get("duration") + jingrow.db.commit() + + +@jingrow.whitelist() +def run_command(pg): + bench_shell: "BenchShell" = jingrow.get_pg(json.loads(pg)) + bench_shell.run_command() + return bench_shell.as_dict() diff --git a/jcloud/jcloud/pagetype/bench_shell/test_bench_shell.py b/jcloud/jcloud/pagetype/bench_shell/test_bench_shell.py new file mode 100644 index 0000000..32b169b --- /dev/null +++ b/jcloud/jcloud/pagetype/bench_shell/test_bench_shell.py @@ -0,0 +1,9 @@ +# Copyright (c) 2024, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests.utils import JingrowTestCase + + +class TestBenchShell(JingrowTestCase): + pass diff --git a/jcloud/jcloud/pagetype/bench_shell_log/__init__.py b/jcloud/jcloud/pagetype/bench_shell_log/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/bench_shell_log/bench_shell_log.js b/jcloud/jcloud/pagetype/bench_shell_log/bench_shell_log.js new file mode 100644 index 0000000..36c475b --- /dev/null +++ b/jcloud/jcloud/pagetype/bench_shell_log/bench_shell_log.js @@ -0,0 +1,8 @@ +// Copyright (c) 2024, JINGROW +// For license information, please see license.txt + +// jingrow.ui.form.on("Bench Shell Log", { +// refresh(frm) { + +// }, +// }); diff --git a/jcloud/jcloud/pagetype/bench_shell_log/bench_shell_log.json b/jcloud/jcloud/pagetype/bench_shell_log/bench_shell_log.json new file mode 100644 index 0000000..a9851ef --- /dev/null +++ b/jcloud/jcloud/pagetype/bench_shell_log/bench_shell_log.json @@ -0,0 +1,156 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2024-03-19 13:34:04.955886", + "pagetype": "PageType", + "engine": "InnoDB", + "field_order": [ + "command", + "bench", + "column_break_neym", + "status", + "args_section", + "cmd", + "subdir", + "output_section", + "output", + "traceback", + "returncode", + "meta_section", + "start", + "duration", + "column_break_irvb", + "end", + "directory" + ], + "fields": [ + { + "fieldname": "command", + "fieldtype": "Section Break" + }, + { + "fieldname": "bench", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Bench", + "options": "Bench", + "read_only": 1 + }, + { + "fieldname": "column_break_neym", + "fieldtype": "Column Break" + }, + { + "fieldname": "status", + "fieldtype": "Data", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Status", + "read_only": 1 + }, + { + "fieldname": "args_section", + "fieldtype": "Section Break", + "label": "Arguments" + }, + { + "fieldname": "directory", + "fieldtype": "Data", + "label": "Directory", + "read_only": 1 + }, + { + "fieldname": "output_section", + "fieldtype": "Section Break", + "label": "Output" + }, + { + 
"depends_on": "eval:pg.output", + "fieldname": "output", + "fieldtype": "Code", + "label": "Output", + "read_only": 1 + }, + { + "fieldname": "meta_section", + "fieldtype": "Section Break", + "label": "Meta" + }, + { + "fieldname": "start", + "fieldtype": "Datetime", + "label": "Start", + "read_only": 1 + }, + { + "fieldname": "duration", + "fieldtype": "Float", + "label": "Duration (seconds)", + "read_only": 1 + }, + { + "fieldname": "column_break_irvb", + "fieldtype": "Column Break" + }, + { + "fieldname": "end", + "fieldtype": "Datetime", + "label": "End", + "read_only": 1 + }, + { + "fieldname": "returncode", + "fieldtype": "Int", + "label": "Returncode", + "read_only": 1 + }, + { + "fieldname": "cmd", + "fieldtype": "Code", + "in_list_view": 1, + "label": "Command", + "read_only": 1 + }, + { + "depends_on": "eval:pg.traceback", + "fieldname": "traceback", + "fieldtype": "Code", + "label": "Traceback", + "read_only": 1 + }, + { + "depends_on": "eval:pg.subdir", + "fieldname": "subdir", + "fieldtype": "Data", + "label": "Subdir", + "read_only": 1 + } + ], + "in_create": 1, + "index_web_pages_for_search": 1, + "links": [], + "modified": "2024-03-21 11:44:01.218664", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Bench Shell Log", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "title_field": "bench" +} diff --git a/jcloud/jcloud/pagetype/bench_shell_log/bench_shell_log.py b/jcloud/jcloud/pagetype/bench_shell_log/bench_shell_log.py new file mode 100644 index 0000000..077aa7e --- /dev/null +++ b/jcloud/jcloud/pagetype/bench_shell_log/bench_shell_log.py @@ -0,0 +1,68 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt + +from datetime import datetime +from typing import Optional, TypedDict + +import jingrow +from jingrow.model.document import Document + +ExecuteResult = TypedDict( + "ExecuteResult", + { + "command": str, + "status": str, + "start": str, + "end": str, + "duration": float, + "output": str, + "directory": Optional[str], + "traceback": Optional[str], + "returncode": Optional[int], + }, +) + + +class BenchShellLog(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + bench: DF.Link | None + cmd: DF.Code | None + directory: DF.Data | None + duration: DF.Float + end: DF.Datetime | None + output: DF.Code | None + returncode: DF.Int + start: DF.Datetime | None + status: DF.Data | None + subdir: DF.Data | None + traceback: DF.Code | None + # end: auto-generated types + + +def create_bench_shell_log( + res: "ExecuteResult", + bench: str, + cmd: str, + subdir: Optional[str], + save_output: bool, +) -> None: + pg_dict = { + "pagetype": "Bench Shell Log", + "cmd": cmd, + "bench": bench, + "subdir": subdir, + **res, + } + pg_dict["start"] = datetime.fromisoformat(res["start"]) + pg_dict["end"] = datetime.fromisoformat(res["end"]) + if not save_output: + del pg_dict["output"] + jingrow.get_pg(pg_dict).insert() + jingrow.db.commit() diff --git a/jcloud/jcloud/pagetype/bench_shell_log/test_bench_shell_log.py b/jcloud/jcloud/pagetype/bench_shell_log/test_bench_shell_log.py new file mode 100644 index 0000000..df2aacd --- /dev/null +++ b/jcloud/jcloud/pagetype/bench_shell_log/test_bench_shell_log.py @@ -0,0 +1,9 @@ +# Copyright (c) 2024, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests.utils import JingrowTestCase + + +class TestBenchShellLog(JingrowTestCase): + pass diff --git a/jcloud/jcloud/pagetype/bench_site_update/__init__.py b/jcloud/jcloud/pagetype/bench_site_update/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/bench_site_update/bench_site_update.js b/jcloud/jcloud/pagetype/bench_site_update/bench_site_update.js new file mode 100644 index 0000000..50706cd --- /dev/null +++ b/jcloud/jcloud/pagetype/bench_site_update/bench_site_update.js @@ -0,0 +1,8 @@ +// Copyright (c) 2023, JINGROW +// For license information, please see license.txt + +// jingrow.ui.form.on("Bench Site Update", { +// refresh(frm) { + +// }, +// }); diff --git a/jcloud/jcloud/pagetype/bench_site_update/bench_site_update.json b/jcloud/jcloud/pagetype/bench_site_update/bench_site_update.json new file mode 100644 index 0000000..71e659c --- /dev/null +++ b/jcloud/jcloud/pagetype/bench_site_update/bench_site_update.json @@ -0,0 +1,82 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2023-06-16 18:26:31.546918", + "default_view": "List", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "site", + "site_update", + "status", + "source_candidate", + "server", + "skip_failing_patches", + "skip_backups" + ], + "fields": [ + { + "fieldname": "site", + "fieldtype": "Link", + "in_list_view": 1, + "in_preview": 1, + "label": "Site", + "options": "Site", + "reqd": 1 + }, + { + "fieldname": "site_update", + "fieldtype": "Link", + "in_list_view": 1, + "in_preview": 1, + "label": "Site Update", + "options": "Site Update" + }, + { + "fetch_from": "site_update.status", + "fieldname": "status", + "fieldtype": "Select", + "in_list_view": 1, + "in_preview": 1, + "label": "Status", + "options": "Pending\nRunning\nFailure\nRecovered\nSuccess\nFatal" + }, + { + "fieldname": "server", + "fieldtype": "Link", + "label": "Server", + "options": "Server" + }, + { + "default": "0", + "fieldname": "skip_failing_patches", + "fieldtype": "Check", + "label": "Skip failing patches" + }, + { + "default": "0", + "fieldname": "skip_backups", + "fieldtype": "Check", + "label": "Skip backups" + }, + { + "fieldname": "source_candidate", + "fieldtype": "Link", + "label": "Source Candidate", + "options": "Deploy Candidate" + } + ], + 
"index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2023-08-18 13:58:45.533223", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Bench Site Update", + "owner": "Administrator", + "permissions": [], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/bench_site_update/bench_site_update.py b/jcloud/jcloud/pagetype/bench_site_update/bench_site_update.py new file mode 100644 index 0000000..e842c42 --- /dev/null +++ b/jcloud/jcloud/pagetype/bench_site_update/bench_site_update.py @@ -0,0 +1,29 @@ +# Copyright (c) 2023, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class BenchSiteUpdate(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + server: DF.Link | None + site: DF.Link + site_update: DF.Link | None + skip_backups: DF.Check + skip_failing_patches: DF.Check + source_candidate: DF.Link | None + status: DF.Literal["Pending", "Running", "Failure", "Recovered", "Success", "Fatal"] + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/bench_site_update/test_bench_site_update.py b/jcloud/jcloud/pagetype/bench_site_update/test_bench_site_update.py new file mode 100644 index 0000000..1183c79 --- /dev/null +++ b/jcloud/jcloud/pagetype/bench_site_update/test_bench_site_update.py @@ -0,0 +1,9 @@ +# Copyright (c) 2023, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests.utils import JingrowTestCase + + +class TestBenchSiteUpdate(JingrowTestCase): + pass diff --git a/jcloud/jcloud/pagetype/bench_update/__init__.py b/jcloud/jcloud/pagetype/bench_update/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/bench_update/bench_update.js b/jcloud/jcloud/pagetype/bench_update/bench_update.js new file mode 100644 index 0000000..d88d189 --- /dev/null +++ b/jcloud/jcloud/pagetype/bench_update/bench_update.js @@ -0,0 +1,8 @@ +// Copyright (c) 2023, JINGROW +// For license information, please see license.txt + +// jingrow.ui.form.on("Bench Update", { +// refresh(frm) { + +// }, +// }); diff --git a/jcloud/jcloud/pagetype/bench_update/bench_update.json b/jcloud/jcloud/pagetype/bench_update/bench_update.json new file mode 100644 index 0000000..4cc1e1e --- /dev/null +++ b/jcloud/jcloud/pagetype/bench_update/bench_update.json @@ -0,0 +1,107 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2023-06-16 18:29:22.245007", + "default_view": "List", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "group", + "column_break_wff5", + "candidate", + "in_place_update_section", + "is_inplace_update", + "column_break_bdov", + "bench", + "section_break_nfd1", + "apps", + "sites" + ], + "fields": [ + { + "fieldname": "group", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Group", + "options": "Release Group", + "reqd": 1 + }, + { + "fieldname": "sites", + "fieldtype": "Table", + "label": "Sites", + "options": "Bench Site Update" + }, + { + "fieldname": "column_break_wff5", + "fieldtype": "Column Break" + }, + { + "fieldname": "candidate", + "fieldtype": "Link", + "label": "Candidate", + "options": "Deploy Candidate", + "read_only": 1 + }, + { + "fieldname": "section_break_nfd1", + 
"fieldtype": "Section Break" + }, + { + "fieldname": "apps", + "fieldtype": "Table", + "label": "Apps", + "options": "Bench Update App" + }, + { + "fieldname": "in_place_update_section", + "fieldtype": "Section Break", + "label": "In Place Update" + }, + { + "default": "0", + "depends_on": "eval: pg.is_inplace_update", + "description": "A new build is not run in this case, instead container is updated using docker container commit which creates a new image.", + "fieldname": "is_inplace_update", + "fieldtype": "Check", + "label": "Is In Place Update" + }, + { + "fieldname": "column_break_bdov", + "fieldtype": "Column Break" + }, + { + "depends_on": "eval: pg.bench", + "description": "Bench that will be updated in place.", + "fieldname": "bench", + "fieldtype": "Link", + "label": "Bench", + "options": "Bench" + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2024-08-23 12:20:58.651546", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Bench Update", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/bench_update/bench_update.py b/jcloud/jcloud/pagetype/bench_update/bench_update.py new file mode 100644 index 0000000..9c27fa5 --- /dev/null +++ b/jcloud/jcloud/pagetype/bench_update/bench_update.py @@ -0,0 +1,189 @@ +# Copyright (c) 2023, JINGROW +# For license information, please see license.txt + +from __future__ import annotations + +from typing import TYPE_CHECKING + +import jingrow +from jingrow.model.document import Document + +from jcloud.utils import get_current_team + +if TYPE_CHECKING: + from jcloud.jcloud.pagetype.bench.bench import Bench + + +class BenchUpdate(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + from jcloud.jcloud.pagetype.bench_site_update.bench_site_update import BenchSiteUpdate + from jcloud.jcloud.pagetype.bench_update_app.bench_update_app import BenchUpdateApp + from jcloud.jcloud.pagetype.release_group.release_group import ReleaseGroup + + apps: DF.Table[BenchUpdateApp] + bench: DF.Link | None + candidate: DF.Link | None + group: DF.Link + is_inplace_update: DF.Check + sites: DF.Table[BenchSiteUpdate] + # end: auto-generated types + + def validate(self): + if not self.is_new(): + return + + self.validate_pending_updates() + self.validate_pending_site_updates() + + if self.is_inplace_update: + self.validate_inplace_update() + + def before_save(self): + if not self.is_inplace_update: + return + + site = self.sites[0].site # validation should throw if no sites + self.bench = jingrow.get_value("Site", site, "bench") + + def validate_pending_updates(self): + if jingrow.get_pg("Release Group", self.group).deploy_in_progress: + jingrow.throw("A deploy for this bench is already in progress", jingrow.ValidationError) + + def validate_pending_site_updates(self): + for site in self.sites: + if jingrow.db.exists( + "Site Update", + {"site": site.site, "status": ("in", ("Pending", "Running"))}, + ): + jingrow.throw("An update is already pending for this site", jingrow.ValidationError) + + def validate_inplace_update(self): + sites = [s.site for s in self.sites if s.site] + if len(sites) == 0: + jingrow.throw( + "In place update cannot be run without a site being selected", + ) + + benches = jingrow.get_all( + "Site", + fields=["bench"], + filters={"name": ["in", sites]}, + pluck="bench", + ) + + if len(set(benches)) > 1: + jingrow.throw( + "In place update can be used only to update single benches", + jingrow.ValidationError, + ) + + def deploy(self, run_will_fail_check=False) -> str: + rg: ReleaseGroup = jingrow.get_pg("Release Group", self.group) + candidate = rg.create_deploy_candidate(self.apps, run_will_fail_check) + candidate.schedule_build_and_deploy() + + self.candidate = candidate.name + self.save() + + if not isinstance(candidate.name, str): + raise Exception( + f"Invalid name found for deploy candidate '{candidate.name}' of type {type(candidate.name)}" + ) + + return candidate.name + + def update_inplace(self) -> str: + if not self.is_inplace_update: + raise Exception("In place update flag is not set, aborting in place update.") + + bench: "Bench" = jingrow.get_pg("Bench", self.bench) + sites = [s.site for s in self.sites] + + return bench.update_inplace(self.apps, sites) + + def update_sites_on_server(self, bench, server): + # This method gets called multiple times concurrently when a new candidate is deployed + # Avoid saving the pg to avoid TimestampMismatchError + if jingrow.get_value("Bench", bench, "status") != "Active": + return + + for row in self.sites: + if row.server != server: + continue + + # Don't try to update if the site is already on another bench + # It already could be on newest bench and Site Update couldn't be scheduled + # In any case our job was to move site to a newer than this, which is already done + current_site_bench = jingrow.get_value("Site", row.site, "bench") + if row.source_candidate != jingrow.get_value("Bench", current_site_bench, "candidate"): + jingrow.db.set_value("Bench Site Update", row.name, "status", "Success") + jingrow.db.commit() + continue + + if row.status == "Pending" and not row.site_update: + try: + if jingrow.get_all( + "Site Update", 
+ {"site": row.site, "status": ("in", ("Pending", "Running", "Failure"))}, + ignore_ifnull=True, + limit=1, + ): + continue + site_update = jingrow.get_pg("Site", row.site).schedule_update( + skip_failing_patches=row.skip_failing_patches, skip_backups=row.skip_backups + ) + jingrow.db.set_value("Bench Site Update", row.name, "site_update", site_update) + jingrow.db.commit() + except Exception: + # Rollback the failed attempt and set status to Failure + # So, we don't try again + # TODO: Add Notifications + jingrow.db.rollback() + jingrow.db.set_value("Bench Site Update", row.name, "status", "Failure") + traceback = jingrow.get_traceback(with_context=True) + comment = f"Failed to schedule update for {row.site}

+{traceback}
" + self.add_comment(text=comment) + jingrow.db.commit() + + +def get_bench_update( + name: str, + apps: list, + sites: str | list[str] | None = None, + is_inplace_update: bool = False, +) -> BenchUpdate: + if sites is None: + sites = [] + + current_team = get_current_team() + rg_team = jingrow.db.get_value("Release Group", name, "team") + + if rg_team != current_team: + jingrow.throw("Bench can only be deployed by the bench owner", exc=jingrow.PermissionError) + + bench_update: "BenchUpdate" = jingrow.get_pg( + { + "pagetype": "Bench Update", + "group": name, + "apps": apps, + "sites": [ + { + "site": site["name"], + "server": site["server"], + "skip_failing_patches": site["skip_failing_patches"], + "skip_backups": site["skip_backups"], + "source_candidate": jingrow.get_value("Bench", site["bench"], "candidate"), + } + for site in sites + ], + "is_inplace_update": is_inplace_update, + } + ).insert(ignore_permissions=True) + return bench_update diff --git a/jcloud/jcloud/pagetype/bench_update/test_bench_update.py b/jcloud/jcloud/pagetype/bench_update/test_bench_update.py new file mode 100644 index 0000000..e656fdf --- /dev/null +++ b/jcloud/jcloud/pagetype/bench_update/test_bench_update.py @@ -0,0 +1,9 @@ +# Copyright (c) 2023, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests.utils import JingrowTestCase + + +class TestBenchUpdate(JingrowTestCase): + pass diff --git a/jcloud/jcloud/pagetype/bench_update_app/__init__.py b/jcloud/jcloud/pagetype/bench_update_app/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/bench_update_app/bench_update_app.json b/jcloud/jcloud/pagetype/bench_update_app/bench_update_app.json new file mode 100644 index 0000000..7927276 --- /dev/null +++ b/jcloud/jcloud/pagetype/bench_update_app/bench_update_app.json @@ -0,0 +1,71 @@ +{ + "actions": [], + "creation": "2023-10-05 11:21:42.353413", + "default_view": "List", + "pagetype": "PageType", + "engine": "InnoDB", + "field_order": [ + "app", + "source", + "column_break_3", + "release", + "hash" + ], + "fields": [ + { + "fetch_from": "release.app", + "fieldname": "app", + "fieldtype": "Link", + "in_list_view": 1, + "label": "App", + "options": "App", + "read_only": 1, + "reqd": 1 + }, + { + "fetch_from": "release.source", + "fieldname": "source", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Source", + "options": "App Source", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "column_break_3", + "fieldtype": "Column Break" + }, + { + "fieldname": "release", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Release", + "options": "App Release", + "read_only": 1, + "reqd": 1 + }, + { + "fetch_from": "release.hash", + "fieldname": "hash", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Hash", + "read_only": 1, + "reqd": 1 + } + ], + "istable": 1, + "links": [], + "modified": "2023-10-05 15:38:24.315931", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Bench Update App", + "owner": "Administrator", + "permissions": [], + "quick_entry": 1, + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/bench_update_app/bench_update_app.py b/jcloud/jcloud/pagetype/bench_update_app/bench_update_app.py new file mode 100644 index 0000000..0c47c59 --- /dev/null +++ b/jcloud/jcloud/pagetype/bench_update_app/bench_update_app.py @@ -0,0 +1,26 @@ +# Copyright (c) 2023, JINGROW +# For license information, please see license.txt + +# 
import jingrow +from jingrow.model.document import Document + + +class BenchUpdateApp(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + app: DF.Link + hash: DF.Data + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + release: DF.Link + source: DF.Link + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/bench_variable/__init__.py b/jcloud/jcloud/pagetype/bench_variable/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/bench_variable/bench_variable.json b/jcloud/jcloud/pagetype/bench_variable/bench_variable.json new file mode 100644 index 0000000..3c952e5 --- /dev/null +++ b/jcloud/jcloud/pagetype/bench_variable/bench_variable.json @@ -0,0 +1,42 @@ +{ + "actions": [], + "creation": "2023-06-13 16:17:11.965308", + "default_view": "List", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "key", + "value" + ], + "fields": [ + { + "fieldname": "key", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Key", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "value", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Value", + "reqd": 1 + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2023-06-13 16:17:11.965308", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Bench Variable", + "owner": "Administrator", + "permissions": [], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/bench_variable/bench_variable.py b/jcloud/jcloud/pagetype/bench_variable/bench_variable.py new file mode 100644 index 0000000..b882a1d --- /dev/null +++ b/jcloud/jcloud/pagetype/bench_variable/bench_variable.py @@ -0,0 +1,24 @@ +# Copyright (c) 2023, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class BenchVariable(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + key: DF.Data + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + value: DF.Data + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/blocked_domain/__init__.py b/jcloud/jcloud/pagetype/blocked_domain/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/blocked_domain/blocked_domain.js b/jcloud/jcloud/pagetype/blocked_domain/blocked_domain.js new file mode 100644 index 0000000..5370d5d --- /dev/null +++ b/jcloud/jcloud/pagetype/blocked_domain/blocked_domain.js @@ -0,0 +1,7 @@ +// Copyright (c) 2021, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Blocked Domain', { + // refresh: function(frm) { + // } +}); diff --git a/jcloud/jcloud/pagetype/blocked_domain/blocked_domain.json b/jcloud/jcloud/pagetype/blocked_domain/blocked_domain.json new file mode 100644 index 0000000..d81454d --- /dev/null +++ b/jcloud/jcloud/pagetype/blocked_domain/blocked_domain.json @@ -0,0 +1,53 @@ +{ + "actions": [], + "allow_rename": 1, + "autoname": "Prompt", + "creation": "2021-09-21 16:46:44.313395", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "root_domain", + "block_for_all" + ], + "fields": [ + { + "fieldname": "root_domain", + "fieldtype": "Link", + "label": "Root Domain", + "options": "Root Domain" + }, + { + "default": "0", + "fieldname": "block_for_all", + "fieldtype": "Check", + "label": "Block for All Root Domain" + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2023-01-25 10:59:42.568228", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Blocked Domain", + "naming_rule": "Set by user", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/blocked_domain/blocked_domain.py b/jcloud/jcloud/pagetype/blocked_domain/blocked_domain.py new file mode 100644 index 0000000..b1ff06f --- /dev/null +++ b/jcloud/jcloud/pagetype/blocked_domain/blocked_domain.py @@ -0,0 +1,21 @@ +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class BlockedDomain(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + block_for_all: DF.Check + root_domain: DF.Link | None + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/blocked_domain/test_blocked_domain.py b/jcloud/jcloud/pagetype/blocked_domain/test_blocked_domain.py new file mode 100644 index 0000000..4694e0f --- /dev/null +++ b/jcloud/jcloud/pagetype/blocked_domain/test_blocked_domain.py @@ -0,0 +1,9 @@ +# Copyright (c) 2021, JINGROW +# See license.txt + +# import jingrow +import unittest + + +class TestBlockedDomain(unittest.TestCase): + pass diff --git a/jcloud/jcloud/pagetype/build_cache_shell/__init__.py b/jcloud/jcloud/pagetype/build_cache_shell/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/build_cache_shell/build_cache_shell.js b/jcloud/jcloud/pagetype/build_cache_shell/build_cache_shell.js new file mode 100644 index 0000000..285bcac --- /dev/null +++ b/jcloud/jcloud/pagetype/build_cache_shell/build_cache_shell.js @@ -0,0 +1,35 @@ +// Copyright (c) 2024, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Build Cache Shell', { + onload(frm) { + jingrow.ui.keys.add_shortcut({ + shortcut: 'shift+enter', + action: () => frm.page.btn_primary.trigger('click'), + page: frm.page, + description: __('Run Build Cache Shell command'), + ignore_inputs: true, + }); + }, + + refresh(frm) { + frm.disable_save(); + frm.page.set_primary_action(__('Run'), async ($btn) => { + $btn.text(__('Running Command...')); + return frm.execute_action('Run').finally(() => $btn.text(__('Run'))); + }); + + const command = localStorage.getItem('build_cache_shell_command'); + if (!command) { + return; + } + + frm.set_value('bench', bench); + frm.set_value('command', command); + + ['output', 'cwd', 'image_tag'].forEach((f) => frm.set_value(f, null)); + frm.set_value('returncode', 0); + + localStorage.removeItem('build_cache_shell_command'); + }, +}); diff --git a/jcloud/jcloud/pagetype/build_cache_shell/build_cache_shell.json b/jcloud/jcloud/pagetype/build_cache_shell/build_cache_shell.json new file mode 100644 index 0000000..d9d45ef --- /dev/null +++ b/jcloud/jcloud/pagetype/build_cache_shell/build_cache_shell.json @@ -0,0 +1,118 @@ +{ + "actions": [ + { + "action": "jcloud.jcloud.pagetype.build_cache_shell.build_cache_shell.run_command", + "action_type": "Server Action", + "hidden": 1, + "label": "Run" + } + ], + "allow_rename": 1, + "creation": "2024-04-18 11:55:45.595311", + "pagetype": "PageType", + "engine": "InnoDB", + "field_order": [ + "command", + "cache_target", + "build_server", + "output_section", + "output", + "meta_section", + "cwd", + "image_tag", + "column_break_zdbf", + "returncode" + ], + "fields": [ + { + "default": "ls -lAh", + "fieldname": "command", + "fieldtype": "Code", + "in_list_view": 1, + "label": "Command", + "reqd": 1 + }, + { + "default": "/home/jingrow/.cache", + "description": "Sets the mount=type=cache target and the WORKDIR of where the command is run.", + "fieldname": "cache_target", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Cache Target", + "reqd": 1 + }, + { + "description": "Select which build server to run the command on.", + "fieldname": "build_server", + "fieldtype": "Link", + "label": "Build Server", + "options": "Server", + "reqd": 1 + }, + { + "fieldname": "output_section", + "fieldtype": "Section Break", + "label": "Output" + }, + { + "depends_on": "eval:pg.output", + "fieldname": "output", + "fieldtype": "Code", + "label": 
"Output", + "read_only": 1 + }, + { + "fieldname": "meta_section", + "fieldtype": "Section Break", + "label": "Meta" + }, + { + "depends_on": "eval:pg.cwd", + "fieldname": "cwd", + "fieldtype": "Data", + "label": "CWD", + "read_only": 1 + }, + { + "fieldname": "image_tag", + "fieldtype": "Data", + "label": "Image Tag", + "read_only": 1 + }, + { + "fieldname": "column_break_zdbf", + "fieldtype": "Column Break" + }, + { + "depends_on": "eval:pg.output", + "fieldname": "returncode", + "fieldtype": "Int", + "label": "Return Code", + "read_only": 1 + } + ], + "hide_toolbar": 1, + "index_web_pages_for_search": 1, + "issingle": 1, + "links": [], + "modified": "2024-06-11 10:37:05.598121", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Build Cache Shell", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "print": 1, + "read": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/build_cache_shell/build_cache_shell.py b/jcloud/jcloud/pagetype/build_cache_shell/build_cache_shell.py new file mode 100644 index 0000000..b03ddfb --- /dev/null +++ b/jcloud/jcloud/pagetype/build_cache_shell/build_cache_shell.py @@ -0,0 +1,53 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt + +import json + +import jingrow +from jingrow.model.document import Document + +from jcloud.agent import Agent + + +class BuildCacheShell(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + build_server: DF.Link + cache_target: DF.Data + command: DF.Code + cwd: DF.Data | None + image_tag: DF.Data | None + output: DF.Code | None + returncode: DF.Int + # end: auto-generated types + + def run_command(self): + jingrow.only_for("System Manager") + result = self._run_command() or {} + self.output = result.get("output", "# no-output") + self.cwd = result.get("cwd") + self.image_tag = result.get("image_tag") + self.returncode = result.get("returncode") + jingrow.db.commit() + + def _run_command(self): + if not self.build_server: + jingrow.throw("Please select a Build Server.") + + return Agent(self.build_server).run_command_in_docker_cache( + self.command, + self.cache_target, + ) + + +@jingrow.whitelist() +def run_command(pg): + bench_shell: "BuildCacheShell" = jingrow.get_pg(json.loads(pg)) + bench_shell.run_command() + return bench_shell.as_dict() diff --git a/jcloud/jcloud/pagetype/build_cache_shell/test_build_cache_shell.py b/jcloud/jcloud/pagetype/build_cache_shell/test_build_cache_shell.py new file mode 100644 index 0000000..5f08ffb --- /dev/null +++ b/jcloud/jcloud/pagetype/build_cache_shell/test_build_cache_shell.py @@ -0,0 +1,9 @@ +# Copyright (c) 2024, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests.utils import JingrowTestCase + + +class TestBuildCacheShell(JingrowTestCase): + pass diff --git a/jcloud/jcloud/pagetype/certificate_authority/__init__.py b/jcloud/jcloud/pagetype/certificate_authority/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/certificate_authority/certificate_authority.js b/jcloud/jcloud/pagetype/certificate_authority/certificate_authority.js new file mode 100644 index 0000000..cfc7135 --- /dev/null +++ 
b/jcloud/jcloud/pagetype/certificate_authority/certificate_authority.js @@ -0,0 +1,7 @@ +// Copyright (c) 2020, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Certificate Authority', { + // refresh: function(frm) { + // } +}); diff --git a/jcloud/jcloud/pagetype/certificate_authority/certificate_authority.json b/jcloud/jcloud/pagetype/certificate_authority/certificate_authority.json new file mode 100644 index 0000000..e2f1f99 --- /dev/null +++ b/jcloud/jcloud/pagetype/certificate_authority/certificate_authority.json @@ -0,0 +1,158 @@ +{ + "actions": [], + "autoname": "field:common_name", + "creation": "2020-09-17 10:43:35.055591", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "common_name", + "organization", + "organizational_unit", + "column_break_4", + "validity_days", + "rsa_key_size", + "is_root_ca", + "parent_authority", + "section_break_9", + "directory", + "section_break_11", + "issued_on", + "column_break_13", + "expires_on", + "section_break_15", + "decoded_certificate" + ], + "fields": [ + { + "fieldname": "common_name", + "fieldtype": "Data", + "label": "Common Name", + "reqd": 1, + "set_only_once": 1, + "unique": 1 + }, + { + "default": "Jingrow Technologies", + "fieldname": "organization", + "fieldtype": "Data", + "label": "Organization", + "reqd": 1, + "set_only_once": 1 + }, + { + "default": "7300", + "fieldname": "validity_days", + "fieldtype": "Int", + "in_list_view": 1, + "label": "Validity (Days)", + "reqd": 1, + "set_only_once": 1 + }, + { + "default": "4096", + "fieldname": "rsa_key_size", + "fieldtype": "Select", + "label": "RSA Key Size", + "options": "2048\n3072\n4096", + "reqd": 1, + "set_only_once": 1 + }, + { + "fieldname": "directory", + "fieldtype": "Data", + "label": "Directory", + "reqd": 1, + "set_only_once": 1 + }, + { + "fieldname": "issued_on", + "fieldtype": "Datetime", + "label": "Issued On", + "read_only": 1 + }, + { + "fieldname": "expires_on", + "fieldtype": "Datetime", + "label": "Expires On", + "read_only": 1 + }, + { + "default": "0", + "fieldname": "is_root_ca", + "fieldtype": "Check", + "in_list_view": 1, + "label": "Is Root CA", + "set_only_once": 1 + }, + { + "fieldname": "decoded_certificate", + "fieldtype": "Code", + "label": "Decoded Certificate", + "read_only": 1 + }, + { + "default": "Jingrow", + "fieldname": "organizational_unit", + "fieldtype": "Data", + "label": "Organizational Unit", + "reqd": 1, + "set_only_once": 1 + }, + { + "fieldname": "section_break_9", + "fieldtype": "Section Break" + }, + { + "depends_on": "eval: !pg.is_root_ca", + "fieldname": "parent_authority", + "fieldtype": "Link", + "label": "Parent Authority", + "mandatory_depends_on": "eval: !pg.is_root_ca", + "options": "Certificate Authority", + "read_only_depends_on": "eval: pg.is_root_ca", + "set_only_once": 1 + }, + { + "fieldname": "column_break_4", + "fieldtype": "Column Break" + }, + { + "fieldname": "section_break_11", + "fieldtype": "Section Break" + }, + { + "fieldname": "column_break_13", + "fieldtype": "Column Break" + }, + { + "fieldname": "section_break_15", + "fieldtype": "Section Break", + "hide_border": 1 + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2020-09-17 19:30:23.565306", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Certificate Authority", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", 
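A hedged usage sketch for the Certificate Authority pagetype above: a root CA is created first, then an intermediate pointing at it through parent_authority. Inserting the documents runs validate(), which generates the keys, certificates, and chain on disk (see the controller below). Names and directories here are placeholders, developer mode is assumed, and the other required fields fall back to the defaults in the JSON:

    import jingrow

    root = jingrow.get_pg(
        {
            "pagetype": "Certificate Authority",
            "common_name": "Example Root CA",
            "is_root_ca": 1,
            "directory": "/tmp/example-root-ca",
        }
    ).insert()

    intermediate = jingrow.get_pg(
        {
            "pagetype": "Certificate Authority",
            "common_name": "Example Intermediate CA",
            "parent_authority": root.name,
            "directory": "/tmp/example-intermediate-ca",
            "validity_days": 3650,
        }
    ).insert()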
+ "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/certificate_authority/certificate_authority.py b/jcloud/jcloud/pagetype/certificate_authority/certificate_authority.py new file mode 100644 index 0000000..499dfb3 --- /dev/null +++ b/jcloud/jcloud/pagetype/certificate_authority/certificate_authority.py @@ -0,0 +1,164 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +import os +import secrets +import shlex +import shutil +import subprocess +from datetime import datetime +from pathlib import Path + +import jingrow +import OpenSSL +from jingrow.model.document import Document + +from jcloud.utils import developer_mode_only + + +class CertificateAuthority(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + common_name: DF.Data + decoded_certificate: DF.Code | None + directory: DF.Data + expires_on: DF.Datetime | None + is_root_ca: DF.Check + issued_on: DF.Datetime | None + organization: DF.Data + organizational_unit: DF.Data + parent_authority: DF.Link | None + rsa_key_size: DF.Literal["2048", "3072", "4096"] + validity_days: DF.Int + # end: auto-generated types + + def onload(self): + developer_mode_only() + + def validate(self): + developer_mode_only() + self.setup_directory() + self.generate_private_key() + if self.is_root_ca: + self.generate_root_certificate() + else: + self.generate_certificate_signing_request() + self.sign_certificate_signing_request() + self.generate_chain_certificate() + self.extract_certificate_details() + + def setup_directory(self): + if os.path.exists(self.directory): + shutil.rmtree(self.directory) + os.mkdir(self.directory) + + os.mkdir(self.new_certificates_directory) + Path(self.database_file).touch() + with open(self.serial_file, "w") as f: + f.write(f"{secrets.randbits(16*8):0{32}x}\n") + + template = "root.conf" if self.is_root_ca else "intermediate.conf" + template = f"jcloud/jcloud/pagetype/certificate_authority/{template}" + with open(self.openssl_config_file, "w") as f: + openssl_config = jingrow.render_template(template, {"pg": self}, is_path=True) + f.write(openssl_config) + + def run(self, command): + return subprocess.check_output(shlex.split(command)).decode() + + def generate_private_key(self): + self.run(f"openssl genrsa -out {self.private_key_file} {self.rsa_key_size}") + os.chmod(self.private_key_file, 400) + + def generate_root_certificate(self): + self.run( + f"openssl req -new -x509 -days {self.validity_days} -config" + f" {self.openssl_config_file} -key {self.private_key_file} -out" + f" {self.certificate_file}" + ) + os.chmod(self.certificate_file, 444) + + def generate_certificate_signing_request(self): + self.run( + f"openssl req -new -config {self.openssl_config_file} -key" + f" {self.private_key_file} -out {self.certificate_signing_request_file}" + ) + os.chmod(self.certificate_signing_request_file, 444) + + def sign_certificate_signing_request(self): + parent = jingrow.get_pg(self.pagetype, self.parent_authority) + self.run( + f"openssl ca -batch -notext -days {self.validity_days} -config" + f" {parent.openssl_config_file} -extensions v3_intermediate_ca -in" + f" {self.certificate_signing_request_file} -out {self.certificate_file}" + ) + os.chmod(self.certificate_file, 444) + + def 
generate_chain_certificate(self): + parent = jingrow.get_pg(self.pagetype, self.parent_authority) + with open(self.certificate_file) as f: + certificate = f.read() + with open(parent.certificate_file) as f: + certificate += f.read() + with open(self.certificate_chain_file, "w") as f: + f.write(certificate) + os.chmod(self.certificate_chain_file, 444) + + def extract_certificate_details(self): + with open(self.certificate_file) as f: + certificate = f.read() + + x509 = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, certificate) + self.decoded_certificate = OpenSSL.crypto.dump_certificate( + OpenSSL.crypto.FILETYPE_TEXT, x509 + ).decode() + + self.issued_on = datetime.strptime(x509.get_notBefore().decode(), "%Y%m%d%H%M%SZ") + self.expires_on = datetime.strptime(x509.get_notAfter().decode(), "%Y%m%d%H%M%SZ") + + def on_trash(self): + if os.path.exists(self.directory): + shutil.rmtree(self.directory) + children = jingrow.get_all(self.pagetype, {"parent_authority": self.name}) + for child in children: + jingrow.delete_pg(self.pagetype, child.name) + + @property + def certificate_chain_file(self): + return os.path.join(self.directory, "chain.pem") + + @property + def certificate_file(self): + return os.path.join(self.directory, "cert.pem") + + @property + def certificate_signing_request_file(self): + return os.path.join(self.directory, "csr.pem") + + @property + def database_file(self): + return os.path.join(self.directory, "index.txt") + + @property + def new_certificates_directory(self): + return os.path.join(self.directory, "newcerts") + + @property + def openssl_config_file(self): + return os.path.join(self.directory, "openssl.conf") + + @property + def private_key_file(self): + return os.path.join(self.directory, "key.pem") + + @property + def serial_file(self): + return os.path.join(self.directory, "serial") diff --git a/jcloud/jcloud/pagetype/certificate_authority/intermediate.conf b/jcloud/jcloud/pagetype/certificate_authority/intermediate.conf new file mode 100644 index 0000000..a19762c --- /dev/null +++ b/jcloud/jcloud/pagetype/certificate_authority/intermediate.conf @@ -0,0 +1,51 @@ +# OpenSSL Intermediate CA configuration file + +[ ca ] +default_ca = CA_default + +[ CA_default ] +dir = {{ pg.directory }} + +database = {{ pg.database_file }} +new_certs_dir = {{ pg.new_certificates_directory }} +serial = {{ pg.serial_file }} + +certificate = {{ pg.certificate_file }} +private_key = {{ pg.private_key_file }} + +default_md = sha256 +default_days = 30 + +# This is to support CSRs that include a Subject Alternative Name +# SANs aren't used by OpenSSL by default +# Chrome has dropped support for certificates without SANs +# This has some downsides as well, but, we trust all the CSR's sent our way +copy_extensions = copy +unique_subject = no + +policy = policy_default + +[ policy_default ] +commonName = supplied +organizationName = supplied +organizationalUnitName = supplied + +[ req ] +distinguished_name = req_distinguished_name +prompt = no + +[ req_distinguished_name ] +CN = {{ pg.name }} +O = {{ pg.organization }} +OU = {{ pg.organizational_unit }} + +[ server_cert ] +# The Server certificate can't act as a CA +basicConstraints = CA:FALSE +# The Server certificate can't be used to sign any certificates +keyUsage = critical, digitalSignature, keyEncipherment +extendedKeyUsage = serverAuth +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid,issuer:always +nsCertType = server +nsComment = "OpenSSL Generated Server Certificate" diff --git 
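extract_certificate_details above reads notBefore/notAfter with pyOpenSSL; the same calls can be reused for a small standalone expiry check. The helper below is illustrative and not part of this module:

    from datetime import datetime

    import OpenSSL

    def days_until_expiry(certificate_pem: str) -> int:
        # Mirrors extract_certificate_details: parse the PEM, then read notAfter.
        x509 = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, certificate_pem)
        expires_on = datetime.strptime(x509.get_notAfter().decode(), "%Y%m%d%H%M%SZ")
        return (expires_on - datetime.utcnow()).days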
a/jcloud/jcloud/pagetype/certificate_authority/root.conf b/jcloud/jcloud/pagetype/certificate_authority/root.conf new file mode 100644 index 0000000..8804bea --- /dev/null +++ b/jcloud/jcloud/pagetype/certificate_authority/root.conf @@ -0,0 +1,56 @@ +# OpenSSL Root CA configuration file + +# This section is only used for signing Intermediate CA certificates +[ ca ] +default_ca = CA_default + +[ CA_default ] +dir = {{ pg.directory }} + +database = {{ pg.database_file }} +new_certs_dir = {{ pg.new_certificates_directory }} +serial = {{ pg.serial_file }} + +certificate = {{ pg.certificate_file }} +private_key = {{ pg.private_key_file }} + +default_md = sha256 + +# Allow reissuing certificates to same subject without having to edit the database +unique_subject = no + +# Expect Intermediate CA CSR to provide Common Name and Organization Name +policy = policy_default + +[ policy_default ] +commonName = supplied +organizationName = supplied +organizationalUnitName = supplied + +[ req ] +distinguished_name = req_distinguished_name +# Don't ask for any user inputs, use values from req_distinguished_name section instead +prompt = no +x509_extensions = v3_ca + +[ req_distinguished_name ] +CN = {{ pg.common_name }} +O = {{ pg.organization }} +OU = {{ pg.organizational_unit }} + +[ v3_ca ] +# Allow Root CA to act as a CA +basicConstraints = critical, CA:true +# Allow Root CA to sign certificates +keyUsage = critical, digitalSignature, keyCertSign +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid:always,issuer + +# This sections needs to be activated with `-extensions v3_intermediate_ca` from the CLI +[ v3_intermediate_ca ] +# Allow Intermediate CA to act as a CA but it cannot issue certificates to another CA +basicConstraints = critical, CA:true, pathlen:0 +# Allow Intermediate CA to sign certificates +keyUsage = critical, digitalSignature, keyCertSign +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid:always,issuer diff --git a/jcloud/jcloud/pagetype/certificate_authority/test_certificate_authority.py b/jcloud/jcloud/pagetype/certificate_authority/test_certificate_authority.py new file mode 100644 index 0000000..7cbd816 --- /dev/null +++ b/jcloud/jcloud/pagetype/certificate_authority/test_certificate_authority.py @@ -0,0 +1,11 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# See license.txt + + +# import jingrow +import unittest + + +class TestCertificateAuthority(unittest.TestCase): + pass diff --git a/jcloud/jcloud/pagetype/child_team_member/__init__.py b/jcloud/jcloud/pagetype/child_team_member/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/child_team_member/child_team_member.json b/jcloud/jcloud/pagetype/child_team_member/child_team_member.json new file mode 100644 index 0000000..f84be8b --- /dev/null +++ b/jcloud/jcloud/pagetype/child_team_member/child_team_member.json @@ -0,0 +1,39 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2023-04-22 10:27:26.970665", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "child_team", + "impersonate_team" + ], + "fields": [ + { + "fieldname": "child_team", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Child Team", + "options": "Team", + "reqd": 1 + }, + { + "fieldname": "impersonate_team", + "fieldtype": "Button", + "label": "Impersonate Team" + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2023-04-22 10:27:26.970665", + "modified_by": "Administrator", + "module": "Jcloud", + 
"name": "Child Team Member", + "owner": "Administrator", + "permissions": [], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/child_team_member/child_team_member.py b/jcloud/jcloud/pagetype/child_team_member/child_team_member.py new file mode 100644 index 0000000..b21dc49 --- /dev/null +++ b/jcloud/jcloud/pagetype/child_team_member/child_team_member.py @@ -0,0 +1,23 @@ +# Copyright (c) 2023, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class ChildTeamMember(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + child_team: DF.Link + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/cloud_region/__init__.py b/jcloud/jcloud/pagetype/cloud_region/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/cloud_region/cloud_region.js b/jcloud/jcloud/pagetype/cloud_region/cloud_region.js new file mode 100644 index 0000000..617d271 --- /dev/null +++ b/jcloud/jcloud/pagetype/cloud_region/cloud_region.js @@ -0,0 +1,8 @@ +// Copyright (c) 2023, JINGROW +// For license information, please see license.txt + +// jingrow.ui.form.on("Cloud Region", { +// refresh(frm) { + +// }, +// }); diff --git a/jcloud/jcloud/pagetype/cloud_region/cloud_region.json b/jcloud/jcloud/pagetype/cloud_region/cloud_region.json new file mode 100644 index 0000000..6b6aec7 --- /dev/null +++ b/jcloud/jcloud/pagetype/cloud_region/cloud_region.json @@ -0,0 +1,55 @@ +{ + "actions": [], + "autoname": "field:region_name", + "creation": "2023-08-08 10:48:41.639117", + "default_view": "List", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "region_name", + "provider" + ], + "fields": [ + { + "fieldname": "region_name", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Region Name", + "reqd": 1, + "unique": 1 + }, + { + "fieldname": "provider", + "fieldtype": "Select", + "label": "Provider", + "options": "AWS EC2\nOCI\nHetzner", + "reqd": 1 + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2024-09-14 12:04:26.561520", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Cloud Region", + "naming_rule": "By fieldname", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/cloud_region/cloud_region.py b/jcloud/jcloud/pagetype/cloud_region/cloud_region.py new file mode 100644 index 0000000..47b4c9f --- /dev/null +++ b/jcloud/jcloud/pagetype/cloud_region/cloud_region.py @@ -0,0 +1,21 @@ +# Copyright (c) 2023, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class CloudRegion(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + provider: DF.Literal["AWS EC2", "OCI", "Hetzner"] + region_name: DF.Data + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/cloud_region/test_cloud_region.py b/jcloud/jcloud/pagetype/cloud_region/test_cloud_region.py new file mode 100644 index 0000000..9c4facc --- /dev/null +++ b/jcloud/jcloud/pagetype/cloud_region/test_cloud_region.py @@ -0,0 +1,9 @@ +# Copyright (c) 2023, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests.utils import JingrowTestCase + + +class TestCloudRegion(JingrowTestCase): + pass diff --git a/jcloud/jcloud/pagetype/cluster/__init__.py b/jcloud/jcloud/pagetype/cluster/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/cluster/cluster.js b/jcloud/jcloud/pagetype/cluster/cluster.js new file mode 100644 index 0000000..3334ddb --- /dev/null +++ b/jcloud/jcloud/pagetype/cluster/cluster.js @@ -0,0 +1,34 @@ +// Copyright (c) 2021, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Cluster', { + refresh: function (frm) { + [ + [__('Create Servers'), 'create_servers', frm.pg.status === 'Active'], + [__('Add Images'), 'add_images', frm.pg.status === 'Active'], + ].forEach(([label, method, condition]) => { + if (typeof condition === 'undefined' || condition) { + frm.add_custom_button( + label, + () => { + frm.call(method).then((r) => frm.refresh()); + }, + __('Actions'), + ); + } + }); + if (frm.pg.vpc_id) { + if (frm.pg.cloud_provider === 'AWS EC2') { + frm.add_web_link( + `https://${frm.pg.region}.console.aws.amazon.com/vpc/home?region=${frm.pg.region}#VpcDetails:VpcId=${frm.pg.vpc_id}`, + __('Visit AWS Dashboard'), + ); + } else if (frm.pg.cloud_provider === 'OCI') { + frm.add_web_link( + `https://cloud.oracle.com/networking/vcns/${frm.pg.vpc_id}?region=${frm.pg.region}`, + __('Visit OCI Dashboard'), + ); + } + } + }, +}); diff --git a/jcloud/jcloud/pagetype/cluster/cluster.json b/jcloud/jcloud/pagetype/cluster/cluster.json new file mode 100644 index 0000000..8244426 --- /dev/null +++ b/jcloud/jcloud/pagetype/cluster/cluster.json @@ -0,0 +1,346 @@ +{ + "actions": [], + "autoname": "Prompt", + "creation": "2022-01-28 20:07:41.240327", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "title", + "description", + "column_break_2", + "status", + "public", + "beta", + "hybrid", + "column_break_fsht", + "monitoring_password", + "image", + "billing_section", + "team", + "provisioning_section", + "cloud_provider", + "region", + "column_break_5", + "ssh_key", + "availability_zone", + "aws_section", + "aws_access_key_id", + "column_break_qpia", + "aws_secret_access_key", + "oci_section", + "oci_user", + "oci_public_key", + "column_break_bpar", + "oci_tenancy", + "oci_private_key", + "networking_section", + "cidr_block", + "subnet_cidr_block", + "column_break_12", + "vpc_id", + "subnet_id", + "security_group_id", + "proxy_security_group_id", + "network_acl_id", + "route_table_id" + ], + "fields": [ + { + "fieldname": "description", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Description" + }, + { + "fieldname": "monitoring_password", + "fieldtype": "Password", + "label": "Monitoring Password", + "set_only_once": 1 + }, + { + "default": "Generic", + "fieldname": "cloud_provider", + "fieldtype": "Select", + "in_list_view": 1, + "label": "Cloud Provider", + "options": "AWS EC2\nGeneric\nOCI\nHetzner", + "reqd": 1, + "set_only_once": 
1 + }, + { + "depends_on": "eval:pg.cloud_provider === \"AWS EC2\"", + "fieldname": "aws_access_key_id", + "fieldtype": "Data", + "label": "AWS Access Key ID", + "set_only_once": 1 + }, + { + "depends_on": "eval:pg.cloud_provider === \"AWS EC2\"", + "fieldname": "aws_secret_access_key", + "fieldtype": "Password", + "label": "AWS Secret Access Key", + "set_only_once": 1 + }, + { + "fieldname": "provisioning_section", + "fieldtype": "Section Break", + "label": "Provisioning" + }, + { + "fieldname": "column_break_5", + "fieldtype": "Column Break" + }, + { + "depends_on": "eval:pg.cloud_provider !== \"Generic\"", + "fieldname": "region", + "fieldtype": "Link", + "label": "Region", + "mandatory_depends_on": "eval:pg.cloud_provider !== \"Generic\"", + "options": "Cloud Region", + "set_only_once": 1 + }, + { + "fieldname": "column_break_2", + "fieldtype": "Column Break" + }, + { + "fieldname": "cidr_block", + "fieldtype": "Data", + "label": "CIDR Block", + "read_only": 1 + }, + { + "fieldname": "networking_section", + "fieldtype": "Section Break", + "label": "Networking" + }, + { + "fieldname": "column_break_12", + "fieldtype": "Column Break" + }, + { + "fieldname": "subnet_cidr_block", + "fieldtype": "Data", + "label": "Subnet CIDR Block", + "read_only": 1 + }, + { + "depends_on": "eval:pg.cloud_provider !== \"Generic\"", + "fieldname": "availability_zone", + "fieldtype": "Data", + "label": "Availability Zone", + "mandatory_depends_on": "eval:pg.cloud_provider === \"AWS EC2\"", + "set_only_once": 1 + }, + { + "depends_on": "eval:pg.cloud_provider !== \"Generic\"", + "fieldname": "ssh_key", + "fieldtype": "Link", + "label": "SSH Key", + "mandatory_depends_on": "eval:pg.cloud_provider !== \"Generic\"", + "options": "SSH Key", + "set_only_once": 1 + }, + { + "default": "0", + "description": "Show in New Site/New Bench selection", + "fieldname": "public", + "fieldtype": "Check", + "label": "Public" + }, + { + "fieldname": "title", + "fieldtype": "Data", + "label": "Title" + }, + { + "fieldname": "image", + "fieldtype": "Attach Image", + "label": "Image" + }, + { + "fieldname": "column_break_fsht", + "fieldtype": "Column Break" + }, + { + "default": "Active", + "fieldname": "status", + "fieldtype": "Select", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Status", + "options": "Active\nCopying Images\nArchived" + }, + { + "collapsible": 1, + "fieldname": "oci_section", + "fieldtype": "Section Break", + "label": "OCI" + }, + { + "fieldname": "column_break_bpar", + "fieldtype": "Column Break" + }, + { + "collapsible": 1, + "fieldname": "aws_section", + "fieldtype": "Section Break", + "label": "AWS" + }, + { + "fieldname": "column_break_qpia", + "fieldtype": "Column Break" + }, + { + "depends_on": "eval:pg.cloud_provider === \"OCI\"", + "fieldname": "oci_user", + "fieldtype": "Data", + "label": "OCI User", + "mandatory_depends_on": "eval:pg.cloud_provider === \"OCI\"", + "set_only_once": 1 + }, + { + "depends_on": "eval:pg.cloud_provider === \"OCI\"", + "fieldname": "oci_public_key", + "fieldtype": "Code", + "label": " OCI Public Key", + "mandatory_depends_on": "eval:pg.cloud_provider === \"OCI\"", + "set_only_once": 1 + }, + { + "depends_on": "eval:pg.cloud_provider === \"OCI\"", + "fieldname": "oci_tenancy", + "fieldtype": "Data", + "label": "OCI Tenancy", + "mandatory_depends_on": "eval:pg.cloud_provider === \"OCI\"", + "set_only_once": 1 + }, + { + "depends_on": "eval:pg.cloud_provider === \"OCI\"", + "fieldname": "oci_private_key", + "fieldtype": "Password", + "label": "OCI Private 
Key", + "length": 4096, + "mandatory_depends_on": "eval:pg.cloud_provider === \"OCI\"", + "set_only_once": 1 + }, + { + "fieldname": "vpc_id", + "fieldtype": "Data", + "label": "VPC ID", + "read_only": 1 + }, + { + "fieldname": "subnet_id", + "fieldtype": "Data", + "label": "Subnet ID", + "read_only": 1 + }, + { + "fieldname": "proxy_security_group_id", + "fieldtype": "Data", + "label": "Proxy Security Group ID", + "read_only": 1 + }, + { + "fieldname": "security_group_id", + "fieldtype": "Data", + "label": "Security Group ID", + "read_only": 1 + }, + { + "fieldname": "route_table_id", + "fieldtype": "Data", + "label": "Route Table ID", + "read_only": 1 + }, + { + "fieldname": "network_acl_id", + "fieldtype": "Data", + "label": "Network ACL ID", + "read_only": 1 + }, + { + "default": "0", + "fieldname": "beta", + "fieldtype": "Check", + "label": "Beta" + }, + { + "default": "0", + "fieldname": "hybrid", + "fieldtype": "Check", + "label": "Hybrid" + }, + { + "fieldname": "billing_section", + "fieldtype": "Section Break", + "label": "Billing" + }, + { + "fieldname": "team", + "fieldtype": "Link", + "label": "Team", + "options": "Team" + } + ], + "image_field": "image", + "index_web_pages_for_search": 1, + "links": [ + { + "group": "Servers", + "link_pagetype": "Server", + "link_fieldname": "cluster" + }, + { + "group": "Servers", + "link_pagetype": "Database Server", + "link_fieldname": "cluster" + }, + { + "group": "Servers", + "link_pagetype": "Proxy Server", + "link_fieldname": "cluster" + }, + { + "group": "Servers", + "link_pagetype": "Log Server", + "link_fieldname": "cluster" + }, + { + "group": "Servers", + "link_pagetype": "Monitor Server", + "link_fieldname": "cluster" + }, + { + "group": "Billing", + "link_pagetype": "Subscription", + "link_fieldname": "document_name" + } + ], + "modified": "2024-09-14 13:52:58.308402", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Cluster", + "naming_rule": "Set by user", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/cluster/cluster.py b/jcloud/jcloud/pagetype/cluster/cluster.py new file mode 100644 index 0000000..2a4885a --- /dev/null +++ b/jcloud/jcloud/pagetype/cluster/cluster.py @@ -0,0 +1,848 @@ +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt +from __future__ import annotations + +import base64 +import hashlib +import ipaddress +import re +import time +import typing +from textwrap import wrap +from typing import ClassVar, Generator + +import boto3 +import jingrow +from jingrow.model.document import Document +from hcloud import APIException, Client +from hcloud.networks.domain import NetworkSubnet +from oci.config import validate_config +from oci.core import VirtualNetworkClient +from oci.core.models import ( + AddNetworkSecurityGroupSecurityRulesDetails, + AddSecurityRuleDetails, + CreateInternetGatewayDetails, + CreateNetworkSecurityGroupDetails, + CreateSubnetDetails, + CreateVcnDetails, + PortRange, + RouteRule, + TcpOptions, + UpdateRouteTableDetails, +) +from oci.identity import IdentityClient + +from jcloud.jcloud.pagetype.virtual_machine_image.virtual_machine_image import ( + VirtualMachineImage, +) +from jcloud.utils import get_current_team, unique + +if 
typing.TYPE_CHECKING: + from jcloud.jcloud.pagetype.jcloud_settings.jcloud_settings import JcloudSettings + from jcloud.jcloud.pagetype.server_plan.server_plan import ServerPlan + from jcloud.jcloud.pagetype.virtual_machine.virtual_machine import VirtualMachine + + +class Cluster(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + availability_zone: DF.Data | None + aws_access_key_id: DF.Data | None + aws_secret_access_key: DF.Password | None + beta: DF.Check + cidr_block: DF.Data | None + cloud_provider: DF.Literal["AWS EC2", "Generic", "OCI", "Hetzner"] + description: DF.Data | None + hybrid: DF.Check + image: DF.AttachImage | None + monitoring_password: DF.Password | None + network_acl_id: DF.Data | None + oci_private_key: DF.Password | None + oci_public_key: DF.Code | None + oci_tenancy: DF.Data | None + oci_user: DF.Data | None + proxy_security_group_id: DF.Data | None + public: DF.Check + region: DF.Link | None + route_table_id: DF.Data | None + security_group_id: DF.Data | None + ssh_key: DF.Link | None + status: DF.Literal["Active", "Copying Images", "Archived"] + subnet_cidr_block: DF.Data | None + subnet_id: DF.Data | None + team: DF.Link | None + title: DF.Data | None + vpc_id: DF.Data | None + # end: auto-generated types + + dashboard_fields: ClassVar[list[str]] = ["title", "image"] + + base_servers: ClassVar[dict[str, str]] = { + "Proxy Server": "n", + "Database Server": "m", + "Server": "f", # App server is last as it needs both proxy and db server + } + + private_servers: ClassVar[dict] = { + # TODO: Uncomment these when they are implemented + # "Monitor Server": "p", + # "Log Server": "e, + } + + wait_for_aws_creds_seconds = 20 + + @staticmethod + def get_list_query(query, filters=None, **list_args): + if filters and filters.get("group"): + rg = jingrow.get_pg("Release Group", filters.get("group")) + cluster_names = rg.get_clusters() + return jingrow.get_all( + "Cluster", + fields=["name", "title", "image", "beta"], + filters={"name": ("in", cluster_names)}, + ) + return None + + def validate(self): + self.validate_monitoring_password() + self.validate_cidr_block() + if self.cloud_provider == "AWS EC2": + self.validate_aws_credentials() + elif self.cloud_provider == "OCI": + self.set_oci_availability_zone() + elif self.cloud_provider == "Hetzner": + self.validate_hetzner_api_token() + + def validate_hetzner_api_token(self): + settings: "JcloudSettings" = jingrow.get_single("Jcloud Settings") + api_token = settings.get_password("hetzner_api_token") + client = Client(token=api_token) + try: + # Check if we can list servers (read access) + servers = client.servers.get_all() + + if servers is None: + jingrow.throw("API token does not have read access to the Hetzner Cloud.") + + except APIException as e: + # Handle specific API exceptions like unauthorized access + if e.code == "unauthorized": + jingrow.throw("API token is invalid or does not have the correct permissions.") + else: + jingrow.throw(f"An error occurred while validating the API token: {e}") + + def validate_aws_credentials(self): + settings: "JcloudSettings" = jingrow.get_single("Jcloud Settings") + if self.public and not self.aws_access_key_id: + self.aws_access_key_id = settings.aws_access_key_id + self.aws_secret_access_key = settings.get_password("aws_secret_access_key") + elif not self.aws_access_key_id or not self.aws_secret_access_key: + root_client = 
settings.boto3_iam_client + group = ( # make sure group exists + root_client.get_group(GroupName="fc-vpc-customer").get("Group", {}).get("GroupName") + ) + root_client.create_user( + UserName=jingrow.scrub(self.name), + ) + root_client.add_user_to_group(GroupName=group, UserName=jingrow.scrub(self.name)) + access_key_pair = root_client.create_access_key( + UserName=jingrow.scrub(self.name), + )["AccessKey"] + + self.aws_access_key_id = access_key_pair["AccessKeyId"] + self.aws_secret_access_key = access_key_pair["SecretAccessKey"] + from time import sleep + + sleep(self.wait_for_aws_creds_seconds) # wait for key to be valid + + def after_insert(self): + if self.cloud_provider == "AWS EC2": + self.provision_on_aws_ec2() + elif self.cloud_provider == "OCI": + self.provision_on_oci() + elif self.cloud_provider == "Hetzner": + self.provision_on_hetzner() + + def provision_on_hetzner(self): + try: + # Define the subnet + subnets = [ + NetworkSubnet( + type="cloud", # VPCs in Hetzner are defined as 'cloud' subnets + ip_range=self.subnet_cidr_block, + network_zone=self.availability_zone, + ) + ] + + # Get Hetzner API token from Jcloud Settings + settings: "JcloudSettings" = jingrow.get_single("Jcloud Settings") + api_token = settings.get_password("hetzner_api_token") + + client = Client(token=api_token) + + # Create the network (VPC) on Hetzner + network = client.networks.create( + name=f"Jingrow - {self.name}", + ip_range=self.cidr_block, # The IP range for the entire network (CIDR) + subnets=subnets, + routes=[], + ) + self.vpc_id = network.id + self.save() + + except APIException as e: + jingrow.throw(f"Failed to provision network on Hetzner: {e!s}") + + except Exception as e: + jingrow.throw(f"An unexpected error occurred during provisioning: {e!s}") + + def on_trash(self): + machines = jingrow.get_all( + "Virtual Machine", + {"cluster": self.name, "status": "Terminated"}, + pluck="name", + ) + for machine in machines: + jingrow.delete_pg("Virtual Machine", machine) + + @jingrow.whitelist() + def add_images(self): + if self.images_available == 1: + jingrow.throw("Images are already available", jingrow.ValidationError) + if not set(self.get_other_region_vmis(get_series=True)) - set( + self.get_same_region_vmis(get_series=True) + ): + jingrow.throw( + "Images for required series not available in other regions. 
Create Images from server docs.", + jingrow.ValidationError, + ) + jingrow.enqueue_pg(self.pagetype, self.name, "_add_images", queue="long", timeout=1200) + + def _add_images(self): + """Copies VMIs required for the cluster""" + self.db_set("status", "Copying Images") + jingrow.db.commit() + for vmi in self.copy_virtual_machine_images(): + vmi.wait_for_availability() + self.reload() + self.db_set("status", "Active") + + @property + def images_available(self) -> float: + return len(self.get_same_region_vmis()) / len(self.server_doctypes) + + def validate_cidr_block(self): + if not self.cidr_block: + blocks = ipaddress.ip_network("10.0.0.0/8").subnets(new_prefix=16) + existing_blocks = ["10.0.0.0/16"] + jingrow.get_all("Cluster", ["cidr_block"], pluck="cidr_block") # noqa: RUF005 + for block in blocks: + cidr_block = str(block) + if cidr_block not in existing_blocks: + self.cidr_block = cidr_block + self.subnet_cidr_block = cidr_block + break + if not self.cidr_block: + jingrow.throw("No CIDR block available", jingrow.ValidationError) + + def validate_monitoring_password(self): + if not self.monitoring_password: + self.monitoring_password = jingrow.generate_hash() + + def provision_on_aws_ec2(self): + client = boto3.client( + "ec2", + region_name=self.region, + aws_access_key_id=self.aws_access_key_id, + aws_secret_access_key=self.get_password("aws_secret_access_key"), + ) + + response = client.create_vpc( + AmazonProvidedIpv6CidrBlock=False, + InstanceTenancy="default", + TagSpecifications=[ + { + "ResourceType": "vpc", + "Tags": [{"Key": "Name", "Value": f"Jingrow - {self.name}"}], + }, + ], + CidrBlock=self.cidr_block, + ) + self.vpc_id = response["Vpc"]["VpcId"] + + client.modify_vpc_attribute(VpcId=self.vpc_id, EnableDnsHostnames={"Value": True}) + + response = client.create_subnet( + TagSpecifications=[ + { + "ResourceType": "subnet", + "Tags": [ + { + "Key": "Name", + "Value": f"Jingrow - {self.name} - Public Subnet", + } + ], + }, + ], + AvailabilityZone=self.availability_zone, + VpcId=self.vpc_id, + CidrBlock=self.subnet_cidr_block, + ) + self.subnet_id = response["Subnet"]["SubnetId"] + + response = client.create_internet_gateway( + TagSpecifications=[ + { + "ResourceType": "internet-gateway", + "Tags": [ + { + "Key": "Name", + "Value": f"Jingrow - {self.name} - Internet Gateway", + }, + ], + }, + ], + ) + + self.internet_gateway_id = response["InternetGateway"]["InternetGatewayId"] + + client.attach_internet_gateway(InternetGatewayId=self.internet_gateway_id, VpcId=self.vpc_id) + + response = client.describe_route_tables( + Filters=[{"Name": "vpc-id", "Values": [self.vpc_id]}], + ) + self.route_table_id = response["RouteTables"][0]["RouteTableId"] + + client.create_route( + DestinationCidrBlock="0.0.0.0/0", + GatewayId=self.internet_gateway_id, + RouteTableId=self.route_table_id, + ) + + client.create_tags( + Resources=[self.route_table_id], + Tags=[{"Key": "Name", "Value": f"Jingrow - {self.name} - Route Table"}], + ) + + response = client.describe_network_acls( + Filters=[{"Name": "vpc-id", "Values": [self.vpc_id]}], + ) + self.network_acl_id = response["NetworkAcls"][0]["NetworkAclId"] + client.create_tags( + Resources=[self.network_acl_id], + Tags=[{"Key": "Name", "Value": f"Jingrow - {self.name} - Network ACL"}], + ) + + response = client.create_security_group( + GroupName=f"Jingrow - {self.name} - Security Group", + Description="Allow Everything", + VpcId=self.vpc_id, + TagSpecifications=[ + { + "ResourceType": "security-group", + "Tags": [ + { + "Key": "Name", + "Value": 
f"Jingrow - {self.name} - Security Group", + }, + ], + }, + ], + ) + self.security_group_id = response["GroupId"] + + client.authorize_security_group_ingress( + GroupId=self.security_group_id, + IpPermissions=[ + { + "FromPort": 80, + "IpProtocol": "tcp", + "IpRanges": [{"CidrIp": "0.0.0.0/0", "Description": "HTTP from anywhere"}], + "ToPort": 80, + }, + { + "FromPort": 443, + "IpProtocol": "tcp", + "IpRanges": [{"CidrIp": "0.0.0.0/0", "Description": "HTTPS from anywhere"}], + "ToPort": 443, + }, + { + "FromPort": 22, + "IpProtocol": "tcp", + "IpRanges": [{"CidrIp": "0.0.0.0/0", "Description": "SSH from anywhere"}], + "ToPort": 22, + }, + { + "FromPort": 3306, + "IpProtocol": "tcp", + "IpRanges": [ + { + "CidrIp": self.subnet_cidr_block, + "Description": "MariaDB from private network", + } + ], + "ToPort": 3306, + }, + { + "FromPort": 22000, + "IpProtocol": "tcp", + "IpRanges": [ + { + "CidrIp": self.subnet_cidr_block, + "Description": "SSH from private network", + } + ], + "ToPort": 22999, + }, + { + "FromPort": -1, + "IpProtocol": "icmp", + "IpRanges": [{"CidrIp": "0.0.0.0/0", "Description": "ICMP from anywhere"}], + "ToPort": -1, + }, + ], + ) + self.create_proxy_security_group() + + try: # noqa: SIM105 + # We don't care if the key already exists in this region + response = client.import_key_pair( + KeyName=self.ssh_key, + PublicKeyMaterial=jingrow.db.get_value("SSH Key", self.ssh_key, "public_key"), + TagSpecifications=[ + { + "ResourceType": "key-pair", + "Tags": [{"Key": "Name", "Value": self.ssh_key}], + }, + ], + ) + except Exception: + pass + self.save() + + def create_proxy_security_group(self): + client = boto3.client( + "ec2", + region_name=self.region, + aws_access_key_id=self.aws_access_key_id, + aws_secret_access_key=self.get_password("aws_secret_access_key"), + ) + response = client.create_security_group( + GroupName=f"Jingrow - {self.name} - Proxy - Security Group", + Description="Allow Everything on Proxy", + VpcId=self.vpc_id, + TagSpecifications=[ + { + "ResourceType": "security-group", + "Tags": [ + { + "Key": "Name", + "Value": f"Jingrow - {self.name} - Proxy - Security Group", + }, + ], + }, + ], + ) + self.proxy_security_group_id = response["GroupId"] + + client.authorize_security_group_ingress( + GroupId=self.proxy_security_group_id, + IpPermissions=[ + { + "FromPort": 2222, + "IpProtocol": "tcp", + "IpRanges": [ + { + "CidrIp": "0.0.0.0/0", + "Description": "SSH proxy from anywhere", + } + ], + "ToPort": 2222, + }, + { + "FromPort": 3306, + "IpProtocol": "tcp", + "IpRanges": [{"CidrIp": "0.0.0.0/0", "Description": "MariaDB from anywhere"}], + "ToPort": 3306, + }, + ], + ) + + def get_oci_public_key_fingerprint(self): + match = re.match( + r"-*BEGIN PUBLIC KEY-*(.*?)-*END PUBLIC KEY-*", + "".join(self.oci_public_key.splitlines()), + ) + base64_key = match.group(1).encode("utf-8") + binary_key = base64.b64decode(base64_key) + digest = hashlib.md5(binary_key).hexdigest() + return ":".join(wrap(digest, 2)) + + def get_oci_config(self): + # Stupid Password field, replaces newines with spaces + private_key = ( + self.get_password("oci_private_key").replace(" ", "\n").replace("\nPRIVATE\n", " PRIVATE ") + ) + + config = { + "user": self.oci_user, + "fingerprint": self.get_oci_public_key_fingerprint(), + "tenancy": self.oci_tenancy, + "region": self.region, + "key_content": private_key, + } + validate_config(config) + return config + + def set_oci_availability_zone(self): + identity_client = IdentityClient(self.get_oci_config()) + availability_domain = 
identity_client.list_availability_domains(self.oci_tenancy).data[0].name + self.availability_zone = availability_domain + + def provision_on_oci(self): + vcn_client = VirtualNetworkClient(self.get_oci_config()) + vcn = vcn_client.create_vcn( + CreateVcnDetails( + compartment_id=self.oci_tenancy, + display_name=f"Jingrow - {self.name}", + cidr_block=self.subnet_cidr_block, + ) + ).data + self.vpc_id = vcn.id + self.route_table_id = vcn.default_route_table_id + self.network_acl_id = vcn.default_security_list_id + + time.sleep(1) + # https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml + # 1 ICMP + # 6 TCP + # 17 UDP + security_group = vcn_client.create_network_security_group( + CreateNetworkSecurityGroupDetails( + compartment_id=self.oci_tenancy, + display_name=f"Jingrow - {self.name} - Security Group", + vcn_id=self.vpc_id, + ) + ).data + self.security_group_id = security_group.id + + time.sleep(1) + vcn_client.add_network_security_group_security_rules( + self.security_group_id, + AddNetworkSecurityGroupSecurityRulesDetails( + security_rules=[ + AddSecurityRuleDetails( + description="HTTP from anywhere", + direction="INGRESS", + protocol="6", + source="0.0.0.0/0", + tcp_options=TcpOptions(destination_port_range=PortRange(min=80, max=80)), + ), + AddSecurityRuleDetails( + description="HTTPS from anywhere", + direction="INGRESS", + protocol="6", + source="0.0.0.0/0", + tcp_options=TcpOptions(destination_port_range=PortRange(min=443, max=443)), + ), + AddSecurityRuleDetails( + description="SSH from anywhere", + direction="INGRESS", + protocol="6", + source="0.0.0.0/0", + tcp_options=TcpOptions(destination_port_range=PortRange(min=22, max=22)), + ), + AddSecurityRuleDetails( + description="MariaDB from private network", + direction="INGRESS", + protocol="6", + source=self.subnet_cidr_block, + tcp_options=TcpOptions(destination_port_range=PortRange(min=3306, max=3306)), + ), + AddSecurityRuleDetails( + description="SSH from private network", + direction="INGRESS", + protocol="6", + source=self.subnet_cidr_block, + tcp_options=TcpOptions(destination_port_range=PortRange(min=22000, max=22999)), + ), + AddSecurityRuleDetails( + description="ICMP from anywhere", + direction="INGRESS", + protocol="1", + source="0.0.0.0/0", + # Ignoring IcmpOptions for now. 
https://docs.oracle.com/en-us/iaas/tools/python/2.117.0/api/core/models/oci.core.models.IcmpOptions.html#oci.core.models.IcmpOptions + ), + AddSecurityRuleDetails( + description="Everything to anywhere", + direction="EGRESS", + protocol="all", + destination="0.0.0.0/0", + ), + ], + ), + ) + + time.sleep(1) + proxy_security_group = vcn_client.create_network_security_group( + CreateNetworkSecurityGroupDetails( + compartment_id=self.oci_tenancy, + display_name=f"Jingrow - {self.name} - Proxy - Security Group", + vcn_id=self.vpc_id, + ) + ).data + self.proxy_security_group_id = proxy_security_group.id + + time.sleep(1) + vcn_client.add_network_security_group_security_rules( # noqa: B018 + self.proxy_security_group_id, + AddNetworkSecurityGroupSecurityRulesDetails( + security_rules=[ + AddSecurityRuleDetails( + description="SSH proxy from anywhere", + direction="INGRESS", + protocol="6", + source="0.0.0.0/0", + tcp_options=TcpOptions(destination_port_range=PortRange(min=2222, max=2222)), + ), + AddSecurityRuleDetails( + description="MariaDB from anywhere", + direction="INGRESS", + protocol="6", + source="0.0.0.0/0", + tcp_options=TcpOptions(destination_port_range=PortRange(min=3306, max=3306)), + ), + AddSecurityRuleDetails( + description="Everything to anywhere", + direction="EGRESS", + protocol="all", + destination="0.0.0.0/0", + ), + ], + ), + ).data + + time.sleep(1) + subnet = vcn_client.create_subnet( + CreateSubnetDetails( + compartment_id=self.oci_tenancy, + display_name=f"Jingrow - {self.name} - Public Subnet", + vcn_id=self.vpc_id, + cidr_block=self.subnet_cidr_block, + route_table_id=self.route_table_id, + security_list_ids=[self.network_acl_id], + ) + ).data + self.subnet_id = subnet.id + + time.sleep(1) + # Don't associate IGW with any route table Otherwise it fails with "Rules in the route table must use private IP" + internet_gateway = vcn_client.create_internet_gateway( + CreateInternetGatewayDetails( + compartment_id=self.oci_tenancy, + display_name=f"Jingrow - {self.name} - Internet Gateway", + is_enabled=True, + vcn_id=self.vpc_id, + ) + ).data + self.internet_gateway_id = internet_gateway.id + + time.sleep(1) + vcn_client.update_route_table( + self.route_table_id, + UpdateRouteTableDetails( + route_rules=[ + RouteRule( + destination="0.0.0.0/0", + network_entity_id=self.internet_gateway_id, + ) + ] + ), + ) + + self.save() + + def get_available_vmi(self, series, platform=None) -> str | None: + """Virtual Machine Image available in region for given series""" + return VirtualMachineImage.get_available_for_series(series, self.region, platform=platform) + + @property + def server_doctypes(self): + server_doctypes = {**self.base_servers} + if not self.public: + server_doctypes = {**server_doctypes, **self.private_servers} + return server_doctypes + + def get_same_region_vmis(self, get_series=False): + return jingrow.get_all( + "Virtual Machine Image", + filters={ + "region": self.region, + "series": ("in", list(self.server_doctypes.values())), + "status": "Available", + }, + pluck="name" if not get_series else "series", + ) + + def get_other_region_vmis(self, get_series=False): + vmis = [] + for series in list(self.server_doctypes.values()): + vmis.extend( + jingrow.get_all( + "Virtual Machine Image", + ["name", "series", "creation"], + filters={ + "region": ("!=", self.region), + "series": series, + "status": "Available", + }, + limit=1, + order_by="creation DESC", + pluck="name" if not get_series else "series", + ) + ) + + return vmis + + def copy_virtual_machine_images(self) 
-> Generator[VirtualMachineImage, None, None]: + """Creates VMIs required for the cluster""" + copies = [] + for vmi in self.get_other_region_vmis(): + copies.append( + jingrow.get_pg( + "Virtual Machine Image", + vmi, + ).copy_image(self.name) + ) + jingrow.db.commit() + yield from copies + + @jingrow.whitelist() + def create_servers(self): + """Creates servers for the cluster""" + if self.images_available < 1: + jingrow.throw( + "Images are not available. Add them or wait for copy to complete", + jingrow.ValidationError, + ) + if self.status != "Active": + jingrow.throw("Cluster is not active", jingrow.ValidationError) + + for pagetype, _ in self.base_servers.items(): + # TODO: remove Test title # + server, _ = self.create_server( + pagetype, + "Test", + ) + match pagetype: # for populating Server pg's fields; assume the trio is created together + case "Database Server": + self.database_server = server.name + case "Proxy Server": + self.proxy_server = server.name + if self.public: + return + for pagetype, _ in self.private_servers.items(): + self.create_server( + pagetype, + "Test", + create_subscription=False, + ) + + def create_vm( + self, machine_type: str, platform: str, disk_size: int, domain: str, series: str, team: str + ) -> "VirtualMachine": + return jingrow.get_pg( + { + "pagetype": "Virtual Machine", + "cluster": self.name, + "domain": domain, + "series": series, + "disk_size": disk_size, + "machine_type": machine_type, + "virtual_machine_image": self.get_available_vmi(series, platform=platform), + "team": team, + }, + ).insert() + + def get_or_create_basic_plan(self, server_type): + plan = jingrow.db.exists("Server Plan", f"Basic Cluster - {server_type}") + if plan: + return jingrow.get_pg("Server Plan", f"Basic Cluster - {server_type}") + return jingrow.get_pg( + { + "pagetype": "Server Plan", + "name": f"Basic Cluster - {server_type}", + "title": f"Basic Cluster - {server_type}", + "instance_type": "t2.medium", + "price_cny": 0, + "price_usd": 0, + "vcpu": 2, + "memory": 4096, + "disk": 50, + } + ).insert(ignore_permissions=True, ignore_if_duplicate=True) + + def create_server( + self, + pagetype: str, + title: str, + plan: "ServerPlan" = None, + domain: str | None = None, + team: str | None = None, + create_subscription=True, + ): + """Creates a server for the cluster""" + domain = domain or jingrow.db.get_single_value("Jcloud Settings", "domain") + server_series = {**self.base_servers, **self.private_servers} + team = team or get_current_team() + plan = plan or self.get_or_create_basic_plan(pagetype) + vm = self.create_vm( + plan.instance_type, plan.platform, plan.disk, domain, server_series[pagetype], team + ) + server = None + match pagetype: + case "Database Server": + server = vm.create_database_server() + server.ram = plan.memory + server.title = f"{title} - Database" + case "Server": + server = vm.create_server() + server.title = f"{title} - Application" + server.ram = plan.memory + server.database_server = self.database_server + server.proxy_server = self.proxy_server + server.new_worker_allocation = True + case "Proxy Server": + server = vm.create_proxy_server() + server.title = f"{title} - Proxy" + case "Monitor Server": + server = vm.create_monitor_server() + server.title = f"{title} - Monitor" + case "Log Server": + server = vm.create_log_server() + server.title = f"{title} - Log" + + if create_subscription: + server.plan = plan.name + server.save() + server.create_subscription(plan.name) + job = server.run_jcloud_job("Create Server") + + return server, job + + 
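+ # Usage sketch (illustrative; assumes an Active cluster whose images are already
+ # available — "Mumbai" and "Test" are placeholder names):
+ #
+ #   cluster = jingrow.get_pg("Cluster", "Mumbai")
+ #   plan = cluster.get_or_create_basic_plan("Server")
+ #   server, job = cluster.create_server("Server", "Test", plan=plan)
+ #
+ # Note that the "Server" branch of create_server() expects database_server and
+ # proxy_server to already be set, which create_servers() handles by creating the
+ # trio together.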
@classmethod + def get_all_for_new_bench(cls, extra_filters=None) -> list[dict[str, str]]: + if extra_filters is None: + extra_filters = {} + cluster_names = unique(jingrow.db.get_all("Server", filters={"status": "Active"}, pluck="cluster")) + filters = {"name": ("in", cluster_names), "public": True} + return jingrow.db.get_all( + "Cluster", + filters={**filters, **extra_filters}, + fields=["name", "title", "image", "beta"], + ) diff --git a/jcloud/jcloud/pagetype/cluster/patches/rename_aws_fields.py b/jcloud/jcloud/pagetype/cluster/patches/rename_aws_fields.py new file mode 100644 index 0000000..a566fff --- /dev/null +++ b/jcloud/jcloud/pagetype/cluster/patches/rename_aws_fields.py @@ -0,0 +1,15 @@ +# Copyright (c) 2023, JINGROW +# For license information, please see license.txt + +import jingrow +from jingrow.model.utils.rename_field import rename_field + + +def execute(): + jingrow.reload_pagetype("Cluster") + rename_field("Cluster", "aws_vpc_id", "vpc_id") + rename_field("Cluster", "aws_subnet_id", "subnet_id") + rename_field("Cluster", "aws_proxy_security_group_id", "proxy_security_group_id") + rename_field("Cluster", "aws_security_group_id", "security_group_id") + rename_field("Cluster", "aws_route_table_id", "route_table_id") + rename_field("Cluster", "aws_network_acl_id", "network_acl_id") diff --git a/jcloud/jcloud/pagetype/cluster/test_cluster.py b/jcloud/jcloud/pagetype/cluster/test_cluster.py new file mode 100644 index 0000000..8d81baa --- /dev/null +++ b/jcloud/jcloud/pagetype/cluster/test_cluster.py @@ -0,0 +1,260 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2021, JINGROW +# See license.txt + + +import unittest +from unittest.mock import MagicMock, patch + +import boto3 +import jingrow +from moto import mock_aws + +from jcloud.jcloud.pagetype.cluster.cluster import Cluster +from jcloud.jcloud.pagetype.proxy_server.proxy_server import ProxyServer +from jcloud.jcloud.pagetype.root_domain.test_root_domain import create_test_root_domain +from jcloud.jcloud.pagetype.ssh_key.test_ssh_key import create_test_ssh_key +from jcloud.jcloud.pagetype.virtual_machine.virtual_machine import VirtualMachine +from jcloud.jcloud.pagetype.virtual_machine_image.virtual_machine_image import ( + VirtualMachineImage, +) +from jcloud.utils.test import foreground_enqueue_pg + + +@patch("jcloud.jcloud.pagetype.cluster.cluster.boto3.client", new=MagicMock()) +def create_test_cluster( + name: str = "Mumbai", + region: str = "ap-south-1", + public: bool = False, + add_default_servers: bool = False, + hybrid: bool = False, +) -> "Cluster": + """Create test Cluster pg""" + + if jingrow.db.exists("Cluster", name): + return jingrow.get_pg("Cluster", name) + cluster = jingrow.get_pg( + { + "pagetype": "Cluster", + "name": name, + "region": region, + "availability_zone": "ap-south-1a", + "cloud_provider": "AWS EC2", + "ssh_key": create_test_ssh_key().name, + "subnet_cidr_block": "10.3.0.0/16", + "aws_access_key_id": "test", + "aws_secret_access_key": "test", + "public": public, + "hybrid": hybrid, + "add_default_servers": add_default_servers, + } + ).insert(ignore_if_duplicate=True) + cluster.reload() + if add_default_servers: + cluster.create_servers() + return cluster + + +class TestCluster(unittest.TestCase): + @mock_aws + def _setup_fake_vmis(self, series: list[str], cluster: Cluster = None): + from jcloud.jcloud.pagetype.virtual_machine_image.test_virtual_machine_image import ( + create_test_virtual_machine_image, + ) + + cluster = cluster or create_test_cluster(name="Mumbai", region="ap-south-1") + for s 
in series: + create_test_virtual_machine_image(cluster=cluster, series=s) + + @patch.object( + ProxyServer, "validate", new=MagicMock() + ) # avoid TLSCertificate validation + def _create_cluster( + self, + aws_access_key_id, + aws_secret_access_key, + public=False, + add_default_servers=False, + ) -> Cluster: + """Simulate creation of cluster without AWS credentials""" + cluster = jingrow.get_pg( + { + "pagetype": "Cluster", + "name": "Mumbai 2", + "region": "ap-south-1", + "availability_zone": "ap-south-1a", + "cloud_provider": "AWS EC2", + "ssh_key": create_test_ssh_key().name, + "subnet_cidr_block": "10.3.0.0/16", + "aws_access_key_id": aws_access_key_id, + "aws_secret_access_key": aws_secret_access_key, + "public": public, + } + ) + cluster.insert() + if add_default_servers: + cluster.create_servers() + return cluster + + def tearDown(self) -> None: + jingrow.db.rollback() + + +@patch.object(VirtualMachine, "get_latest_ubuntu_image", new=lambda x: "ami-123") +@patch.object(VirtualMachineImage, "wait_for_availability", new=MagicMock()) +@patch.object(VirtualMachineImage, "after_insert", new=MagicMock()) +@patch( + "jcloud.jcloud.pagetype.cluster.cluster.jingrow.enqueue_pg", new=foreground_enqueue_pg +) +@patch("jcloud.jcloud.pagetype.cluster.cluster.jingrow.db.commit", new=MagicMock()) +class TestPrivateCluster(TestCluster): + @mock_aws + def test_add_images_copies_VMIs_from_other_region(self): + self._setup_fake_vmis(["m", "f"]) # mumbai + vmi_count_before = jingrow.db.count("Virtual Machine Image") + cluster = create_test_cluster(name="Frankfurt", region="eu-central-1") + cluster.add_images() + vmi_count_after = jingrow.db.count("Virtual Machine Image") + self.assertEqual(vmi_count_after, vmi_count_before + 2) + + def test_add_images_throws_err_if_no_vmis_to_copy(self): + cluster = create_test_cluster(name="Frankfurt", region="eu-central-1") + self.assertRaises(Exception, cluster.add_images) + + def test_add_images_throws_err_if_some_vmis_are_unavailable(self): + self._setup_fake_vmis(["m", "f"]) # another cluster with n missing + + cluster = create_test_cluster(name="Frankfurt", region="eu-central-1", public=True) + self._setup_fake_vmis(["m", "f"], cluster=cluster) # n is missing + self.assertRaises(Exception, cluster.add_images) + + @mock_aws + def test_creation_of_cluster_in_same_region_reuses_VMIs(self): + self._setup_fake_vmis(["m", "f"]) # mumbai + vmi_count_before = jingrow.db.count("Virtual Machine Image") + create_test_cluster(name="Mumbai 2", region="ap-south-1") + vmi_count_after = jingrow.db.count("Virtual Machine Image") + self.assertNotEqual(vmi_count_before, 0) + self.assertEqual(vmi_count_after, vmi_count_before) + + @mock_aws + def test_create_private_cluster_without_aws_access_key_and_secret_creates_user_in_predefined_group_and_adds_servers( + self, + ): + self._setup_fake_vmis(["m", "f", "n", "p", "e"]) + + root_domain = create_test_root_domain("local.fc.jingrow.dev") + jingrow.db.set_single_value("Jcloud Settings", "domain", root_domain.name) + jingrow.db.set_single_value("Jcloud Settings", "aws_access_key_id", "test") + jingrow.db.set_single_value("Jcloud Settings", "aws_secret_access_key", "test") + + server_count_before = jingrow.db.count("Server") + database_server_count_before = jingrow.db.count("Database Server") + proxy_server_count_before = jingrow.db.count("Proxy Server") + + boto3.client("iam").create_group(GroupName="fc-vpc-customer") + + Cluster.wait_for_aws_creds_seconds = 0 + self._create_cluster( + aws_access_key_id=None, 
aws_secret_access_key=None, add_default_servers=True + ) + + server_count_after = jingrow.db.count("Server") + database_server_count_after = jingrow.db.count("Database Server") + proxy_server_count_after = jingrow.db.count("Proxy Server") + + self.assertEqual(server_count_before + 1, server_count_after) + self.assertEqual(database_server_count_before + 1, database_server_count_after) + self.assertEqual(proxy_server_count_before + 1, proxy_server_count_after) + + def test_create_cluster_without_aws_access_key_and_id_throws_err_if_the_group_doesnt_exist( + self, + ): + self.assertRaises( + Exception, + self._create_cluster, + aws_access_key_id=None, + aws_secret_access_key=None, + ) + + +@patch.object(VirtualMachineImage, "wait_for_availability", new=MagicMock()) +@patch("jcloud.jcloud.pagetype.cluster.cluster.jingrow.db.commit", new=MagicMock()) +@patch( + "jcloud.jcloud.pagetype.cluster.cluster.jingrow.enqueue_pg", new=foreground_enqueue_pg +) +@patch.object(VirtualMachineImage, "after_insert", new=MagicMock()) +class TestPublicCluster(TestCluster): + @mock_aws + @patch.object(ProxyServer, "validate", new=MagicMock()) + def test_create_servers_without_vmis_throws_err(self): + root_domain = create_test_root_domain("local.fc.jingrow.dev") + jingrow.db.set_single_value("Jcloud Settings", "domain", root_domain.name) + + server_count_before = jingrow.db.count("Server") + database_server_count_before = jingrow.db.count("Database Server") + proxy_server_count_before = jingrow.db.count("Proxy Server") + cluster = create_test_cluster(name="Mumbai", region="ap-south-1", public=True) + self.assertRaises(Exception, cluster.create_servers) + server_count_after = jingrow.db.count("Server") + database_server_count_after = jingrow.db.count("Database Server") + proxy_server_count_after = jingrow.db.count("Proxy Server") + self.assertEqual(server_count_after, server_count_before) + self.assertEqual(database_server_count_after, database_server_count_before) + self.assertEqual(proxy_server_count_after, proxy_server_count_before) + + @mock_aws + def test_add_images_in_public_cluster_only_adds_3_vms(self): + self._setup_fake_vmis(["m", "f", "n", "p", "e"]) + vm_count_before = jingrow.db.count("Virtual Machine Image") + cluster = create_test_cluster(name="Frankfurt", region="eu-central-1", public=True) + cluster.add_images() + vm_count_after = jingrow.db.count("Virtual Machine Image") + self.assertNotEqual(vm_count_before, 0) + self.assertEqual(vm_count_after, vm_count_before + 3) + + @mock_aws + @patch.object(ProxyServer, "validate", new=MagicMock()) + def test_creation_of_public_cluster_with_servers_creates_3(self): + root_domain = create_test_root_domain("local.fc.jingrow.dev") + jingrow.db.set_single_value("Jcloud Settings", "domain", root_domain.name) + self._setup_fake_vmis(["m", "f", "n"]) + + server_count_before = jingrow.db.count("Server") + database_server_count_before = jingrow.db.count("Database Server") + proxy_server_count_before = jingrow.db.count("Proxy Server") + + create_test_cluster( + name="Mumbai 2", region="ap-south-1", public=True, add_default_servers=True + ) + + server_count_after = jingrow.db.count("Server") + database_server_count_after = jingrow.db.count("Database Server") + proxy_server_count_after = jingrow.db.count("Proxy Server") + self.assertEqual(server_count_after, server_count_before + 1) + self.assertEqual(database_server_count_after, database_server_count_before + 1) + self.assertEqual(proxy_server_count_after, proxy_server_count_before + 1) + + @mock_aws + 
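+ # after_insert is patched out below, so no VMs or servers are provisioned; this test
+ # only checks that a cluster created without its own AWS credentials picks up the
+ # keys stored in Jcloud Settings and does not create an additional IAM user.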
@patch.object(Cluster, "after_insert", new=MagicMock()) # don't create vms/servers + def test_creation_of_public_cluster_uses_keys_from_jcloud_settings(self): + from jcloud.jcloud.pagetype.jcloud_settings.test_jcloud_settings import ( + create_test_jcloud_settings, + ) + + settings = create_test_jcloud_settings() + client = boto3.client("iam") + client.create_user(UserName="test") + key_pairs = client.create_access_key(UserName="test") + settings.aws_access_key_id = key_pairs["AccessKey"]["AccessKeyId"] + settings.aws_secret_access_key = key_pairs["AccessKey"]["SecretAccessKey"] + settings.save() + cluster = self._create_cluster( + aws_access_key_id=None, aws_secret_access_key=None, public=True + ) + self.assertEqual(cluster.aws_access_key_id, key_pairs["AccessKey"]["AccessKeyId"]) + self.assertEqual( + cluster.get_password("aws_secret_access_key"), + key_pairs["AccessKey"]["SecretAccessKey"], + ) + self.assertEqual(len(client.list_users()["Users"]), 1) diff --git a/jcloud/jcloud/pagetype/cluster_plan/__init__.py b/jcloud/jcloud/pagetype/cluster_plan/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/cluster_plan/cluster_plan.js b/jcloud/jcloud/pagetype/cluster_plan/cluster_plan.js new file mode 100644 index 0000000..72bb66b --- /dev/null +++ b/jcloud/jcloud/pagetype/cluster_plan/cluster_plan.js @@ -0,0 +1,8 @@ +// Copyright (c) 2024, JINGROW +// For license information, please see license.txt + +// jingrow.ui.form.on("Cluster Plan", { +// refresh(frm) { + +// }, +// }); diff --git a/jcloud/jcloud/pagetype/cluster_plan/cluster_plan.json b/jcloud/jcloud/pagetype/cluster_plan/cluster_plan.json new file mode 100644 index 0000000..d62e07e --- /dev/null +++ b/jcloud/jcloud/pagetype/cluster_plan/cluster_plan.json @@ -0,0 +1,89 @@ +{ + "actions": [], + "allow_rename": 1, + "autoname": "prompt", + "creation": "2024-03-14 18:20:49.264170", + "pagetype": "PageType", + "engine": "InnoDB", + "field_order": [ + "enabled", + "title", + "pricing_section", + "price_cny", + "column_break_wnze", + "price_usd", + "allowed_roles_section", + "roles" + ], + "fields": [ + { + "default": "0", + "fieldname": "enabled", + "fieldtype": "Check", + "label": "Enabled" + }, + { + "fieldname": "title", + "fieldtype": "Data", + "label": "Title" + }, + { + "fieldname": "pricing_section", + "fieldtype": "Section Break", + "label": "Pricing" + }, + { + "fieldname": "price_cny", + "fieldtype": "Currency", + "in_list_view": 1, + "label": "Price (CNY)", + "reqd": 1 + }, + { + "fieldname": "column_break_wnze", + "fieldtype": "Column Break" + }, + { + "fieldname": "price_usd", + "fieldtype": "Currency", + "in_list_view": 1, + "label": "Price (USD)", + "reqd": 1 + }, + { + "fieldname": "allowed_roles_section", + "fieldtype": "Section Break", + "label": "Allowed Roles" + }, + { + "fieldname": "roles", + "fieldtype": "Table", + "options": "Has Role" + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2024-03-14 18:42:28.291997", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Cluster Plan", + "naming_rule": "Set by user", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/cluster_plan/cluster_plan.py b/jcloud/jcloud/pagetype/cluster_plan/cluster_plan.py new 
file mode 100644 index 0000000..869f6d3 --- /dev/null +++ b/jcloud/jcloud/pagetype/cluster_plan/cluster_plan.py @@ -0,0 +1,25 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt + +# import jingrow +from jcloud.jcloud.pagetype.site_plan.plan import Plan + + +class ClusterPlan(Plan): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.core.pagetype.has_role.has_role import HasRole + from jingrow.types import DF + + enabled: DF.Check + price_cny: DF.Currency + price_usd: DF.Currency + roles: DF.Table[HasRole] + title: DF.Data | None + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/cluster_plan/test_cluster_plan.py b/jcloud/jcloud/pagetype/cluster_plan/test_cluster_plan.py new file mode 100644 index 0000000..7002cbd --- /dev/null +++ b/jcloud/jcloud/pagetype/cluster_plan/test_cluster_plan.py @@ -0,0 +1,9 @@ +# Copyright (c) 2024, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests.utils import JingrowTestCase + + +class TestClusterPlan(JingrowTestCase): + pass diff --git a/jcloud/jcloud/pagetype/code_server/__init__.py b/jcloud/jcloud/pagetype/code_server/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/code_server/code_server.js b/jcloud/jcloud/pagetype/code_server/code_server.js new file mode 100644 index 0000000..8a79ce8 --- /dev/null +++ b/jcloud/jcloud/pagetype/code_server/code_server.js @@ -0,0 +1,32 @@ +// Copyright (c) 2023, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Code Server', { + setup(frm) { + frm.set_query('bench', () => ({ + filters: { + is_code_server_enabled: true, + }, + })); + frm.set_query('group', () => ({ + filters: { + is_code_server_enabled: true, + }, + })); + }, + refresh(frm) { + [ + [__('Start'), 'start'], + [__('Stop'), 'stop'], + [__('Archive'), 'archive'], + ].forEach(([label, action, condition = true]) => { + if (condition) { + frm.add_custom_button( + label, + () => frm.call(action).then((r) => frm.refresh()), + __('Actions'), + ); + } + }); + }, +}); diff --git a/jcloud/jcloud/pagetype/code_server/code_server.json b/jcloud/jcloud/pagetype/code_server/code_server.json new file mode 100644 index 0000000..409d13a --- /dev/null +++ b/jcloud/jcloud/pagetype/code_server/code_server.json @@ -0,0 +1,126 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2023-07-20 20:41:32.167249", + "default_view": "List", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "subdomain", + "team", + "bench", + "group", + "status", + "proxy_server", + "server", + "cluster", + "domain", + "password" + ], + "fields": [ + { + "fieldname": "bench", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Bench", + "options": "Bench", + "reqd": 1 + }, + { + "default": "Pending", + "fieldname": "status", + "fieldtype": "Select", + "in_list_view": 1, + "in_preview": 1, + "in_standard_filter": 1, + "label": "Status", + "options": "Pending\nRunning\nStopped\nBroken\nArchived" + }, + { + "fieldname": "proxy_server", + "fieldtype": "Link", + "label": "Proxy Server", + "options": "Proxy Server", + "reqd": 1 + }, + { + "fetch_from": "bench.server", + "fieldname": "server", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Server", + "options": "Server" + }, + { + "fieldname": "domain", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Domain", + "options": "Root 
Domain", + "reqd": 1 + }, + { + "fieldname": "subdomain", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Subdomain", + "reqd": 1 + }, + { + "fieldname": "password", + "fieldtype": "Password", + "label": "Password" + }, + { + "fetch_from": "bench.team", + "fieldname": "team", + "fieldtype": "Link", + "label": "Team", + "options": "Team" + }, + { + "fetch_from": "bench.group", + "fieldname": "group", + "fieldtype": "Link", + "label": "Release Group", + "options": "Release Group" + }, + { + "fetch_from": "server.cluster", + "fieldname": "cluster", + "fieldtype": "Link", + "label": "Cluster", + "options": "Cluster" + } + ], + "index_web_pages_for_search": 1, + "links": [ + { + "link_pagetype": "Agent Job", + "link_fieldname": "code_server" + } + ], + "modified": "2023-07-28 15:51:07.083218", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Code Server", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/code_server/code_server.py b/jcloud/jcloud/pagetype/code_server/code_server.py new file mode 100644 index 0000000..560e55a --- /dev/null +++ b/jcloud/jcloud/pagetype/code_server/code_server.py @@ -0,0 +1,181 @@ +# Copyright (c) 2023, JINGROW +# For license information, please see license.txt + +import jingrow +from jingrow.model.document import Document +from jingrow.model.naming import append_number_if_name_exists + +from jcloud.agent import Agent +from jcloud.utils import log_error +from jcloud.utils.dns import _change_dns_record, create_dns_record + + +class CodeServer(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + bench: DF.Link + cluster: DF.Link | None + domain: DF.Link + group: DF.Link | None + password: DF.Password | None + proxy_server: DF.Link + server: DF.Link | None + status: DF.Literal["Pending", "Running", "Stopped", "Broken", "Archived"] + subdomain: DF.Data + team: DF.Link | None + # end: auto-generated types + + def autoname(self): + self.name = self.subdomain + "." 
+ self.domain
+
+ def validate(self):
+ if not jingrow.get_value("Bench", self.bench, "is_code_server_enabled"):
+ jingrow.throw(f"Code Server is not enabled for the selected Bench {self.bench}")
+
+ if self.has_value_changed("subdomain"):
+ if jingrow.db.exists("Code Server", self.name):
+ jingrow.throw(
+ f"Code Server {self.name} already exists. Please choose a different name."
+ )
+ if jingrow.db.exists(
+ "Code Server", {"bench": self.bench, "status": ("!=", "Archived")}
+ ):
+ jingrow.throw(
+ "A Code Server already exists for the selected bench. Please choose a different bench."
+ )
+ if not self.proxy_server and self.has_value_changed("server"):
+ self.proxy_server = jingrow.db.get_value("Server", self.server, "proxy_server")
+
+ def after_insert(self):
+ self.setup()
+
+ @jingrow.whitelist()
+ def setup(self):
+ try:
+ create_dns_record(pg=self, record_name=self.name)
+ agent = Agent(self.proxy_server, server_type="Proxy Server")
+ agent.new_upstream_file(server=self.server, code_server=self.name)
+
+ self.password = jingrow.generate_hash(length=40)
+ agent = Agent(self.server, server_type="Server")
+ agent.setup_code_server(self.bench, self.name, self.password)
+ self.save(ignore_permissions=True)
+ except Exception as e:
+ log_error(title="Setup Code Server Failed", data=e)
+
+ @jingrow.whitelist()
+ def stop(self):
+ try:
+ self.status = "Pending"
+ agent = Agent(self.server, server_type="Server")
+ agent.stop_code_server(self.bench, self.name)
+ except Exception as e:
+ log_error(title="Stop Code Server Failed", data=e)
+
+ @jingrow.whitelist()
+ def start(self):
+ try:
+ self.status = "Pending"
+ agent = Agent(self.server, server_type="Server")
+ self.password = jingrow.generate_hash(length=40)
+ agent.start_code_server(self.bench, self.name, self.password)
+ self.save(ignore_permissions=True)
+ except Exception as e:
+ log_error(title="Start Code Server Failed", data=e)
+
+ @jingrow.whitelist()
+ def archive(self):
+ try:
+ self.status = "Pending"
+ _change_dns_record(
+ method="DELETE",
+ domain=jingrow.get_pg("Root Domain", self.domain),
+ proxy_server=self.proxy_server,
+ record_name=self.name,
+ )
+ agent = Agent(self.proxy_server, server_type="Proxy Server")
+ agent.remove_upstream_file(server=self.server, code_server=self.name)
+
+ agent = Agent(self.server, server_type="Server")
+ agent.archive_code_server(self.bench, self.name)
+ self.save(ignore_permissions=True)
+ except Exception as e:
+ log_error(title="Archive Code Server Failed", data=e)
+
+
+def process_new_code_server_job_update(job):
+ jingrow.db.get_value("Code Server", job.code_server, "status", for_update=True)
+
+ other_job_type = {
+ "Add Code Server to Upstream": "Setup Code Server",
+ "Setup Code Server": "Add Code Server to Upstream",
+ }[job.job_type]
+
+ first = job.status
+ second = jingrow.get_value(
+ "Agent Job",
+ {"job_type": other_job_type, "code_server": job.code_server},
+ "status",
+ for_update=True,
+ )
+
+ if "Success" == first == second:
+ updated_status = "Running"
+ elif "Failure" in (first, second):
+ updated_status = "Broken"
+ else:
+ updated_status = "Pending"
+
+ jingrow.db.set_value("Code Server", job.code_server, "status", updated_status)
+
+
+def process_start_code_server_job_update(job):
+ if job.status == "Success":
+ jingrow.db.set_value("Code Server", job.code_server, "status", "Running")
+
+
+def process_stop_code_server_job_update(job):
+ if job.status == "Success":
+ jingrow.db.set_value("Code Server", job.code_server, "status", "Stopped")
+
+
+def
process_archive_code_server_job_update(job): + jingrow.db.get_value("Code Server", job.code_server, "status", for_update=True) + + other_job_type = { + "Remove Code Server from Upstream": "Archive Code Server", + "Archive Code Server": "Remove Code Server from Upstream", + }[job.job_type] + + first = job.status + second = jingrow.get_value( + "Agent Job", + {"job_type": other_job_type, "code_server": job.code_server}, + "status", + for_update=True, + ) + + if "Success" == first == second: + updated_status = "Archived" + elif "Failure" in (first, second): + updated_status = "Broken" + else: + updated_status = "Pending" + + jingrow.db.set_value("Code Server", job.code_server, "status", updated_status) + if updated_status == "Archived": + release_name(job.code_server) + + +def release_name(name): + if ".archived" in name: + return + new_name = f"{name}.archived" + new_name = append_number_if_name_exists("Code Server", new_name, separator=".") + jingrow.rename_pg("Code Server", name, new_name) diff --git a/jcloud/jcloud/pagetype/code_server/test_code_server.py b/jcloud/jcloud/pagetype/code_server/test_code_server.py new file mode 100644 index 0000000..c7fd2c4 --- /dev/null +++ b/jcloud/jcloud/pagetype/code_server/test_code_server.py @@ -0,0 +1,9 @@ +# Copyright (c) 2023, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests.utils import JingrowTestCase + + +class TestCodeServer(JingrowTestCase): + pass diff --git a/jcloud/jcloud/pagetype/common_site_config/__init__.py b/jcloud/jcloud/pagetype/common_site_config/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/common_site_config/common_site_config.json b/jcloud/jcloud/pagetype/common_site_config/common_site_config.json new file mode 100644 index 0000000..f07b896 --- /dev/null +++ b/jcloud/jcloud/pagetype/common_site_config/common_site_config.json @@ -0,0 +1,58 @@ +{ + "actions": [], + "creation": "2023-07-02 11:23:36.999759", + "default_view": "List", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "key", + "value", + "type", + "internal" + ], + "fields": [ + { + "fieldname": "key", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Key", + "reqd": 1 + }, + { + "fieldname": "value", + "fieldtype": "Code", + "in_list_view": 1, + "label": "Value", + "reqd": 1 + }, + { + "fieldname": "type", + "fieldtype": "Select", + "label": "Type", + "options": "\nString\nPassword\nNumber\nBoolean\nJSON" + }, + { + "allow_in_quick_entry": 1, + "default": "0", + "fieldname": "internal", + "fieldtype": "Check", + "in_list_view": 1, + "label": "Internal Usage", + "read_only": 1 + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2024-02-23 10:04:24.528062", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Common Site Config", + "owner": "Administrator", + "permissions": [], + "quick_entry": 1, + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/common_site_config/common_site_config.py b/jcloud/jcloud/pagetype/common_site_config/common_site_config.py new file mode 100644 index 0000000..d2b4d38 --- /dev/null +++ b/jcloud/jcloud/pagetype/common_site_config/common_site_config.py @@ -0,0 +1,32 @@ +# Copyright (c) 2023, JINGROW +# For license information, please see license.txt + +import jingrow + +from jcloud.jcloud.pagetype.site_config.site_config import Config + + +class CommonSiteConfig(Config): + # begin: auto-generated types + 
# This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + internal: DF.Check + key: DF.Data + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + type: DF.Literal["", "String", "Password", "Number", "Boolean", "JSON"] + value: DF.Code + # end: auto-generated types + + @staticmethod + def get_list_query(query, filters=None, **list_args): + Config = jingrow.qb.PageType("Common Site Config") + query = query.where(Config.internal == 0).orderby(Config.key, order=jingrow.qb.asc) + configs = query.run(as_dict=True) + return CommonSiteConfig.format_config_for_list(configs) diff --git a/jcloud/jcloud/pagetype/communication_email/__init__.py b/jcloud/jcloud/pagetype/communication_email/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/communication_email/communication_email.json b/jcloud/jcloud/pagetype/communication_email/communication_email.json new file mode 100644 index 0000000..7dcfcca --- /dev/null +++ b/jcloud/jcloud/pagetype/communication_email/communication_email.json @@ -0,0 +1,38 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2021-12-28 19:21:57.514933", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "type", + "value" + ], + "fields": [ + { + "fieldname": "type", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Type" + }, + { + "fieldname": "value", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Value" + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2022-01-03 13:46:29.756678", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Communication Email", + "owner": "Administrator", + "permissions": [], + "sort_field": "modified", + "sort_order": "DESC", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/communication_email/communication_email.py b/jcloud/jcloud/pagetype/communication_email/communication_email.py new file mode 100644 index 0000000..50bd7f4 --- /dev/null +++ b/jcloud/jcloud/pagetype/communication_email/communication_email.py @@ -0,0 +1,24 @@ +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class CommunicationEmail(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + type: DF.Data | None + value: DF.Data | None + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/cookie_preference_log/__init__.py b/jcloud/jcloud/pagetype/cookie_preference_log/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/cookie_preference_log/cookie_preference_log.js b/jcloud/jcloud/pagetype/cookie_preference_log/cookie_preference_log.js new file mode 100644 index 0000000..9d3bc80 --- /dev/null +++ b/jcloud/jcloud/pagetype/cookie_preference_log/cookie_preference_log.js @@ -0,0 +1,7 @@ +// Copyright (c) 2021, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Cookie Preference Log', { + // refresh: function(frm) { + // } +}); diff --git a/jcloud/jcloud/pagetype/cookie_preference_log/cookie_preference_log.json b/jcloud/jcloud/pagetype/cookie_preference_log/cookie_preference_log.json new file mode 100644 index 0000000..9a3967c --- /dev/null +++ b/jcloud/jcloud/pagetype/cookie_preference_log/cookie_preference_log.json @@ -0,0 +1,69 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2021-11-24 14:21:26.131280", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "ip_address", + "agreed_to_analytics_cookies", + "agreed_to_functionality_cookies", + "agreed_to_performance_cookies" + ], + "fields": [ + { + "fieldname": "ip_address", + "fieldtype": "Data", + "in_list_view": 1, + "label": "IP Address", + "read_only": 1 + }, + { + "default": "0", + "fieldname": "agreed_to_analytics_cookies", + "fieldtype": "Check", + "in_list_view": 1, + "label": "Agreed To Analytics Cookies", + "read_only": 1 + }, + { + "default": "0", + "fieldname": "agreed_to_functionality_cookies", + "fieldtype": "Check", + "label": "Agreed To Functionality Cookies", + "read_only": 1 + }, + { + "default": "0", + "fieldname": "agreed_to_performance_cookies", + "fieldtype": "Check", + "label": "Agreed To Performance Cookies", + "read_only": 1 + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2021-11-24 14:28:51.910191", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Cookie Preference Log", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "title_field": "ip_address" +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/cookie_preference_log/cookie_preference_log.py b/jcloud/jcloud/pagetype/cookie_preference_log/cookie_preference_log.py new file mode 100644 index 0000000..bafd80e --- /dev/null +++ b/jcloud/jcloud/pagetype/cookie_preference_log/cookie_preference_log.py @@ -0,0 +1,23 @@ +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class CookiePreferenceLog(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + agreed_to_analytics_cookies: DF.Check + agreed_to_functionality_cookies: DF.Check + agreed_to_performance_cookies: DF.Check + ip_address: DF.Data | None + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/cookie_preference_log/test_cookie_preference_log.py b/jcloud/jcloud/pagetype/cookie_preference_log/test_cookie_preference_log.py new file mode 100644 index 0000000..cdd5115 --- /dev/null +++ b/jcloud/jcloud/pagetype/cookie_preference_log/test_cookie_preference_log.py @@ -0,0 +1,9 @@ +# Copyright (c) 2021, JINGROW +# See license.txt + +# import jingrow +import unittest + + +class TestCookiePreferenceLog(unittest.TestCase): + pass diff --git a/jcloud/jcloud/pagetype/currency_exchange/__init__.py b/jcloud/jcloud/pagetype/currency_exchange/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/currency_exchange/currency_exchange.js b/jcloud/jcloud/pagetype/currency_exchange/currency_exchange.js new file mode 100644 index 0000000..1525b2d --- /dev/null +++ b/jcloud/jcloud/pagetype/currency_exchange/currency_exchange.js @@ -0,0 +1,8 @@ +// Copyright (c) 2025, JINGROW +// For license information, please see license.txt + +// jingrow.ui.form.on("Currency Exchange", { +// refresh(frm) { + +// }, +// }); diff --git a/jcloud/jcloud/pagetype/currency_exchange/currency_exchange.json b/jcloud/jcloud/pagetype/currency_exchange/currency_exchange.json new file mode 100644 index 0000000..22fe113 --- /dev/null +++ b/jcloud/jcloud/pagetype/currency_exchange/currency_exchange.json @@ -0,0 +1,76 @@ +{ + "actions": [], + "allow_rename": 1, + "autoname": "format:{date}-{from_currency}-{to_currency}", + "creation": "2025-01-18 10:54:45.521101", + "pagetype": "PageType", + "engine": "InnoDB", + "field_order": [ + "date", + "exchange_rate", + "column_break_iqor", + "from_currency", + "to_currency" + ], + "fields": [ + { + "default": "Today", + "fieldname": "date", + "fieldtype": "Date", + "label": "Date", + "reqd": 1 + }, + { + "fieldname": "exchange_rate", + "fieldtype": "Float", + "in_list_view": 1, + "label": "Exchange Rate", + "reqd": 1 + }, + { + "fieldname": "column_break_iqor", + "fieldtype": "Column Break" + }, + { + "fieldname": "from_currency", + "fieldtype": "Link", + "in_list_view": 1, + "label": "From Currency", + "options": "Currency", + "reqd": 1 + }, + { + "fieldname": "to_currency", + "fieldtype": "Link", + "in_list_view": 1, + "label": "To Currency", + "options": "Currency", + "reqd": 1 + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2025-01-18 10:54:45.521101", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Currency Exchange", + "naming_rule": "Expression", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/currency_exchange/currency_exchange.py b/jcloud/jcloud/pagetype/currency_exchange/currency_exchange.py new file mode 100644 index 0000000..47777c8 --- /dev/null +++ b/jcloud/jcloud/pagetype/currency_exchange/currency_exchange.py @@ -0,0 +1,23 @@ +# Copyright (c) 2025, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class 
CurrencyExchange(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + date: DF.Date + exchange_rate: DF.Float + from_currency: DF.Link + to_currency: DF.Link + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/currency_exchange/test_currency_exchange.py b/jcloud/jcloud/pagetype/currency_exchange/test_currency_exchange.py new file mode 100644 index 0000000..b0920e4 --- /dev/null +++ b/jcloud/jcloud/pagetype/currency_exchange/test_currency_exchange.py @@ -0,0 +1,29 @@ +# Copyright (c) 2025, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests import IntegrationTestCase, UnitTestCase + +# On IntegrationTestCase, the pagetype test records and all +# link-field test record depdendencies are recursively loaded +# Use these module variables to add/remove to/from that list +EXTRA_TEST_RECORD_DEPENDENCIES = [] # eg. ["User"] +IGNORE_TEST_RECORD_DEPENDENCIES = [] # eg. ["User"] + + +class UnitTestCurrencyExchange(UnitTestCase): + """ + Unit tests for CurrencyExchange. + Use this class for testing individual functions and methods. + """ + + pass + + +class IntegrationTestCurrencyExchange(IntegrationTestCase): + """ + Integration tests for CurrencyExchange. + Use this class for testing interactions between multiple components. + """ + + pass diff --git a/jcloud/jcloud/pagetype/dashboard_banner/__init__.py b/jcloud/jcloud/pagetype/dashboard_banner/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/dashboard_banner/dashboard_banner.js b/jcloud/jcloud/pagetype/dashboard_banner/dashboard_banner.js new file mode 100644 index 0000000..14b5b36 --- /dev/null +++ b/jcloud/jcloud/pagetype/dashboard_banner/dashboard_banner.js @@ -0,0 +1,8 @@ +// Copyright (c) 2024, JINGROW +// For license information, please see license.txt + +// jingrow.ui.form.on("Dashboard Banner", { +// refresh(frm) { + +// }, +// }); diff --git a/jcloud/jcloud/pagetype/dashboard_banner/dashboard_banner.json b/jcloud/jcloud/pagetype/dashboard_banner/dashboard_banner.json new file mode 100644 index 0000000..b4f127f --- /dev/null +++ b/jcloud/jcloud/pagetype/dashboard_banner/dashboard_banner.json @@ -0,0 +1,60 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2024-06-24 09:20:47.114289", + "pagetype": "PageType", + "engine": "InnoDB", + "field_order": [ + "enabled", + "title", + "message", + "type" + ], + "fields": [ + { + "default": "0", + "fieldname": "enabled", + "fieldtype": "Check", + "label": "Enabled" + }, + { + "fieldname": "message", + "fieldtype": "Data", + "label": "Message" + }, + { + "fieldname": "type", + "fieldtype": "Select", + "label": "Type", + "options": "Info\nSuccess\nError\nWarning" + }, + { + "fieldname": "title", + "fieldtype": "Data", + "label": "Title" + } + ], + "index_web_pages_for_search": 1, + "issingle": 1, + "links": [], + "modified": "2024-06-24 09:36:26.970846", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Dashboard Banner", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "print": 1, + "read": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "creation", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/dashboard_banner/dashboard_banner.py b/jcloud/jcloud/pagetype/dashboard_banner/dashboard_banner.py new file mode 
100644 index 0000000..d98d1ee --- /dev/null +++ b/jcloud/jcloud/pagetype/dashboard_banner/dashboard_banner.py @@ -0,0 +1,23 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class DashboardBanner(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + enabled: DF.Check + message: DF.Data | None + title: DF.Data | None + type: DF.Literal["Info", "Success", "Error", "Warning"] + # end: auto-generated types + + dashboard_fields = ["enabled", "message", "title", "type"] diff --git a/jcloud/jcloud/pagetype/dashboard_banner/test_dashboard_banner.py b/jcloud/jcloud/pagetype/dashboard_banner/test_dashboard_banner.py new file mode 100644 index 0000000..cf95a0f --- /dev/null +++ b/jcloud/jcloud/pagetype/dashboard_banner/test_dashboard_banner.py @@ -0,0 +1,9 @@ +# Copyright (c) 2024, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests.utils import JingrowTestCase + + +class TestDashboardBanner(JingrowTestCase): + pass diff --git a/jcloud/jcloud/pagetype/database_server/__init__.py b/jcloud/jcloud/pagetype/database_server/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/database_server/database_server.js b/jcloud/jcloud/pagetype/database_server/database_server.js new file mode 100644 index 0000000..3838661 --- /dev/null +++ b/jcloud/jcloud/pagetype/database_server/database_server.js @@ -0,0 +1,288 @@ +// Copyright (c) 2020, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Database Server', { + refresh: function (frm) { + frm.add_web_link( + `/dashboard/servers/${frm.pg.name}`, + __('Visit Dashboard'), + ); + + [ + [__('Ping Agent'), 'ping_agent', false, !frm.pg.is_server_setup], + [__('Ping Ansible'), 'ping_ansible', true, frm.pg.is_server_prepared], + [ + __('Ping Ansible Unprepared'), + 'ping_ansible_unprepared', + true, + !frm.pg.is_server_prepared, + ], + [__('Update Agent'), 'update_agent', true, frm.pg.is_server_setup], + [ + __('Update Agent Ansible'), + 'update_agent_ansible', + true, + frm.pg.is_server_setup, + ], + [ + __('Fetch Keys'), + 'fetch_keys', + true, + frm.pg.is_server_setup && + (!frm.pg.jingrow_public_key || !frm.pg.root_public_key), + ], + [ + __('Prepare Server'), + 'prepare_server', + true, + !frm.pg.is_server_prepared, + ], + [__('Setup Server'), 'setup_server', true, !frm.pg.is_server_setup], + [ + __('Setup Rename'), + 'rename_server', + true, + frm.pg.is_server_setup && + frm.pg.is_server_prepared && + !frm.pg.is_server_renamed, + ], + [ + __('Convert From Jingrow Server'), + 'convert_from_jingrow_server', + true, + frm.pg.is_server_setup, + ], + [ + __('Setup Replication'), + 'setup_replication', + true, + frm.pg.is_server_setup && + !frm.pg.is_primary && + !frm.pg.is_replication_setup, + ], + [ + __('Trigger Failover'), + 'trigger_failover', + true, + frm.pg.is_server_setup && + !frm.pg.is_primary && + frm.pg.is_replication_setup, + ], + [ + __('Reset Root Password'), + 'reset_root_password', + true, + frm.pg.is_server_setup, + ], + [ + __('Enable Performance Schema'), + 'enable_performance_schema', + true, + frm.pg.is_server_setup && !frm.pg.is_performance_schema_enabled, + ], + [ + __('Disable Performance Schema'), + 'disable_performance_schema', + true, + frm.pg.is_server_setup && frm.pg.is_performance_schema_enabled, + ], + 
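+ // Each entry in this list is [label, method, confirm, condition]; the forEach handler
+ // adds a button under Actions when the condition is undefined or truthy, and wraps the
+ // call in a jingrow.confirm() prompt when confirm is true.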
[__('Restart MariaDB'), 'restart_mariadb', true, frm.pg.is_server_setup], + [__('Stop MariaDB'), 'stop_mariadb', true, frm.pg.is_server_setup], + [ + __('Run Upgrade MariaDB Job'), + 'run_upgrade_mariadb_job', + true, + frm.pg.is_server_setup, + ], + [__('Upgrade MariaDB'), 'upgrade_mariadb', true, frm.pg.is_server_setup], + [__('Update MariaDB'), 'update_mariadb', true, frm.pg.is_server_setup], + [ + __('Upgrade MariaDB Patched'), + 'upgrade_mariadb_patched', + true, + frm.pg.is_server_setup, + ], + [ + __('Reconfigure MariaDB Exporter'), + 'reconfigure_mariadb_exporter', + true, + frm.pg.is_server_setup, + ], + [ + __('Setup Deadlock Logger'), + 'setup_deadlock_logger', + true, + frm.pg.is_server_setup, + ], + [ + __('Setup Percona Stalk'), + 'setup_pt_stalk', + true, + frm.pg.is_server_setup, + ], + [ + __('Fetch MariaDB Stalks'), + 'fetch_stalks', + true, + frm.pg.is_server_setup && frm.pg.is_stalk_setup, + ], + [ + __('Fetch Keys'), + 'fetch_keys', + false, + frm.pg.is_server_setup && + (!frm.pg.jingrow_public_key || !frm.pg.root_public_key), + ], + [__('Update TLS Certificate'), 'update_tls_certificate', true], + [ + __('Adjust Memory Config'), + 'adjust_memory_config', + true, + frm.pg.status === 'Active', + ], + [__('Create Image'), 'create_image', true, frm.pg.status == 'Active'], + [__('Archive'), 'archive', true, frm.pg.status !== 'Archived'], + [ + __('Reboot with serial console'), + 'reboot_with_serial_console', + true, + frm.pg.virtual_machine, + ], + [ + __('Setup Essentials'), + 'setup_essentials', + true, + frm.pg.is_self_hosted, + ], + [ + __('Mount Volumes'), + 'mount_volumes', + true, + frm.pg.virtual_machine && frm.pg.mounts, + ], + ].forEach(([label, method, confirm, condition]) => { + if (typeof condition === 'undefined' || condition) { + frm.add_custom_button( + label, + () => { + if (confirm) { + jingrow.confirm( + `Are you sure you want to ${label.toLowerCase()}?`, + () => + frm.call(method).then((r) => { + if (r.message) { + jingrow.msgprint(r.message); + } else { + frm.refresh(); + } + }), + ); + } else { + frm.call(method).then((r) => { + if (r.message) { + jingrow.msgprint(r.message); + } else { + frm.refresh(); + } + }); + } + }, + __('Actions'), + ); + } + }); + if (frm.pg.is_server_setup) { + frm.add_custom_button( + __('Increase Swap'), + () => { + const dialog = new jingrow.ui.Dialog({ + title: __('Increase Swap'), + fields: [ + { + fieldtype: 'Int', + label: __('Swap Size'), + description: __('Size in GB'), + fieldname: 'swap_size', + default: 4, + }, + ], + }); + + dialog.set_primary_action(__('Increase Swap'), (args) => { + frm.call('increase_swap', args).then(() => { + dialog.hide(); + frm.refresh(); + }); + }); + dialog.show(); + }, + __('Actions'), + ); + frm.add_custom_button( + __('Perform Physical Backup'), + () => { + const dialog = new jingrow.ui.Dialog({ + title: __('Perform Physical Backup'), + fields: [ + { + fieldtype: 'Data', + label: __('Backup Path'), + description: __('Absolute path to store the backup'), + default: '/tmp/replica', + fieldname: 'path', + reqd: 1, + }, + ], + }); + + dialog.set_primary_action(__('Backup'), (args) => { + frm.call('perform_physical_backup', args).then(() => { + dialog.hide(); + frm.refresh(); + }); + }); + dialog.show(); + }, + __('Actions'), + ); + frm.add_custom_button( + __('Update Memory Allocator'), + () => { + const dialog = new jingrow.ui.Dialog({ + title: __('Update Memory Allocator'), + fields: [ + { + fieldtype: 'Select', + label: __('Memory Allocator'), + options: ['System', 'jemalloc', 
'TCMalloc'] + .filter((option) => option !== frm.pg.memory_allocator) + .join('\n'), + fieldname: 'memory_allocator', + reqd: 1, + }, + ], + }); + + dialog.set_primary_action(__('Update'), (args) => { + frm.call({ + method: 'update_memory_allocator', + pg: frm.pg, + args: args, + freeze: true, + callback: () => { + dialog.hide(); + frm.refresh(); + }, + }); + }); + dialog.show(); + }, + __('Dangerous Actions'), + ); + } + }, + + hostname: function (frm) { + jcloud.set_hostname_abbreviation(frm); + }, +}); diff --git a/jcloud/jcloud/pagetype/database_server/database_server.json b/jcloud/jcloud/pagetype/database_server/database_server.json new file mode 100644 index 0000000..d3fd23a --- /dev/null +++ b/jcloud/jcloud/pagetype/database_server/database_server.json @@ -0,0 +1,588 @@ +{ + "actions": [], + "creation": "2020-10-02 17:33:00.453792", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "status", + "hostname", + "hostname_abbreviation", + "domain", + "self_hosted_server_domain", + "title", + "column_break_4", + "cluster", + "provider", + "virtual_machine", + "public", + "is_self_hosted", + "is_server_setup", + "is_server_prepared", + "is_server_renamed", + "enable_physical_backup", + "billing_section", + "team", + "column_break_11", + "plan", + "auto_add_storage_min", + "auto_add_storage_max", + "networking_section", + "ip", + "column_break_10", + "private_ip", + "private_mac_address", + "private_vlan_id", + "agent_section", + "agent_password", + "mariadb_section", + "self_hosted_mariadb_server", + "mariadb_root_password", + "server_id", + "is_primary", + "column_break_12", + "primary", + "is_replication_setup", + "ssh_section", + "jingrow_user_password", + "jingrow_public_key", + "column_break_18", + "ssh_user", + "ssh_port", + "root_public_key", + "section_break_cees", + "ram", + "column_break_apox", + "tags_section", + "tags", + "mounts_section", + "has_data_volume", + "mounts", + "mariadb_settings_tab", + "memory_limits_section", + "memory_high", + "memory_max", + "memory_swap_max", + "column_break_eiyu", + "memory_allocator", + "memory_allocator_version", + "section_break_ladc", + "is_performance_schema_enabled", + "mariadb_system_variables", + "mariadb_stalk_section", + "is_stalk_setup", + "stalk_gdb_collector", + "stalk_strace_collector", + "column_break_qrkk", + "stalk_function", + "stalk_variable", + "stalk_threshold", + "column_break_objb", + "stalk_interval", + "stalk_cycles", + "stalk_sleep" + ], + "fields": [ + { + "fieldname": "status", + "fieldtype": "Select", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Status", + "options": "Pending\nInstalling\nActive\nBroken\nArchived", + "read_only": 1, + "reqd": 1 + }, + { + "default": "0", + "fieldname": "is_server_setup", + "fieldtype": "Check", + "label": "Server Setup", + "read_only": 1 + }, + { + "fetch_from": "virtual_machine.public_ip_address", + "fieldname": "ip", + "fieldtype": "Data", + "in_list_view": 1, + "label": "IP", + "set_only_once": 1 + }, + { + "fetch_from": "virtual_machine.private_ip_address", + "fieldname": "private_ip", + "fieldtype": "Data", + "label": "Private IP", + "set_only_once": 1 + }, + { + "fieldname": "mariadb_section", + "fieldtype": "Section Break", + "label": "MariaDB" + }, + { + "fieldname": "mariadb_root_password", + "fieldtype": "Password", + "label": "MariaDB Root Password", + "read_only": 1 + }, + { + "fieldname": "agent_section", + "fieldtype": "Section Break", + "label": "Agent" + }, + { + "fieldname": "agent_password", + "fieldtype": 
"Password", + "label": "Agent Password", + "set_only_once": 1 + }, + { + "fieldname": "server_id", + "fieldtype": "Int", + "label": "Server ID", + "set_only_once": 1 + }, + { + "depends_on": "eval: !pg.is_primary", + "fieldname": "primary", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Primary", + "mandatory_depends_on": "eval: !pg.is_primary", + "options": "Database Server" + }, + { + "default": "1", + "fieldname": "is_primary", + "fieldtype": "Check", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Is Primary" + }, + { + "fieldname": "column_break_12", + "fieldtype": "Column Break" + }, + { + "fieldname": "ssh_section", + "fieldtype": "Section Break", + "label": "SSH" + }, + { + "fieldname": "root_public_key", + "fieldtype": "Code", + "label": "Root Public Key", + "read_only": 1 + }, + { + "fieldname": "jingrow_public_key", + "fieldtype": "Code", + "label": "Jingrow Public Key", + "read_only": 1 + }, + { + "default": "0", + "depends_on": "eval: !pg.is_primary", + "fieldname": "is_replication_setup", + "fieldtype": "Check", + "label": "Replication Setup", + "read_only": 1 + }, + { + "fieldname": "column_break_18", + "fieldtype": "Column Break" + }, + { + "fieldname": "cluster", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Cluster", + "options": "Cluster", + "set_only_once": 1 + }, + { + "fieldname": "hostname", + "fieldtype": "Data", + "label": "Hostname", + "reqd": 1, + "set_only_once": 1 + }, + { + "fieldname": "domain", + "fieldtype": "Link", + "label": "Domain", + "options": "Root Domain", + "read_only": 1 + }, + { + "fieldname": "column_break_4", + "fieldtype": "Column Break" + }, + { + "default": "Generic", + "fieldname": "provider", + "fieldtype": "Select", + "label": "Provider", + "options": "Generic\nScaleway\nAWS EC2\nOCI", + "set_only_once": 1 + }, + { + "fieldname": "jingrow_user_password", + "fieldtype": "Password", + "label": "Jingrow User Password", + "set_only_once": 1 + }, + { + "fieldname": "networking_section", + "fieldtype": "Section Break", + "label": "Networking" + }, + { + "fieldname": "column_break_10", + "fieldtype": "Column Break" + }, + { + "depends_on": "eval: pg.provider === \"Scaleway\"", + "fieldname": "private_mac_address", + "fieldtype": "Data", + "label": "Private Mac Address", + "mandatory_depends_on": "eval: pg.provider === \"Scaleway\"", + "set_only_once": 1 + }, + { + "depends_on": "eval: pg.provider === \"Scaleway\"", + "fieldname": "private_vlan_id", + "fieldtype": "Data", + "label": "Private VLAN ID", + "mandatory_depends_on": "eval: pg.provider === \"Scaleway\"", + "set_only_once": 1 + }, + { + "depends_on": "eval:pg.provider === \"AWS EC2\"", + "fieldname": "virtual_machine", + "fieldtype": "Link", + "label": "Virtual Machine", + "mandatory_depends_on": "eval:pg.provider === \"AWS EC2\"", + "options": "Virtual Machine" + }, + { + "fieldname": "team", + "fieldtype": "Link", + "label": "Team", + "options": "Team" + }, + { + "fieldname": "billing_section", + "fieldtype": "Section Break", + "label": "Billing" + }, + { + "fieldname": "column_break_11", + "fieldtype": "Column Break" + }, + { + "fieldname": "plan", + "fieldtype": "Link", + "label": "Plan", + "options": "Server Plan" + }, + { + "default": "0", + "fieldname": "is_server_prepared", + "fieldtype": "Check", + "label": "Is Server Prepared", + "read_only": 1 + }, + { + "default": "0", + "fieldname": "is_server_renamed", + "fieldtype": "Check", + "label": "Is Server Renamed", + "read_only": 1 + }, + { + 
"fieldname": "title", + "fieldtype": "Data", + "label": "Title" + }, + { + "fieldname": "ssh_user", + "fieldtype": "Data", + "label": "SSH User" + }, + { + "default": "0", + "fieldname": "is_self_hosted", + "fieldtype": "Check", + "label": "Is Self Hosted" + }, + { + "depends_on": "eval:pg.is_self_hosted", + "fieldname": "self_hosted_server_domain", + "fieldtype": "Data", + "label": "Self Hosted Server Domain" + }, + { + "depends_on": "eval:pg.is_self_hosted", + "fieldname": "self_hosted_mariadb_server", + "fieldtype": "Data", + "label": "Self Hosted MariaDB Server IP" + }, + { + "default": "22", + "fieldname": "ssh_port", + "fieldtype": "Int", + "label": "SSH Port" + }, + { + "fieldname": "mariadb_settings_tab", + "fieldtype": "Tab Break", + "label": "MariaDB Settings" + }, + { + "fieldname": "mariadb_system_variables", + "fieldtype": "Table", + "label": "MariaDB System Variables", + "options": "Database Server MariaDB Variable" + }, + { + "fieldname": "memory_limits_section", + "fieldtype": "Section Break", + "label": "Memory" + }, + { + "fieldname": "memory_high", + "fieldtype": "Float", + "label": "Memory High (GB)", + "non_negative": 1 + }, + { + "fieldname": "column_break_eiyu", + "fieldtype": "Column Break" + }, + { + "fieldname": "memory_max", + "fieldtype": "Float", + "label": "Memory Max (GB)", + "non_negative": 1 + }, + { + "fieldname": "section_break_ladc", + "fieldtype": "Section Break" + }, + { + "default": "0", + "fieldname": "is_performance_schema_enabled", + "fieldtype": "Check", + "label": "Is Performance Schema Enabled", + "read_only": 1 + }, + { + "fieldname": "tags_section", + "fieldtype": "Section Break", + "label": "Tags" + }, + { + "fieldname": "tags", + "fieldtype": "Table", + "label": "Tags", + "options": "Resource Tag" + }, + { + "fieldname": "section_break_cees", + "fieldtype": "Section Break" + }, + { + "fieldname": "ram", + "fieldtype": "Float", + "label": "RAM (MB)" + }, + { + "fieldname": "column_break_apox", + "fieldtype": "Column Break" + }, + { + "default": "0.1", + "fieldname": "memory_swap_max", + "fieldtype": "Float", + "label": "Memory Swap Max (GB)", + "non_negative": 1 + }, + { + "fieldname": "hostname_abbreviation", + "fieldtype": "Data", + "label": "Hostname Abbreviation" + }, + { + "default": "0", + "fieldname": "is_stalk_setup", + "fieldtype": "Check", + "label": "Is Stalk Setup", + "read_only": 1 + }, + { + "collapsible": 1, + "fieldname": "mariadb_stalk_section", + "fieldtype": "Section Break", + "label": "MariaDB Stalk" + }, + { + "fieldname": "column_break_qrkk", + "fieldtype": "Column Break" + }, + { + "default": "status", + "fieldname": "stalk_function", + "fieldtype": "Data", + "label": "Stalk Function" + }, + { + "default": "Threads_running", + "fieldname": "stalk_variable", + "fieldtype": "Data", + "label": "Stalk Variable" + }, + { + "default": "25", + "fieldname": "stalk_threshold", + "fieldtype": "Int", + "label": "Stalk Threshold" + }, + { + "fieldname": "column_break_objb", + "fieldtype": "Column Break" + }, + { + "default": "1", + "fieldname": "stalk_interval", + "fieldtype": "Float", + "label": "Stalk Interval" + }, + { + "default": "5", + "fieldname": "stalk_cycles", + "fieldtype": "Int", + "label": "Stalk Cycles" + }, + { + "default": "0", + "fieldname": "stalk_gdb_collector", + "fieldtype": "Check", + "label": "Stalk GDB Collector" + }, + { + "default": "0", + "fieldname": "stalk_strace_collector", + "fieldtype": "Check", + "label": "Stalk strace Collector" + }, + { + "default": "300", + "fieldname": "stalk_sleep", + 
"fieldtype": "Int", + "label": "Stalk Sleep" + }, + { + "default": "0", + "fieldname": "public", + "fieldtype": "Check", + "label": "Public" + }, + { + "default": "TCMalloc", + "fieldname": "memory_allocator", + "fieldtype": "Select", + "label": "Memory Allocator", + "options": "System\njemalloc\nTCMalloc", + "read_only": 1 + }, + { + "fieldname": "memory_allocator_version", + "fieldtype": "Data", + "label": "Memory Allocator Version", + "read_only": 1 + }, + { + "default": "50", + "description": "Minimum storage to add automatically each time", + "fieldname": "auto_add_storage_min", + "fieldtype": "Int", + "label": "Auto Add Storage Min", + "non_negative": 1, + "options": "50" + }, + { + "default": "250", + "description": "Maximum storage to add automatically each time", + "fieldname": "auto_add_storage_max", + "fieldtype": "Int", + "label": "Auto Add Storage Max", + "non_negative": 1 + }, + { + "fieldname": "mounts_section", + "fieldtype": "Section Break", + "label": "Mounts" + }, + { + "fieldname": "mounts", + "fieldtype": "Table", + "label": "Mounts", + "options": "Server Mount" + }, + { + "default": "0", + "fetch_from": "virtual_machine.has_data_volume", + "fieldname": "has_data_volume", + "fieldtype": "Check", + "label": "Has Data Volume", + "read_only": 1 + }, + { + "default": "0", + "fieldname": "enable_physical_backup", + "fieldtype": "Check", + "label": "Enable Physical Backup" + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2025-01-27 10:45:05.652361", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Database Server", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + }, + { + "create": 1, + "read": 1, + "role": "Jcloud Admin", + "write": 1 + }, + { + "create": 1, + "read": 1, + "role": "Jcloud Member", + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/database_server/database_server.py b/jcloud/jcloud/pagetype/database_server/database_server.py new file mode 100644 index 0000000..ffb301a --- /dev/null +++ b/jcloud/jcloud/pagetype/database_server/database_server.py @@ -0,0 +1,967 @@ +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt +from __future__ import annotations + +import json +from typing import Any + +import jingrow +from jingrow.core.pagetype.version.version import get_diff +from jingrow.core.utils import find + +from jcloud.overrides import get_permission_query_conditions_for_pagetype +from jcloud.jcloud.pagetype.database_server_mariadb_variable.database_server_mariadb_variable import ( + DatabaseServerMariaDBVariable, +) +from jcloud.jcloud.pagetype.server.server import BaseServer +from jcloud.runner import Ansible +from jcloud.utils import log_error + + +class DatabaseServer(BaseServer): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + from jcloud.jcloud.pagetype.database_server_mariadb_variable.database_server_mariadb_variable import ( + DatabaseServerMariaDBVariable, + ) + from jcloud.jcloud.pagetype.resource_tag.resource_tag import ResourceTag + from jcloud.jcloud.pagetype.server_mount.server_mount import ServerMount + + agent_password: DF.Password | None + auto_add_storage_max: DF.Int + auto_add_storage_min: DF.Int + cluster: DF.Link | None + domain: DF.Link | None + enable_physical_backup: DF.Check + jingrow_public_key: DF.Code | None + jingrow_user_password: DF.Password | None + has_data_volume: DF.Check + hostname: DF.Data + hostname_abbreviation: DF.Data | None + ip: DF.Data | None + is_performance_schema_enabled: DF.Check + is_primary: DF.Check + is_replication_setup: DF.Check + is_self_hosted: DF.Check + is_server_prepared: DF.Check + is_server_renamed: DF.Check + is_server_setup: DF.Check + is_stalk_setup: DF.Check + mariadb_root_password: DF.Password | None + mariadb_system_variables: DF.Table[DatabaseServerMariaDBVariable] + memory_allocator: DF.Literal["System", "jemalloc", "TCMalloc"] + memory_allocator_version: DF.Data | None + memory_high: DF.Float + memory_max: DF.Float + memory_swap_max: DF.Float + mounts: DF.Table[ServerMount] + plan: DF.Link | None + primary: DF.Link | None + private_ip: DF.Data | None + private_mac_address: DF.Data | None + private_vlan_id: DF.Data | None + provider: DF.Literal["Generic", "Scaleway", "AWS EC2", "OCI"] + public: DF.Check + ram: DF.Float + root_public_key: DF.Code | None + self_hosted_mariadb_server: DF.Data | None + self_hosted_server_domain: DF.Data | None + server_id: DF.Int + ssh_port: DF.Int + ssh_user: DF.Data | None + stalk_cycles: DF.Int + stalk_function: DF.Data | None + stalk_gdb_collector: DF.Check + stalk_interval: DF.Float + stalk_sleep: DF.Int + stalk_strace_collector: DF.Check + stalk_threshold: DF.Int + stalk_variable: DF.Data | None + status: DF.Literal["Pending", "Installing", "Active", "Broken", "Archived"] + tags: DF.Table[ResourceTag] + team: DF.Link | None + title: DF.Data | None + virtual_machine: DF.Link | None + # end: auto-generated types + + def validate(self): + super().validate() + self.validate_mariadb_root_password() + self.validate_server_id() + self.validate_mariadb_system_variables() + + def validate_mariadb_root_password(self): + if not self.mariadb_root_password: + self.mariadb_root_password = jingrow.generate_hash(length=32) + + def validate_mariadb_system_variables(self): + variable: DatabaseServerMariaDBVariable + for variable in self.mariadb_system_variables: + variable.validate() + + def on_update(self): + if self.flags.in_insert or self.is_new(): + return + self.update_mariadb_system_variables() + if ( + self.has_value_changed("memory_high") + or self.has_value_changed("memory_max") + or self.has_value_changed("memory_swap_max") + ): + self.update_memory_limits() + + if self.has_value_changed("team") and self.subscription and self.subscription.team != self.team: + self.subscription.disable() + + # enable subscription if exists + if subscription := jingrow.db.get_value( + "Subscription", + { + "document_type": self.pagetype, + "document_name": self.name, + "team": self.team, + "plan": self.plan, + }, + ): + jingrow.db.set_value("Subscription", subscription, "enabled", 1) + else: + try: + # create new subscription + jingrow.get_pg( + { + "pagetype": "Subscription", + "document_type": self.pagetype, + "document_name": self.name, + 
"team": self.team, + "plan": self.plan, + } + ).insert() + except Exception: + jingrow.log_error("Database Subscription Creation Error") + + def update_memory_limits(self): + jingrow.enqueue_pg(self.pagetype, self.name, "_update_memory_limits", enqueue_after_commit=True) + + def _update_memory_limits(self): + self.memory_swap_max = self.memory_swap_max or 0.1 + if not self.memory_high or not self.memory_max: + return + ansible = Ansible( + playbook="database_memory_limits.yml", + server=self, + user=self.ssh_user or "root", + port=self.ssh_port or 22, + variables={ + "server": self.name, + "memory_high": self.memory_high, + "memory_max": self.memory_max, + "memory_swap_max": self.memory_swap_max, + }, + ) + play = ansible.run() + if play.status == "Failure": + log_error("Database Server Update Memory Limits Error", server=self.name) + + def update_mariadb_system_variables(self): + jingrow.enqueue_pg( + self.pagetype, + self.name, + "_update_mariadb_system_variables", + queue="long", + enqueue_after_commit=True, + variables=self.get_variables_to_update(), + ) + + def get_changed_variables( + self, row_changed: list[tuple[str, int, str, list[tuple]]] + ) -> list[DatabaseServerMariaDBVariable]: + res = [] + for li in row_changed: + if li[0] == "mariadb_system_variables": + values = li[3] + for value in values: + if value[1] or value[2]: # Either value is truthy + res.append(jingrow.get_pg("Database Server MariaDB Variable", li[2])) + + return res + + def get_newly_added_variables(self, added) -> list[DatabaseServerMariaDBVariable]: + return [ + DatabaseServerMariaDBVariable(row[1].pagetype, row[1].name) + for row in added + if row[0] == "mariadb_system_variables" + ] + + def get_variables_to_update(self) -> list[DatabaseServerMariaDBVariable]: + old_pg = self.get_pg_before_save() + if not old_pg: + return self.mariadb_system_variables + diff = get_diff(old_pg, self) or {} + return self.get_changed_variables(diff.get("row_changed", {})) + self.get_newly_added_variables( + diff.get("added", []) + ) + + def _update_mariadb_system_variables(self, variables: list[DatabaseServerMariaDBVariable] | None = None): + if variables is None: + variables = [] + restart = False + for variable in variables: + variable.update_on_server() + if not variable.dynamic: + restart = True + if restart: + self._restart_mariadb() + + @jingrow.whitelist() + def restart_mariadb(self): + jingrow.enqueue_pg(self.pagetype, self.name, "_restart_mariadb") + + def _restart_mariadb(self): + ansible = Ansible( + playbook="restart_mysql.yml", + server=self, + user=self.ssh_user or "root", + port=self.ssh_port or 22, + variables={ + "server": self.name, + }, + ) + play = ansible.run() + if play.status == "Failure": + log_error("MariaDB Restart Error", server=self.name) + + @jingrow.whitelist() + def stop_mariadb(self): + jingrow.enqueue_pg(self.pagetype, self.name, "_stop_mariadb", timeout=1800) + + def _stop_mariadb(self): + ansible = Ansible( + playbook="stop_mariadb.yml", + server=self, + user=self.ssh_user or "root", + port=self.ssh_port or 22, + variables={ + "server": self.name, + }, + ) + play = ansible.run() + if play.status == "Failure": + log_error("MariaDB Stop Error", server=self.name) + + @jingrow.whitelist() + def run_upgrade_mariadb_job(self): + self.run_jcloud_job("Upgrade MariaDB") + + @jingrow.whitelist() + def upgrade_mariadb(self): + jingrow.enqueue_pg(self.pagetype, self.name, "_upgrade_mariadb", timeout=1800) + + def _upgrade_mariadb(self): + ansible = Ansible( + playbook="upgrade_mariadb.yml", + server=self, 
+ user=self.ssh_user or "root", + port=self.ssh_port or 22, + variables={ + "server": self.name, + }, + ) + play = ansible.run() + if play.status == "Failure": + log_error("MariaDB Upgrade Error", server=self.name) + + @jingrow.whitelist() + def update_mariadb(self): + jingrow.enqueue_pg(self.pagetype, self.name, "_update_mariadb", timeout=1800) + + def _update_mariadb(self): + ansible = Ansible( + playbook="update_mariadb.yml", + server=self, + user=self.ssh_user or "root", + port=self.ssh_port or 22, + variables={ + "server": self.name, + }, + ) + play = ansible.run() + if play.status == "Failure": + log_error("MariaDB Update Error", server=self.name) + + @jingrow.whitelist() + def upgrade_mariadb_patched(self): + jingrow.enqueue_pg(self.pagetype, self.name, "_upgrade_mariadb_patched", timeout=1800) + + def _upgrade_mariadb_patched(self): + ansible = Ansible( + playbook="upgrade_mariadb_patched.yml", + server=self, + user=self.ssh_user or "root", + port=self.ssh_port or 22, + variables={ + "server": self.name, + }, + ) + play = ansible.run() + if play.status == "Failure": + log_error("MariaDB Upgrade Error", server=self.name) + + def add_mariadb_variable( + self, + variable: str, + value_type: str, + value: Any, + skip: bool = False, + persist: bool = True, + ): + """Add or update MariaDB variable on the server""" + existing = find(self.mariadb_system_variables, lambda x: x.mariadb_variable == variable) + if existing: + existing.set(value_type, value) + existing.set("skip", skip) + existing.set("persist", persist) + else: + self.append( + "mariadb_system_variables", + { + "mariadb_variable": variable, + value_type: value, + "skip": skip, + "persist": persist, + }, + ) + self.save() + + def validate_server_id(self): + if self.is_new() and not self.server_id: + server_ids = jingrow.get_all("Database Server", fields=["server_id"], pluck="server_id") + if server_ids: + self.server_id = max(server_ids or []) + 1 + else: + self.server_id = 1 + + def _setup_server(self): + config = self._get_config() + try: + ansible = Ansible( + playbook="self_hosted_db.yml" if getattr(self, "is_self_hosted", False) else "database.yml", + server=self, + user=self.ssh_user or "root", + port=self.ssh_port or 22, + variables={ + "server_type": self.pagetype, + "server": self.name, + "workers": "2", + "agent_password": config.agent_password, + "agent_repository_url": config.agent_repository_url, + "monitoring_password": config.monitoring_password, + "log_server": config.log_server, + "kibana_password": config.kibana_password, + "private_ip": self.private_ip, + "server_id": self.server_id, + "allocator": self.memory_allocator.lower(), + "mariadb_root_password": config.mariadb_root_password, + "certificate_private_key": config.certificate.private_key, + "certificate_full_chain": config.certificate.full_chain, + "certificate_intermediate_chain": config.certificate.intermediate_chain, + "mariadb_depends_on_mounts": self.mariadb_depends_on_mounts, + **self.get_mount_variables(), + }, + ) + play = ansible.run() + self.reload() + self._set_mount_status(play) + if play.status == "Success": + self.status = "Active" + self.is_server_setup = True + + self.process_hybrid_server_setup() + else: + self.status = "Broken" + except Exception: + self.status = "Broken" + log_error("Database Server Setup Exception", server=self.as_dict()) + self.save() + + def _get_config(self): + certificate_name = jingrow.db.get_value( + "TLS Certificate", {"wildcard": True, "domain": self.domain}, "name" + ) + certificate = jingrow.get_pg("TLS 
Certificate", certificate_name) + + log_server = jingrow.db.get_single_value("Jcloud Settings", "log_server") + if log_server: + kibana_password = jingrow.get_pg("Log Server", log_server).get_password("kibana_password") + else: + kibana_password = None + + return jingrow._dict( + dict( + agent_password=self.get_password("agent_password"), + agent_repository_url=self.get_agent_repository_url(), + mariadb_root_password=self.get_password("mariadb_root_password"), + certificate=certificate, + monitoring_password=jingrow.get_pg("Cluster", self.cluster).get_password( + "monitoring_password" + ), + log_server=log_server, + kibana_password=kibana_password, + ) + ) + + @jingrow.whitelist() + def setup_essentials(self): + """Setup missing essentials after server setup""" + config = self._get_config() + + try: + ansible = Ansible( + playbook="setup_essentials.yml", + server=self, + user=self.ssh_user or "root", + port=self.ssh_port or 22, + variables={ + "server": self.name, + "workers": "2", + "agent_password": config.agent_password, + "agent_repository_url": config.agent_repository_url, + "monitoring_password": config.monitoring_password, + "log_server": config.log_server, + "kibana_password": config.kibana_password, + "private_ip": self.private_ip, + "server_id": self.server_id, + "certificate_private_key": config.certificate.private_key, + "certificate_full_chain": config.certificate.full_chain, + "certificate_intermediate_chain": config.certificate.intermediate_chain, + }, + ) + play = ansible.run() + self.reload() + if play.status == "Success": + self.status = "Active" + except Exception: + self.status = "Broken" + log_error("Setup failed for missing essentials", server=self.as_dict()) + self.save() + + def process_hybrid_server_setup(self): + try: + hybrid_server = jingrow.db.get_value("Self Hosted Server", {"database_server": self.name}, "name") + + if hybrid_server: + hybrid_server = jingrow.get_pg("Self Hosted Server", hybrid_server) + + if not hybrid_server.different_database_server: + hybrid_server._setup_app_server() + except Exception: + log_error("Hybrid Server Setup exception", server=self.as_dict()) + + def _setup_primary(self, secondary): + mariadb_root_password = self.get_password("mariadb_root_password") + secondary_root_public_key = jingrow.db.get_value("Database Server", secondary, "root_public_key") + try: + ansible = Ansible( + playbook="primary.yml", + server=self, + variables={ + "backup_path": "/tmp/replica", + "mariadb_root_password": mariadb_root_password, + "secondary_root_public_key": secondary_root_public_key, + }, + ) + play = ansible.run() + self.reload() + if play.status == "Success": + self.status = "Active" + else: + self.status = "Broken" + except Exception: + self.status = "Broken" + log_error("Primary Server Setup Exception", server=self.as_dict()) + self.save() + + def _setup_secondary(self): + primary = jingrow.get_pg("Database Server", self.primary) + mariadb_root_password = primary.get_password("mariadb_root_password") + try: + ansible = Ansible( + playbook="secondary.yml", + server=self, + variables={ + "mariadb_root_password": mariadb_root_password, + "primary_private_ip": primary.private_ip, + "private_ip": self.private_ip, + }, + ) + play = ansible.run() + self.reload() + if play.status == "Success": + self.status = "Active" + self.is_replication_setup = True + self.mariadb_root_password = mariadb_root_password + else: + self.status = "Broken" + except Exception: + self.status = "Broken" + log_error("Secondary Server Setup Exception", 
server=self.as_dict()) + self.save() + + def _setup_replication(self): + primary = jingrow.get_pg("Database Server", self.primary) + primary._setup_primary(self.name) + if primary.status == "Active": + self._setup_secondary() + + @jingrow.whitelist() + def setup_replication(self): + if self.is_primary: + return + self.status = "Installing" + self.save() + jingrow.enqueue_pg(self.pagetype, self.name, "_setup_replication", queue="long", timeout=18000) + + @jingrow.whitelist() + def perform_physical_backup(self, path): + if not path: + jingrow.throw("Provide a path to store the physical backup") + jingrow.enqueue_pg( + self.pagetype, + self.name, + "_perform_physical_backup", + queue="long", + timeout=18000, + path=path, + ) + + def _perform_physical_backup(self, path): + mariadb_root_password = self.get_password("mariadb_root_password") + try: + ansible = Ansible( + playbook="mariadb_physical_backup.yml", + server=self, + variables={ + "mariadb_root_password": mariadb_root_password, + "backup_path": path, + }, + ) + ansible.run() + except Exception: + log_error("MariaDB Physical Backup Exception", server=self.as_dict()) + + def _trigger_failover(self): + try: + ansible = Ansible( + playbook="failover.yml", + server=self, + variables={"mariadb_root_password": self.get_password("mariadb_root_password")}, + ) + play = ansible.run() + self.reload() + if play.status == "Success": + self.status = "Active" + self.is_replication_setup = False + self.is_primary = True + old_primary = self.primary + self.primary = None + servers = jingrow.get_all("Server", {"database_server": old_primary}) + for server in servers: + server = jingrow.get_pg("Server", server.name) + server.database_server = self.name + server.save() + else: + self.status = "Broken" + except Exception: + self.status = "Broken" + log_error("Database Server Failover Exception", server=self.as_dict()) + self.save() + + @jingrow.whitelist() + def trigger_failover(self): + if self.is_primary: + return + self.status = "Installing" + self.save() + jingrow.enqueue_pg(self.pagetype, self.name, "_trigger_failover", queue="long", timeout=1200) + + def _convert_from_jingrow_server(self): + mariadb_root_password = self.get_password("mariadb_root_password") + try: + ansible = Ansible( + playbook="convert.yml", + server=self, + user=self.ssh_user, + port=self.ssh_port, + variables={ + "private_ip": self.private_ip, + "mariadb_root_password": mariadb_root_password, + }, + ) + play = ansible.run() + self.reload() + if play.status == "Success": + self.status = "Active" + self.is_server_setup = True + server = jingrow.get_pg("Server", self.name) + server.database_server = self.name + server.save() + else: + self.status = "Broken" + except Exception: + self.status = "Broken" + log_error("Database Server Conversion Exception", server=self.as_dict()) + self.save() + + @jingrow.whitelist() + def convert_from_jingrow_server(self): + self.status = "Installing" + self.save() + jingrow.enqueue_pg(self.pagetype, self.name, "_convert_from_jingrow_server", queue="long", timeout=1200) + + def _install_exporters(self): + mariadb_root_password = self.get_password("mariadb_root_password") + monitoring_password = jingrow.get_pg("Cluster", self.cluster).get_password("monitoring_password") + try: + ansible = Ansible( + playbook="database_exporters.yml", + server=self, + variables={ + "private_ip": self.private_ip, + "mariadb_root_password": mariadb_root_password, + "monitoring_password": monitoring_password, + }, + ) + ansible.run() + except Exception: + 
log_error("Exporters Install Exception", server=self.as_dict()) + + @jingrow.whitelist() + def reset_root_password(self): + if self.is_primary: + self.reset_root_password_primary() + else: + self.reset_root_password_secondary() + + def reset_root_password_primary(self): + old_password = self.get_password("mariadb_root_password") + self.mariadb_root_password = jingrow.generate_hash(length=32) + try: + ansible = Ansible( + playbook="mariadb_change_root_password.yml", + server=self, + variables={ + "mariadb_old_root_password": old_password, + "mariadb_root_password": self.mariadb_root_password, + "private_ip": self.private_ip, + }, + ) + ansible.run() + self.save() + except Exception: + log_error("Database Server Password Reset Exception", server=self.as_dict()) + raise + + @jingrow.whitelist() + def enable_performance_schema(self): + for key, value in PERFORMANCE_SCHEMA_VARIABLES.items(): + if isinstance(value, int): + type_key = "value_int" + elif isinstance(value, str): + type_key = "value_str" + + existing_variable = find(self.mariadb_system_variables, lambda x: x.mariadb_variable == key) + + if existing_variable: + existing_variable.set(type_key, value) + else: + self.append( + "mariadb_system_variables", + {"mariadb_variable": key, type_key: value, "persist": True}, + ) + + self.is_performance_schema_enabled = True + self.save() + + @jingrow.whitelist() + def disable_performance_schema(self): + existing_variable = find( + self.mariadb_system_variables, lambda x: x.mariadb_variable == "performance_schema" + ) + if existing_variable: + existing_variable.value_str = "OFF" + else: + self.append( + "mariadb_system_variables", + {"mariadb_variable": "performance_schema", "value_str": "OFF", "persist": True}, + ) + + self.is_performance_schema_enabled = False + self.save() + + def reset_root_password_secondary(self): + primary = jingrow.get_pg("Database Server", self.primary) + self.mariadb_root_password = primary.get_password("mariadb_root_password") + try: + ansible = Ansible( + playbook="mariadb_change_root_password_secondary.yml", + server=self, + variables={ + "mariadb_root_password": self.mariadb_root_password, + "private_ip": self.private_ip, + }, + ) + ansible.run() + self.save() + except Exception: + log_error("Database Server Password Reset Exception", server=self.as_dict()) + raise + + @jingrow.whitelist() + def setup_deadlock_logger(self): + jingrow.enqueue_pg(self.pagetype, self.name, "_setup_deadlock_logger", queue="long", timeout=1200) + + def _setup_deadlock_logger(self): + try: + ansible = Ansible( + playbook="deadlock_logger.yml", + server=self, + variables={ + "server": self.name, + "mariadb_root_password": self.get_password("mariadb_root_password"), + }, + ) + ansible.run() + except Exception: + log_error("Deadlock Logger Setup Exception", server=self.as_dict()) + + @jingrow.whitelist() + def setup_pt_stalk(self): + jingrow.enqueue_pg(self.pagetype, self.name, "_setup_pt_stalk", queue="long", timeout=1200) + + def _setup_pt_stalk(self): + extra_port_variable = find( + self.mariadb_system_variables, lambda x: x.mariadb_variable == "extra_port" + ) + if extra_port_variable: + mariadb_port = extra_port_variable.value_str + else: + mariadb_port = 3306 + try: + ansible = Ansible( + playbook="pt_stalk.yml", + server=self, + variables={ + "private_ip": self.private_ip, + "mariadb_port": mariadb_port, + "stalk_function": self.stalk_function, + "stalk_variable": self.stalk_variable, + "stalk_threshold": self.stalk_threshold, + "stalk_sleep": self.stalk_sleep, + "stalk_cycles": 
self.stalk_cycles, + "stalk_interval": self.stalk_interval, + "stalk_gdb_collector": bool(self.stalk_gdb_collector), + "stalk_strace_collector": bool(self.stalk_strace_collector), + }, + ) + play = ansible.run() + self.reload() + if play.status == "Success": + self.is_stalk_setup = True + self.save() + except Exception: + log_error("Percona Stalk Setup Exception", server=self.as_dict()) + + @jingrow.whitelist() + def setup_mariadb_debug_symbols(self): + jingrow.enqueue_pg( + self.pagetype, self.name, "_setup_mariadb_debug_symbols", queue="long", timeout=1200 + ) + + def _setup_mariadb_debug_symbols(self): + try: + ansible = Ansible( + playbook="mariadb_debug_symbols.yml", + server=self, + ) + ansible.run() + except Exception: + log_error("MariaDB Debug Symbols Setup Exception", server=self.as_dict()) + + @jingrow.whitelist() + def fetch_stalks(self): + jingrow.enqueue( + "jcloud.jcloud.pagetype.mariadb_stalk.mariadb_stalk.fetch_server_stalks", + server=self.name, + job_id=f"fetch_mariadb_stalk:{self.name}", + deduplicate=True, + queue="long", + ) + + def get_stalks(self): + if self.agent.should_skip_requests(): + return [] + result = self.agent.get("database/stalks", raises=False) + if (not result) or ("error" in result): + return [] + return result + + def get_stalk(self, name): + if self.agent.should_skip_requests(): + return {} + return self.agent.get(f"database/stalks/{name}") + + def _rename_server(self): + agent_password = self.get_password("agent_password") + agent_repository_url = self.get_agent_repository_url() + mariadb_root_password = self.get_password("mariadb_root_password") + certificate_name = jingrow.db.get_value( + "TLS Certificate", {"wildcard": True, "domain": self.domain}, "name" + ) + certificate = jingrow.get_pg("TLS Certificate", certificate_name) + monitoring_password = jingrow.get_pg("Cluster", self.cluster).get_password("monitoring_password") + log_server = jingrow.db.get_single_value("Jcloud Settings", "log_server") + if log_server: + kibana_password = jingrow.get_pg("Log Server", log_server).get_password("kibana_password") + else: + kibana_password = None + + try: + ansible = Ansible( + playbook="database_rename.yml", + server=self, + variables={ + "server_type": self.pagetype, + "server": self.name, + "workers": "2", + "agent_password": agent_password, + "agent_repository_url": agent_repository_url, + "monitoring_password": monitoring_password, + "log_server": log_server, + "kibana_password": kibana_password, + "private_ip": self.private_ip, + "server_id": self.server_id, + "mariadb_root_password": mariadb_root_password, + "certificate_private_key": certificate.private_key, + "certificate_full_chain": certificate.full_chain, + "certificate_intermediate_chain": certificate.intermediate_chain, + }, + ) + play = ansible.run() + self.reload() + if play.status == "Success": + self.status = "Active" + self.is_server_renamed = True + else: + self.status = "Broken" + except Exception: + self.status = "Broken" + log_error("Database Server Rename Exception", server=self.as_dict()) + self.save() + + @property + def ram_for_mariadb(self): + return self.real_ram - 700 # OS and other services + + @jingrow.whitelist() + def adjust_memory_config(self): + if not self.ram: + return + + self.memory_high = round(max(self.ram_for_mariadb / 1024 - 1, 1), 3) + self.memory_max = round(max(self.ram_for_mariadb / 1024, 2), 3) + self.save() + + self.add_mariadb_variable( + "innodb_buffer_pool_size", + "value_int", + int(self.ram_for_mariadb * 0.65), # will be rounded up based on 
chunk_size + ) + + @jingrow.whitelist() + def reconfigure_mariadb_exporter(self): + jingrow.enqueue_pg( + self.pagetype, self.name, "_reconfigure_mariadb_exporter", queue="long", timeout=1200 + ) + + def _reconfigure_mariadb_exporter(self): + mariadb_root_password = self.get_password("mariadb_root_password") + try: + ansible = Ansible( + playbook="reconfigure_mysqld_exporter.yml", + server=self, + variables={ + "private_ip": self.private_ip, + "mariadb_root_password": mariadb_root_password, + }, + ) + ansible.run() + except Exception: + log_error("Database Server MariaDB Exporter Reconfigure Exception", server=self.as_dict()) + + @jingrow.whitelist() + def update_memory_allocator(self, memory_allocator): + jingrow.enqueue_pg( + self.pagetype, + self.name, + "_update_memory_allocator", + memory_allocator=memory_allocator, + enqueue_after_commit=True, + ) + + def _update_memory_allocator(self, memory_allocator): + ansible = Ansible( + playbook="mariadb_memory_allocator.yml", + server=self, + variables={ + "server": self.name, + "allocator": memory_allocator.lower(), + "mariadb_root_password": self.get_password("mariadb_root_password"), + }, + ) + play = ansible.run() + if play.status == "Failure": + log_error("MariaDB Memory Allocator Setup Error", server=self.name) + elif play.status == "Success": + result = json.loads( + jingrow.get_all( + "Ansible Task", + filters={"play": play.name, "task": "Show Memory Allocator"}, + pluck="result", + order_by="creation DESC", + limit=1, + )[0] + ) + query_result = result.get("query_result") + if query_result: + self.reload() + self.memory_allocator = memory_allocator + self.memory_allocator_version = query_result[0][0]["Value"] + self.save() + + @property + def mariadb_depends_on_mounts(self): + mount_points = set(mount.mount_point for mount in self.mounts) + mariadb_mount_points = set(["/var/lib/mysql", "/etc/mysql"]) + return mariadb_mount_points.issubset(mount_points) + + +get_permission_query_conditions = get_permission_query_conditions_for_pagetype("Database Server") + +PERFORMANCE_SCHEMA_VARIABLES = { + "performance_schema": "1", + "performance-schema-instrument": "'%=ON'", + "performance-schema-consumer-events-stages-current": "ON", + "performance-schema-consumer-events-stages-history": "ON", + "performance-schema-consumer-events-stages-history-long": "ON", + "performance-schema-consumer-events-statements-current": "ON", + "performance-schema-consumer-events-statements-history": "ON", + "performance-schema-consumer-events-statements-history-long": "ON", + "performance-schema-consumer-events-waits-current": "ON", + "performance-schema-consumer-events-waits-history": "ON", + "performance-schema-consumer-events-waits-history-long": "ON", +} diff --git a/jcloud/jcloud/pagetype/database_server/database_server_dashboard.py b/jcloud/jcloud/pagetype/database_server/database_server_dashboard.py new file mode 100644 index 0000000..3302d84 --- /dev/null +++ b/jcloud/jcloud/pagetype/database_server/database_server_dashboard.py @@ -0,0 +1,16 @@ +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +from jingrow import _ + + +def get_data(): + return { + "fieldname": "server", + "non_standard_fieldnames": {"Server": "database_server"}, + "transactions": [ + {"label": _("Related Documents"), "items": ["Server"]}, + {"label": _("Logs"), "items": ["Agent Job", "Ansible Play"]}, + ], + } diff --git a/jcloud/jcloud/pagetype/database_server/test_database_server.py b/jcloud/jcloud/pagetype/database_server/test_database_server.py new 
file mode 100644 index 0000000..9892892 --- /dev/null +++ b/jcloud/jcloud/pagetype/database_server/test_database_server.py @@ -0,0 +1,148 @@ +# Copyright (c) 2020, JINGROW +# See license.txt + + +from unittest.mock import MagicMock, Mock, patch + +import jingrow +from jingrow.core.utils import find +from jingrow.model.naming import make_autoname +from jingrow.tests.utils import JingrowTestCase + +from jcloud.jcloud.pagetype.database_server.database_server import DatabaseServer +from jcloud.jcloud.pagetype.server.server import BaseServer +from jcloud.jcloud.pagetype.virtual_machine.test_virtual_machine import create_test_virtual_machine +from jcloud.runner import Ansible +from jcloud.utils.test import foreground_enqueue_pg + + +@patch.object(BaseServer, "after_insert", new=Mock()) +def create_test_database_server(ip=None, cluster="Default") -> DatabaseServer: + """Create test Database Server pg""" + if not ip: + ip = jingrow.mock("ipv4") + server = jingrow.get_pg( + { + "pagetype": "Database Server", + "status": "Active", + "ip": ip, + "private_ip": jingrow.mock("ipv4_private"), + "agent_password": jingrow.mock("password"), + "hostname": f"m{make_autoname('.##')}", + "cluster": cluster, + "ram": 16384, + "virtual_machine": create_test_virtual_machine().name, + } + ).insert(ignore_if_duplicate=True) + server.reload() + return server + + +@patch.object(Ansible, "run", new=Mock()) +class TestDatabaseServer(JingrowTestCase): + def tearDown(self): + jingrow.db.rollback() + + @patch( + "jcloud.jcloud.pagetype.database_server.database_server.Ansible", + ) + @patch( + "jcloud.jcloud.pagetype.database_server.database_server.jingrow.enqueue_pg", + new=foreground_enqueue_pg, + ) + def test_mariadb_service_restarted_on_restart_mariadb_fn_call(self, Mock_Ansible: Mock): + server = create_test_database_server() + server.restart_mariadb() + server.reload() # modified timestamp datatype + Mock_Ansible.assert_called_with( + playbook="restart_mysql.yml", + server=server, + user=server.ssh_user or "root", + port=server.ssh_port or 22, + variables={ + "server": server.name, + }, + ) + + @patch( + "jcloud.jcloud.pagetype.database_server.database_server.Ansible", + ) + @patch( + "jcloud.jcloud.pagetype.database_server.database_server.jingrow.enqueue_pg", + new=foreground_enqueue_pg, + ) + def test_memory_limits_updated_on_update_of_corresponding_fields(self, Mock_Ansible: MagicMock): + server = create_test_database_server() + server.memory_high = 1 + server.save() + Mock_Ansible.assert_not_called() + server.memory_max = 2 + server.save() + server.reload() # modified timestamp datatype + + Mock_Ansible.assert_called_with( + playbook="database_memory_limits.yml", + server=server, + user=server.ssh_user or "root", + port=server.ssh_port or 22, + variables={ + "server": server.name, + "memory_high": server.memory_high, + "memory_max": server.memory_max, + "memory_swap_max": 0.1, + }, + ) + + @patch( + "jcloud.jcloud.pagetype.database_server.database_server.Ansible", + ) + @patch( + "jcloud.jcloud.pagetype.database_server.database_server.jingrow.enqueue_pg", + new=foreground_enqueue_pg, + ) + def test_reconfigure_mariadb_exporter_play_runs_on_reconfigure_fn_call(self, Mock_Ansible: Mock): + server = create_test_database_server() + server.reconfigure_mariadb_exporter() + server.reload() + Mock_Ansible.assert_called_with( + playbook="reconfigure_mysqld_exporter.yml", + server=server, + variables={ + "private_ip": server.private_ip, + "mariadb_root_password": server.get_password("mariadb_root_password"), + }, + ) + + 
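+ # Descriptive note (editor-added, assumption): jingrow.enqueue_pg is patched with foreground_enqueue_pg from jcloud.utils.test, which presumably executes the enqueued pg method synchronously, so the Ansible invocation can be asserted immediately after calling the whitelisted method.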
@patch( + "jcloud.jcloud.pagetype.database_server.database_server.Ansible", + ) + @patch( + "jcloud.jcloud.pagetype.database_server.database_server.jingrow.enqueue_pg", + new=foreground_enqueue_pg, + ) + def test_exception_on_failed_reconfigure_fn_call(self, Mock_Ansible: Mock): + Mock_Ansible.side_effect = Exception() + server = create_test_database_server() + self.assertRaises(Exception, server.reconfigure_mariadb_exporter) + + @patch("jcloud.jcloud.pagetype.database_server.database_server.Ansible", new=Mock()) + @patch( + "jcloud.jcloud.pagetype.database_server.database_server.jingrow.enqueue_pg", + new=foreground_enqueue_pg, + ) + def test_adjust_memory_config_sets_memory_limits_with_some_buffer(self): + server = create_test_database_server() + server.ram = 16384 + self.assertEqual(server.real_ram, 15707.248) + self.assertEqual(server.ram_for_mariadb, 15007.248) + server.adjust_memory_config() + server.reload() + self.assertEqual(server.memory_high, 13.656) + self.assertEqual(server.memory_max, 14.656) + self.assertEqual( + find( + server.mariadb_system_variables, + lambda x: x.mariadb_variable == "innodb_buffer_pool_size", + ).value_int, + int(15007.248 * 0.65), + ) diff --git a/jcloud/jcloud/pagetype/database_server_mariadb_variable/__init__.py b/jcloud/jcloud/pagetype/database_server_mariadb_variable/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/database_server_mariadb_variable/database_server_mariadb_variable.js b/jcloud/jcloud/pagetype/database_server_mariadb_variable/database_server_mariadb_variable.js new file mode 100644 index 0000000..9722497 --- /dev/null +++ b/jcloud/jcloud/pagetype/database_server_mariadb_variable/database_server_mariadb_variable.js @@ -0,0 +1,8 @@ +// Copyright (c) 2023, JINGROW +// For license information, please see license.txt + +// jingrow.ui.form.on("Database Server MariaDB Variable", { +// refresh(frm) { + +// }, +// }); diff --git a/jcloud/jcloud/pagetype/database_server_mariadb_variable/database_server_mariadb_variable.json b/jcloud/jcloud/pagetype/database_server_mariadb_variable/database_server_mariadb_variable.json new file mode 100644 index 0000000..9462281 --- /dev/null +++ b/jcloud/jcloud/pagetype/database_server_mariadb_variable/database_server_mariadb_variable.json @@ -0,0 +1,69 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2023-05-30 15:28:55.572163", + "default_view": "List", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "mariadb_variable", + "value_int", + "value_str", + "value_float", + "skip", + "persist" + ], + "fields": [ + { + "fieldname": "mariadb_variable", + "fieldtype": "Link", + "in_list_view": 1, + "label": "MariaDB Variable", + "options": "MariaDB Variable" + }, + { + "fieldname": "value_int", + "fieldtype": "Int", + "in_list_view": 1, + "label": "Value Int (MB)" + }, + { + "fieldname": "value_float", + "fieldtype": "Float", + "label": "Value Float" + }, + { + "default": "0", + "fieldname": "skip", + "fieldtype": "Check", + "in_list_view": 1, + "label": "Skip" + }, + { + "default": "0", + "fieldname": "persist", + "fieldtype": "Check", + "in_list_view": 1, + "label": "Persist" + }, + { + "fieldname": "value_str", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Value Str" + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2024-07-03 07:15:03.695917", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Database Server MariaDB Variable", + "owner": "Administrator", 
+ "permissions": [], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/database_server_mariadb_variable/database_server_mariadb_variable.py b/jcloud/jcloud/pagetype/database_server_mariadb_variable/database_server_mariadb_variable.py new file mode 100644 index 0000000..20d489c --- /dev/null +++ b/jcloud/jcloud/pagetype/database_server_mariadb_variable/database_server_mariadb_variable.py @@ -0,0 +1,156 @@ +# Copyright (c) 2023, JINGROW +# For license information, please see license.txt + +from typing import Any + +import jingrow +from jingrow.model.document import Document + +from jcloud.runner import Ansible +from jcloud.utils import log_error + + +class DatabaseServerMariaDBVariable(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + mariadb_variable: DF.Link | None + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + persist: DF.Check + skip: DF.Check + value_float: DF.Float + value_int: DF.Int + value_str: DF.Data | None + # end: auto-generated types + + @property + def datatype(self) -> str: + return jingrow.db.get_value("MariaDB Variable", self.mariadb_variable, "datatype") + + @property + def value_fields(self) -> list[str]: + return list(filter(lambda x: x.startswith("value_"), self.as_dict().keys())) + + @property + def value_field(self) -> str: + """Return the first value field that has a value""" + for f in self.value_fields: + if self.get(f): + return f + + @property + def value(self) -> Any: + """Return the value of the first value field that has a value""" + v = self.get(self.value_field) + if self.value_field == "value_int": + v = v * 1024 * 1024 # Convert MB to bytes + return v + + @property + def dynamic(self) -> bool: + if not self.get("_dynamic"): + self._dynamic = jingrow.db.get_value( + "MariaDB Variable", self.mariadb_variable, "dynamic" + ) + return self._dynamic + + @dynamic.setter + def dynamic(self, value: bool): + self._dynamic = value + + @property + def skippable(self) -> bool: + return jingrow.db.get_value("MariaDB Variable", self.mariadb_variable, "skippable") + + def get_variable_dict_for_play(self) -> dict: + var = self.mariadb_variable + if self.skip: + var = "skip-" + var + res = { + "variable": var, + "dynamic": self.dynamic, + "persist": self.persist, + "skip": self.skip, + } + if not self.skip: + res.update({"value": self.value}) + return res + + def validate_only_one_value_is_set(self): + if sum([bool(self.get(f)) for f in self.value_fields]) > 1: + jingrow.throw("Only one value can be set for MariaDB system variable") + + def validate_datatype_of_field_is_correct(self): + if type(self.value).__name__ != self.datatype.lower(): + jingrow.throw(f"Value for {self.mariadb_variable} must be {self.datatype}") + + def validate_value_field_set_is_correct(self): + if self.value_field != f"value_{self.datatype.lower()}": + jingrow.throw( + f"Value field for {self.mariadb_variable} must be value_{self.datatype.lower()}" + ) + + def validate_skipped_should_be_skippable(self): + if self.skip and not self.skippable: + jingrow.throw( + f"Only skippable variables can be skipped. 
{self.mariadb_variable} is not skippable" + ) + + def set_default_value_if_no_value(self): + if self.value: + return + default_value = jingrow.db.get_value( + "MariaDB Variable", self.mariadb_variable, "default_value" + ) + if default_value: + self.set(f"value_{self.datatype.lower()}", default_value) + + def validate_empty_only_if_skippable(self): + if not self.value and not self.skippable: + jingrow.throw(f"Value for {self.mariadb_variable} cannot be empty") + + def set_persist_and_unset_dynamic_if_skipped(self): + if self.skip: + self.persist = True + self.dynamic = False + + def validate( # Is not called by FF. Called manually from database_server.py + self, + ): + self.validate_only_one_value_is_set() + self.set_default_value_if_no_value() + self.validate_skipped_should_be_skippable() + self.validate_empty_only_if_skippable() + self.set_persist_and_unset_dynamic_if_skipped() + if self.value: + self.validate_value_field_set_is_correct() + self.validate_datatype_of_field_is_correct() + + def update_on_server(self): + server = jingrow.get_pg("Database Server", self.parent) + ansible = Ansible( + playbook="mysqld_variable.yml", + server=server, + user=server.ssh_user or "root", + port=server.ssh_port or 22, + variables={ + "server": server.name, + **self.get_variable_dict_for_play(), + }, + ) + play = ansible.run() + if play.status == "Failure": + log_error("MariaDB System Variable Update Error", server=server.name) + + +def on_pagetype_update(): + jingrow.db.add_unique( + "Database Server MariaDB Variable", ("mariadb_variable", "parent") + ) diff --git a/jcloud/jcloud/pagetype/database_server_mariadb_variable/patches/add_unique_constraint.py b/jcloud/jcloud/pagetype/database_server_mariadb_variable/patches/add_unique_constraint.py new file mode 100644 index 0000000..3f05ce0 --- /dev/null +++ b/jcloud/jcloud/pagetype/database_server_mariadb_variable/patches/add_unique_constraint.py @@ -0,0 +1,12 @@ +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + + +import jingrow + + +def execute(): + jingrow.reload_pg("jcloud", "pagetype", "database_server_mariadb_variable") + jingrow.get_pg("PageType", "Database Server MariaDB Variable").run_module_method( + "on_pagetype_update" + ) diff --git a/jcloud/jcloud/pagetype/database_server_mariadb_variable/test_database_server_mariadb_variable.py b/jcloud/jcloud/pagetype/database_server_mariadb_variable/test_database_server_mariadb_variable.py new file mode 100644 index 0000000..26cd101 --- /dev/null +++ b/jcloud/jcloud/pagetype/database_server_mariadb_variable/test_database_server_mariadb_variable.py @@ -0,0 +1,392 @@ +# Copyright (c) 2023, JINGROW +# See license.txt +from unittest.mock import Mock, patch + +import jingrow +from jingrow.tests.utils import JingrowTestCase + +from jcloud.jcloud.pagetype.database_server.test_database_server import ( + create_test_database_server, +) +from jcloud.runner import Ansible +from jcloud.utils.test import foreground_enqueue_pg + + +@patch.object(Ansible, "run", new=Mock()) +class TestDatabaseServerMariaDBVariable(JingrowTestCase): + def tearDown(self): + jingrow.db.rollback() + + def test_multiple_values_for_same_mariadb_system_variable_cant_be_set(self): + """Test that multiple values for same MariaDB system variable can't be set""" + server = create_test_database_server() + server.append( + "mariadb_system_variables", + {"mariadb_variable": "innodb_buffer_pool_size", "value_int": 1000}, + ) + server.save() + server.append( + "mariadb_system_variables", + {"mariadb_variable": 
"innodb_buffer_pool_size", "value_int": 2000}, + ) + + with self.assertRaises(jingrow.ValidationError): + server.save() + + def test_only_one_datatype_value_can_be_set_for_one_mariadb_variable(self): + """Test that only one datatype value can be set for one MariaDB variable""" + server = create_test_database_server() + server.append( + "mariadb_system_variables", + { + "mariadb_variable": "innodb_buffer_pool_size", + "value_int": 1000, + "value_str": str(1000 * 1024), + }, + ) + with self.assertRaises(jingrow.ValidationError): + server.save() + + def test_wrong_datatype_value_cannot_be_set_for_variable(self): + """Test that wrong datatype value cannot be set for variable""" + server = create_test_database_server() + server.append( + "mariadb_system_variables", + {"mariadb_variable": "innodb_buffer_pool_size", "value_int": "OFF"}, + ) + with self.assertRaises(jingrow.ValidationError): + server.save() + + def test_value_field_set_matches_datatype(self): + server = create_test_database_server() + server.append( + "mariadb_system_variables", + { + "mariadb_variable": "innodb_buffer_pool_size", + "value_str": "1000", # seeing if only value is checked and not the field + }, + ) + with self.assertRaises(jingrow.ValidationError): + server.save() + server.append( + "mariadb_system_variables", + { + "mariadb_variable": "log_bin", + "value_int": False, # seeing if only value is checked and not the field + }, + ) + with self.assertRaises(jingrow.ValidationError): + server.save() + + def test_only_skippable_variables_can_be_skipped(self): + """Test that only skippable variables can be skipped""" + server = create_test_database_server() + server.append( + "mariadb_system_variables", + {"mariadb_variable": "innodb_buffer_pool_size", "value_int": 1000, "skip": True}, + ) + with self.assertRaises(jingrow.ValidationError): + server.save() + server.reload() + server.append( + "mariadb_system_variables", + {"mariadb_variable": "log_bin", "skip": True}, + ) + try: + server.save() + except jingrow.ValidationError: + self.fail("Should be able to skip skippable variables") + + @patch( + "jcloud.jcloud.pagetype.database_server_mariadb_variable.database_server_mariadb_variable.Ansible", + wraps=Ansible, + ) + @patch( + "jcloud.jcloud.pagetype.database_server.database_server.jingrow.enqueue_pg", + new=foreground_enqueue_pg, + ) + def test_skip_implies_persist_and_not_dynamic(self, Mock_Ansible): + """Test that skip enables persist and not dynamic""" + server = create_test_database_server() + server.append( + "mariadb_system_variables", + {"mariadb_variable": "log_bin", "skip": True}, + ) + server.save() + Mock_Ansible.assert_called_once() + args, kwargs = Mock_Ansible.call_args + expected = { + "server": server.name, + "variable": "skip-log_bin", + "dynamic": 0, + "persist": 1, + "skip": 1, + } + self.assertDictEqual(kwargs["variables"], expected) + + def test_default_value_is_applied_if_empty(self): + """Test that default value is applied if empty""" + server = create_test_database_server() + server.append( + "mariadb_system_variables", + {"mariadb_variable": "log_bin"}, + ) + server.save() + default_value = jingrow.db.get_value("MariaDB Variable", "log_bin", "default_value") + self.assertEqual(server.mariadb_system_variables[0].value_str, default_value) + + @patch( + "jcloud.jcloud.pagetype.database_server_mariadb_variable.database_server_mariadb_variable.Ansible", + wraps=Ansible, + ) + @patch( + "jcloud.jcloud.pagetype.database_server.database_server.jingrow.enqueue_pg", + wraps=foreground_enqueue_pg, + ) + def 
test_ansible_playbook_triggered_with_correct_input_on_update_of_child_table( + self, mock_enqueue_pg: Mock, Mock_Ansible + ): + server = create_test_database_server() + server.append( + "mariadb_system_variables", + {"mariadb_variable": "innodb_buffer_pool_size", "value_int": 1000}, + ) + server.save() + args, kwargs = Mock_Ansible.call_args + expected_vars = { + "server": server.name, + "variable": "innodb_buffer_pool_size", + "value": 1000 * 1024 * 1024, # convert to bytes + "dynamic": 1, + "persist": 0, + "skip": 0, + } + self.assertEqual("mysqld_variable.yml", kwargs["playbook"]) + server.reload() # reload to get the right typing for datetime field + self.assertDocumentEqual(server, kwargs["server"]) + self.assertDictEqual(expected_vars, kwargs["variables"]) + + def test_ansible_playbook_not_triggered_on_update_of_unrelated_things(self): + server = create_test_database_server() + server.append( + "mariadb_system_variables", + {"mariadb_variable": "innodb_buffer_pool_size", "value_int": 1000}, + ) + server.save() + server.status = "Broken" + with patch( + "jcloud.jcloud.pagetype.database_server_mariadb_variable.database_server_mariadb_variable.Ansible", + wraps=Ansible, + ) as Mock_Ansible: + server.save() + Mock_Ansible.assert_not_called() + + @patch( + "jcloud.jcloud.pagetype.database_server.database_server.jingrow.enqueue_pg", + new=foreground_enqueue_pg, + ) + def test_playbook_run_on_update_of_child_table(self): + server = create_test_database_server() + server.append( + "mariadb_system_variables", + {"mariadb_variable": "innodb_buffer_pool_size", "value_int": 1000}, + ) + server.save() + server.mariadb_system_variables[0].value_int = 2000 + with patch( + "jcloud.jcloud.pagetype.database_server_mariadb_variable.database_server_mariadb_variable.Ansible", + wraps=Ansible, + ) as Mock_Ansible: + server.save() + Mock_Ansible.assert_called_once() + + @patch( + "jcloud.jcloud.pagetype.database_server_mariadb_variable.database_server_mariadb_variable.Ansible", + wraps=Ansible, + ) + @patch( + "jcloud.jcloud.pagetype.database_server.database_server.jingrow.enqueue_pg", + wraps=foreground_enqueue_pg, + ) + def test_multiple_playbooks_triggered_for_multiple_variables( + self, mock_enqueue_pg, Mock_Ansible + ): + server = create_test_database_server() + server.append( + "mariadb_system_variables", + {"mariadb_variable": "innodb_buffer_pool_size", "value_int": 1000}, + ) + server.append( + "mariadb_system_variables", + {"mariadb_variable": "log_bin", "skip": True}, + ) + server.save() + self.assertEqual(2, Mock_Ansible.call_count) + + @patch( + "jcloud.jcloud.pagetype.database_server_mariadb_variable.database_server_mariadb_variable.Ansible", + wraps=Ansible, + ) + @patch( + "jcloud.jcloud.pagetype.database_server.database_server.jingrow.enqueue_pg", + new=foreground_enqueue_pg, + ) + def test_persist_check_passes_option_to_playbook_run(self, Mock_Ansible): + server = create_test_database_server() + server.append( + "mariadb_system_variables", + { + "mariadb_variable": "innodb_buffer_pool_size", + "value_int": 1000, + "persist": True, + }, + ) + server.save() + args, kwargs = Mock_Ansible.call_args + self.assertTrue(kwargs["variables"]["persist"]) + + @patch( + "jcloud.jcloud.pagetype.database_server.database_server.jingrow.enqueue_pg", + new=foreground_enqueue_pg, + ) + def test_playbook_run_on_addition_of_variable_and_only_that_variable(self): + server = create_test_database_server() + server.append( + "mariadb_system_variables", + {"mariadb_variable": "innodb_buffer_pool_size", "value_int": 
1000}, + ) + server.save() + with patch( + "jcloud.jcloud.pagetype.database_server_mariadb_variable.database_server_mariadb_variable.Ansible", + wraps=Ansible, + ) as Mock_Ansible: + server.append( + "mariadb_system_variables", + {"mariadb_variable": "log_bin", "skip": True}, + ) + server.save() + Mock_Ansible.assert_called_once() + + @patch( + "jcloud.jcloud.pagetype.database_server.database_server.jingrow.enqueue_pg", + new=foreground_enqueue_pg, + ) + def test_playbook_run_only_for_variable_changed(self): + server = create_test_database_server() + server.append( + "mariadb_system_variables", + {"mariadb_variable": "innodb_buffer_pool_size", "value_int": 1000}, + ) + server.append( + "mariadb_system_variables", + {"mariadb_variable": "log_bin", "skip": True}, + ) + server.save() + with patch( + "jcloud.jcloud.pagetype.database_server_mariadb_variable.database_server_mariadb_variable.Ansible", + wraps=Ansible, + ) as Mock_Ansible: + server.mariadb_system_variables[0].value_int = 2000 + server.save() + Mock_Ansible.assert_called_once() + + @patch( + "jcloud.jcloud.pagetype.database_server.database_server.jingrow.enqueue_pg", + new=foreground_enqueue_pg, + ) + def test_playbooks_triggered_for_added_and_changed_variables_in_one_save(self): + server = create_test_database_server() + server.append( + "mariadb_system_variables", + {"mariadb_variable": "innodb_buffer_pool_size", "value_int": 1000}, + ) + server.save() + with patch( + "jcloud.jcloud.pagetype.database_server_mariadb_variable.database_server_mariadb_variable.Ansible", + wraps=Ansible, + ) as Mock_Ansible: + server.append( + "mariadb_system_variables", + {"mariadb_variable": "log_bin", "skip": False}, + ) + server.mariadb_system_variables[0].value_int = 2000 + server.save() + self.assertEqual(2, Mock_Ansible.call_count) + for call in Mock_Ansible.call_args_list: + args, kwargs = call + self.assertIn( + kwargs["variables"]["variable"], ["innodb_buffer_pool_size", "log_bin"] + ) + + @patch( + "jcloud.jcloud.pagetype.database_server.database_server.jingrow.enqueue_pg", + wraps=foreground_enqueue_pg, + ) + def test_background_jobs_not_created_for_new_server_pg(self, mock_enqueue_pg): + create_test_database_server() + mock_enqueue_pg.assert_not_called() + + @patch( + "jcloud.jcloud.pagetype.database_server.database_server.jingrow.enqueue_pg", + wraps=foreground_enqueue_pg, + ) + def test_update_of_pg_with_member_that_is_not_a_field_works(self, mock_enqueue_pg): + server = create_test_database_server() + server.reload() + server.x = 4096 # member that is not field + try: + server.save() + except Exception: + self.fail("Update of pg without variables failed") + + @patch( + "jcloud.jcloud.pagetype.database_server.database_server.jingrow.enqueue_pg", + new=foreground_enqueue_pg, + ) + def test_add_variable_method_adds_and_updates_variables(self): + server = create_test_database_server() + + with patch( + "jcloud.jcloud.pagetype.database_server_mariadb_variable.database_server_mariadb_variable.Ansible", + wraps=Ansible, + ) as Mock_Ansible: + server.add_mariadb_variable("tmp_disk_table_size", "value_int", 10241) + + Mock_Ansible.assert_called_once() + + server.reload() + self.assertEqual(1, len(server.mariadb_system_variables)) + self.assertEqual( + "tmp_disk_table_size", server.mariadb_system_variables[0].mariadb_variable + ) + self.assertEqual(10241, server.mariadb_system_variables[0].value_int) + + with patch( + "jcloud.jcloud.pagetype.database_server_mariadb_variable.database_server_mariadb_variable.Ansible", + wraps=Ansible, + ) as 
Mock_Ansible: + server.add_mariadb_variable("tmp_disk_table_size", "value_int", 10242) + + Mock_Ansible.assert_called_once() + + self.assertEqual(1, len(server.mariadb_system_variables)) + self.assertEqual( + "tmp_disk_table_size", server.mariadb_system_variables[0].mariadb_variable + ) + self.assertEqual(10242, server.mariadb_system_variables[0].value_int) + + with patch( + "jcloud.jcloud.pagetype.database_server_mariadb_variable.database_server_mariadb_variable.Ansible", + wraps=Ansible, + ) as Mock_Ansible: + server.add_mariadb_variable("tmp_disk_table_size", "value_int", 10242) # no change + Mock_Ansible.assert_not_called() + + with patch( + "jcloud.jcloud.pagetype.database_server_mariadb_variable.database_server_mariadb_variable.Ansible", + wraps=Ansible, + ) as Mock_Ansible: + server.add_mariadb_variable( + "tmp_disk_table_size", "value_int", 10242, persist=False + ) # no change + Mock_Ansible.assert_called_once() diff --git a/jcloud/jcloud/pagetype/deploy/__init__.py b/jcloud/jcloud/pagetype/deploy/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/deploy/deploy.js b/jcloud/jcloud/pagetype/deploy/deploy.js new file mode 100644 index 0000000..616b694 --- /dev/null +++ b/jcloud/jcloud/pagetype/deploy/deploy.js @@ -0,0 +1,17 @@ +// Copyright (c) 2020, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Deploy', { + onload: function (frm) { + frm.set_query('candidate', function () { + return { + filters: { + group: frm.pg.group, + }, + }; + }); + }, + // refresh: function(frm) { + + // } +}); diff --git a/jcloud/jcloud/pagetype/deploy/deploy.json b/jcloud/jcloud/pagetype/deploy/deploy.json new file mode 100644 index 0000000..3495120 --- /dev/null +++ b/jcloud/jcloud/pagetype/deploy/deploy.json @@ -0,0 +1,105 @@ +{ + "actions": [], + "creation": "2020-04-06 12:08:13.396131", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "group", + "team", + "staging", + "column_break_2", + "candidate", + "section_break_5", + "benches" + ], + "fields": [ + { + "fetch_from": "candidate.group", + "fieldname": "group", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Release Group", + "options": "Release Group", + "reqd": 1, + "set_only_once": 1 + }, + { + "fieldname": "candidate", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Deploy Candidate", + "options": "Deploy Candidate", + "reqd": 1, + "set_only_once": 1 + }, + { + "fieldname": "benches", + "fieldtype": "Table", + "label": "Deploy Benches", + "options": "Deploy Bench", + "reqd": 1 + }, + { + "fieldname": "column_break_2", + "fieldtype": "Column Break" + }, + { + "fieldname": "section_break_5", + "fieldtype": "Section Break" + }, + { + "fetch_from": "group.team", + "fieldname": "team", + "fieldtype": "Link", + "label": "Team", + "options": "Team", + "read_only": 1, + "reqd": 1 + }, + { + "default": "0", + "fieldname": "staging", + "fieldtype": "Check", + "in_standard_filter": 1, + "label": "Staging" + } + ], + "links": [], + "modified": "2021-07-23 10:34:15.747070", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Deploy", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + }, + { + "create": 1, + "read": 1, + "role": "Jcloud Admin", + "write": 1 + }, + { + "create": 1, + "read": 1, + "role": 
"Jcloud Member", + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/deploy/deploy.py b/jcloud/jcloud/pagetype/deploy/deploy.py new file mode 100644 index 0000000..acdec32 --- /dev/null +++ b/jcloud/jcloud/pagetype/deploy/deploy.py @@ -0,0 +1,123 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +import jingrow +from jingrow.model.document import Document +from jingrow.model.naming import append_number_if_name_exists + +from jcloud.overrides import get_permission_query_conditions_for_pagetype +from jcloud.utils import log_error + + +class Deploy(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + from jcloud.jcloud.pagetype.deploy_bench.deploy_bench import DeployBench + + benches: DF.Table[DeployBench] + candidate: DF.Link + group: DF.Link + staging: DF.Check + team: DF.Link + # end: auto-generated types + + def autoname(self): + self.name = append_number_if_name_exists("Deploy", self.candidate, separator="-") + + def after_insert(self): + self.create_benches() + + def create_benches(self): + candidate = jingrow.get_cached_pg("Deploy Candidate", self.candidate) + environment_variables = [ + {"key": v.key, "value": v.value} for v in candidate.environment_variables + ] + + group = jingrow.get_cached_pg("Release Group", self.group) + mounts = [ + { + "source": v.source, + "destination": v.destination, + "is_absolute_path": v.is_absolute_path, + } + for v in group.mounts + ] + + for bench in self.benches: + new = jingrow.get_pg( + { + "pagetype": "Bench", + "server": bench.server, + "group": self.group, + "candidate": self.candidate, + "workers": 1, + "staging": self.staging, + "environment_variables": environment_variables, + "mounts": mounts, + } + ).insert() + bench.bench = new.name + + jingrow.enqueue( + "jcloud.jcloud.pagetype.deploy.deploy.create_deploy_candidate_differences", + destination=self.candidate, + enqueue_after_commit=True, + ) + self.save() + + +def create_deploy_candidate_differences(destination): + destination = jingrow.get_cached_pg("Deploy Candidate", destination) + group = destination.group + destination_creation = destination.creation + candidates = jingrow.get_all( + "Bench", + pluck="candidate", + filters={ + "status": ("!=", "Archived"), + "group": group, + "candidate": ("!=", destination.name), + }, + ) + candidates = list(set(candidates)) + for source in candidates: + try: + source_creation = jingrow.db.get_value("Deploy Candidate", source, "creation") + if source_creation < destination_creation: + if jingrow.get_all( + "Deploy Candidate Difference", + filters={ + "group": group, + "source": source, + "destination": destination.name, + }, + limit=1, + ): + continue + jingrow.get_pg( + { + "pagetype": "Deploy Candidate Difference", + "group": group, + "source": source, + "destination": destination.name, + } + ).insert() + jingrow.db.commit() + except Exception: + log_error( + "Deploy Candidate Difference Creation Error", + destination=destination, + candidates=candidates, + source=source, + ) + + +get_permission_query_conditions = get_permission_query_conditions_for_pagetype("Deploy") diff --git a/jcloud/jcloud/pagetype/deploy/test_deploy.py b/jcloud/jcloud/pagetype/deploy/test_deploy.py new file mode 100644 index 0000000..7897d32 --- 
/dev/null +++ b/jcloud/jcloud/pagetype/deploy/test_deploy.py @@ -0,0 +1,11 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# See license.txt + + +# import jingrow +import unittest + + +class TestDeploy(unittest.TestCase): + pass diff --git a/jcloud/jcloud/pagetype/deploy_bench/__init__.py b/jcloud/jcloud/pagetype/deploy_bench/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/deploy_bench/deploy_bench.js b/jcloud/jcloud/pagetype/deploy_bench/deploy_bench.js new file mode 100644 index 0000000..93143fd --- /dev/null +++ b/jcloud/jcloud/pagetype/deploy_bench/deploy_bench.js @@ -0,0 +1,7 @@ +// Copyright (c) 2020, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Deploy Bench', { + // refresh: function(frm) { + // } +}); diff --git a/jcloud/jcloud/pagetype/deploy_bench/deploy_bench.json b/jcloud/jcloud/pagetype/deploy_bench/deploy_bench.json new file mode 100644 index 0000000..4a610fa --- /dev/null +++ b/jcloud/jcloud/pagetype/deploy_bench/deploy_bench.json @@ -0,0 +1,40 @@ +{ + "actions": [], + "creation": "2020-04-06 12:10:28.479712", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "server", + "bench" + ], + "fields": [ + { + "fieldname": "server", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Server", + "options": "Server", + "reqd": 1 + }, + { + "fieldname": "bench", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Bench", + "options": "Bench", + "read_only": 1 + } + ], + "istable": 1, + "links": [], + "modified": "2020-12-16 18:32:30.211437", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Deploy Bench", + "owner": "Administrator", + "permissions": [], + "sort_field": "modified", + "sort_order": "DESC", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/deploy_bench/deploy_bench.py b/jcloud/jcloud/pagetype/deploy_bench/deploy_bench.py new file mode 100644 index 0000000..eb47788 --- /dev/null +++ b/jcloud/jcloud/pagetype/deploy_bench/deploy_bench.py @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +# import jingrow +from jingrow.model.document import Document + + +class DeployBench(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + bench: DF.Link | None + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + server: DF.Link + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/deploy_bench/test_deploy_bench.py b/jcloud/jcloud/pagetype/deploy_bench/test_deploy_bench.py new file mode 100644 index 0000000..e640bd4 --- /dev/null +++ b/jcloud/jcloud/pagetype/deploy_bench/test_deploy_bench.py @@ -0,0 +1,11 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# See license.txt + + +# import jingrow +import unittest + + +class TestDeployBench(unittest.TestCase): + pass diff --git a/jcloud/jcloud/pagetype/deploy_candidate/__init__.py b/jcloud/jcloud/pagetype/deploy_candidate/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/deploy_candidate/cache_utils.py b/jcloud/jcloud/pagetype/deploy_candidate/cache_utils.py new file mode 100644 index 0000000..928a22a --- /dev/null +++ b/jcloud/jcloud/pagetype/deploy_candidate/cache_utils.py @@ -0,0 +1,261 @@ +import os +import platform +import random +import re +import shlex +import shutil +import subprocess +from datetime import datetime +from pathlib import Path +from textwrap import dedent +from typing import Tuple, TypedDict + +CommandOutput = TypedDict( + "CommandOutput", + cwd=str, + image_tag=str, + returncode=int, + output=str, +) + + +def copy_file_from_docker_cache( + container_source: str, + host_dest: str = ".", + cache_target: str = "/home/jingrow/.cache", +): + """ + Function is used to copy files from docker cache i.e. `cache_target/container_source` + to the host system i.e `host_dest`. + + This function is required cause cache files may be available only during docker build. + + This works by: + - copy the file from mount cache (image) to another_folder (image) + - create a container from image + - copy file from another_folder (container) to host system (using docker cp) + - remove container and then image + """ + filename = Path(container_source).name + container_dest_dirpath = Path(cache_target).parent / "container_dest" + container_dest_filepath = container_dest_dirpath / filename + command = ( + f"mkdir -p {container_dest_dirpath} && " + + f"cp {container_source} {container_dest_filepath}" + ) + output = run_command_in_docker_cache( + command, + cache_target, + False, + ) + + if output["returncode"] == 0: + container_id = create_container(output["image_tag"]) + copy_file_from_container( + container_id, + container_dest_filepath, + Path(host_dest), + ) + remove_container(container_id) + + run_image_rm(output["image_tag"]) + return output + + +def run_command_in_docker_cache( + command: str = "ls -A", + cache_target: str = "/home/jingrow/.cache", + remove_image: bool = True, +) -> CommandOutput: + """ + This function works by capturing the output of the given `command` + by running it in the cache dir (`cache_target`) while building a + dummy image. + + The primary purpose is to check the contents of the mounted cache. It's + an incredibly hacky way to achieve this, but afaik the only one. + + Note: The `ARG CACHE_BUST=1` line is used to cause layer cache miss + while running `command` at `cache_target`. This is achieved by changing + `CACHE_BUST` value every run. + + Warning: Takes time to run, use judiciously. 
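+
+    Example (illustrative; mirrors the call made in `get_cached_apps` below):
+
+        result = run_command_in_docker_cache(
+            command="ls -A bench/apps",
+            cache_target="/home/jingrow/.cache",
+        )
+        if result["returncode"] == 0:
+            print(result["output"])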
+ """ + dockerfile = get_cache_check_dockerfile( + command, + cache_target, + ) + df_path = prep_dockerfile_path(dockerfile) + output = run_build_command(df_path, remove_image) + return output + + +def get_cache_check_dockerfile(command: str, cache_target: str) -> str: + """ + Note: Mount cache is identified by different attributes, hence it should + be the same as the Dockerfile else it will always result in a cache miss. + + Ref: https://docs.docker.com/engine/reference/builder/#run---mounttypecache + """ + df = f""" + FROM ubuntu:20.04 + ARG CACHE_BUST=1 + WORKDIR {cache_target} + RUN --mount=type=cache,target={cache_target},uid=1000,gid=1000 {command} + """ + return dedent(df).strip() + + +def create_container(image_tag: str) -> str: + args = shlex.split(f"docker create --platform linux/amd64 {image_tag}") + return subprocess.run( + args, + env=os.environ.copy(), + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + text=True, + ).stdout.strip() + + +def copy_file_from_container( + container_id: str, + container_filepath: Path, + host_dest: Path, +): + container_source = f"{container_id}:{container_filepath}" + args = ["docker", "cp", container_source, host_dest.as_posix()] + proc = subprocess.run( + args, + env=os.environ.copy(), + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + text=True, + ) + + if not proc.returncode: + print( + f"file copied:\n" + f"- from {container_source}\n" + f"- to {host_dest.absolute().as_posix()}" + ) + else: + print(proc.stdout) + + +def remove_container(container_id: str) -> str: + args = shlex.split(f"docker rm -v {container_id}") + subprocess.run( + args, + env=os.environ.copy(), + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + text=True, + ).stdout + + +def prep_dockerfile_path(dockerfile: str) -> Path: + dir = Path("cache_check_dockerfile_dir") + if dir.is_dir(): + shutil.rmtree(dir) + + dir.mkdir() + df_path = dir / "Dockerfile" + with open(df_path, "w") as df: + df.write(dockerfile) + + return df_path + + +def run_build_command(df_path: Path, remove_image: bool) -> CommandOutput: + command, image_tag = get_cache_check_build_command() + env = os.environ.copy() + env["DOCKER_BUILDKIT"] = "1" + env["BUILDKIT_PROGRESS"] = "plain" + + output = subprocess.run( + shlex.split(command), + env=env, + cwd=df_path.parent, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + text=True, + ) + if remove_image: + run_image_rm(image_tag) + return dict( + cwd=df_path.parent.absolute().as_posix(), + image_tag=image_tag, + returncode=output.returncode, + output=strip_build_output(output.stdout), + ) + + +def get_cache_check_build_command() -> Tuple[str, str]: + command = "docker build" + if ( + platform.machine() == "arm64" + and platform.system() == "Darwin" + and platform.processor() == "arm" + ): + command += "x build --platform linux/amd64" + + now_ts = datetime.timestamp(datetime.today()) + command += f" --build-arg CACHE_BUST={now_ts}" + + image_tag = f"cache_check:id-{random.getrandbits(40):x}" + command += f" --tag {image_tag} ." 
+ return command, image_tag + + +def run_image_rm(image_tag: str): + command = f"docker image rm {image_tag}" + subprocess.run( + shlex.split(command), stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL + ) + + +def strip_build_output(stdout: str) -> str: + output = [] + is_output = False + + line_rx = re.compile(r"^#\d+\s\d+\.\d+\s") + done_rx = re.compile(r"^#\d+\sDONE\s\d+\.\d+s$") + + for line in stdout.split("\n"): + if is_output and (m := line_rx.match(line)): + start = m.end() + output.append(line[start:]) + elif is_output and done_rx.search(line): + break + elif "--mount=type=cache,target=" in line: + is_output = True + return "\n".join(output) + + +def get_cached_apps() -> dict[str, list[str]]: + result = run_command_in_docker_cache( + command="ls -A bench/apps", + cache_target="/home/jingrow/.cache", + ) + + apps = dict() + if result["returncode"] != 0: + return apps + + for line in result["output"].split("\n"): + # File Name: app_name-cache_key.ext + splits = line.split("-", 1) + if len(splits) != 2: + continue + + app_name, suffix = splits + suffix_splits = suffix.split(".", 1) + if len(suffix_splits) != 2 or suffix_splits[1] not in ["tar", "tgz"]: + continue + + if app_name not in apps: + apps[app_name] = [] + + app_hash = suffix_splits[0] + apps[app_name].append(app_hash) + return apps diff --git a/jcloud/jcloud/pagetype/deploy_candidate/deploy_candidate.js b/jcloud/jcloud/pagetype/deploy_candidate/deploy_candidate.js new file mode 100644 index 0000000..c34d9e6 --- /dev/null +++ b/jcloud/jcloud/pagetype/deploy_candidate/deploy_candidate.js @@ -0,0 +1,129 @@ +// Copyright (c) 2020, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Deploy Candidate', { + refresh: function (frm) { + frm.add_web_link( + `/dashboard/groups/${frm.pg.group}/deploys/${frm.pg.name}`, + 'Visit Dashboard', + ); + + frm.fields_dict['apps'].grid.get_field('app').get_query = function (pg) { + return { + query: 'jcloud.jcloud.pagetype.deploy_candidate.deploy_candidate.desk_app', + filters: { release_group: pg.group }, + }; + }; + + if (frm.pg.status === 'Success') { + set_handler(frm, 'Deploy', 'deploy', {}, 'Deploy'); + } + + if (['Draft', 'Failure', 'Success'].includes(frm.pg.status)) { + set_handler( + frm, + 'Complete', + 'build', + { no_push: false, no_build: false, no_cache: false }, + 'Build', + ); + set_handler( + frm, + 'Generate Context', + 'build', + { no_push: true, no_build: true, no_cache: false }, + 'Build', + ); + set_handler( + frm, + 'Without Cache', + 'build', + { no_push: false, no_build: false, no_cache: true }, + 'Build', + ); + set_handler( + frm, + 'Without Push', + 'build', + { no_push: true, no_build: false, no_cache: false }, + 'Build', + ); + set_handler(frm, 'Redeploy', 'redeploy', { no_cache: false }, 'Deploy'); + set_handler( + frm, + 'Redeploy (No Cache)', + 'redeploy', + { no_cache: true }, + 'Deploy', + ); + set_handler( + frm, + 'Schedule Build and Deploy', + 'schedule_build_and_deploy', + { run_now: false }, + 'Deploy', + ); + } + + // Build already running + else { + set_handler(frm, 'Stop and Fail', 'stop_and_fail', {}, 'Build'); + set_handler(frm, 'Fail and Redeploy', 'fail_and_redeploy', {}, 'Deploy'); + } + + if (frm.pg.status !== 'Draft') { + set_handler( + frm, + 'Cleanup Directory', + 'cleanup_build_directory', + {}, + 'Build', + ); + } + }, +}); + +function set_handler(frm, label, method, args, group) { + const handler = get_handler(frm, method, args); + frm.add_custom_button(label, handler, group); +} + +function 
get_handler(frm, method, args) { + return async function handler() { + const { message: data } = await frm.call({ method, args, pg: frm.pg }); + + if (data?.error) { + jingrow.msgprint({ + title: 'Action Failed', + indicator: 'yellow', + message: data.message, + }); + return; + } + + if (method.endsWith('redeploy') && data?.message) { + jingrow.msgprint({ + title: 'Redeploy Triggered', + indicator: 'green', + message: __(`Duplicate {0} created and redeploy triggered.`, [ + `Deploy Candidate`, + ]), + }); + } + + if (method === 'deploy' && data) { + jingrow.msgprint({ + title: 'Deploy Created', + indicator: 'green', + message: __( + `{0} been created (or found) from current Deploy Candidate`, + [`Deploy`], + ), + }); + } else if (method === 'deploy' && !data) { + jingrow.msgprint({ title: 'Deploy could not be created' }); + } + + frm.refresh(); + }; +} diff --git a/jcloud/jcloud/pagetype/deploy_candidate/deploy_candidate.json b/jcloud/jcloud/pagetype/deploy_candidate/deploy_candidate.json new file mode 100644 index 0000000..ea1cc1e --- /dev/null +++ b/jcloud/jcloud/pagetype/deploy_candidate/deploy_candidate.json @@ -0,0 +1,478 @@ +{ + "actions": [], + "creation": "2022-01-28 20:07:29.425024", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "status", + "column_break_2", + "group", + "team", + "scheduled_time", + "section_break_xefr", + "user_addressable_failure", + "retry_count", + "column_break_rpow", + "manually_failed", + "error_key", + "section_break_6", + "build_start", + "build_duration", + "pending_start", + "build_directory", + "last_updated", + "column_break_7", + "build_end", + "pending_duration", + "pending_end", + "build_server", + "no_cache", + "section_break_11", + "docker_image", + "docker_image_id", + "column_break_13", + "docker_image_repository", + "docker_image_tag", + "feature_flags_section", + "is_redisearch_enabled", + "redis_cache_size", + "use_app_cache", + "compress_app_cache", + "column_break_tkdd", + "merge_all_rq_queues", + "merge_default_and_short_rq_queues", + "use_rq_workerpool", + "gunicorn_threads_per_worker", + "parameters_tab", + "section_break_4", + "apps", + "dependencies", + "packages", + "environment_variables", + "output_tab", + "section_break_9", + "build_error", + "build_steps", + "build_output", + "ssh_tab", + "ssh_section", + "user_public_key", + "user_private_key", + "user_certificate" + ], + "fields": [ + { + "fieldname": "group", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Release Group", + "options": "Release Group", + "reqd": 1, + "search_index": 1, + "set_only_once": 1 + }, + { + "default": "Draft", + "fieldname": "status", + "fieldtype": "Select", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Status", + "options": "Draft\nScheduled\nPending\nPreparing\nRunning\nSuccess\nFailure", + "read_only": 1, + "search_index": 1 + }, + { + "fieldname": "section_break_4", + "fieldtype": "Section Break", + "hide_border": 1 + }, + { + "fieldname": "build_directory", + "fieldtype": "Data", + "label": "Build Directory", + "read_only": 1 + }, + { + "default": "0", + "depends_on": "eval: pg.status !== \"Draft\"", + "fieldname": "build_duration", + "fieldtype": "Time", + "label": "Build Duration", + "read_only": 1 + }, + { + "fieldname": "build_start", + "fieldtype": "Datetime", + "label": "Build Start", + "read_only": 1 + }, + { + "fieldname": "build_end", + "fieldtype": "Datetime", + "label": "Build End", + "read_only": 1 + }, + { + "fieldname": "section_break_9", + "fieldtype": "Section 
Break" + }, + { + "fieldname": "build_steps", + "fieldtype": "Table", + "label": "Build Steps", + "options": "Deploy Candidate Build Step", + "read_only": 1 + }, + { + "fieldname": "build_output", + "fieldtype": "Code", + "label": "Build Output", + "read_only": 1 + }, + { + "fieldname": "docker_image_id", + "fieldtype": "Data", + "label": "Docker Image ID", + "read_only": 1 + }, + { + "fieldname": "column_break_7", + "fieldtype": "Column Break" + }, + { + "fieldname": "docker_image_tag", + "fieldtype": "Data", + "label": "Docker Image Tag", + "read_only": 1 + }, + { + "fieldname": "section_break_11", + "fieldtype": "Section Break", + "label": "Docker Config" + }, + { + "fieldname": "column_break_13", + "fieldtype": "Column Break" + }, + { + "fieldname": "column_break_2", + "fieldtype": "Column Break" + }, + { + "fieldname": "section_break_6", + "fieldtype": "Section Break", + "label": "Build Meta" + }, + { + "fieldname": "apps", + "fieldtype": "Table", + "label": "Apps", + "options": "Deploy Candidate App", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "docker_image_repository", + "fieldtype": "Data", + "label": "Docker Image Repository", + "read_only": 1 + }, + { + "fieldname": "docker_image", + "fieldtype": "Data", + "label": "Docker Image", + "read_only": 1 + }, + { + "fetch_from": "group.team", + "fieldname": "team", + "fieldtype": "Link", + "label": "Team", + "options": "Team", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "dependencies", + "fieldtype": "Table", + "label": "Dependencies", + "options": "Deploy Candidate Dependency", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "ssh_section", + "fieldtype": "Section Break" + }, + { + "fieldname": "user_public_key", + "fieldtype": "Code", + "label": "User Public Key", + "read_only": 1 + }, + { + "fieldname": "user_private_key", + "fieldtype": "Code", + "label": "User Private Key", + "read_only": 1 + }, + { + "fieldname": "user_certificate", + "fieldtype": "Code", + "label": "User Certificate", + "read_only": 1 + }, + { + "fieldname": "feature_flags_section", + "fieldtype": "Section Break", + "label": "Feature Flags" + }, + { + "default": "0", + "fetch_from": "group.is_redisearch_enabled", + "fieldname": "is_redisearch_enabled", + "fieldtype": "Check", + "label": "Is RediSearch Enabled" + }, + { + "fieldname": "packages", + "fieldtype": "Table", + "label": "Packages", + "options": "Deploy Candidate Package", + "read_only": 1 + }, + { + "fieldname": "column_break_tkdd", + "fieldtype": "Column Break" + }, + { + "fieldname": "environment_variables", + "fieldtype": "Table", + "label": "Environment Variables", + "options": "Deploy Candidate Variable", + "read_only": 1 + }, + { + "default": "0", + "fetch_from": "group.merge_all_rq_queues", + "fieldname": "merge_all_rq_queues", + "fieldtype": "Check", + "label": "Merge All RQ Queues", + "read_only": 1 + }, + { + "default": "0", + "fetch_from": "group.merge_default_and_short_rq_queues", + "fieldname": "merge_default_and_short_rq_queues", + "fieldtype": "Check", + "label": "Merge Default and Short RQ Queues", + "read_only": 1 + }, + { + "depends_on": "eval: pg.scheduled_time", + "fieldname": "scheduled_time", + "fieldtype": "Datetime", + "label": "Scheduled Time", + "read_only": 1 + }, + { + "fetch_from": "group.gunicorn_threads_per_worker", + "fieldname": "gunicorn_threads_per_worker", + "fieldtype": "Int", + "label": "Gunicorn Threads Per Worker", + "read_only": 1 + }, + { + "default": "0", + "fetch_from": "group.use_rq_workerpool", + "fieldname": "use_rq_workerpool", + 
"fieldtype": "Check", + "label": "Use RQ WorkerPool" + }, + { + "default": "0", + "fetch_from": "group.use_app_cache", + "fieldname": "use_app_cache", + "fieldtype": "Check", + "label": "Use App Cache" + }, + { + "default": "0", + "fetch_from": "group.compress_app_cache", + "fieldname": "compress_app_cache", + "fieldtype": "Check", + "label": "Compress App Cache" + }, + { + "description": "Used when parsing build output.", + "fieldname": "last_updated", + "fieldtype": "Datetime", + "hidden": 1, + "label": "Last Updated", + "read_only": 1 + }, + { + "fieldname": "parameters_tab", + "fieldtype": "Tab Break", + "label": "Apps & Deps" + }, + { + "fieldname": "output_tab", + "fieldtype": "Tab Break", + "label": "Output" + }, + { + "fieldname": "ssh_tab", + "fieldtype": "Tab Break", + "label": "SSH" + }, + { + "depends_on": "eval:pg.build_error", + "fieldname": "build_error", + "fieldtype": "Code", + "label": "Build Error", + "read_only": 1 + }, + { + "default": "0", + "depends_on": "eval: pg.user_addressable_failure", + "description": "Set if the build failure is user addressable, i.e. the cause of failure is not FC.", + "fieldname": "user_addressable_failure", + "fieldtype": "Check", + "label": "User Addressable Failure", + "read_only": 1 + }, + { + "default": "0", + "depends_on": "eval: pg.manually_failed", + "description": "Set if the build was manually failed or cancelled.", + "fieldname": "manually_failed", + "fieldtype": "Check", + "label": "Manually Failed", + "read_only": 1 + }, + { + "default": "0", + "depends_on": "eval: pg.status !== \"Draft\" && pg.status !== \"Pending\"", + "description": "Time spent by the build in queue, i.e. in status Pending.", + "fieldname": "pending_duration", + "fieldtype": "Time", + "label": "Pending Duration", + "read_only": 1 + }, + { + "fieldname": "section_break_xefr", + "fieldtype": "Section Break", + "label": "Failure" + }, + { + "fieldname": "column_break_rpow", + "fieldtype": "Column Break" + }, + { + "fieldname": "pending_start", + "fieldtype": "Datetime", + "hidden": 1, + "label": "Pending Start", + "read_only": 1 + }, + { + "fieldname": "pending_end", + "fieldtype": "Datetime", + "hidden": 1, + "label": "Pending End", + "read_only": 1 + }, + { + "default": "0", + "depends_on": "eval: pg.retry_count", + "description": "Under certain failure conditions, a build will be retried.", + "fieldname": "retry_count", + "fieldtype": "Int", + "label": "Retry Count", + "read_only": 1 + }, + { + "depends_on": "eval:pg.build_server", + "fieldname": "build_server", + "fieldtype": "Link", + "label": "Build Server", + "options": "Server", + "read_only": 1 + }, + { + "depends_on": "eval: pg.user_addressable_failure", + "description": "Set if Failure is User Addressable. 
Used to prevent subsequent builds unless a valid change has been made.", + "fieldname": "error_key", + "fieldtype": "Data", + "label": "Error Key" + }, + { + "default": "0", + "depends_on": "eval: pg.no_cache", + "description": "Set if the Docker build was run with the --no-cache flag.", + "fieldname": "no_cache", + "fieldtype": "Check", + "label": "No Cache" + }, + { + "default": "512", + "fetch_from": "group.redis_cache_size", + "fieldname": "redis_cache_size", + "fieldtype": "Int", + "label": "Redis Cache Size (MB)" + } + ], + "links": [ + { + "link_pagetype": "Agent Job", + "link_fieldname": "reference_name" + }, + { + "link_pagetype": "Error Log", + "link_fieldname": "reference_name" + }, + { + "link_pagetype": "Jcloud Notification", + "link_fieldname": "document_name" + } + ], + "modified": "2024-12-27 11:59:09.917364", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Deploy Candidate", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + }, + { + "create": 1, + "read": 1, + "role": "Jcloud Admin", + "write": 1 + }, + { + "create": 1, + "read": 1, + "role": "Jcloud Member", + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "title_field": "group", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/deploy_candidate/deploy_candidate.py b/jcloud/jcloud/pagetype/deploy_candidate/deploy_candidate.py new file mode 100644 index 0000000..111977a --- /dev/null +++ b/jcloud/jcloud/pagetype/deploy_candidate/deploy_candidate.py @@ -0,0 +1,2004 @@ +from __future__ import annotations + +import contextlib + +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt +import glob +import json +import os +import re +import shlex +import shutil +import subprocess +import tarfile +import tempfile +import typing +from datetime import datetime, timedelta +from subprocess import Popen +from typing import Any, Literal + +import jingrow +import jingrow.utils +from jingrow.core.utils import find +from jingrow.model.document import Document +from jingrow.model.naming import make_autoname +from jingrow.utils import now_datetime as now +from jingrow.utils import rounded +from tenacity import retry, stop_after_attempt, wait_fixed + +from jcloud.agent import Agent +from jcloud.overrides import get_permission_query_conditions_for_pagetype +from jcloud.jcloud.pagetype.app_release.app_release import ( + AppReleasePair, + get_changed_files_between_hashes, +) +from jcloud.jcloud.pagetype.deploy_candidate.deploy_notifications import ( + create_build_failed_notification, +) +from jcloud.jcloud.pagetype.deploy_candidate.docker_output_parsers import ( + DockerBuildOutputParser, + UploadStepUpdater, +) +from jcloud.jcloud.pagetype.deploy_candidate.utils import ( + PackageManagerFiles, + get_build_server, + get_package_manager_files, + is_suspended, + load_pyproject, +) +from jcloud.jcloud.pagetype.deploy_candidate.validations import PreBuildValidations +from jcloud.utils import get_current_team, log_error, reconnect_on_failure +from jcloud.utils.jobs import get_background_jobs, stop_background_job + +# build_duration, pending_duration are Time fields, >= 1 day is invalid +MAX_DURATION = timedelta(hours=23, minutes=59, seconds=59) +TRANSITORY_STATES = ["Scheduled", "Pending", "Preparing", "Running"] +RESTING_STATES = ["Draft", "Success", "Failure"] + +if 
typing.TYPE_CHECKING: + from rq.job import Job + + from jcloud.jcloud.pagetype.agent_job.agent_job import AgentJob + from jcloud.jcloud.pagetype.app_release.app_release import AppRelease + from jcloud.jcloud.pagetype.release_group.release_group import ReleaseGroup + + +class DeployCandidate(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + from jcloud.jcloud.pagetype.deploy_candidate_app.deploy_candidate_app import ( + DeployCandidateApp, + ) + from jcloud.jcloud.pagetype.deploy_candidate_build_step.deploy_candidate_build_step import ( + DeployCandidateBuildStep, + ) + from jcloud.jcloud.pagetype.deploy_candidate_dependency.deploy_candidate_dependency import ( + DeployCandidateDependency, + ) + from jcloud.jcloud.pagetype.deploy_candidate_package.deploy_candidate_package import ( + DeployCandidatePackage, + ) + from jcloud.jcloud.pagetype.deploy_candidate_variable.deploy_candidate_variable import ( + DeployCandidateVariable, + ) + + apps: DF.Table[DeployCandidateApp] + build_directory: DF.Data | None + build_duration: DF.Time | None + build_end: DF.Datetime | None + build_error: DF.Code | None + build_output: DF.Code | None + build_server: DF.Link | None + build_start: DF.Datetime | None + build_steps: DF.Table[DeployCandidateBuildStep] + compress_app_cache: DF.Check + dependencies: DF.Table[DeployCandidateDependency] + docker_image: DF.Data | None + docker_image_id: DF.Data | None + docker_image_repository: DF.Data | None + docker_image_tag: DF.Data | None + environment_variables: DF.Table[DeployCandidateVariable] + error_key: DF.Data | None + group: DF.Link + gunicorn_threads_per_worker: DF.Int + is_redisearch_enabled: DF.Check + last_updated: DF.Datetime | None + manually_failed: DF.Check + merge_all_rq_queues: DF.Check + merge_default_and_short_rq_queues: DF.Check + no_cache: DF.Check + packages: DF.Table[DeployCandidatePackage] + pending_duration: DF.Time | None + pending_end: DF.Datetime | None + pending_start: DF.Datetime | None + redis_cache_size: DF.Int + retry_count: DF.Int + scheduled_time: DF.Datetime | None + status: DF.Literal["Draft", "Scheduled", "Pending", "Preparing", "Running", "Success", "Failure"] + team: DF.Link + use_app_cache: DF.Check + use_rq_workerpool: DF.Check + user_addressable_failure: DF.Check + user_certificate: DF.Code | None + user_private_key: DF.Code | None + user_public_key: DF.Code | None + # end: auto-generated types + + build_output_parser: DockerBuildOutputParser | None + upload_step_updater: UploadStepUpdater | None + + dashboard_fields = ( + "name", + "status", + "creation", + "deployed", + "build_steps", + "build_start", + "build_end", + "build_duration", + "build_error", + "apps", + "group", + "retry_count", + ) + + @staticmethod + def get_list_query(query): + results = query.run(as_dict=True) + names = [r.name for r in results if r.status and r.status != "Success"] + notifications = jingrow.get_all( + "Jcloud Notification", + fields=["name", "document_name"], + filters={ + "document_type": "Deploy Candidate", + "document_name": ["in", names], + "class": "Error", + "is_actionable": True, + "is_addressed": False, + }, + ) + notification_map = {n.document_name: n.name for n in notifications} + for result in results: + if name := result.get("name"): + result.addressable_notification = notification_map.get(name) + + return results + + def get_pg(self, pg): + def 
get_job_duration_in_seconds(duration): + if not duration: + return 0 + return f"{float(rounded(duration.total_seconds(), 2))}s" + + pg.jobs = [] + deploys = jingrow.get_all("Deploy", {"candidate": self.name}, limit=1) + if deploys: + deploy = jingrow.get_pg("Deploy", deploys[0].name) + for bench in deploy.benches: + if not bench.bench: + continue + job = jingrow.get_all( + "Agent Job", + ["name", "status", "end", "duration", "bench"], + {"bench": bench.bench, "job_type": "New Bench"}, + limit=1, + ) or [{}] + pg.jobs.append( + { + **job[0], + "title": f"Deploying {bench.bench}", + "duration": get_job_duration_in_seconds(job[0].duration) if job else 0, + } + ) + + # if any job is in running, pending state, set the status to deploying + if any(job.get("status") in ["Running", "Pending"] for job in pg.jobs): + pg.status = "Deploying" + + def autoname(self): + group = self.group[6:] + series = f"deploy-{group}-.######" + self.name = make_autoname(series) + + def before_insert(self): + if self.status == "Draft": + self.pending_duration = 0 + self.build_duration = 0 + + def on_trash(self): + jingrow.db.delete( + "Jcloud Notification", + {"document_type": self.pagetype, "document_name": self.name}, + ) + + def get_unpublished_marketplace_releases(self) -> list[str]: + rg: ReleaseGroup = jingrow.get_pg("Release Group", self.group) + marketplace_app_sources = rg.get_marketplace_app_sources() + + if not marketplace_app_sources: + return [] + + # Marketplace App Releases in this deploy candidate + dc_app_releases = jingrow.get_all( + "Deploy Candidate App", + filters={"parent": self.name, "source": ("in", marketplace_app_sources)}, + pluck="release", + ) + + # Unapproved app releases for marketplace apps + return jingrow.get_all( + "App Release", + filters={"name": ("in", dc_app_releases), "status": ("!=", "Approved")}, + pluck="name", + ) + + def pre_build(self, method, **kwargs): + # This should always be the first call in pre-build + self.reset_build_state() + + if not self.validate_status(): + return + + if "no_cache" in kwargs: + self.no_cache = kwargs.get("no_cache") + del kwargs["no_cache"] + + no_build = kwargs.get("no_build", False) + self.set_build_server(no_build) + self._set_status_pending() + self.add_pre_build_steps() + self.save() + ( + user, + session_data, + team, + ) = ( + jingrow.session.user, + jingrow.session.data, + get_current_team(True), + ) + jingrow.set_user(jingrow.get_value("Team", team.name, "user")) + queue = "default" if jingrow.conf.developer_mode else "build" + + jingrow.enqueue_pg( + self.pagetype, + self.name, + method, + queue=queue, + timeout=2400, + enqueue_after_commit=True, + **kwargs, + ) + jingrow.set_user(user) + jingrow.session.data = session_data + jingrow.db.commit() + + def set_build_server(self, no_build: bool): + if not self.build_server: + self.build_server = get_build_server(self.group) + + if self.build_server or no_build: + return + + throw_no_build_server() + + def validate_status(self): + if self.status in ["Draft", "Success", "Failure", "Scheduled"]: + return True + + jingrow.msgprint( + f"Build is in {self.status} state. " + "Please wait for build to succeed or fail before retrying." 
+ ) + return False + + @jingrow.whitelist() + def build( + self, + no_push: bool = False, + no_build: bool = False, + no_cache: bool = False, + ): + self.pre_build( + method="_build", + no_push=no_push, + no_build=no_build, + no_cache=no_cache, + ) + + @jingrow.whitelist() + def fail_and_redeploy(self): + if (res := self.stop_and_fail()) and res["error"]: + return res + return self.redeploy() + + @jingrow.whitelist() + def stop_and_fail(self): + not_failable = ["Draft", "Failure", "Success"] + if self.status in not_failable: + return dict( + error=True, + message=f"Cannot stop and fail if status one of [{', '.join(not_failable)}]", + ) + self.manually_failed = True + self._stop_and_fail() + return dict(error=False, message="Failed successfully") + + def _stop_and_fail(self, commit=True): + self.stop_build_jobs() + self._set_status_failure(commit) + + @jingrow.whitelist() + def redeploy(self, no_cache: bool = False): + if not (dc := self.get_duplicate_dc()): + return dict(error=True, message="Cannot create duplicate Deploy Candidate") + + dc.build_and_deploy(no_cache=no_cache) + return dict(error=False, message=dc.name) + + @jingrow.whitelist() + def schedule_build_and_deploy( + self, + run_now: bool = True, + scheduled_time: datetime | None = None, + ): + if self.status == "Scheduled": + return + + if run_now and not is_suspended(): + self.build_and_deploy() + return + + self.status = "Scheduled" + self.scheduled_time = scheduled_time or now() + self.save() + jingrow.db.commit() + + def run_scheduled_build_and_deploy(self): + """ + Build and Deploy will run only if the build is Scheduled + and if builds have not been suspended. + """ + if self.status != "Scheduled" or is_suspended(): + return + self.build_and_deploy() + + def build_and_deploy(self, no_cache: bool = False): + self.pre_build( + method="_build", + deploy_after_build=True, + no_cache=no_cache, + ) + + @jingrow.whitelist() + def deploy(self): + try: + return self.create_deploy() + except Exception: + log_error("Deploy Creation Error", pg=self) + + def _build( + self, + no_push: bool = False, + no_build: bool = False, + # Used for processing build agent job + deploy_after_build: bool = False, + ): + self._set_status_preparing() + self._set_output_parsers() + try: + self._prepare_build(no_push) + self._start_build( + no_push, + no_build, + deploy_after_build, + ) + except Exception as exc: + self.handle_build_failure(exc) + + def handle_build_failure( + self, + exc: Exception | None = None, + job: "AgentJob | None" = None, + ) -> None: + self._flush_output_parsers() + self._set_status_failure() + should_retry = self.should_build_retry(exc=exc, job=job) + + if not should_retry: + self._fail_site_group_deploy_if_exists() + + # Do not send a notification if the build is being retried. 
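+        # What follows, in short: a non-retryable failure notifies the user
+        # (create_build_failed_notification) and may mark the failure as user
+        # addressable; a retryable one is rescheduled with backoff via
+        # schedule_build_retry; any leftover exception is logged.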
+ if not should_retry and create_build_failed_notification(self, exc): + self.user_addressable_failure = True + self.save(ignore_permissions=True) + jingrow.db.commit() + return + + if should_retry: + self.schedule_build_retry() + return + + if exc: + # Log and raise error if build failure is not actionable or no retry + log_error("Deploy Candidate Build Exception", pg=self) + + def should_build_retry( + self, + exc: Exception | None, + job: "AgentJob | None", + ) -> bool: + if self.status != "Failure": + return False + + # Retry twice before giving up + if self.retry_count >= 3: + return False + + bo = self.build_output + if isinstance(bo, str) and should_build_retry_build_output(bo): + return True + + if exc and should_build_retry_exc(exc): + return True + + if job and should_build_retry_job(job): + return True + + return False + + def schedule_build_retry(self): + self.retry_count += 1 + minutes = min(5**self.retry_count, 125) + scheduled_time = now() + timedelta(minutes=minutes) + self.schedule_build_and_deploy( + run_now=False, + scheduled_time=scheduled_time, + ) + + def _set_output_parsers(self): + self.build_output_parser = DockerBuildOutputParser(self) + self.upload_step_updater = UploadStepUpdater(self) + + def _flush_output_parsers(self, commit=False): + if self.build_output_parser: + self.build_output_parser.flush_output(commit) + + if self.upload_step_updater: + self.upload_step_updater.flush_output(commit) + + def _prepare_build(self, no_push: bool = False): + if not self.no_cache: + self._update_app_releases() + + if not self.no_cache: + self._set_app_cached_flags() + + self._prepare_build_directory() + self._prepare_build_context(no_push) + + def _start_build( + self, + no_push: bool = False, + no_build: bool = False, + deploy_after_build: bool = False, + ): + self._update_docker_image_metadata() + if no_build: + self._set_status_success() + return + + if not self.build_server: + throw_no_build_server() + + # Build runs on build server + self._run_build_agent_jobs( + deploy_after_build, + no_push, + ) + + def _run_build_agent_jobs( + self, + deploy_after_build: bool, + no_push: bool, + ) -> None: + context_filename = self._package_and_upload_context() + settings = self._fetch_registry_settings() + + Agent(self.build_server).run_build( + { + "filename": context_filename, + "image_repository": self.docker_image_repository, + "image_tag": self.docker_image_tag, + "registry": { + "url": settings.docker_registry_url, + "username": settings.docker_registry_username, + "password": settings.docker_registry_password, + }, + "no_cache": self.no_cache, + "no_push": no_push, + # Next few values are not used by agent but are + # read in `process_run_build` + "deploy_candidate": self.name, + "deploy_after_build": deploy_after_build, + } + ) + self.last_updated = now() + self._set_status_running() + + def _package_and_upload_context(self) -> str: + context_filepath = self._package_build_context() + context_filename = self._upload_build_context( + context_filepath, + self.build_server, + ) + os.remove(context_filepath) + return context_filename + + def _package_build_context(self) -> str: + """Creates a tarball of the build context and returns the path to it.""" + step = self.get_step("package", "context") or jingrow._dict() + step.status = "Running" + start_time = now() + self.save(ignore_permissions=True, ignore_version=True) + jingrow.db.commit() + + # make sure to set ownership of build_directory and its contents to 1000:1000 + def fix_content_permission(tarinfo): + tarinfo.uid = 1000 
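+            # Assumption: 1000:1000 is the unprivileged user the image is
+            # built and run as, so extracted context files stay accessible.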
+ tarinfo.gid = 1000 + return tarinfo + + tmp_file_path = tempfile.mkstemp(suffix=".tar.gz")[1] + with tarfile.open(tmp_file_path, "w:gz", compresslevel=5) as tar: + if jingrow.conf.developer_mode: + tar.add(self.build_directory, arcname=".", filter=fix_content_permission) + else: + tar.add(self.build_directory, arcname=".") + + step.status = "Success" + step.duration = get_duration(start_time) + self.save(ignore_permissions=True, ignore_version=True) + jingrow.db.commit() + return tmp_file_path + + def _upload_build_context(self, context_filepath: str, build_server: str): + step = self.get_step("upload", "context") or jingrow._dict() + step.status = "Running" + start_time = now() + self.save(ignore_permissions=True, ignore_version=True) + jingrow.db.commit() + + try: + upload_filename = self.upload_build_context_for_docker_build( + context_filepath, + build_server, + ) + except Exception: + step.status = "Failure" + raise + + step.status = "Success" + step.duration = get_duration(start_time) + return upload_filename + + @retry( + reraise=True, + wait=wait_fixed(300), + stop=stop_after_attempt(3), + ) + def upload_build_context_for_docker_build( + self, + context_filepath: str, + build_server: str, + ): + agent = Agent(build_server) + with open(context_filepath, "rb") as file: + if upload_filename := agent.upload_build_context_for_docker_build(file, self.name): + return upload_filename + + message = "Failed to upload build context to remote docker builder" + if agent.response: + message += f"\nagent response: {agent.response.text}" + + raise Exception(message) + + @staticmethod + def process_run_build(job: "AgentJob", response_data: "dict | None"): + request_data = json.loads(job.request_data) + dc: DeployCandidate = jingrow.get_pg( + "Deploy Candidate", + request_data["deploy_candidate"], + ) + dc._process_run_build(job, request_data, response_data) + + def _process_run_build( + self, + job: "AgentJob", + request_data: dict, + response_data: dict | None, + ): + job_data = json.loads(job.data or "{}") + output_data = json.loads(job_data.get("output", "{}")) + + """ + Due to how agent - jcloud communication takes place, every time an + output is published all of it has to be re-parsed from the start. + + This is due to a method of streaming agent output to jcloud not + existing. 
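+
+        In practice: on every update the full "build" and "push" outputs are
+        pulled via get_remote_step_output (from the job data or the immediate
+        response) and re-fed to fresh DockerBuildOutputParser and
+        UploadStepUpdater instances set up in _set_output_parsers.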
+ """ + self._set_output_parsers() + if output := get_remote_step_output( + "build", + output_data, + response_data, + ): + self.build_output_parser.parse_and_update(output) + + if output := get_remote_step_output( + "push", + output_data, + response_data, + ): + self.upload_step_updater.start() + self.upload_step_updater.process(output) + + if self.has_remote_build_failed(job, job_data): + self.handle_build_failure(exc=None, job=job) + else: + self._update_status_from_remote_build_job(job) + + # Fallback case cause upload step can be left hanging + self.correct_upload_step_status() + + if self.status == "Success" and request_data.get("deploy_after_build"): + self.create_deploy() + + def has_remote_build_failed(self, job: "AgentJob", job_data: dict) -> bool: + if job.status == "Failure": + return True + + if job_data.get("build_failure"): + return True + + if (usu := self.upload_step_updater) and usu.upload_step and usu.upload_step.status == "Failure": + return True + + if self.get_first_step("status", "Failure"): + return True + + return False + + def correct_upload_step_status(self): + if not (usu := self.upload_step_updater) or not usu.upload_step: + return + + if self.status == "Success" and usu.upload_step.status == "Running": + self.upload_step_updater.end("Success") + + elif self.status == "Failure" and usu.upload_step.status not in [ + "Failure", + "Pending", + ]: + self.upload_step_updater.end("Pending") + + def _update_status_from_remote_build_job(self, job: "AgentJob"): + match job.status: + case "Pending" | "Running": + return self._set_status_running() + case "Failure" | "Undelivered" | "Delivery Failure": + return self._set_status_failure() + case "Success": + return self._set_status_success() + case _: + raise Exception("unreachable code execution") + + def _update_docker_image_metadata(self): + settings = self._fetch_registry_settings() + + if settings.docker_registry_namespace: + namespace = f"{settings.docker_registry_namespace}/{settings.domain}" + else: + namespace = f"{settings.domain}" + + self.docker_image_repository = f"{settings.docker_registry_url}/{namespace}/{self.group}" + self.docker_image_tag = self.name + self.docker_image = f"{self.docker_image_repository}:{self.docker_image_tag}" + + def _fetch_registry_settings(self): + return jingrow.db.get_value( + "Jcloud Settings", + None, + [ + "domain", + "docker_registry_url", + "docker_registry_namespace", + "docker_registry_username", + "docker_registry_password", + ], + as_dict=True, + ) + + def _set_status_pending(self): + self.status = "Pending" + self.pending_start = now() + self.save(ignore_permissions=True) + jingrow.db.commit() + + def _set_status_preparing(self): + self._set_pending_duration() + self.status = "Preparing" + self.build_start = now() + self.save(ignore_permissions=True) + jingrow.db.commit() + + def _set_status_running(self): + self.status = "Running" + self.save(ignore_permissions=True) + jingrow.db.commit() + + @reconnect_on_failure() + def _set_status_failure(self, commit=True): + self.status = "Failure" + self._fail_last_running_step() + self._set_build_duration() + self.save(ignore_permissions=True) + if commit: + jingrow.db.commit() + + @reconnect_on_failure() + def _set_status_success(self, commit=True): + self.status = "Success" + self.build_error = None + self._set_build_duration() + self.save(ignore_permissions=True) + if commit: + jingrow.db.commit() + + def _set_build_duration(self): + self.build_end = now() + if not isinstance(self.build_start, datetime): + return + + 
self.build_duration = min( + self.build_end - self.build_start, + MAX_DURATION, + ) + + def _set_pending_duration(self): + self.pending_end = now() + if not isinstance(self.pending_start, datetime): + return + + self.pending_duration = min( + self.pending_end - self.pending_start, + MAX_DURATION, + ) + + def _fail_last_running_step(self): + for step in self.build_steps: + if step.status == "Failure": + return + + if step.status == "Running": + step.status = "Failure" + break + + def reset_build_state(self): + # Build directory + self.cleanup_build_directory() + self.build_directory = None + # Build output + self.build_steps.clear() + self.build_error = "" + self.build_output = "" + self.last_updated = None + # Failure flags + self.user_addressable_failure = False + self.manually_failed = False + # Build times + self.build_start = None + self.build_end = None + self.build_duration = None + # Pending times + self.pending_start = None + self.pending_end = None + self.pending_duration = None + + def add_pre_build_steps(self): + """ + This function just adds build steps that occur before + a docker build, rest of the steps are updated after the + Dockerfile is generated in: + - `_update_build_steps` + - `_update_post_build_steps` + """ + app_titles = {a.app: a.title for a in self.apps} + + # Clone app slugs + slugs: list[tuple[str, str]] = [("clone", app.app) for app in self.apps] + + slugs.extend( + [ + # Pre-build validation slug + ("validate", "pre-build"), + # Build slugs + ("package", "context"), + ("upload", "context"), + ] + ) + + for stage_slug, step_slug in slugs: + stage, step = get_build_stage_and_step( + stage_slug, + step_slug, + app_titles, + ) + step_dict = dict( + status="Pending", + stage_slug=stage_slug, + step_slug=step_slug, + stage=stage, + step=step, + ) + self.append("build_steps", step_dict) + self.save() + + def _set_app_cached_flags(self) -> None: + for app in self.apps: + app.use_cached = bool(self.use_app_cache) + + def _prepare_build_directory(self): + build_directory = jingrow.get_value("Jcloud Settings", None, "build_directory") + if not os.path.exists(build_directory): + os.mkdir(build_directory) + + group_directory = os.path.join(build_directory, self.group) + if not os.path.exists(group_directory): + os.mkdir(group_directory) + + self.build_directory = os.path.join(build_directory, self.group, self.name) + if os.path.exists(self.build_directory): + shutil.rmtree(self.build_directory) + + os.mkdir(self.build_directory) + + @jingrow.whitelist() + def cleanup_build_directory(self): + if not self.build_directory: + return + + if os.path.exists(self.build_directory): + shutil.rmtree(self.build_directory) + + self.build_directory = None + self.save() + + def _update_app_releases(self) -> None: + if not jingrow.get_value("Release Group", self.group, "use_delta_builds"): + return + + try: + update = self.get_pull_update_dict() + except Exception: + log_error(title="Failed to get Pull Update Dict", pg=self) + return + + for app in self.apps: + if app.app not in update: + continue + + release_pair = update[app.app] + + # Previously deployed release used for get-app + app.hash = release_pair["old"]["hash"] + app.release = release_pair["old"]["name"] + + # New release to be pulled after get-app + app.pullable_hash = release_pair["new"]["hash"] + app.pullable_release = release_pair["new"]["name"] + + def _prepare_build_context(self, no_push: bool): + repo_path_map = self._clone_repos() + pmf = get_package_manager_files(repo_path_map) + 
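+ # pmf maps each app to its parsed package manager files (for example,
+ # pmf[app.app]["pyproject"]); it feeds the pre-build validations below
+ # and the apt package collection in _update_packages().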
self._run_prebuild_validations_and_update_step(pmf) + + """ + Due to dependencies mentioned in an apps pyproject.toml + file, _update_packages() needs to run after the repos + have been cloned. + """ + self._update_packages(pmf) + self.save(ignore_permissions=True) + + # Set props used when generating the Dockerfile + self._set_additional_packages() + self._set_container_mounts() + + dockerfile = self._generate_dockerfile() + self._add_build_steps(dockerfile) + self._add_post_build_steps(no_push) + + self._copy_config_files() + self._generate_redis_cache_config() + self._generate_redis_queue_config() + self._generate_supervisor_config() + self._generate_apps_txt() + self.generate_ssh_keys() + + def _clone_repos(self): + apps_directory = os.path.join(self.build_directory, "apps") + os.makedirs(apps_directory, exist_ok=True) + + repo_path_map: dict[str, str] = {} + + for app in self.apps: + repo_path_map[app.app] = self._clone_app_repo(app) + app.app_name = self._get_app_name(app.app) + self.save(ignore_permissions=True, ignore_version=True) + jingrow.db.commit() + + return repo_path_map + + def _run_prebuild_validations_and_update_step(self, pmf: PackageManagerFiles): + """ + Errors thrown here will be caught by a function up the + stack. + + Since they should be from expected invalids, they should + also be user addressable. + """ + if not (step := self.get_step("validate", "pre-build")): + raise jingrow.ValidationError("Validate Pre-build step not found") + + # Start step + step.status = "Running" + start_time = now() + self.save(ignore_permissions=True, ignore_version=True) + jingrow.db.commit() + + # Run Pre-build Validations + PreBuildValidations(self, pmf).validate() + + # End step + step.duration = get_duration(start_time) + step.output = "Pre-build validations passed" + step.status = "Success" + self.save(ignore_permissions=True, ignore_version=True) + jingrow.db.commit() + + def _clone_app_repo(self, app: "DeployCandidateApp") -> str: + """ + Clones the app repository if it has not been cloned and + copies it into the build context directory. + + Returned path points to the repository that needs to be + validated. 
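+
+ As a sketch (directory names as used in the body below): without a
+ pullable release the returned path is `<build_directory>/apps/<app>`,
+ with a pullable release it is `<build_directory>/app_updates/<app>`.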
+ """ + if not (step := self.get_step("clone", app.app)): + raise jingrow.ValidationError(f"App {app.app} clone step not found") + + if not self.build_directory: + raise jingrow.ValidationError("Build Directory not set") + + step.command = f"git clone {app.app}" + source, cloned = jingrow.db.get_value( + "App Release", + app.release, + ["clone_directory", "cloned"], + ) + + if cloned and os.path.exists(source): + step.cached = True + step.status = "Success" + else: + source = self._clone_release_and_update_step(app.release, step) + + target = os.path.join(self.build_directory, "apps", app.app) + shutil.copytree(source, target, symlinks=True) + + """ + Pullable updates don't need cloning as they get cloned when + the app is checked for possible pullable updates in: + + self.get_pull_update_dict + └─ app_release.get_changed_files_between_hashes + """ + if app.pullable_release: + source = jingrow.get_value("App Release", app.pullable_release, "clone_directory") + target = os.path.join(self.build_directory, "app_updates", app.app) + shutil.copytree(source, target, symlinks=True) + + return target + + def _clone_release_and_update_step(self, release: str, step: "DeployCandidateBuildStep"): + # Start step + step.status = "Running" + start_time = now() + self.save(ignore_version=True) + jingrow.db.commit() + + # Clone Release + release: AppRelease = jingrow.get_pg( + "App Release", + release, + for_update=True, + ) + release._clone(force=True) + + # End step + step.duration = get_duration(start_time) + step.output = release.output + step.status = "Success" + return release.clone_directory + + def _update_packages(self, pmf: PackageManagerFiles): + existing_apt_packages = set() + for pkgs in self.packages: + if pkgs.package_manager != "apt": + continue + for p in pkgs.package.split(" "): + existing_apt_packages.add(p) + + """ + Individual apps can mention apt dependencies in their pyproject.toml. + + For Example: + ``` + [deploy.dependencies.apt] + packages = [ + "ffmpeg", + "libsm6", + "libxext6", + ] + ``` + + For each app, these are grouped together into a single package row. 
+ """ + for app in self.apps: + pyproject = pmf[app.app]["pyproject"] or {} + deps = pyproject.get("deploy", {}).get("dependencies", {}) + pkgs = deps.get("apt", {}).get("packages", []) + + app_packages = [] + for p in pkgs: + if p in existing_apt_packages: + continue + existing_apt_packages.add(p) + app_packages.append(p) + + if not app_packages: + continue + + package = dict(package_manager="apt", package=" ".join(app_packages)) + self.append("packages", package) + + def _set_additional_packages(self): + """ + additional_packages is used when rendering the Dockerfile template + """ + self.additional_packages = [] + dep_versions = {d.dependency: d.version for d in self.dependencies} + for p in self.packages: + # second clause cause: '/opt/certbot/bin/pip' + if p.package_manager not in ["apt", "pip"] and not p.package_manager.endswith("/pip"): + continue + + prerequisites = jingrow.render_template(p.package_prerequisites, dep_versions) + package = dict( + package_manager=p.package_manager, + package=p.package, + prerequisites=prerequisites, + after_install=p.after_install, + ) + self.additional_packages.append(package) + + def _set_container_mounts(self): + self.container_mounts = jingrow.get_all( + "Release Group Mount", + {"parent": self.group, "is_absolute_path": False}, + ["destination"], + order_by="idx", + ) + + def _generate_dockerfile(self): + dockerfile = os.path.join(self.build_directory, "Dockerfile") + with open(dockerfile, "w") as f: + dockerfile_template = "jcloud/docker/Dockerfile" + + for d in self.dependencies: + if d.dependency == "BENCH_VERSION" and d.version == "5.2.1": + dockerfile_template = "jcloud/docker/Dockerfile_Bench_5_2_1" + + content = jingrow.render_template(dockerfile_template, {"pg": self}, is_path=True) + f.write(content) + return content + + def _add_build_steps(self, dockerfile: str): + """ + This function adds build steps that take place inside docker build. + These steps are added from the generated Dockerfile. + + Build steps are updated when docker build runs and prints a string of + the following format `#stage-{ stage_slug }-{ step_slug }` to the output. 
+ + To add additional build steps: + - Update STAGE_SLUG_MAP + - Update STEP_SLUG_MAP + - Update get_build_stage_and_step + """ + app_titles = {a.app: a.title for a in self.apps} + + checkpoints = self._get_dockerfile_checkpoints(dockerfile) + for checkpoint in checkpoints: + splits = checkpoint.split("-", 1) + if len(splits) != 2: + continue + + stage_slug, step_slug = splits + stage, step = get_build_stage_and_step( + stage_slug, + step_slug, + app_titles, + ) + + step = dict( + status="Pending", + stage_slug=stage_slug, + step_slug=step_slug, + stage=stage, + step=step, + ) + self.append("build_steps", step) + + def _get_dockerfile_checkpoints(self, dockerfile: str) -> list[str]: + """ + Returns checkpoint slugs from a generated Dockerfile + """ + + # Example: "`#stage-pre-essentials`", "`#stage-apps-print_designer`" + rx = re.compile(r"`#stage-([^`]+)`") + + # Example: "pre-essentials", "apps-print_designer" + checkpoints = [] + for line in dockerfile.split("\n"): + matches = rx.findall(line) + checkpoints.extend(matches) + + return checkpoints + + def _add_post_build_steps(self, no_push: bool): + slugs = [] + if not no_push: + slugs.append(("upload", "image")) + + for stage_slug, step_slug in slugs: + stage, step = get_build_stage_and_step(stage_slug, step_slug, {}) + step = dict( + status="Pending", + stage_slug=stage_slug, + step_slug=step_slug, + stage=stage, + step=step, + ) + self.append("build_steps", step) + + def _copy_config_files(self): + for target in ["common_site_config.json", "supervisord.conf", ".vimrc"]: + shutil.copy(os.path.join(jingrow.get_app_path("jcloud", "docker"), target), self.build_directory) + + for target in ["config", "redis"]: + shutil.copytree( + os.path.join(jingrow.get_app_path("jcloud", "docker"), target), + os.path.join(self.build_directory, target), + symlinks=True, + ) + + def _generate_redis_cache_config(self): + redis_cache_conf = os.path.join(self.build_directory, "config", "redis-cache.conf") + with open(redis_cache_conf, "w") as f: + redis_cache_conf_template = "jcloud/docker/config/redis-cache.conf" + content = jingrow.render_template(redis_cache_conf_template, {"pg": self}, is_path=True) + f.write(content) + + def _generate_redis_queue_config(self): + redis_queue_conf = os.path.join(self.build_directory, "config", "redis-queue.conf") + with open(redis_queue_conf, "w") as f: + redis_queue_conf_template = "jcloud/docker/config/redis-queue.conf" + content = jingrow.render_template(redis_queue_conf_template, {"pg": self}, is_path=True) + f.write(content) + + def _generate_supervisor_config(self): + supervisor_conf = os.path.join(self.build_directory, "config", "supervisor.conf") + with open(supervisor_conf, "w") as f: + supervisor_conf_template = "jcloud/docker/config/supervisor.conf" + content = jingrow.render_template(supervisor_conf_template, {"pg": self}, is_path=True) + f.write(content) + + def _generate_apps_txt(self): + apps_txt = os.path.join(self.build_directory, "apps.txt") + with open(apps_txt, "w") as f: + content = "\n".join([app.app_name for app in self.apps]) + f.write(content) + + def _get_app_name(self, app): + """Retrieves `name` attribute of app - equivalent to distribution name + of python package. Fetches from pyproject.toml, setup.cfg or setup.py + whichever defines it in that order. 
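+
+ For example, a pyproject.toml containing the following (a hypothetical
+ app) resolves the distribution name to "my_app":
+
+ ```
+ # hypothetical app
+ [project]
+ name = "my_app"
+ ```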
+ """ + app_name = None + apps_path = os.path.join(self.build_directory, "apps") + + config_py_path = os.path.join(apps_path, app, "setup.cfg") + setup_py_path = os.path.join(apps_path, app, "setup.py") + + app_name = self._get_app_pyproject(app).get("project", {}).get("name") + + if not app_name and os.path.exists(config_py_path): + from setuptools.config import read_configuration + + config = read_configuration(config_py_path) + app_name = config.get("metadata", {}).get("name") + + if not app_name and os.path.exists(setup_py_path): + # retrieve app name from setup.py as fallback + with open(setup_py_path, "rb") as f: + contents = f.read().decode("utf-8") + search = re.search(r'name\s*=\s*[\'"](.*)[\'"]', contents) + + if search: + app_name = search[1] + + if app_name and app != app_name: + return app_name + + return app + + def _get_app_pyproject(self, app): + apps_path = os.path.join(self.build_directory, "apps") + pyproject_path = os.path.join(apps_path, app, "pyproject.toml") + if not os.path.exists(pyproject_path): + return {} + + return load_pyproject(app, pyproject_path) + + def run(self, command, environment=None, directory=None): + process = Popen( + shlex.split(command), + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + env=environment, + cwd=directory or self.build_directory, + universal_newlines=True, + ) + yield from process.stdout + process.stdout.close() + return_code = process.wait() + if return_code: + raise subprocess.CalledProcessError(return_code, command) + + def generate_ssh_keys(self): + ca = jingrow.db.get_single_value("Jcloud Settings", "ssh_certificate_authority") + if not ca: + return + + ca = jingrow.get_pg("SSH Certificate Authority", ca) + ssh_directory = os.path.join(self.build_directory, "config", "ssh") + + self.generate_host_keys(ca, ssh_directory) + self.generate_user_keys(ca, ssh_directory) + + ca_public_key = os.path.join(ssh_directory, "ca.pub") + with open(ca_public_key, "w") as f: + f.write(ca.public_key) + + # Generate authorized principal file + principals = os.path.join(ssh_directory, "principals") + with open(principals, "w") as f: + f.write(f"restrict,pty {self.group}") + + def generate_host_keys(self, ca, ssh_directory): + # Generate host keys + list( + self.run( + f"ssh-keygen -C {self.name} -t rsa -b 4096 -N '' -f ssh_host_rsa_key", + directory=ssh_directory, + ) + ) + + # Generate host Certificate + host_public_key_path = os.path.join(ssh_directory, "ssh_host_rsa_key.pub") + ca.sign(self.name, None, "+52w", host_public_key_path, 0, host_key=True) + + def generate_user_keys(self, ca, ssh_directory): + # Generate user keys + list( + self.run( + f"ssh-keygen -C {self.name} -t rsa -b 4096 -N '' -f id_rsa", + directory=ssh_directory, + ) + ) + + # Generate user certificates + user_public_key_path = os.path.join(ssh_directory, "id_rsa.pub") + ca.sign(self.name, [self.group], "+52w", user_public_key_path, 0) + + user_private_key_path = os.path.join(ssh_directory, "id_rsa") + with open(user_private_key_path) as f: + self.user_private_key = f.read() + + with open(user_public_key_path) as f: + self.user_public_key = f.read() + + user_certificate_path = os.path.join(ssh_directory, "id_rsa-cert.pub") + with open(user_certificate_path) as f: + self.user_certificate = f.read() + + # Remove user key files + os.remove(user_private_key_path) + os.remove(user_public_key_path) + os.remove(user_certificate_path) + + def get_certificate(self): + return { + "id_rsa": self.user_private_key, + "id_rsa.pub": self.user_public_key, + "id_rsa-cert.pub": 
self.user_certificate, + } + + def update_step(self, stage_slug: str, step_slug: str, update_dict: dict[str, Any]): + step = self.get_step(stage_slug, step_slug) + if not step: + return + + for key, value in update_dict.items(): + step.set(key, value) + + def get_step(self, stage_slug: str, step_slug: str) -> "DeployCandidateBuildStep | None": + return find( + self.build_steps, + lambda x: x.stage_slug == stage_slug and x.step_slug == step_slug, + ) + + def create_deploy(self): + servers = jingrow.get_pg("Release Group", self.group).servers + servers = [server.server for server in servers] + if not servers: + return None + + deploy_pg = jingrow.db.exists( + "Deploy", {"group": self.group, "candidate": self.name, "staging": False} + ) + + if deploy_pg: + return str(deploy_pg) + + return self._create_deploy(servers).name + + def _create_deploy(self, servers: list[str]): + return jingrow.get_pg( + { + "pagetype": "Deploy", + "group": self.group, + "candidate": self.name, + "benches": [{"server": server} for server in servers], + } + ).insert() + + def on_update(self): + if self.status == "Running": + jingrow.publish_realtime( + f"bench_deploy:{self.name}:steps", + pagetype=self.pagetype, + docname=self.name, + message={"steps": self.build_steps, "name": self.name}, + ) + else: + jingrow.publish_realtime( + f"bench_deploy:{self.name}:finished", + pagetype=self.pagetype, + docname=self.name, + ) + + def get_dependency_version(self, dependency: str, as_env: bool = False): + if dependency.islower(): + dependency = dependency.upper() + "_VERSION" + + version = find(self.dependencies, lambda x: x.dependency == dependency).version + + if as_env: + return f"{dependency} {version}" + + return version + + def get_pull_update_dict(self) -> dict[str, AppReleasePair]: + """ + Returns a dict of apps with: + + `old` hash: for which there already exist cached layers from previously + deployed Benches that have been created from this Deploy Candidate. + + `new` hash: which can just be 'git pull' updated, i.e. a new layer does + not need to be built for them from scratch. + """ + + # Deployed Benches from current DC with (potentially) cached layers + benches = jingrow.get_all("Bench", filters={"group": self.group, "status": "Active"}, limit=1) + if not benches: + return {} + + bench_name = benches[0]["name"] + deployed_apps = jingrow.get_all( + "Bench App", + filters={"parent": bench_name}, + fields=["app", "source", "hash"], + ) + deployed_apps_map = {app.app: app for app in deployed_apps} + + pull_update: dict[str, AppReleasePair] = {} + + for app in self.apps: + app_name = app.app + + """ + If True, new app added to the Release Group. Downstream layers will + be rebuilt regardless of layer change. + """ + if app_name not in deployed_apps_map: + break + + deployed_app = deployed_apps_map[app_name] + + """ + If True, app source updated in Release Group. Downstream layers may + have to be rebuilt. Erring on the side of caution. + """ + if deployed_app["source"] != app.source: + break + + update_hash = app.hash + deployed_hash = deployed_app["hash"] + + if update_hash == deployed_hash: + continue + + changes = get_changed_files_between_hashes( + app.source, + deployed_hash, + update_hash, + ) + # deployed commit is after update commit + if not changes: + break + + file_diff, pair = changes + if not can_pull_update(file_diff): + """ + If current app is not being pull_updated, then no need to + pull update apps later in the sequence. 
+ + This is because once an image layer hash changes all layers + after it have to be rebuilt. + """ + break + + pull_update[app_name] = pair + return pull_update + + def get_first_step(self, key: str, value: str | list[str]) -> "DeployCandidateBuildStep | None": + if isinstance(value, str): + value = [value] + + for build_step in self.build_steps: + if build_step.get(key) not in value: + continue + return build_step + return None + + def get_duplicate_dc(self) -> "DeployCandidate | None": + rg: ReleaseGroup = jingrow.get_pg("Release Group", self.group) + if not (dc := rg.create_deploy_candidate()): + return None + + # Set new DC apps to pull from the same sources + new_app_map = {a.app: a for a in dc.apps} + for app in self.apps: + if not (new_app := new_app_map.get(app.app)): + continue + + new_app.hash = app.hash + new_app.release = app.release + new_app.source = app.source + + # Remove apps from new DC if they aren't in the old DC + old_app_map = {a.app: a for a in self.apps} + for app in dc.apps: + if old_app_map.get(app.app): + continue + + dc.remove(app) + + self.save() + return dc + + def has_app(self, name: str) -> bool: + org = None + if "/" in name: + org, name = name.split("/", maxsplit=1) + + for app in self.apps: + if app.app != name: + continue + + if org is None: + return True + + owner = jingrow.db.get_value( + "App Source", + app.source, + "repository_owner", + ) + return owner == org + return False + + def stop_build_jobs(self): + for job in get_background_jobs(self.pagetype, self.name, status="started"): + if not is_build_job(job): + continue + stop_background_job(job) + + def _fail_site_group_deploy_if_exists(self): + site_group_deploy = jingrow.db.get_value( + "Site Group Deploy", + { + "release_group": self.group, + "site": ("is", "not set"), + "bench": ("is", "not set"), + }, + ) + if site_group_deploy: + jingrow.get_pg("Site Group Deploy", site_group_deploy).update_site_group_deploy_on_deploy_failure( + self, + ) + + +def can_pull_update(file_paths: list[str]) -> bool: + """ + Updated app files between current and previous build + that do not cause get-app to update the filesystem can + be git pulled. + + Function returns True ONLY if all files are of this kind. 
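+
+ For example (hypothetical file paths, judged by pull_update_file_filter
+ below):
+
+ ```
+ # hypothetical paths
+ can_pull_update(["my_app/www/index.html"])        # -> True
+ can_pull_update(["package.json", "src/App.vue"])  # -> False
+ ```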
+ """ + return all(pull_update_file_filter(fp) for fp in file_paths) + + +def pull_update_file_filter(file_path: str) -> bool: + blacklist = [ + # Requires pip install + "requirements.txt", + "pyproject.toml", + "setup.py", + # Requires yarn install, build + "package.json", + ".vue", + ".ts", + ".jsx", + ".tsx", + ".scss", + ] + if any(file_path.endswith(f) for f in blacklist): + return False + + # Non build requiring frontend files + for ext in [".html", ".js", ".css"]: + if not file_path.endswith(ext): + continue + + if "/www/" in file_path: + return True + + # Probably requires build + return False + + return True + + +def cleanup_build_directories(): + # Cleanup Build Directories for Deploy Candidates older than a day + dcs = jingrow.get_all( + "Deploy Candidate", + { + "status": ("!=", "Draft"), + "build_directory": ("is", "set"), + "creation": ("<=", jingrow.utils.add_to_date(None, hours=-6)), + }, + order_by="creation asc", + pluck="name", + limit=100, + ) + for dc in dcs: + pg: DeployCandidate = jingrow.get_pg("Deploy Candidate", dc) + try: + pg.cleanup_build_directory() + jingrow.db.commit() + except Exception as e: + jingrow.db.rollback() + log_error(title="Deploy Candidate Build Cleanup Error", exception=e, pg=pg) + + # Delete all temporary files created by the build process + glob_path = os.path.join(tempfile.gettempdir(), f"{tempfile.gettempprefix()}*.tar.gz") + six_hours_ago = jingrow.utils.add_to_date(None, hours=-6) + for file in glob.glob(glob_path): + # Use local time to compare timestamps + if os.stat(file).st_ctime < six_hours_ago.timestamp(): + os.remove(file) + + +def ansi_escape(text): + # Reference: + # https://stackoverflow.com/questions/14693701/how-can-i-remove-the-ansi-escape-sequences-from-a-string-in-python + ansi_escape = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])") + return ansi_escape.sub("", text) + + +@jingrow.whitelist() +def desk_app(pagetype, txt, searchfield, start, page_len, filters): + return jingrow.get_all( + "Release Group App", + filters={"parent": filters["release_group"]}, + fields=["app"], + as_list=True, + ) + + +def delete_draft_candidates(): + dcs = jingrow.get_all( + "Deploy Candidate", + { + "status": "Draft", + "creation": ("<=", jingrow.utils.add_days(None, -1)), + }, + order_by="creation asc", + pluck="name", + limit=1000, + ) + + for dc in dcs: + if jingrow.db.exists("Bench", {"candidate": dc}): + jingrow.db.set_value("Deploy Candidate", dc, "status", "Success", update_modified=False) + jingrow.db.commit() + continue + try: + jingrow.delete_pg("Deploy Candidate", dc, delete_permanently=True) + jingrow.db.commit() + except Exception: + log_error( + "Draft Deploy Candidate Deletion Error", + reference_pagetype="Deploy Candidate", + reference_name=dc, + ) + jingrow.db.rollback() + + +get_permission_query_conditions = get_permission_query_conditions_for_pagetype("Deploy Candidate") + + +@jingrow.whitelist() +def toggle_builds(suspend): + jingrow.only_for("System Manager") + jingrow.db.set_single_value("Jcloud Settings", "suspend_builds", suspend) + + +def run_scheduled_builds(max_builds: int = 5): + if is_suspended(): + return + + dcs = jingrow.get_all( + "Deploy Candidate", + { + "status": "Scheduled", + "scheduled_time": ("<=", jingrow.utils.now_datetime()), + }, + pluck="name", + limit=max_builds, + ) + for dc in dcs: + pg: DeployCandidate = jingrow.get_pg("Deploy Candidate", dc) + try: + pg.run_scheduled_build_and_deploy() + jingrow.db.commit() + except Exception: + jingrow.db.rollback() + log_error(title="Scheduled Deploy 
Candidate Error", pg=pg) + + +# Key: stage_slug +STAGE_SLUG_MAP = { + "clone": "Clone Repositories", + "pre_before": "Run Before Prerequisite Script", + "pre": "Setup Prerequisites", + "pre_after": "Run After Prerequisite Script", + "bench": "Setup Bench", + "apps": "Install Apps", + "validate": "Run Validations", + "pull": "Pull Updates", + "mounts": "Setup Mounts", + "package": "Package", + "upload": "Upload", +} + +# Key: (stage_slug, step_slug) +STEP_SLUG_MAP = { + ("pre", "essentials"): "Install Essential Packages", + ("pre", "redis"): "Install Redis", + ("pre", "python"): "Install Python", + ("pre", "wkhtmltopdf"): "Install wkhtmltopdf", + ("pre", "fonts"): "Install Fonts", + ("pre", "node"): "Install Node.js", + ("pre", "yarn"): "Install Yarn", + ("pre", "pip"): "Install pip", + ("pre", "code-server"): "Install Code Server", + ("bench", "bench"): "Install Bench", + ("bench", "env"): "Setup Virtual Environment", + ("validate", "pre-build"): "Pre-build", + ("validate", "dependencies"): "Validate Dependencies", + ("mounts", "create"): "Prepare Mounts", + ("upload", "image"): "Docker Image", + ("package", "context"): "Build Context", + ("upload", "context"): "Build Context", +} + + +def get_build_stage_and_step( + stage_slug: str, step_slug: str, app_titles: dict[str, str] | None = None +) -> tuple[str, str]: + stage = STAGE_SLUG_MAP.get(stage_slug, stage_slug) + step = step_slug + if stage_slug == "clone" or stage_slug == "apps": + step = app_titles.get(step_slug, step_slug) + else: + step = STEP_SLUG_MAP.get((stage_slug, step_slug), step_slug) + return (stage, step) + + +def get_remote_step_output( + step_name: Literal["build", "push"], + output_data: dict, + response_data: dict | None, +): + if output := output_data.get(step_name): + return output + + if not isinstance(response_data, dict): + return None + + job_step_name = "Build Image" if step_name == "build" else "Push Docker Image" + for step in response_data.get("steps", []): + if step.get("name") != job_step_name: + continue + + commands = step.get("commands", []) + if not isinstance(commands, list) or len(commands) == 0 or not isinstance(commands[0], dict): + continue + + output = commands[0].get("output") + if not isinstance(output, str): + continue + + with contextlib.suppress(AttributeError, json.JSONDecodeError): + return json.loads(output).get(step_name, []) + + return None + + +def is_build_job(job: Job) -> bool: + pg_method: str = job.kwargs.get("kwargs", {}).get("pg_method", "") + return pg_method.startswith("_build") + + +def get_duration(start_time: datetime, end_time: datetime | None = None): + end_time = end_time or now() + seconds_elapsed = (end_time - start_time).total_seconds() + value = rounded(seconds_elapsed, 3) + return float(value) + + +def check_builds_status( + last_n_days=0, + last_n_hours=4, + stuck_threshold_in_hours=2, +): + fail_or_retry_stuck_builds( + last_n_days=last_n_days, + last_n_hours=last_n_hours, + stuck_threshold_in_hours=stuck_threshold_in_hours, + ) + correct_false_positives( + last_n_days=last_n_days, + last_n_hours=last_n_hours, + ) + jingrow.db.commit() + + +def fail_or_retry_stuck_builds( + last_n_days=0, + last_n_hours=4, + stuck_threshold_in_hours=2, +): + # Fails or retries builds builds from the `last_n_days` and `last_n_hours` that + # have not been updated for longer than `stuck_threshold_in_hours`. 
+ result = jingrow.db.sql( + """ + select dc.name as name + from `tabDeploy Candidate` as dc + where dc.modified between now() - interval %s day - interval %s hour and now() + and dc.modified < now() - interval %s hour + and dc.status not in ('Draft', 'Failure', 'Success') + """, + ( + last_n_days, + last_n_hours, + stuck_threshold_in_hours, + ), + ) + + for (name,) in result: + dc: DeployCandidate = jingrow.get_pg("Deploy Candidate", name) + dc.manually_failed = True + dc._stop_and_fail(False) + if can_retry_build(dc.name, dc.group, dc.build_start): + dc.schedule_build_retry() + + +def can_retry_build(name: str, group: str, build_start: datetime): + # Can retry only if build was started today and + # if no builds were started after the current build. + if build_start.date().isoformat() != jingrow.utils.today(): + return False + + result = jingrow.db.count( + "Deploy Candidate", + filters={ + "group": group, + "build_start": [">", build_start], + "name": ["!=", name], # sanity filter + }, + ) + + if isinstance(result, int): + return result == 0 + return False + + +def correct_false_positives(last_n_days=0, last_n_hours=1): + # Fails jobs non Failed jobs that have steps with Failure status + result = jingrow.db.sql( + """ + with dc as ( + select dc.name as name, dc.status as status + from `tabDeploy Candidate` as dc + where dc.modified between now() - interval %s day - interval %s hour and now() + and dc.status != "Failure" + ) + select dc.name + from dc join `tabDeploy Candidate Build Step` as dcb + on dc.name = dcb.parent + where dcb.status = "Failure" + """, + ( + last_n_days, + last_n_hours, + ), + ) + + for (name,) in result: + correct_status(name) + + +def correct_status(dc_name: str): + dc: DeployCandidate = jingrow.get_pg("Deploy Candidate", dc_name) + found_failed = False + for bs in dc.build_steps: + if bs.status == "Failure": + found_failed = True + continue + + if not found_failed: + continue + + bs.status = "Pending" + + if not found_failed: + return + + dc._stop_and_fail(False) + + +def should_build_retry_build_output(build_output: str): + # Build failed cause APT could not get lock. 
+ if "Could not get lock /var/cache/apt/archives/lock" in build_output: + return True + + # Build failed cause Docker could not find a mounted file/folder + if "failed to compute cache key: failed to calculate checksum of ref" in build_output: + return True + + # Failed to pull package from pypi + if "Connection to pypi.org timed out" in build_output: + return True + + # Caused when fetching Python from deadsnakes/ppa + if "Error: retrieving gpg key timed out" in build_output: + return True + + # Yarn registry bad gateway + if ( + "error https://registry.yarnpkg.com/" in build_output + and 'Request failed "502 Bad Gateway"' in build_output + ): + return True + + # NPM registry internal server error + if ( + "Error: https://registry.npmjs.org/" in build_output + and 'Request failed "500 Internal Server Error"' in build_output + ): + return True + + return False + + +def should_build_retry_exc(exc: Exception): + error = jingrow.get_traceback(False) + if not error and len(exc.args) == 0: + return False + + error = error or "\n".join(str(a) for a in exc.args) + + # Failed to upload build context (Mostly 502) + if "Failed to upload build context" in error: + return True + + # Redis refused connection (jcloud side) + if "redis.exceptions.ConnectionError: Error 111" in error: + return True + + if "rq.timeouts.JobTimeoutException: Task exceeded maximum timeout value" in error: + return True + + return False + + +def should_build_retry_job(job: "AgentJob"): + if not job.traceback: + return False + + # Failed to upload docker image + if "TimeoutError: timed out" in job.traceback: + return True + + # Redis connection reset + if "ConnectionResetError: [Errno 104] Connection reset by peer" in job.traceback: + return True + + # Redis connection refused + if "ConnectionRefusedError: [Errno 111] Connection refused" in job.traceback: + return True + + return False + + +def throw_no_build_server(): + jingrow.throw( + "Server not found to run builds. " + "Please set Build Server under Jcloud Settings > Docker > Docker Build." + ) diff --git a/jcloud/jcloud/pagetype/deploy_candidate/deploy_candidate_dashboard.py b/jcloud/jcloud/pagetype/deploy_candidate/deploy_candidate_dashboard.py new file mode 100644 index 0000000..4ceac61 --- /dev/null +++ b/jcloud/jcloud/pagetype/deploy_candidate/deploy_candidate_dashboard.py @@ -0,0 +1,9 @@ +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +def get_data(): + return { + "fieldname": "candidate", + "transactions": [{"items": ["Bench", "Deploy"]}], + } diff --git a/jcloud/jcloud/pagetype/deploy_candidate/deploy_candidate_list.js b/jcloud/jcloud/pagetype/deploy_candidate/deploy_candidate_list.js new file mode 100644 index 0000000..df60126 --- /dev/null +++ b/jcloud/jcloud/pagetype/deploy_candidate/deploy_candidate_list.js @@ -0,0 +1,27 @@ +jingrow.listview_settings['Deploy Candidate'] = { + refresh: show_toggle_builds_button, +}; + +function show_toggle_builds_button(list_view) { + if (!has_common(jingrow.user_roles, ['Administrator', 'System Manager'])) + return; + + jingrow.db + .get_single_value('Jcloud Settings', 'suspend_builds') + .then((suspend_builds) => { + const label = suspend_builds ? 
__('Resume Builds') : __('Suspend Builds'); + + list_view.page.add_inner_button(label, () => { + jingrow + .xcall( + 'jcloud.jcloud.pagetype.deploy_candidate.deploy_candidate.toggle_builds', + { suspend: !suspend_builds }, + ) + .then(() => { + // clear the button and show one with the opposite label + list_view.page.remove_inner_button(label); + show_toggle_builds_button(list_view); + }); + }); + }); +} diff --git a/jcloud/jcloud/pagetype/deploy_candidate/deploy_notifications.py b/jcloud/jcloud/pagetype/deploy_candidate/deploy_notifications.py new file mode 100644 index 0000000..d798c6d --- /dev/null +++ b/jcloud/jcloud/pagetype/deploy_candidate/deploy_notifications.py @@ -0,0 +1,1033 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt + +import re +import typing +from textwrap import dedent +from typing import Optional, Protocol, TypedDict + +import jingrow +import jingrow.utils + +from jcloud.jcloud.pagetype.deploy_candidate.utils import ( + BuildValidationError, + get_error_key, +) + +""" +Used to create notifications if the Deploy error is something that can +be handled by the user. + +Ref: http://git.jingrow.com:3000/jingrow/jcloud/pull/1544 + +To handle an error: +1. Create a pg page that helps the user get out of it under: jingrow.com/docs/common-issues +2. Check if the error is the known/expected one in `get_details`. +3. Update the details object with the correct values. +""" + +Details = TypedDict( + "Details", + { + "title": Optional[str], + "message": str, + "traceback": Optional[str], + "is_actionable": bool, + "assistance_url": Optional[str], + }, +) + +# These strings are checked against the traceback or build_output +MatchStrings = str | list[str] + +if typing.TYPE_CHECKING: + from jingrow import Document + + from jcloud.jcloud.pagetype.deploy_candidate.deploy_candidate import DeployCandidate + from jcloud.jcloud.pagetype.deploy_candidate_app.deploy_candidate_app import ( + DeployCandidateApp, + ) + + # TYPE_CHECKING guard for code below cause DeployCandidate + # might cause circular import. + class UserAddressableHandler(Protocol): + def __call__( + self, + details: "Details", + dc: "DeployCandidate", + exc: BaseException, + ) -> bool: # Return True if is_actionable + ... + + class WillFailChecker(Protocol): + def __call__(self, old_dc: "DeployCandidate", new_dc: "DeployCandidate") -> None: + ... 
 + + UserAddressableHandlerTuple = tuple[ + MatchStrings, + UserAddressableHandler, + WillFailChecker | None, + ] + + +DOC_URLS = { + "app-installation-issue": "https://jingrow.com/docs/faq/app-installation-issue", + "invalid-pyproject-file": "https://jingrow.com/docs/common-issues/invalid-pyprojecttoml-file", + "incompatible-node-version": "https://jingrow.com/docs/common-issues/incompatible-node-version", + "incompatible-dependency-version": "https://jingrow.com/docs/common-issues/incompatible-dependency-version", + "incompatible-app-version": "https://jingrow.com/docs/common-issues/incompatible-app-version", + "required-app-not-found": "https://jingrow.com/docs/common-issues/required-app-not-found", + "debugging-app-installs-locally": "https://jingrow.com/docs/common-issues/debugging-app-installs-locally", + "vite-not-found": "https://jingrow.com/docs/common-issues/vite-not-found", +} + + +def handlers() -> "list[UserAddressableHandlerTuple]": + """ + Before adding anything here, view the type: + `UserAddressableHandlerTuple` + + The first value of the tuple is `MatchStrings`, a list of strings + (or a single string); if all of them are present in the `traceback` + or the `build_output`, then the second value, i.e. the + `UserAddressableHandler`, is called. + + `UserAddressableHandler` is used to update the details + used to create the Jcloud Notification. + + `UserAddressableHandler` can return False if the error isn't + user addressable; in this case the remaining handler + tuples will be checked. + + Due to this, the order of the tuples matters. + + The third value is the `WillFailChecker` which is called + when a new Deploy Candidate is to be made and the previous + Deploy Candidate suffered a User Addressable Failure. + + The `WillFailChecker` is placed in proximity with the + notification handlers because that's where the error is + evaluated and its key stored on the Deploy Candidate + as `error_key`. 
+ """ + return [ + ( + "App installation token could not be fetched", + update_with_app_not_fetchable, + None, + ), + ( + "Repository could not be fetched", + update_with_app_not_fetchable, + None, + ), + ( + "App has invalid pyproject.toml file", + update_with_invalid_pyproject_error, + None, + ), + ( + "App has invalid package.json file", + update_with_invalid_package_json_error, + None, + ), + ( + 'engine "node" is incompatible with this module', + update_with_incompatible_node, + check_incompatible_node, + ), + ( + "Incompatible Node version found", + update_with_incompatible_node, + check_incompatible_node, + ), + ( + "Incompatible Python version found", + update_with_incompatible_python_prebuild, + None, + ), + ( + "Incompatible app version found", + update_with_incompatible_app_prebuild, + None, + ), + ( + "Invalid release found", + update_with_invalid_release_prebuild, + None, + ), + ( + "Required app not found", + update_with_required_app_not_found_prebuild, + None, + ), + ( + "ModuleNotFoundError: No module named", + update_with_module_not_found, + check_if_app_updated, + ), + ( + "ImportError: cannot import name", + update_with_import_error, + check_if_app_updated, + ), + ( + "No matching distribution found for", + update_with_dependency_not_found, + check_if_app_updated, + ), + ( + "[ERROR] [plugin vue]", + update_with_vue_build_failed, + check_if_app_updated, + ), + ( + "[ERROR] [plugin jingrow-vue-style]", + update_with_vue_build_failed, + check_if_app_updated, + ), + ( + "vite: not found", + update_with_vite_not_found, + check_if_app_updated, + ), + ( + "FileNotFoundError: [Errno 2] No such file or directory", + update_with_file_not_found, + check_if_app_updated, + ), + ( + "minimum supported Python version is", + update_with_incompatible_python, + check_incompatible_python, + ), + ( + "pip._vendor.packaging.version.InvalidVersion: Invalid version", + update_with_error_on_pip_install, + check_if_app_updated, + ), + # Below three are catch all fallback handlers for `yarn build`, + # `yarn install` and `pip install` errors originating due to + # issues in an app. + # + # They should always be at the end. + ( + "subprocess.CalledProcessError: Command 'bench build --app", + update_with_yarn_build_failed, + check_if_app_updated, + ), + ( + "This error originates from a subprocess, and is likely not a problem with pip", + update_with_error_on_pip_install, + check_if_app_updated, + ), + ( + "ERROR: yarn install --check-files", + update_with_yarn_install_failed, + check_if_app_updated, + ), + ] + + +def create_build_failed_notification( + dc: "DeployCandidate", + exc: BaseException | None, +) -> bool: + """ + Used to create jcloud notifications on Build failures. If the notification + is actionable then it will be displayed on the dashboard and will block + further builds until the user has resolved it. 
+ + Returns True if build failure is_actionable + """ + if exc is None: + # Exception is not passed if called from + # build agent job update handler + exc = Exception("PLACEHOLDER_EXCEPTION") + + details = get_details(dc, exc) + pg_dict = { + "pagetype": "Jcloud Notification", + "team": dc.team, + "type": "Bench Deploy", + "document_type": dc.pagetype, + "document_name": dc.name, + "class": "Error", + **details, + } + pg = jingrow.get_pg(pg_dict) + pg.insert() + jingrow.db.commit() + + jingrow.publish_realtime( + "jcloud_notification", pagetype="Jcloud Notification", message={"team": dc.team} + ) + + return details["is_actionable"] + + +def get_details(dc: "DeployCandidate", exc: BaseException) -> "Details": + tb = jingrow.get_traceback(with_context=False) + default_title = get_default_title(dc) + default_message = get_default_message(dc) + + details: "Details" = dict( + title=default_title, + message=default_message, + traceback=tb, + is_actionable=False, + assistance_url=None, + ) + + for strs, handler, _ in handlers(): + if isinstance(strs, str): + strs = [strs] + + if not (is_match := all(s in tb for s in strs)): + is_match = all(s in dc.build_output for s in strs) + + if not is_match: + continue + + if handler(details, dc, exc): + details["is_actionable"] = True + dc.error_key = get_error_key(strs) + break + else: + details["title"] = default_title + details["message"] = default_message + details["traceback"] = tb + details["is_actionable"] = False + details["assistance_url"] = None + + return details + + +def update_with_vue_build_failed( + details: "Details", + dc: "DeployCandidate", + exc: BaseException, +): + failed_step = get_failed_step(dc) + app_name = None + + details["title"] = "App installation failed due to errors in frontend code" + + if failed_step.stage_slug == "apps": + app_name = failed_step.step + message = f""" +

{app_name} installation has failed due to errors in its + frontend (Vue.js) code.

+ +

Please view the failing step {failed_step.stage} - {failed_step.step} + output to debug and fix the error before retrying build.

+ """ + else: + message = """ +

App installation has failed due to errors in its frontend (Vue.js) code.

+ +

Please view the build output to debug and fix the error before retrying + build.

+ """ + + details["message"] = fmt(message) + details["assistance_url"] = DOC_URLS["debugging-app-installs-locally"] + return True + + +def update_with_import_error( + details: "Details", + dc: "DeployCandidate", + exc: BaseException, +): + failed_step = get_failed_step(dc) + app_name = None + + details["title"] = "App installation failed due to invalid import" + + lines = [ + line + for line in dc.build_output.split("\n") + if "ImportError: cannot import name" in line + ] + invalid_import = None + if len(lines) > 1 and len(parts := lines[0].split("From")) > 1: + imported = parts[0].strip().split(" ")[-1][1:-1] + module = parts[1].strip().split(" ")[0][1:-1] + invalid_import = f"{imported} from {module}" + + if failed_step.stage_slug == "apps" and invalid_import: + app_name = failed_step.step + message = f""" +

{app_name} installation has failed due to invalid import + {invalid_import}.

+ +

Please ensure all Python dependencies are of the required + versions.

+ +

Please view the failing step {failed_step.stage} - {failed_step.step} + output to debug and fix the error before retrying build.

+ """ + else: + message = """ +

App installation failed due to an invalid import.

+ +

Please view the build output to debug and fix the error + before retrying build.

+ """ + + details["assistance_url"] = DOC_URLS["debugging-app-installs-locally"] + details["message"] = fmt(message) + return True + + +def update_with_module_not_found( + details: "Details", + dc: "DeployCandidate", + exc: BaseException, +): + failed_step = get_failed_step(dc) + app_name = None + + details["title"] = "App installation failed due to missing module" + + lines = [ + line + for line in dc.build_output.split("\n") + if "ModuleNotFoundError: No module named" in line + ] + missing_module = None + if len(lines) > 1: + missing_module = lines[0].split(" ")[-1][1:-1] + + if failed_step.stage_slug == "apps" and missing_module: + app_name = failed_step.step + message = f""" +

{app_name} installation has failed due to imported module + {missing_module} not being found.

+ +

Please ensure all imported Jingrow app dependencies have been added + to your bench and all Python dependencies have been added to your app's + requirements.txt or pyproject.toml file before retrying + the build.

+ """ + else: + message = """ +

App installation failed due to an imported module not being found.

+ +

Please view the failing step output to debug and fix the error + before retrying build.

+ """ + + details["message"] = fmt(message) + details["assistance_url"] = DOC_URLS["debugging-app-installs-locally"] + return True + + +def update_with_dependency_not_found( + details: "Details", + dc: "DeployCandidate", + exc: BaseException, +): + failed_step = get_failed_step(dc) + app_name = None + + details["title"] = "App installation failed due to dependency not being found" + + lines = [ + line + for line in dc.build_output.split("\n") + if "No matching distribution found for" in line + ] + missing_dep = None + if len(lines) > 1: + missing_dep = lines[0].split(" ")[-1] + + if failed_step.stage_slug == "apps" and missing_dep: + app_name = failed_step.step + message = f""" +

{app_name} installation has failed due to dependency + {missing_dep} not being found.

+ +

Please specify a version of {missing_dep} installable by + pip.

+ +

Please view the failing step output for more info.

+ """ + else: + message = """ +

App installation failed due to pip not being able to find a + distribution of a dependency in your app.

+ +

Please view the build output to debug and fix the error before + retrying build.

+ """ + + details["message"] = fmt(message) + details["assistance_url"] = DOC_URLS["debugging-app-installs-locally"] + return True + + +def update_with_error_on_pip_install( + details: "Details", + dc: "DeployCandidate", + exc: BaseException, +): + failed_step = get_failed_step(dc) + app_name = None + + details["title"] = "App installation failed due to errors" + + if failed_step.stage_slug == "apps": + app_name = failed_step.step + message = f""" +

App setup for {app_name} using pip failed due to + errors originating in the app.

+ +

Please view the failing step {failed_step.stage} - {failed_step.step} + output to debug and fix the error before retrying build.

+ """ + else: + message = """ +

App setup using pip failed due to errors originating in an + app on your Bench.

+ +

Please view the build output to debug and fix the error before retrying + build.

+ """ + + details["message"] = fmt(message) + details["assistance_url"] = DOC_URLS["debugging-app-installs-locally"] + return True + + +def update_with_invalid_pyproject_error( + details: "Details", + dc: "DeployCandidate", + exc: BaseException, +): + if len(exc.args) <= 1 or not (app := exc.args[1]): + return False + + build_step = get_ct_row(dc, app, "build_steps", "step_slug") + app_name = build_step.step + + details["title"] = "Invalid pyproject.toml file found" + message = f""" +

The pyproject.toml file in the {app_name} repository could not be + decoded by tomllib due to syntax errors.

+ +

To rectify this issue, please follow the steps mentioned in Help.

+ """ + details["message"] = fmt(message) + details["assistance_url"] = DOC_URLS["invalid-pyproject-file"] + return True + + +def update_with_invalid_package_json_error( + details: "Details", + dc: "DeployCandidate", + exc: BaseException, +): + if len(exc.args) <= 1 or not (app := exc.args[1]): + return False + + build_step = get_ct_row(dc, app, "build_steps", "step_slug") + app_name = build_step.step + + loc_str = "" + if len(exc.args) >= 2 and isinstance(exc.args[2], str): + loc_str = f"

File was found at path {exc.args[2]}.

" + + details["title"] = "Invalid package.json file found" + message = f""" +

The package.json file in the {app_name} repository could not be + decoded by json.load.

+ {loc_str} + +

To rectify this issue, please fix the package.json file.

+ """ + details["message"] = fmt(message) + details["assistance_url"] = DOC_URLS["debugging-app-installs-locally"] + return True + + +def update_with_app_not_fetchable( + details: "Details", + dc: "DeployCandidate", + exc: BaseException, +): + failed_step = get_failed_step(dc) + + details["title"] = "App could not be fetched" + if failed_step.stage_slug == "apps": + app_name = failed_step.step + message = f""" +

{app_name} could not be fetched from GitHub.

+ +

This may have been due to an invalid installation id or due + to an invalid repository URL.

+ +

For possible solutions, please follow the steps mentioned + in Help.

+ """ + else: + message = """ +

App could not be fetched from GitHub.

+ +

This may have been due to an invalid installation id or due + to an invalid repository URL.

+ +

For possible solutions, please follow the steps mentioned + in Help.

+ """ + + details["message"] = fmt(message) + details["assistance_url"] = DOC_URLS["app-installation-issue"] + return True + + +def update_with_incompatible_node( + details: "Details", + dc: "DeployCandidate", + exc: BaseException, +) -> None: + # Example line: + # `#60 5.030 error customization_forms@1.0.0: The engine "node" is incompatible with this module. Expected version ">=18.0.0". Got "16.16.0"` + if line := get_build_output_line(dc, '"node" is incompatible with this module'): + app = get_app_from_incompatible_build_output_line(line) + version = "" + elif len(exc.args) == 5: + app = exc.args[1] + version = f'Expected "{exc.args[3]}", found "{exc.args[2]}". ' + + details["title"] = "Incompatible Node version" + message = f""" +

{details['message']}

+ +

{app} installation failed due to incompatible Node versions. {version} + Please set the correct Node version on your Bench.

+ +

To rectify this issue, please follow the steps mentioned in Help.

+ """ + details["message"] = fmt(message) + details["assistance_url"] = DOC_URLS["incompatible-node-version"] + + # Traceback is not pertinent to issue + details["traceback"] = None + return True + + +def check_incompatible_node( + old_dc: "DeployCandidate", new_dc: "DeployCandidate" +) -> None: + old_node = old_dc.get_dependency_version("node") + new_node = new_dc.get_dependency_version("node") + + if old_node != new_node: + return + + jingrow.throw( + "Node version not updated since previous failing build.", + BuildValidationError, + ) + + +def update_with_incompatible_python( + details: "Details", + dc: "DeployCandidate", + exc: BaseException, +): + details["title"] = "Incompatible Python version" + message = """ +

App installation has failed due to the Python version on your Bench + being incompatible. Please check build output for more details.

+ +

To rectify this issue, please update the Python version on your bench.

+ +

For reference, you can follow the steps mentioned in Help.

+ """ + + details["message"] = fmt(message) + details["traceback"] = None + details["assistance_url"] = DOC_URLS["incompatible-dependency-version"] + return True + + +def check_incompatible_python( + old_dc: "DeployCandidate", new_dc: "DeployCandidate" +) -> None: + old_node = old_dc.get_dependency_version("python") + new_node = new_dc.get_dependency_version("python") + + if old_node != new_node: + return + + jingrow.throw( + "Python version not updated since previous failing build.", + BuildValidationError, + ) + + +def update_with_incompatible_node_prebuild( + details: "Details", + dc: "DeployCandidate", + exc: BaseException, +) -> None: + if len(exc.args) != 5: + return False + + _, app, actual, expected, package_name = exc.args + + package_name_str = "" + if isinstance(package_name, str): + package_name_str = f"Version requirement comes from package {package_name}" + + details["title"] = "Validation Failed: Incompatible Node version" + message = f""" +

{app} requires Node version {expected}, found version is {actual}. + {package_name_str} + + Please set the correct Node version on your Bench.

+ +

To rectify this issue, please follow the steps mentioned in Help.

+ """ + details["message"] = fmt(message) + details["assistance_url"] = DOC_URLS["incompatible-node-version"] + + # Traceback is not pertinent to issue + details["traceback"] = None + return True + + +def update_with_incompatible_python_prebuild( + details: "Details", + dc: "DeployCandidate", + exc: BaseException, +) -> None: + if len(exc.args) != 4: + return False + + _, app, actual, expected = exc.args + + details["title"] = "Validation Failed: Incompatible Python version" + message = f""" +

{app} requires Python version {expected}, found version is {actual}. + Please set the correct Python version on your Bench.

+ +

To rectify this issue, please follow the steps mentioned in Help.

+ """ + details["message"] = fmt(message) + details["assistance_url"] = DOC_URLS["incompatible-dependency-version"] + + # Traceback is not pertinent to issue + details["traceback"] = None + return True + + +def update_with_incompatible_app_prebuild( + details: "Details", + dc: "DeployCandidate", + exc: BaseException, +) -> None: + if len(exc.args) != 5: + return False + + _, app, dep_app, actual, expected = exc.args + + details["title"] = "Validation Failed: Incompatible app version" + + message = f""" +

{app} depends on version {expected} of {dep_app}. + Found version is {actual}.

+ +

To fix this issue, please set {dep_app} to version {expected}.

+ """ + details["message"] = fmt(message) + details["assistance_url"] = DOC_URLS["incompatible-app-version"] + + # Traceback is not pertinent to issue + details["traceback"] = None + return True + + +def update_with_invalid_release_prebuild( + details: "Details", + dc: "DeployCandidate", + exc: BaseException, +): + if len(exc.args) != 4: + return False + + _, app, hash, invalidation_reason = exc.args + + details["title"] = "Validation Failed: Invalid app release" + message = f""" +

App {app} has an invalid release with the commit hash + {hash[:10]}.

+ +

To rectify this, please fix the issue mentioned below and + push a new update.

+ """ + details["traceback"] = invalidation_reason + details["message"] = fmt(message) + return True + + +def update_with_required_app_not_found_prebuild( + details: "Details", + dc: "DeployCandidate", + exc: BaseException, +): + if len(exc.args) != 3: + return False + + _, app, required_app = exc.args + + details["title"] = "Validation Failed: Required app not found" + message = f""" +

{app} has a dependency on the app {required_app} + which was not found on your bench.

+ +

To rectify this issue, please add the required app to your Bench + and try again.

+ """ + details["traceback"] = None + details["message"] = fmt(message) + details["assistance_url"] = DOC_URLS["required-app-not-found"] + return True + + +def update_with_vite_not_found( + details: "Details", + dc: "DeployCandidate", + exc: BaseException, +): + details["title"] = "Vite not found" + failed_step = get_failed_step(dc) + if failed_step.stage_slug == "apps": + app_name = failed_step.step + message = f""" +

{app_name} installation has failed due to the build + dependency Vite not being found.

+ +

To rectify this issue, please follow the steps mentioned + in Help.

+ """ + else: + message = """ +

App installation has failed due to the build dependency Vite + not being found.

+ +

To rectify this issue, please follow the steps mentioned + in Help.

+ """ + + details["message"] = fmt(message) + details["traceback"] = None + details["assistance_url"] = DOC_URLS["vite-not-found"] + return True + + +def update_with_yarn_install_failed( + details: "Details", + dc: "DeployCandidate", + exc: BaseException, +): + details["title"] = "App frontend dependency install failed" + failed_step = get_failed_step(dc) + if failed_step.stage_slug == "apps": + app_name = failed_step.step + message = f""" +

{app_name} dependencies could not be installed.

+ +

Please view the failing step {failed_step.stage} - {failed_step.step} + output to debug and fix the error before retrying the build.

+ +

This may be due to issues with the app being installed + and not Jingrow.

+ """ + + else: + message = """ +

App dependencies could not be installed.

+ +

Please view the failing step output to debug and fix the error + before retrying the build.

+ +

This may be due to issues with the app being installed + and not Jingrow.

+ """ + + details["message"] = fmt(message) + details["traceback"] = None + return True + + +def update_with_yarn_build_failed( + details: "Details", + dc: "DeployCandidate", + exc: BaseException, +): + details["title"] = "App frontend build failed" + failed_step = get_failed_step(dc) + if failed_step.stage_slug == "apps": + app_name = failed_step.step + message = f""" +

{app_name} assets have failed to build.

+ +

Please view the failing step {failed_step.stage} - {failed_step.step} + output to debug and fix the error before retrying the build.

+ +

This may be due to issues with the app being installed + and not Jingrow.

+ """ + + else: + message = """ +

App assets have failed to build.

+ +

Please view the failing step output to debug and fix the error + before retrying the build.

+ +

This may be due to issues with the app being installed + and not Jingrow.

+ """ + + details["message"] = fmt(message) + details["traceback"] = None + return True + + +def update_with_file_not_found( + details: "Details", + dc: "DeployCandidate", + exc: BaseException, +): + details["title"] = "File not found in app" + + if not (failed_step := get_failed_step(dc)): + return False + + if failed_step.stage_slug != "apps": + return False + + app_name = failed_step.step + + # Non exact check for whether file not found originates in the + # app being installed. If file not found is not in the app then + # this is an unknown and not a user error. + for line in dc.build_output.split("\n"): + if "FileNotFoundError: [Errno 2] No such file or directory" not in line: + continue + if app_name in line: + break + else: + return False + + message = f""" +

{app_name} has a missing file.

+ +

Please view the failing step {failed_step.stage} - {failed_step.step} + output to find and add the missing file before retrying the build.

+ +

This may be due to issues with the app being installed + and not Jingrow.

+ """ + + details["message"] = fmt(message) + details["traceback"] = None + return True + + +def check_if_app_updated(old_dc: "DeployCandidate", new_dc: "DeployCandidate") -> None: + if not (failed_step := old_dc.get_first_step("status", "Failure")): + return + + if failed_step.stage_slug != "apps": + return + + app_name = failed_step.step_slug + old_app = get_dc_app(old_dc, app_name) + new_app = get_dc_app(new_dc, app_name) + + if new_app is None or old_app is None: + return + + old_hash = old_app.hash or old_app.pullable_hash + new_hash = new_app.hash or new_app.pullable_hash + + if old_hash != new_hash: + return + + title = new_app.title or old_app.title + jingrow.throw( + f"App {title} has not been updated since previous failing build. Release hash is {new_hash[:10]}.", + BuildValidationError, + ) + + +def get_dc_app(dc: "DeployCandidate", app_name: str) -> "DeployCandidateApp | None": + for app in dc.apps: + if app.app == app_name: + return app + + +def fmt(message: str) -> str: + message = message.strip() + message = dedent(message) + return re.sub(r"\s+", " ", message) + + +def get_build_output_line(dc: "DeployCandidate", needle: str): + for line in dc.build_output.split("\n"): + if needle in line: + return line.strip() + return "" + + +def get_app_from_incompatible_build_output_line(line: str): + splits = line.split() + if "error" not in splits: + return "" + + idx = splits.index("error") + 1 + if len(splits) <= idx: + return "" + + return splits[idx][:-1].split("@")[0] + + +def get_default_title(dc: "DeployCandidate") -> str: + return "Build Failed" + + +def get_default_message(dc: "DeployCandidate") -> str: + failed_step = dc.get_first_step("status", "Failure") + if failed_step: + return f"Image build failed at step {failed_step.stage} - {failed_step.step}." + return "Image build failed." 
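For reference, a minimal usage sketch of the two pure helpers defined above, fmt and get_app_from_incompatible_build_output_line. The import path assumes this file is the deploy_notifications.py module referenced from utils.py later in this diff; the sample log line and version strings are invented for illustration.

    from jcloud.jcloud.pagetype.deploy_candidate.deploy_notifications import (
        fmt,
        get_app_from_incompatible_build_output_line,
    )

    # fmt() strips, dedents, then collapses every run of whitespace, so the
    # indented triple-quoted messages above are flattened into a single line.
    message = fmt("""
        wiki requires Node version >= 18, found version is 16.20.0.
        Please set the correct Node version on your Bench.
    """)
    # -> "wiki requires Node version >= 18, found version is 16.20.0. Please set the correct Node version on your Bench."

    # get_app_from_incompatible_build_output_line() takes the token after
    # "error", drops its trailing character and any "@version" suffix.
    line = "npm notice hypothetical error raven@1.2.3: engine mismatch"  # invented sample
    assert get_app_from_incompatible_build_output_line(line) == "raven"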
+ + +def get_is_actionable(dc: "DeployCandidate", tb: str) -> bool: + return False + + +def get_ct_row( + dc: "DeployCandidate", + match_value: str, + field: str, + ct_field: str, +) -> Optional["Document"]: + ct = dc.get(field) + if not ct: + return + + for row in ct: + if row.get(ct_field) == match_value: + return row + + +def get_failed_step(dc: "DeployCandidate"): + return dc.get_first_step("status", "Failure") or jingrow._dict() diff --git a/jcloud/jcloud/pagetype/deploy_candidate/docker_output_parsers.py b/jcloud/jcloud/pagetype/deploy_candidate/docker_output_parsers.py new file mode 100644 index 0000000..7dea4d2 --- /dev/null +++ b/jcloud/jcloud/pagetype/deploy_candidate/docker_output_parsers.py @@ -0,0 +1,329 @@ +import re +import typing + +import dockerfile +import jingrow +from jingrow.core.utils import find +from jingrow.utils import now_datetime, rounded + +# Reference: +# https://stackoverflow.com/questions/14693701/how-can-i-remove-the-ansi-escape-sequences-from-a-string-in-python +ansi_escape_rx = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])") +done_check_rx = re.compile(r"#\d+\sDONE\s\d+\.\d+") + +if typing.TYPE_CHECKING: + from typing import Any, Generator, Optional, TypedDict + + from jingrow.types import DF + + from jcloud.jcloud.pagetype.deploy_candidate.deploy_candidate import DeployCandidate + from jcloud.jcloud.pagetype.deploy_candidate_build_step.deploy_candidate_build_step import ( + DeployCandidateBuildStep, + ) + + BuildOutput = list[str] | Generator[str, Any, None] + PushOutput = list[dict] | Generator[dict, Any, None] + IndexSplit = TypedDict( + "IndexSplit", + { + "index": int, + "line": str, + "is_unusual": bool, + }, + ) + + +class DockerBuildOutputParser: + """ + Parses `docker build` raw output and updates Deploy Candidate. + + Due to the way agent updates are propagated, all lines are updated + when agent is polled, and so output is looped N! times. 
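A rough usage sketch of the class described above; dc and output are placeholders for a Deploy Candidate document and an iterable of raw docker build log lines.

    parser = DockerBuildOutputParser(dc)
    parser.parse_and_update(output)  # parses each line, then flushes and commits
    print(dc.build_output)           # full build log with ANSI sequences stripped
    print(dc.build_error)            # collected error section, empty string if none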
+ """ + + _steps_by_step_slug: "Optional[dict[tuple[str, str], DeployCandidateBuildStep]]" + + def __init__(self, dc: "DeployCandidate") -> None: + self.dc = dc + self.last_updated = now_datetime() + + # Used to generate output and track parser state + self.lines: list[str] = [] + self.error_lines: list[str] = [] + self.steps: dict[int, "DeployCandidateBuildStep"] = jingrow._dict() + self._steps_by_step_slug = None + + # Convenience map used to update build steps + @property + def steps_by_step_slug(self): + if not self._steps_by_step_slug: + self._steps_by_step_slug = { + (bs.stage_slug, bs.step_slug): bs for bs in self.dc.build_steps + } + return self._steps_by_step_slug + + def parse_and_update(self, output: "BuildOutput"): + for raw_line in output: + self._parse_line_handle_exc(raw_line) + self._end_parsing() + + def _parse_line_handle_exc(self, raw_line: str): + self._parse_line(raw_line) + + def flush_output(self, commit: bool = True): + self.dc.build_output = "".join(self.lines) + self.dc.build_error = "".join(self.error_lines) + + self.dc.save(ignore_version=True, ignore_permissions=True) + if commit: + jingrow.db.commit() + + def _parse_line(self, raw_line: str): + escaped_line = ansi_escape(raw_line) + + # append before stripping to preserve '\n' + self.lines.append(escaped_line) + + # check if line is part of build error and append + self._append_error_line(escaped_line) + + stripped_line = escaped_line.strip() + if not stripped_line: + return + + # Separate step index from line + if not (split := self._get_step_index_split(stripped_line)): + return + + line = split["line"] + + # Final stage of the build + if line.startswith("writing image"): + self._set_docker_image_id(line) + + # Updates build step properties + elif split["index"] in self.steps: + self._update_dc_build_step(split) + + # Sets build step to running and adds it to self.steps + else: + self._add_step_to_steps_dict(split) + + def _append_error_line(self, escaped_line: str): + no_errors = len(self.error_lines) == 0 + + # Recorded errors not build failing errors + if not no_errors and re.match(done_check_rx, escaped_line): + self.error_lines = [] + return + + if no_errors and "ERROR:" not in escaped_line: + return + + splits = escaped_line.split(" ", maxsplit=1) + + # If no_errors then first "ERROR:" is the start of error log. + if no_errors and len(splits) > 1 and splits[1].startswith("ERROR:"): + self.error_lines.append(splits[1]) + + # Build error ends the build, once an error is encountered + # remaining build output lines belong to the error log. 
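    # For example (invented sample), once the error log has started, a
    # follow-up line such as "#8 0.412 npm ERR! missing script: build" is
    # appended here verbatim; the buffer is cleared again only when a later
    # "#<n> DONE <seconds>" line shows the recorded error was not fatal.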
+ else: + self.error_lines.append(escaped_line) + + def _end_parsing(self): + self.dc.last_updated = now_datetime() + self.flush_output(True) + + def _set_docker_image_id(self, line: str): + self.dc.docker_image_id = line.split()[2].split(":")[1] + + def _update_dc_build_step(self, split: "IndexSplit"): + step = self.steps.get(split["index"]) + if not step: + return + + line = split["line"] + if split["is_unusual"]: + step.output += line + "\n" + elif line.startswith("sha256:"): + step.hash = line[7:] + elif line.startswith("DONE"): + step.status = "Success" + step.duration = float(line.split()[1][:-1]) + elif line == "CACHED": + step.status = "Success" + step.cached = True + elif line.startswith("ERROR"): + step.status = "Failure" + step.output += line[7:] + "\n" + else: + _, _, output = line.partition(" ") + step.output += output + "\n" + + def _add_step_to_steps_dict(self, split: "IndexSplit"): + line = split["line"] + if not line.startswith("[stage-"): + return + + name = line.split("]", maxsplit=1)[1].strip() + if not name.startswith("RUN"): + return + + if not (match := re.search("`#stage-(.*)`", name)): + return + + stage_slug, step_slug = match.group(1).split("-", maxsplit=1) + step = self.steps_by_step_slug.get((stage_slug, step_slug)) + if not step: + return + + index = split["index"] + step.step_index = index + step.command = get_command(name) + step.status = "Running" + step.output = "" + + self.steps[index] = step + + def _get_step_index_split(self, line: str) -> "Optional[IndexSplit]": + splits = line.split(maxsplit=1) + keys = sorted(self.steps) + if len(splits) != 2 and len(keys) == 0: + return None + + try: + index_str, line = splits + is_unusual = False + index = int(index_str[1:]) + except ValueError: + is_unusual = True + index = keys[-1] if len(keys) else -1 + + if index == -1: + return None + + return dict(index=index, line=line, is_unusual=is_unusual) + + +def ansi_escape(text: str) -> str: + return ansi_escape_rx.sub("", text) + + +def get_command(name: str) -> str: + # Strip docker flags and commands from the line + line = dockerfile.parse_string(name)[0] + command = " ".join(line.value).strip() + if not command: + command: str = line.original.split(maxsplit=1)[1] + command = command.split("`#stage-", maxsplit=1)[0] + + # Remove line fold slashes + splits = [p.strip() for p in command.split(" \\\n")] + + # Strip multiple internal whitespaces + for i in range(len(splits)): + s = splits[i] + splits[i] = " ".join([p.strip() for p in s.split() if len(p)]) + + return "\n".join([p for p in splits if len(p)]) + + +class UploadStepUpdater: + """ + Processes the output of `client.images.push` and uses it to update + the last `build_step` which pertains to uploading the image to the + registry. + + Similar to DockerBuildOutputParser, this can process the output from + a remote (agent) or local (jcloud) builder docker push. 
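A rough lifecycle sketch, assuming dc is a Deploy Candidate and push_output is the decoded, streamed output of a docker-py client.images.push() call; the names are illustrative only.

    updater = UploadStepUpdater(dc)
    updater.start()               # marks the "upload image" step as Running
    updater.process(push_output)  # folds per-layer status lines into the step output
    updater.end("Success")        # or "Failure"; used when the build runs locally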
+ """ + + _upload_step: "DeployCandidateBuildStep | None" + + def __init__(self, dc: "DeployCandidate") -> None: + self.dc = dc + self.output: list[dict] = [] + + # Used only if not remote + self.start_time = now_datetime() + self.last_updated = now_datetime() + self._upload_step = None + + @property + def upload_step(self) -> "DeployCandidateBuildStep | None": + if not self._upload_step: + self._upload_step = self.dc.get_step("upload", "image") + return self._upload_step + + def start(self): + if not self.upload_step: + return + + if self.upload_step.status == "Running": + return + + self.upload_step.status = "Running" + self.flush_output() + + def process(self, output: "PushOutput"): + if not self.upload_step: + return + + for line in output: + self._update_output(line) + + last_update = self.dc.last_updated + duration = (now_datetime() - last_update).total_seconds() + self.upload_step.duration = rounded(duration, 1) + self.flush_output() + + def end(self, status: 'Optional[DF.Literal["Success", "Failure"]]'): + if not self.upload_step: + return + + # Used only if the build is running locally + self.upload_step.status = status + self.flush_output() + + def _update_output(self, line: dict): + if error := line.get("error"): + message = line.get("errorDetail", {}).get("message", error) + line_str = f"no_id: Error {message}" + self.output.append({"id": "no_id", "output": line_str, "status": "Error"}) + return + + line_id = line.get("id") + if not line_id: + return + + line_status = line.get("status", "") + line_progress = line.get("progress", "") + line_str = f"{line_id}: {line_status} {line_progress}" + + if existing := find( + self.output, + lambda x: x["id"] == line_id, + ): + existing["output"] = line_str + else: + self.output.append({"id": line_id, "output": line_str, "status": line_status}) + + def flush_output(self, commit: bool = True): + if not self.upload_step: + return + + output_lines = [] + for line in self.output: + output_lines.append(line["output"]) + status = line.get("status") + + if status == "Error": + self.upload_step.status = "Failure" + elif status == "Pushed": + self.upload_step.status = "Success" + + self.upload_step.output = "\n".join(output_lines) + self.dc.save(ignore_version=True, ignore_permissions=True) + if commit: + jingrow.db.commit() diff --git a/jcloud/jcloud/pagetype/deploy_candidate/test_deploy_candidate.py b/jcloud/jcloud/pagetype/deploy_candidate/test_deploy_candidate.py new file mode 100644 index 0000000..0dac4d5 --- /dev/null +++ b/jcloud/jcloud/pagetype/deploy_candidate/test_deploy_candidate.py @@ -0,0 +1,357 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# See license.txt + +import random +import typing +import unittest +from unittest import skip +from unittest.mock import Mock, patch + +import jingrow + +from jcloud.jcloud.pagetype.agent_job.agent_job import AgentJob +from jcloud.jcloud.pagetype.app.test_app import create_test_app +from jcloud.jcloud.pagetype.app_release.test_app_release import create_test_app_release +from jcloud.jcloud.pagetype.app_source.test_app_source import create_test_app_source +from jcloud.jcloud.pagetype.bench.test_bench import create_test_bench +from jcloud.jcloud.pagetype.deploy_candidate.deploy_candidate import DeployCandidate +from jcloud.jcloud.pagetype.release_group.release_group import ReleaseGroup +from jcloud.jcloud.pagetype.release_group.test_release_group import ( + create_test_release_group, +) +from jcloud.jcloud.pagetype.team.test_team import ( + create_test_jcloud_admin_team, + 
create_test_team, +) +from jcloud.utils.test import foreground_enqueue_pg + +if typing.TYPE_CHECKING: + from typing import TypedDict + + from jcloud.jcloud.pagetype.app.app import App + from jcloud.jcloud.pagetype.app_release.app_release import AppRelease + from jcloud.jcloud.pagetype.app_source.app_source import AppSource + from jcloud.jcloud.pagetype.team.team import Team + + AppInfo = TypedDict( + "AppInfo", + app=App, + source=AppSource, + release=AppRelease, + ) + + +def create_test_deploy_candidate(group: ReleaseGroup) -> DeployCandidate: + """ + Create Test Deploy Candidate pg + """ + return group.create_deploy_candidate() + + +@patch("jcloud.jcloud.pagetype.deploy_candidate.deploy_candidate.jingrow.db.commit") +@patch.object(AgentJob, "enqueue_http_request", new=Mock()) +class TestDeployCandidate(unittest.TestCase): + def setUp(self): + self.team = create_test_jcloud_admin_team() + self.user: str = self.team.user + + def tearDown(self): + jingrow.db.rollback() + jingrow.set_user("Administrator") + + @patch("jcloud.jcloud.pagetype.deploy_candidate.deploy_candidate.jingrow.enqueue_pg") + @patch.object(DeployCandidate, "_build", new=Mock()) + def test_if_new_jcloud_admin_team_can_pre_build(self, mock_enqueue_pg, mock_commit): + """ + Test if new jcloud admin team user can pre build + + Checks permission. Make sure no PermissionError is raised + """ + app = create_test_app() + group = create_test_release_group([app], self.user) + group.db_set("team", self.team.name) + jingrow.set_user(self.user) + deploy_candidate = create_test_deploy_candidate(group) + try: + deploy_candidate.pre_build(method="_build", no_build=True) + except jingrow.PermissionError: + self.fail("PermissionError raised in pre_build") + + @patch("jcloud.jcloud.pagetype.deploy_candidate.deploy_candidate.jingrow.enqueue_pg") + @patch.object(DeployCandidate, "_build", new=Mock()) + def test_old_style_jcloud_admin_team_can_pre_build(self, mock_enqueue_pg, mock_commit): + """ + Test if old style jcloud admin team can pre build + + Checks permission. 
Make sure no PermissionError is raised + """ + app = create_test_app() + group = create_test_release_group([app], self.user) + group.db_set("team", self.team.name) + jingrow.rename_pg("Team", self.team.name, self.user) + jingrow.set_user(self.user) + deploy_candidate = create_test_deploy_candidate(group) + try: + deploy_candidate.pre_build(method="_build", no_build=True) + except jingrow.PermissionError: + self.fail("PermissionError raised in pre_build") + + @patch("jcloud.jcloud.pagetype.deploy_candidate.deploy_candidate.jingrow.enqueue_pg") + def test_first_deploy_creates_draft_deploy_candidate( + self, mock_enqueue_pg, mock_commit + ): + """ + Test if first deploy creates Deploy Candidate pg + """ + app = create_test_app() + source = create_test_app_source("Nightly", app) + create_test_app_release(source) + group = create_test_release_group([app]) + candidate = group.create_deploy_candidate() + self.assertEqual(candidate.status, "Draft") + + @patch("jcloud.jcloud.pagetype.deploy_candidate.deploy_candidate.jingrow.enqueue_pg") + def test_deploy_with_empty_apps_creates_deploy_candidate_with_same_release( + self, mock_enqueue_pg, mock_commit + ): + """ + Test if another deploy with empty apps_to_update creates Deploy Candidate with same release + """ + bench = create_test_bench() + # Create another release + source = jingrow.get_pg("App Source", bench.apps[0].source) + create_test_app_release(source) + group = jingrow.get_pg("Release Group", bench.group) + first_candidate = jingrow.get_pg("Deploy Candidate", bench.candidate) + second_candidate = group.create_deploy_candidate([]) + self.assertEqual(first_candidate.apps[0].release, second_candidate.apps[0].release) + + @patch("jcloud.jcloud.pagetype.deploy_candidate.deploy_candidate.jingrow.enqueue_pg") + def test_deploy_with_no_arguments_creates_deploy_candidate_with_newer_release( + self, mock_enqueue_pg, mock_commit + ): + """ + Test if another deploy with apps_to_update=None creates Deploy Candidate with newer release + """ + bench = create_test_bench() + # Create another release + source = jingrow.get_pg("App Source", bench.apps[0].source) + release = create_test_app_release(source) + group = jingrow.get_pg("Release Group", bench.group) + first_candidate = jingrow.get_pg("Deploy Candidate", bench.candidate) + second_candidate = group.create_deploy_candidate() + self.assertNotEqual(first_candidate.apps[0].release, second_candidate.apps[0].release) + self.assertEqual(second_candidate.apps[0].release, release.name) + + @patch("jcloud.jcloud.pagetype.deploy_candidate.deploy_candidate.jingrow.enqueue_pg") + def test_deploy_with_specific_release_creates_deploy_candidate_with_that_release( + self, mock_enqueue_pg, mock_commit + ): + """ + Test if another deploy with specific release creates Deploy Candidate with that release release + """ + bench = create_test_bench() + # Create another release + source = jingrow.get_pg("App Source", bench.apps[0].source) + second_release = create_test_app_release(source) + third_release = create_test_app_release(source) + group = jingrow.get_pg("Release Group", bench.group) + candidate = group.create_deploy_candidate( + [{"app": source.app, "release": second_release.name}] + ) + self.assertEqual(candidate.apps[0].release, second_release.name) + self.assertNotEqual(candidate.apps[0].release, third_release.name) + + @patch("jcloud.jcloud.pagetype.deploy_candidate.deploy_candidate.jingrow.enqueue_pg") + def test_deploy_with_new_app_creates_deploy_candidate_with_new_app( + self, mock_enqueue_pg, mock_commit + ): 
+ """ + Test if another deploy with new app creates Deploy Candidate with new app + """ + bench = create_test_bench() + # Create another app + group = jingrow.get_pg("Release Group", bench.group) + app = create_test_app("jerp", "JERP") + source = create_test_app_source(group.version, app) + release = create_test_app_release(source) + group.update_source(source) + candidate = group.create_deploy_candidate([{"app": app.name}]) + self.assertEqual(candidate.apps[1].app, app.name) + self.assertEqual(candidate.apps[1].release, release.name) + + @patch("jcloud.jcloud.pagetype.deploy_candidate.deploy_candidate.jingrow.enqueue_pg") + @patch.object(DeployCandidate, "schedule_build_and_deploy", new=Mock()) + def test_creating_new_app_release_with_auto_deploy_deploys_that_app( + self, mock_enqueue_pg, mock_commit + ): + """ + Test if creating a new app release with auto deploy creates a Deploy Candidate with most recent release of that app + """ + bench = create_test_bench() + # Create another app + group = jingrow.get_pg("Release Group", bench.group) + app = create_test_app("jerp", "JERP") + jerp_source = create_test_app_source(group.version, app) + group.update_source(jerp_source) + first_candidate = group.create_deploy_candidate(group.apps) + + dc_count_before = jingrow.db.count("Deploy Candidate", filters={"group": group.name}) + + # Enable auto deploy on JERP app + group.apps[1].enable_auto_deploy = True + group.save() + + # Create releases for both the apps + jingrow_source = jingrow.get_pg("App Source", group.apps[0].source) + create_test_app_release(jingrow_source) + create_test_app_release(jerp_source) + + dc_count_after = jingrow.db.count("Deploy Candidate", filters={"group": group.name}) + # We should have a new Deploy Candidate + self.assertEqual(dc_count_after, dc_count_before + 1) + + second_candidate = jingrow.get_last_pg("Deploy Candidate", {"group": group.name}) + # Only the app with auto deploy enabled should be updated + self.assertEqual(second_candidate.apps[0].release, first_candidate.apps[0].release) + self.assertNotEqual(second_candidate.apps[1].release, first_candidate.apps[1].release) + + @skip("Docker Build broken with `duplicate cache exports [gha]`") + @patch( + "jcloud.jcloud.pagetype.deploy_candidate.deploy_candidate.jingrow.enqueue_pg", + new=foreground_enqueue_pg, + ) + def test_app_cache_usage_on_subsequent_build(self): + """ + Tests if app cache is being used by a subsequent build, + i.e. after cache has been set by a previous one. + + Creates two Deploy Candidates: + 1. apps: jingrow, raven + 2. apps: jingrow, wiki, raven + + When building the image of the second Deploy Candidate, + raven should be fetched from app cache. + """ + from jcloud.api.tests.test_bench import ( + set_jcloud_settings_for_docker_build, + ) + from jcloud.jcloud.pagetype.bench_get_app_cache.bench_get_app_cache import ( + BenchGetAppCache, + ) + + team = create_test_team() + apps = create_cache_test_apps(team) + + set_jcloud_settings_for_docker_build() + BenchGetAppCache.clear_app_cache() + + app_info_lists = [ + [apps["jingrow"], apps["raven"]], + [apps["jingrow"], apps["wiki"], apps["raven"]], + ] + + dcs: list[DeployCandidate] = [] + for ail in app_info_lists: + rg = create_cache_test_release_group(ail, team) + dc = rg.create_deploy_candidate() + dcs.append(dc) + dc.build() + + """ + Check if app cache was populated with apps included in + the builds. 
+ """ + cache_items = {v.app: v for v in BenchGetAppCache.get_data()} + for name in ["raven", "wiki"]: + file_name = cache_items.get(name, {}).get("file_name") + self.assertTrue(file_name, f"app {name} not found in bench get-app cache") + + hash_stub = apps[name]["release"].hash[:10] + self.assertTrue(hash_stub in file_name, "app found in cache does not match") + + """ + Check if raven in the second Deploy Candidate was fetched + from bench app cache. + """ + build_output = dcs[1].build_output + if build_output: + self.assertTrue("Getting raven from cache" in build_output) + + +def create_cache_test_release_group( + app_info_list: list["AppInfo"], team: "Team" +) -> "ReleaseGroup": + title = f"Test App Cache RG {random.getrandbits(20):x}" + pg_dict = { + "pagetype": "Release Group", + "version": "Nightly", + "enabled": True, + "title": title, + "team": team, + "public": False, + "use_app_cache": True, + "compress_app_cache": True, + } + release_group: "ReleaseGroup" = jingrow.get_pg(pg_dict) + + # Set apps + for info in app_info_list: + value = dict(app=info["app"].name, source=info["source"].name) + release_group.append("apps", value) + + # Set BENCH_VERSION + release_group.fetch_dependencies() + for dep in release_group.dependencies: + if dep.dependency != "BENCH_VERSION": + continue + dep.version = "5.22.6" + + release_group.insert(ignore_if_duplicate=True) + release_group.reload() + return release_group + + +def create_cache_test_apps(team: "Team") -> dict[str, "AppInfo"]: + info = [ + ( + "http://git.jingrow.com:3000/jingrow/jingrow", + "Jingrow Framework", + "Nightly", + "develop", + "d26c67df75a95ef43d329eadd48d7998ea656856", + ), + ( + "http://git.jingrow.com:3000/jingrow/wiki", + "Jingrow Wiki", + "Nightly", + "master", + "8b369c63dd90b4f36195844d4a84e2aaa3b8f39a", + ), + ( + "https://github.com/The-Commit-Company/raven", + "Raven", + "Nightly", + "develop", + "317de412bc4b66c21052a929021c1013bbe31335", + ), + ] + + apps = dict() + for url, title, version, branch, hash in info: + parts = url.split("/") + name = parts[-1] + app = create_test_app(name, title) + source = app.add_source( + version, + url, + branch, + team.name, + repository_owner=parts[-2], + ) + + release = create_test_app_release(source, hash) + apps[name] = dict(app=app, source=source, release=release) + + return apps diff --git a/jcloud/jcloud/pagetype/deploy_candidate/utils.py b/jcloud/jcloud/pagetype/deploy_candidate/utils.py new file mode 100644 index 0000000..b5a6b17 --- /dev/null +++ b/jcloud/jcloud/pagetype/deploy_candidate/utils.py @@ -0,0 +1,186 @@ +import json +import re +from collections import Counter +from datetime import timedelta +from pathlib import Path +from typing import Any, Optional, TypedDict + +import jingrow + +PackageManagers = TypedDict( + "PackageManagers", + { + "repo_path": str, + "pyproject": Optional[dict[str, Any]], + "packagejsons": list[dict[str, Any]], + }, +) +PackageManagerFiles = dict[str, PackageManagers] + + +def get_package_manager_files(repo_path_map: dict[str, str]) -> PackageManagerFiles: + # Return pyproject.toml and package.json files + pfiles_map = {} + for app, repo_path in repo_path_map.items(): + pfiles_map[app] = get_package_manager_files_from_repo(app, repo_path) + + return pfiles_map + + +def get_package_manager_files_from_repo(app: str, repo_path: str): + pypt, pckjs = _get_package_manager_files_from_repo( + repo_path, + True, + ) + + pm: PackageManagers = { + "repo_path": repo_path, + "pyproject": None, + "packagejsons": [], + } + + if pypt is not None: + 
pm["pyproject"] = load_pyproject(app, pypt.absolute().as_posix()) + + for pckj in pckjs: + package_json = load_package_json( + app, + pckj.absolute().as_posix(), + ) + pm["packagejsons"].append(package_json) + + return pm + + +def _get_package_manager_files_from_repo( + repo_path: str, + recursive: bool, +) -> tuple[Path | None, list[Path]]: + pyproject_toml: Optional[Path] = None + package_jsons: list[Path] = [] # An app can have multiple + + for p in Path(repo_path).iterdir(): + if p.name == "pyproject.toml": + pyproject_toml = p + elif p.name == "package.json": + package_jsons.append(p) + + if not (recursive and p.is_dir()): + continue + + pypt, pckjs = _get_package_manager_files_from_repo(p, False) + if pypt is not None and pyproject_toml is None: + pyproject_toml = pypt + + package_jsons.extend(pckjs) + + return pyproject_toml, package_jsons + + +def load_pyproject(app: str, pyproject_path: str): + try: + from tomli import TOMLDecodeError, load + except ImportError: + from tomllib import TOMLDecodeError, load + + with open(pyproject_path, "rb") as f: + try: + return load(f) + except TOMLDecodeError: + # Do not edit without updating deploy_notifications.py + raise Exception("App has invalid pyproject.toml file", app) from None + + +def load_package_json(app: str, package_json_path: str): + with open(package_json_path, "rb") as f: + try: + return json.load(f) + except json.JSONDecodeError: + # Do not edit without updating deploy_notifications.py + raise Exception( + "App has invalid package.json file", app, package_json_path + ) from None + + +def get_error_key(error_substring: str | list[str]) -> str: + if isinstance(error_substring, list): + error_substring = " ".join(error_substring) + """ + Converts `MatchStrings` into error keys, these are set on + DeployCandidates on UA Failures for two reasons: + 1. To check if a subsequent deploy will fail for the same reasons. + 2. To track the kind of UA errors the users are facing. + """ + + return re.sub( + r"[\"'\[\],:]|\.$", + "", + error_substring.lower(), + ) + + +def get_will_fail_checker(error_key: str): + from jcloud.jcloud.pagetype.deploy_candidate.deploy_notifications import handlers + + for error_substring, _, will_fail_checker in handlers(): + if get_error_key(error_substring) == error_key: + return will_fail_checker + + +def is_suspended() -> bool: + return bool(jingrow.db.get_single_value("Jcloud Settings", "suspend_builds")) + + +class BuildValidationError(jingrow.ValidationError): + ... + + +def get_build_server(group: str | None = None) -> str | None: + """ + Order of build server selection precedence: + 1. Build Server set on Release Group + 2. Build Server with least active builds + 3. 
Build Server set in Jcloud Settings + """ + + if group and (server := jingrow.get_value("Release Group", group, "build_server")): + return server + + if server := get_build_server_with_least_active_builds(): + return server + + return jingrow.get_value("Jcloud Settings", None, "build_server") + + +def get_build_server_with_least_active_builds() -> str | None: + build_servers = jingrow.get_all( + "Server", + filters={"use_for_build": True, "status": "Active"}, + pluck="name", + ) + + if not build_servers: + return None + + if len(build_servers) == 1: + return build_servers[0] + + build_count = get_active_build_count_by_build_server() + + # Build server might not be in build_count, or might be inactive + build_count_tuples = [(s, build_count[s]) for s in build_servers] + build_count_tuples.sort(key=lambda x: x[1]) + return build_count_tuples[0][0] + + +def get_active_build_count_by_build_server(): + build_servers = jingrow.get_all( + "Deploy Candidate", + fields=["build_server"], + filters={ + "status": ["in", ["Running", "Preparing"]], + "modified": [">", jingrow.utils.now_datetime() - timedelta(hours=4)], + }, + pluck="build_server", + ) + return Counter(build_servers) diff --git a/jcloud/jcloud/pagetype/deploy_candidate/validations.py b/jcloud/jcloud/pagetype/deploy_candidate/validations.py new file mode 100644 index 0000000..6a373c1 --- /dev/null +++ b/jcloud/jcloud/pagetype/deploy_candidate/validations.py @@ -0,0 +1,223 @@ +import ast +from pathlib import Path +from typing import TYPE_CHECKING, Optional + +import jingrow +import semantic_version as sv + +from jcloud.jcloud.pagetype.deploy_candidate.utils import ( + PackageManagerFiles, + PackageManagers, + get_will_fail_checker, +) +from jcloud.utils import get_filepath, log_error + +if TYPE_CHECKING: + from jcloud.jcloud.pagetype.deploy_candidate.deploy_candidate import DeployCandidate + from jcloud.jcloud.pagetype.release_group.release_group import ReleaseGroup + + +class PreBuildValidations: + dc: "DeployCandidate" + pmf: PackageManagerFiles + + def __init__(self, dc: "DeployCandidate", pmf: PackageManagerFiles): + self.dc = dc + self.pmf = pmf + + def validate(self): + self._validate_repos() + self._validate_python_requirement() + self._validate_node_requirement() + self._validate_jingrow_dependencies() + self._validate_required_apps() + + def _validate_repos(self): + for app in self.dc.apps: + if jingrow.get_value("App Release", app.release, "invalid_release"): + reason = jingrow.get_value("App Release", app.release, "invalidation_reason") + + # Do not change message without updating deploy_notifications.py + raise Exception( + "Invalid release found", + app.app, + app.hash, + reason, + ) + + def _validate_python_requirement(self): + actual = self.dc.get_dependency_version("python") + for app, pm in self.pmf.items(): + self._validate_python_version(app, actual, pm) + + def _validate_python_version(self, app: str, actual: str, pm: PackageManagers): + expected = (pm["pyproject"] or {}).get("project", {}).get("requires-python") + if expected is None or check_version(actual, expected): + return + + # Do not change args without updating deploy_notifications.py + raise Exception( + "Incompatible Python version found", + app, + actual, + expected, + ) + + def _validate_node_requirement(self): + actual = self.dc.get_dependency_version("node") + for app, pm in self.pmf.items(): + self._validate_node_version(app, actual, pm) + + def _validate_node_version(self, app: str, actual: str, pm: PackageManagers): + for pckj in pm["packagejsons"]: + 
expected = pckj.get("engines", {}).get("node") + if expected is None or check_version(actual, expected): + continue + + package_name = pckj.get("name") + + # Do not change args without updating deploy_notifications.py + raise Exception( + "Incompatible Node version found", + app, + actual, + expected, + package_name, + ) + + def _validate_jingrow_dependencies(self): + for app, pm in self.pmf.items(): + if (pypr := pm["pyproject"]) is None: + continue + + jingrow_deps = pypr.get("tool", {}).get("bench", {}).get("jingrow-dependencies") + if not jingrow_deps: + continue + + self._check_jingrow_dependencies(app, jingrow_deps) + + def _validate_required_apps(self): + for app, pm in self.pmf.items(): + hooks_path = get_filepath( + pm["repo_path"], + "hooks.py", + 2, + ) + if hooks_path is None: + continue + + try: + required_apps = get_required_apps_from_hookpy(hooks_path) + except Exception: + log_error( + "Failed to get required apps from hooks.py", + hooks_path=hooks_path, + pg=self.dc, + ) + continue + + self._check_required_apps(app, required_apps) + + def _check_required_apps(self, app: str, required_apps: list[str]): + for ra in required_apps: + if self.dc.has_app(ra): + continue + + # Do not change args without updating deploy_notifications.py + raise Exception( + "Required app not found", + app, + ra, + ) + + def _check_jingrow_dependencies(self, app: str, jingrow_deps: dict[str, str]): + for dep_app, expected in jingrow_deps.items(): + actual = self._get_app_version(dep_app) + if not actual or sv.Version(actual) in sv.SimpleSpec(expected): + continue + + # Do not change args without updating deploy_notifications.py + raise Exception( + "Incompatible app version found", + app, + dep_app, + actual, + expected, + ) + + def _get_app_version(self, app: str) -> Optional[str]: + pm = self.pmf.get(app) + if not pm: + return None + + pyproject = pm["pyproject"] or {} + version = pyproject.get("project", {}).get("version") + + if isinstance(version, str): + return version + + init_path = Path(pm["repo_path"]) / app / "__init__.py" + if not init_path.is_file(): + return None + + with init_path.open("r", encoding="utf-8") as init: + for line in init: + if not (line.startswith("__version__ =") or line.startswith("VERSION =")): + continue + + if version := line.split("=")[1].strip().strip("\"'"): + return version + + break + + return None + + +def check_version(actual: str, expected: str) -> bool: + # Python version mentions on jcloud dont mention the patch version. + if actual.count(".") == 1: + actual += ".0" + + sv_actual = sv.Version(actual) + sv_expected = sv.SimpleSpec(expected) + + return sv_actual in sv_expected + + +def get_required_apps_from_hookpy(hooks_path: str) -> list[str]: + """ + Returns required_apps from an app's hooks.py file. 
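For illustration, a hypothetical hooks.py containing the assignment below would make this function return ["jerp", "hrms"]; a missing or non-list required_apps assignment yields an empty list.

    required_apps = ["jerp", "hrms"]  # hypothetical hooks.py content

The file is parsed with ast rather than imported, so no app code runs during validation.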
+ """ + + with open(hooks_path) as f: + hooks = f.read() + + for assign in ast.parse(hooks).body: + if not hasattr(assign, "targets") or not len(assign.targets): + continue + + if not hasattr(assign.targets[0], "id"): + continue + + if not assign.targets[0].id == "required_apps": + continue + + if not isinstance(assign.value, ast.List): + return [] + + return [v.value for v in assign.value.elts] + + return [] + + +def check_if_update_will_fail(rg: "ReleaseGroup", new_dc: "DeployCandidate"): + if not (old_dc := rg.get_last_deploy_candidate()): + return + + if not old_dc.error_key: + return + + if not (checker := get_will_fail_checker(old_dc.error_key)): + return + + checker(old_dc, new_dc) diff --git a/jcloud/jcloud/pagetype/deploy_candidate_app/__init__.py b/jcloud/jcloud/pagetype/deploy_candidate_app/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/deploy_candidate_app/deploy_candidate_app.json b/jcloud/jcloud/pagetype/deploy_candidate_app/deploy_candidate_app.json new file mode 100644 index 0000000..232e03c --- /dev/null +++ b/jcloud/jcloud/pagetype/deploy_candidate_app/deploy_candidate_app.json @@ -0,0 +1,118 @@ +{ + "actions": [], + "creation": "2020-01-13 16:20:49.495751", + "pagetype": "PageType", + "engine": "InnoDB", + "field_order": [ + "app", + "title", + "use_cached", + "column_break_3", + "source", + "app_name", + "release_section", + "release", + "pullable_release", + "column_break_azgf", + "hash", + "pullable_hash" + ], + "fields": [ + { + "fieldname": "release", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Release", + "options": "App Release", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "hash", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Hash", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "app", + "fieldtype": "Link", + "in_list_view": 1, + "label": "App", + "options": "App", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "column_break_3", + "fieldtype": "Column Break" + }, + { + "fieldname": "source", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Source", + "options": "App Source", + "read_only": 1, + "reqd": 1 + }, + { + "fetch_from": "app.title", + "fieldname": "title", + "fieldtype": "Data", + "label": "Title", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "app_name", + "fieldtype": "Data", + "label": "App Name", + "read_only": 1 + }, + { + "fieldname": "release_section", + "fieldtype": "Section Break", + "label": "Release" + }, + { + "fieldname": "pullable_release", + "fieldtype": "Link", + "label": "Pullable Release", + "options": "App Release", + "read_only": 1 + }, + { + "fieldname": "column_break_azgf", + "fieldtype": "Column Break" + }, + { + "fieldname": "pullable_hash", + "fieldtype": "Data", + "label": "Pullable Hash", + "read_only": 1 + }, + { + "default": "0", + "description": "Auto set if app is present in cache and deploy candidate can use app cache.", + "fieldname": "use_cached", + "fieldtype": "Check", + "label": "Use Cached", + "read_only": 1 + } + ], + "istable": 1, + "links": [], + "modified": "2024-01-30 12:41:26.703875", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Deploy Candidate App", + "owner": "Administrator", + "permissions": [], + "quick_entry": 1, + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/deploy_candidate_app/deploy_candidate_app.py b/jcloud/jcloud/pagetype/deploy_candidate_app/deploy_candidate_app.py 
new file mode 100644 index 0000000..3577fd3 --- /dev/null +++ b/jcloud/jcloud/pagetype/deploy_candidate_app/deploy_candidate_app.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +# import jingrow +from jingrow.model.document import Document + + +class DeployCandidateApp(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + app: DF.Link + app_name: DF.Data | None + hash: DF.Data + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + pullable_hash: DF.Data | None + pullable_release: DF.Link | None + release: DF.Link + source: DF.Link + title: DF.Data + use_cached: DF.Check + # end: auto-generated types + + dashboard_fields = ["app"] diff --git a/jcloud/jcloud/pagetype/deploy_candidate_app/patches/set_app_name_to_app.py b/jcloud/jcloud/pagetype/deploy_candidate_app/patches/set_app_name_to_app.py new file mode 100644 index 0000000..ff00cab --- /dev/null +++ b/jcloud/jcloud/pagetype/deploy_candidate_app/patches/set_app_name_to_app.py @@ -0,0 +1,13 @@ +# Copyright (c) 2023, JINGROW +# For license information, please see license.txt +import jingrow + + +def execute(): + jingrow.reload_pg("jcloud", "pagetype", "deploy_candidate_app") + jingrow.db.sql( + """ + UPDATE `tabDeploy Candidate App` + SET app_name = app + """ + ) diff --git a/jcloud/jcloud/pagetype/deploy_candidate_build_step/__init__.py b/jcloud/jcloud/pagetype/deploy_candidate_build_step/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/deploy_candidate_build_step/deploy_candidate_build_step.json b/jcloud/jcloud/pagetype/deploy_candidate_build_step/deploy_candidate_build_step.json new file mode 100644 index 0000000..ae3fc4e --- /dev/null +++ b/jcloud/jcloud/pagetype/deploy_candidate_build_step/deploy_candidate_build_step.json @@ -0,0 +1,137 @@ +{ + "actions": [], + "creation": "2020-11-26 10:03:32.946618", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "stage", + "stage_slug", + "step", + "step_slug", + "column_break_3", + "status", + "duration", + "cached", + "step_index", + "hash", + "section_break_7", + "command", + "output", + "lines" + ], + "fields": [ + { + "fieldname": "stage", + "fieldtype": "Data", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Stage", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "column_break_3", + "fieldtype": "Column Break" + }, + { + "default": "Pending", + "fieldname": "status", + "fieldtype": "Select", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Status", + "options": "Pending\nRunning\nSuccess\nFailure", + "read_only": 1, + "reqd": 1 + }, + { + "default": "0", + "fieldname": "cached", + "fieldtype": "Check", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Cached", + "read_only": 1 + }, + { + "fieldname": "duration", + "fieldtype": "Float", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Duration", + "read_only": 1 + }, + { + "fieldname": "section_break_7", + "fieldtype": "Section Break" + }, + { + "fieldname": "command", + "fieldtype": "Code", + "label": "Command", + "read_only": 1 + }, + { + "fieldname": "output", + "fieldtype": "Code", + "label": "Output", + "read_only": 1 + }, + { + "fieldname": "stage_slug", + "fieldtype": "Data", + "label": "Stage Slug", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": 
"step", + "fieldtype": "Data", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Step", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "step_slug", + "fieldtype": "Data", + "label": "Step Slug", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "step_index", + "fieldtype": "Int", + "label": "Step Index", + "read_only": 1 + }, + { + "default": "[]", + "fieldname": "lines", + "fieldtype": "Code", + "hidden": 1, + "label": "Lines" + }, + { + "fieldname": "hash", + "fieldtype": "Data", + "label": "Hash", + "read_only": 1 + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2021-06-07 11:34:27.499558", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Deploy Candidate Build Step", + "owner": "Administrator", + "permissions": [], + "quick_entry": 1, + "sort_field": "modified", + "sort_order": "DESC", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/deploy_candidate_build_step/deploy_candidate_build_step.py b/jcloud/jcloud/pagetype/deploy_candidate_build_step/deploy_candidate_build_step.py new file mode 100644 index 0000000..06c3b9c --- /dev/null +++ b/jcloud/jcloud/pagetype/deploy_candidate_build_step/deploy_candidate_build_step.py @@ -0,0 +1,42 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +import jingrow +from jingrow.model.document import Document + + +class DeployCandidateBuildStep(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + cached: DF.Check + command: DF.Code | None + duration: DF.Float + hash: DF.Data | None + lines: DF.Code | None + output: DF.Code | None + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + stage: DF.Data + stage_slug: DF.Data + status: DF.Literal["Pending", "Running", "Success", "Failure"] + step: DF.Data + step_index: DF.Int + step_slug: DF.Data + # end: auto-generated types + + pass + + +def on_pagetype_update(): + jingrow.db.add_index("Deploy Candidate Build Step", ["stage_slug", "step_slug"]) + jingrow.db.add_index("Deploy Candidate Build Step", ["creation"]) + jingrow.db.add_index("Deploy Candidate Build Step", ["cached"]) diff --git a/jcloud/jcloud/pagetype/deploy_candidate_dependency/__init__.py b/jcloud/jcloud/pagetype/deploy_candidate_dependency/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/deploy_candidate_dependency/deploy_candidate_dependency.json b/jcloud/jcloud/pagetype/deploy_candidate_dependency/deploy_candidate_dependency.json new file mode 100644 index 0000000..2355a0b --- /dev/null +++ b/jcloud/jcloud/pagetype/deploy_candidate_dependency/deploy_candidate_dependency.json @@ -0,0 +1,38 @@ +{ + "actions": [], + "creation": "2021-05-18 18:28:47.923629", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "dependency", + "version" + ], + "fields": [ + { + "fieldname": "dependency", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Dependency", + "reqd": 1 + }, + { + "fieldname": "version", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Version", + "reqd": 1 + } + ], + "istable": 1, + "links": [], + "modified": "2021-05-18 18:28:47.923629", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Deploy Candidate Dependency", + "owner": "Administrator", + "permissions": [], + "sort_field": 
"modified", + "sort_order": "DESC", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/deploy_candidate_dependency/deploy_candidate_dependency.py b/jcloud/jcloud/pagetype/deploy_candidate_dependency/deploy_candidate_dependency.py new file mode 100644 index 0000000..2f8a1ed --- /dev/null +++ b/jcloud/jcloud/pagetype/deploy_candidate_dependency/deploy_candidate_dependency.py @@ -0,0 +1,24 @@ +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class DeployCandidateDependency(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + dependency: DF.Data + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + version: DF.Data + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/deploy_candidate_difference/__init__.py b/jcloud/jcloud/pagetype/deploy_candidate_difference/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/deploy_candidate_difference/deploy_candidate_difference.js b/jcloud/jcloud/pagetype/deploy_candidate_difference/deploy_candidate_difference.js new file mode 100644 index 0000000..06d1c6f --- /dev/null +++ b/jcloud/jcloud/pagetype/deploy_candidate_difference/deploy_candidate_difference.js @@ -0,0 +1,24 @@ +// Copyright (c) 2020, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Deploy Candidate Difference', { + onload: function (frm) { + frm.set_query('source', function () { + return { + filters: { + group: frm.pg.group, + }, + }; + }); + frm.set_query('destination', function () { + return { + filters: { + group: frm.pg.group, + }, + }; + }); + }, + // refresh: function(frm) { + + // } +}); diff --git a/jcloud/jcloud/pagetype/deploy_candidate_difference/deploy_candidate_difference.json b/jcloud/jcloud/pagetype/deploy_candidate_difference/deploy_candidate_difference.json new file mode 100644 index 0000000..1365fd4 --- /dev/null +++ b/jcloud/jcloud/pagetype/deploy_candidate_difference/deploy_candidate_difference.json @@ -0,0 +1,120 @@ +{ + "actions": [], + "creation": "2020-04-06 13:24:05.363904", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "group", + "team", + "deploy_type", + "column_break_3", + "source", + "destination", + "section_break_6", + "apps" + ], + "fields": [ + { + "fetch_from": "source.group", + "fieldname": "group", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Release Group", + "options": "Release Group", + "reqd": 1, + "set_only_once": 1 + }, + { + "fieldname": "source", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Source Candidate", + "options": "Deploy Candidate", + "reqd": 1, + "set_only_once": 1 + }, + { + "fieldname": "destination", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Destination Candidate", + "options": "Deploy Candidate", + "reqd": 1, + "set_only_once": 1 + }, + { + "default": "Pull", + "fieldname": "deploy_type", + "fieldtype": "Select", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Deploy Type", + "options": "Pull\nMigrate", + "set_only_once": 1 + }, + { + "fieldname": "column_break_3", + "fieldtype": "Column Break" + }, + { + "fieldname": "section_break_6", + "fieldtype": "Section Break" + }, 
+ { + "fieldname": "apps", + "fieldtype": "Table", + "label": "Apps", + "options": "Deploy Candidate Difference App", + "read_only": 1 + }, + { + "fetch_from": "group.team", + "fieldname": "team", + "fieldtype": "Link", + "label": "Team", + "options": "Team", + "read_only": 1, + "reqd": 1 + } + ], + "links": [], + "modified": "2021-02-15 11:25:01.985359", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Deploy Candidate Difference", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + }, + { + "create": 1, + "read": 1, + "role": "Jcloud Admin", + "write": 1 + }, + { + "create": 1, + "read": 1, + "role": "Jcloud Member", + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "title_field": "group", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/deploy_candidate_difference/deploy_candidate_difference.py b/jcloud/jcloud/pagetype/deploy_candidate_difference/deploy_candidate_difference.py new file mode 100644 index 0000000..8e57c1c --- /dev/null +++ b/jcloud/jcloud/pagetype/deploy_candidate_difference/deploy_candidate_difference.py @@ -0,0 +1,118 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +import jingrow +from jingrow.core.utils import find +from jingrow.model.document import Document + +from jcloud.overrides import get_permission_query_conditions_for_pagetype + + +class DeployCandidateDifference(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + from jcloud.jcloud.pagetype.deploy_candidate_difference_app.deploy_candidate_difference_app import ( + DeployCandidateDifferenceApp, + ) + + apps: DF.Table[DeployCandidateDifferenceApp] + deploy_type: DF.Literal["Pull", "Migrate"] + destination: DF.Link + group: DF.Link + source: DF.Link + team: DF.Link + # end: auto-generated types + + def validate(self): + if self.source == self.destination: + jingrow.throw( + "Destination Candidate must be different from Source Candidate", + jingrow.ValidationError, + ) + + source_creation = jingrow.db.get_value("Deploy Candidate", self.source, "creation") + destination_creation = jingrow.db.get_value( + "Deploy Candidate", self.destination, "creation" + ) + if source_creation > destination_creation: + jingrow.throw( + "Destination Candidate must be created after Source Candidate", + jingrow.ValidationError, + ) + + if jingrow.get_all( + "Deploy Candidate Difference", + filters={ + "group": self.group, + "source": self.source, + "destination": self.destination, + "name": ("!=", self.name), + }, + ): + jingrow.throw( + "Deploy Candidate Difference already exists for Release Group: {} " + ", Source Release: {} and Destination Release: {}".format( + self.group, self.source, self.destination + ), + jingrow.ValidationError, + ) + + self.populate_apps_table() + + def populate_apps_table(self): + source_candidate = jingrow.get_pg("Deploy Candidate", self.source) + destination_candidate = jingrow.get_pg("Deploy Candidate", self.destination) + for destination in destination_candidate.apps: + source = find(source_candidate.apps, lambda x: x.app == destination.app) + if not source or source.release == destination.release: + continue + differences = jingrow.get_all( + "App 
Release Difference", + ["name"], + {"source_release": source.release, "destination_release": destination.release}, + limit=1, + ) + if not differences: + difference = jingrow.get_pg( + { + "pagetype": "App Release Difference", + "app": destination.app, + "source": destination.source, + "source_release": source.release, + "destination_release": destination.release, + } + ) + difference.insert() + difference.set_deploy_type() + else: + difference = jingrow.get_pg( + "App Release Difference", differences[0].name, for_update=True + ) + difference.reload() + difference.set_deploy_type() + self.append( + "apps", + { + "app": destination.app, + "destination_release": destination.release, + "source_release": source.release, + "difference": difference.name, + "deploy_type": difference.deploy_type, + }, + ) + + if difference.deploy_type == "Migrate": + self.deploy_type = "Migrate" + + +get_permission_query_conditions = get_permission_query_conditions_for_pagetype( + "Deploy Candidate Difference" +) diff --git a/jcloud/jcloud/pagetype/deploy_candidate_difference/test_deploy_candidate_difference.py b/jcloud/jcloud/pagetype/deploy_candidate_difference/test_deploy_candidate_difference.py new file mode 100644 index 0000000..a99aa54 --- /dev/null +++ b/jcloud/jcloud/pagetype/deploy_candidate_difference/test_deploy_candidate_difference.py @@ -0,0 +1,19 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# See license.txt + + +# import jingrow +import unittest +from unittest.mock import Mock, patch + +from jcloud.jcloud.pagetype.deploy.deploy import create_deploy_candidate_differences + + +@patch("jcloud.jcloud.pagetype.deploy.deploy.jingrow.db.commit", new=Mock()) +def create_test_deploy_candidate_differences(*args, **kwargs): + return create_deploy_candidate_differences(*args, **kwargs) + + +class TestDeployCandidateDifference(unittest.TestCase): + pass diff --git a/jcloud/jcloud/pagetype/deploy_candidate_difference_app/__init__.py b/jcloud/jcloud/pagetype/deploy_candidate_difference_app/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/deploy_candidate_difference_app/deploy_candidate_difference_app.json b/jcloud/jcloud/pagetype/deploy_candidate_difference_app/deploy_candidate_difference_app.json new file mode 100644 index 0000000..3a84b6b --- /dev/null +++ b/jcloud/jcloud/pagetype/deploy_candidate_difference_app/deploy_candidate_difference_app.json @@ -0,0 +1,75 @@ +{ + "actions": [], + "creation": "2020-04-06 13:23:22.345482", + "pagetype": "PageType", + "engine": "InnoDB", + "field_order": [ + "app", + "deploy_type", + "difference", + "column_break_3", + "source_release", + "destination_release" + ], + "fields": [ + { + "default": "Pull", + "fieldname": "deploy_type", + "fieldtype": "Select", + "in_list_view": 1, + "label": "Deploy Type", + "options": "Pull\nMigrate", + "read_only": 1 + }, + { + "fieldname": "column_break_3", + "fieldtype": "Column Break" + }, + { + "fieldname": "source_release", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Source Release", + "options": "App Release", + "read_only": 1 + }, + { + "fieldname": "destination_release", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Destination Release", + "options": "App Release", + "read_only": 1 + }, + { + "fieldname": "difference", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Difference", + "options": "App Release Difference", + "read_only": 1 + }, + { + "fieldname": "app", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": 
"App", + "options": "App", + "read_only": 1, + "reqd": 1 + } + ], + "istable": 1, + "links": [], + "modified": "2020-12-15 22:42:32.347582", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Deploy Candidate Difference App", + "owner": "Administrator", + "permissions": [], + "quick_entry": 1, + "sort_field": "modified", + "sort_order": "DESC", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/deploy_candidate_difference_app/deploy_candidate_difference_app.py b/jcloud/jcloud/pagetype/deploy_candidate_difference_app/deploy_candidate_difference_app.py new file mode 100644 index 0000000..6c0484b --- /dev/null +++ b/jcloud/jcloud/pagetype/deploy_candidate_difference_app/deploy_candidate_difference_app.py @@ -0,0 +1,40 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +import jingrow +from jingrow.model.document import Document + + +class DeployCandidateDifferenceApp(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + app: DF.Link + deploy_type: DF.Literal["Pull", "Migrate"] + destination_release: DF.Link | None + difference: DF.Link | None + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + source_release: DF.Link | None + # end: auto-generated types + + dashboard_fields = ["difference", "app"] + + def get_list_query(query): + apps = query.run(as_dict=True) + for app in apps: + app["source_tag"] = jingrow.db.get_value( + "App Tag", {"hash": app["source_hash"]}, "tag" + ) + app["destination_tag"] = jingrow.db.get_value( + "App Tag", {"hash": app["destination_hash"]}, "tag" + ) + return apps diff --git a/jcloud/jcloud/pagetype/deploy_candidate_package/__init__.py b/jcloud/jcloud/pagetype/deploy_candidate_package/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/deploy_candidate_package/deploy_candidate_package.json b/jcloud/jcloud/pagetype/deploy_candidate_package/deploy_candidate_package.json new file mode 100644 index 0000000..77518b5 --- /dev/null +++ b/jcloud/jcloud/pagetype/deploy_candidate_package/deploy_candidate_package.json @@ -0,0 +1,59 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2023-05-12 15:27:48.023378", + "default_view": "List", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "package_manager", + "package_prerequisites", + "column_break_jhn9", + "package", + "after_install" + ], + "fields": [ + { + "default": "apt", + "fieldname": "package_manager", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Package Manager", + "reqd": 1 + }, + { + "fieldname": "package", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Package", + "reqd": 1 + }, + { + "fieldname": "column_break_jhn9", + "fieldtype": "Column Break" + }, + { + "fieldname": "package_prerequisites", + "fieldtype": "Text", + "label": "Package Prerequisites" + }, + { + "fieldname": "after_install", + "fieldtype": "Text", + "label": "After Install" + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2023-12-14 16:22:57.324933", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Deploy Candidate Package", + "owner": "Administrator", + "permissions": [], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git 
a/jcloud/jcloud/pagetype/deploy_candidate_package/deploy_candidate_package.py b/jcloud/jcloud/pagetype/deploy_candidate_package/deploy_candidate_package.py new file mode 100644 index 0000000..95a5798 --- /dev/null +++ b/jcloud/jcloud/pagetype/deploy_candidate_package/deploy_candidate_package.py @@ -0,0 +1,26 @@ +# Copyright (c) 2023, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class DeployCandidatePackage(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + after_install: DF.Text | None + package: DF.Data + package_manager: DF.Data + package_prerequisites: DF.Text | None + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/deploy_candidate_variable/__init__.py b/jcloud/jcloud/pagetype/deploy_candidate_variable/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/deploy_candidate_variable/deploy_candidate_variable.json b/jcloud/jcloud/pagetype/deploy_candidate_variable/deploy_candidate_variable.json new file mode 100644 index 0000000..88e53ac --- /dev/null +++ b/jcloud/jcloud/pagetype/deploy_candidate_variable/deploy_candidate_variable.json @@ -0,0 +1,41 @@ +{ + "actions": [], + "creation": "2023-06-13 16:16:51.962602", + "default_view": "List", + "pagetype": "PageType", + "engine": "InnoDB", + "field_order": [ + "key", + "value" + ], + "fields": [ + { + "fieldname": "key", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Key", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "value", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Value", + "reqd": 1 + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2023-06-13 19:07:58.471385", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Deploy Candidate Variable", + "owner": "Administrator", + "permissions": [], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/deploy_candidate_variable/deploy_candidate_variable.py b/jcloud/jcloud/pagetype/deploy_candidate_variable/deploy_candidate_variable.py new file mode 100644 index 0000000..95cd5c8 --- /dev/null +++ b/jcloud/jcloud/pagetype/deploy_candidate_variable/deploy_candidate_variable.py @@ -0,0 +1,24 @@ +# Copyright (c) 2023, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class DeployCandidateVariable(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + key: DF.Data + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + value: DF.Data + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/drip_email/__init__.py b/jcloud/jcloud/pagetype/drip_email/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/drip_email/drip_email.js b/jcloud/jcloud/pagetype/drip_email/drip_email.js new file mode 100644 index 0000000..7143fea --- /dev/null +++ b/jcloud/jcloud/pagetype/drip_email/drip_email.js @@ -0,0 +1,6 @@ +// Copyright (c) 2016, Web Notes and contributors +// For license information, please see license.txt + +jingrow.ui.form.on('Drip Email', { + refresh: function (frm) {}, +}); diff --git a/jcloud/jcloud/pagetype/drip_email/drip_email.json b/jcloud/jcloud/pagetype/drip_email/drip_email.json new file mode 100644 index 0000000..0c133e9 --- /dev/null +++ b/jcloud/jcloud/pagetype/drip_email/drip_email.json @@ -0,0 +1,227 @@ +{ + "actions": [], + "allow_import": 1, + "allow_rename": 1, + "autoname": "drip_email.####", + "creation": "2016-02-29 04:37:05.108042", + "pagetype": "PageType", + "document_type": "Setup", + "engine": "InnoDB", + "field_order": [ + "enabled", + "email_type", + "saas_app", + "subject", + "column_break_7", + "send_by_consultant", + "sender_name", + "sender", + "reply_to", + "pre_header", + "section_break_ehlw", + "condition", + "column_break_jext", + "html_pzsv", + "section_break_9", + "content_type", + "message_html", + "message_markdown", + "message_rich_text", + "section_break_4", + "skip_sites_with_paid_plan", + "send_after", + "send_after_payment", + "column_break_2", + "section_break_25", + "module_setup_guide" + ], + "fields": [ + { + "default": "0", + "fieldname": "enabled", + "fieldtype": "Check", + "label": "Enabled" + }, + { + "default": "Drip", + "fieldname": "email_type", + "fieldtype": "Select", + "label": "Email Type", + "options": "Drip\nSign Up\nSubscription Activation\nWhitepaper Feedback\nOnboarding" + }, + { + "fieldname": "subject", + "fieldtype": "Small Text", + "in_list_view": 1, + "label": "Subject", + "reqd": 1 + }, + { + "fieldname": "column_break_7", + "fieldtype": "Column Break" + }, + { + "default": "0", + "fieldname": "send_by_consultant", + "fieldtype": "Check", + "label": "Send By Consultant" + }, + { + "fieldname": "sender_name", + "fieldtype": "Data", + "label": "Sender Name", + "reqd": 1 + }, + { + "fieldname": "sender", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Sender", + "options": "Email", + "reqd": 1 + }, + { + "fieldname": "reply_to", + "fieldtype": "Data", + "label": "Reply To", + "options": "Email" + }, + { + "fieldname": "section_break_9", + "fieldtype": "Section Break", + "label": "Content" + }, + { + "fieldname": "section_break_4", + "fieldtype": "Section Break" + }, + { + "fieldname": "send_after", + "fieldtype": "Int", + "in_list_view": 1, + "label": "Send After (Days)", + "reqd": 1 + }, + { + "default": "0", + "fieldname": "send_after_payment", + "fieldtype": "Check", + "label": "Send After Payment" + }, + { + "default": "(1-7)", + "fieldname": "column_break_2", + "fieldtype": "Column Break" + }, + { + "fieldname": "pre_header", + "fieldtype": "Data", + "label": "Pre Header" + }, + { + "fieldname": "section_break_25", + "fieldtype": "Section Break" + }, + { + "fieldname": "module_setup_guide", + "fieldtype": "Table", + "label": "Module Setup Guide", + "options": "Module Setup Guide" + }, + { + "fieldname": 
"saas_app", + "fieldtype": "Link", + "label": "Saas App", + "options": "Marketplace App" + }, + { + "default": "0", + "fieldname": "skip_sites_with_paid_plan", + "fieldtype": "Check", + "label": "Skip Sites With Paid Plan" + }, + { + "fieldname": "condition", + "fieldtype": "Code", + "label": "Condition" + }, + { + "fieldname": "html_pzsv", + "fieldtype": "HTML", + "options": "
Condition Examples:\npg.status==\"Open\"\naccount_request.country==\"Spain\"\npg.total > 40000\n\n
App pg is available as app, Account Request as account_request and the current pg as just pg" + }, + { + "fieldname": "section_break_ehlw", + "fieldtype": "Section Break" + }, + { + "fieldname": "column_break_jext", + "fieldtype": "Column Break" + }, + { + "fieldname": "content_type", + "fieldtype": "Select", + "label": "Content Type", + "options": "Rich Text\nMarkdown\nHTML" + }, + { + "depends_on": "eval: pg.content_type === 'Markdown'", + "fieldname": "message_markdown", + "fieldtype": "Markdown Editor", + "in_list_view": 1, + "label": "Message (Markdown)" + }, + { + "depends_on": "eval: pg.content_type === 'Rich Text'", + "fieldname": "message_rich_text", + "fieldtype": "Text Editor", + "in_list_view": 1, + "label": "Message (Rich Text)" + }, + { + "depends_on": "eval: pg.content_type === 'HTML'", + "fieldname": "message_html", + "fieldtype": "HTML Editor", + "in_list_view": 1, + "label": "Message (HTML)" + } + ], + "icon": "icon-envelope", + "links": [], + "modified": "2024-11-25 09:50:47.777689", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Drip Email", + "naming_rule": "Expression (old style)", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + }, + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "Consultant", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "ASC", + "states": [], + "title_field": "subject" +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/drip_email/drip_email.py b/jcloud/jcloud/pagetype/drip_email/drip_email.py new file mode 100644 index 0000000..5d7e9bc --- /dev/null +++ b/jcloud/jcloud/pagetype/drip_email/drip_email.py @@ -0,0 +1,254 @@ +# Copyright (c) 2015, Web Notes and contributors +# For license information, please see license.txt + +from __future__ import annotations + +from datetime import timedelta + +import jingrow +import rq +import rq.exceptions +import rq.timeouts +from jingrow.model.document import Document +from jingrow.utils.make_random import get_random + +from jcloud.utils import log_error + + +class DripEmail(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + from jcloud.jcloud.pagetype.module_setup_guide.module_setup_guide import ModuleSetupGuide + + condition: DF.Code | None + content_type: DF.Literal["Rich Text", "Markdown", "HTML"] + email_type: DF.Literal[ + "Drip", "Sign Up", "Subscription Activation", "Whitepaper Feedback", "Onboarding" + ] + enabled: DF.Check + message_html: DF.HTMLEditor | None + message_markdown: DF.MarkdownEditor | None + message_rich_text: DF.TextEditor | None + module_setup_guide: DF.Table[ModuleSetupGuide] + pre_header: DF.Data | None + reply_to: DF.Data | None + saas_app: DF.Link | None + send_after: DF.Int + send_after_payment: DF.Check + send_by_consultant: DF.Check + sender: DF.Data + sender_name: DF.Data + skip_sites_with_paid_plan: DF.Check + subject: DF.SmallText + # end: auto-generated types + + def send(self, site_name=None): + if self.evaluate_condition(site_name) and self.email_type in ["Drip", "Sign Up"] and site_name: + self.send_drip_email(site_name) + + def send_drip_email(self, site_name): + site = jingrow.get_pg("Site", site_name) + if self.email_type == "Drip" and site.status in ["Pending", "Broken"]: + return + + if not self.send_after_payment and site.has_paid: + return + + account_request = jingrow.get_pg("Account Request", site.account_request) + + if self.send_by_consultant: + consultant = self.select_consultant(site) + else: + consultant = "" + + self.send_mail( + context=dict( + full_name=account_request.full_name, + email=account_request.email, + domain=site.name, + consultant=consultant, + site=site, + account_request=account_request, + ), + recipient=account_request.email, + ) + + def send_mail(self, context, recipient): + # build the message + message = jingrow.render_template(self.message, context) + title = jingrow.db.get_value("Marketplace App", self.saas_app, "title") + + # add to queue + jingrow.sendmail( + subject=self.subject, + recipients=[recipient], + sender=f"{self.sender_name} <{self.sender}>", + reply_to=self.reply_to, + reference_pagetype="Drip Email", + reference_name=self.name, + unsubscribe_message="Don't send me help messages", + attachments=self.get_setup_guides(context.get("account_request", "")), + template="drip_email", + args={"message": message, "title": title}, + ) + + @property + def message(self): + if self.content_type == "Markdown": + return jingrow.utils.md_to_html(self.message_markdown) + if self.content_type == "Rich Text": + return self.message_rich_text + return self.message_html + + def evaluate_condition(self, site_name: str) -> bool: + """ + Evaluate the condition to check if the email should be sent. + """ + if not self.condition: + return True + + saas_app = jingrow.get_pg("Marketplace App", self.saas_app) + site_account_request = jingrow.db.get_value("Site", site_name, "account_request") + account_request = jingrow.get_pg("Account Request", site_account_request) + + eval_locals = dict( + app=saas_app, + pg=self, + account_request=account_request, + ) + + return jingrow.safe_eval(self.condition, None, eval_locals) + + def select_consultant(self, site) -> str: + """ + Select random JERP Consultant to send email. + + Also set sender details. 
+ """ + if not site.jerp_consultant: + # set a random consultant for the site for the first time + site.jerp_consultant = get_random("JERP Consultant", dict(active=1)) + jingrow.db.set_value("Site", site.name, "jerp_consultant", site.jerp_consultant) + + consultant = jingrow.get_pg("JERP Consultant", site.jerp_consultant) + self.sender = consultant.name + self.sender_name = consultant.full_name + return consultant + + def get_setup_guides(self, account_request) -> list[dict[str, str]]: + if not account_request: + return [] + + attachments = [] + for guide in self.module_setup_guide: + if account_request.industry == guide.industry: + attachments.append( + jingrow.db.get_value("File", {"file_url": guide.setup_guide}, ["name as fid"], as_dict=1) + ) + + return attachments + + @property + def sites_to_send_drip(self): + signup_date = jingrow.utils.getdate() - timedelta(days=self.send_after) + + conditions = "" + + if self.saas_app: + conditions += f'AND site.standby_for = "{self.saas_app}"' + + if self.skip_sites_with_paid_plan: + paid_site_plans = jingrow.get_all( + "Site Plan", {"enabled": True, "is_trial_plan": False, "document_type": "Site"}, pluck="name" + ) + + if paid_site_plans: + paid_site_plans_str = ", ".join(f"'{plan}'" for plan in paid_site_plans) + conditions += f" AND site.plan NOT IN ({paid_site_plans_str})" + + sites = jingrow.db.sql( + f""" + SELECT + site.name + FROM + tabSite site + JOIN + `tabAccount Request` account_request + ON + site.account_request = account_request.name + WHERE + site.status = "Active" AND + DATE(account_request.creation) = "{signup_date}" + {conditions} + """ + ) + return [t[0] for t in sites] # site names + + def send_to_sites(self): + sites = self.sites_to_send_drip + for site in sites: + try: + # TODO: only send `Onboarding` mails to partners <19-04-21, Balamurali M> # + self.send(site) + jingrow.db.commit() + except rq.timeouts.JobTimeoutException: + log_error( + "Drip Email Timeout", + drip_email=self.name, + site=site, + total_sites=len(self.sites), + ) + jingrow.db.rollback() + return + except Exception: + jingrow.db.rollback() + log_error("Drip Email Error", drip_email=self.name, site=site) + + +def send_drip_emails(): + """Send out enabled drip emails.""" + drip_emails = jingrow.db.get_all( + "Drip Email", {"enabled": 1, "email_type": ("in", ("Drip", "Onboarding"))} + ) + for drip_email_name in drip_emails: + jingrow.enqueue_pg( + "Drip Email", + drip_email_name, + "send_to_sites", + queue="long", + job_id=f"drip_email_send_to_sites:{drip_email_name}", + deduplicate=True, + ) + + +def send_welcome_email(): + """Send welcome email to sites created in last 15 minutes.""" + welcome_drips = jingrow.db.get_all("Drip Email", {"email_type": "Sign Up", "enabled": 1}, pluck="name") + for drip in welcome_drips: + welcome_email = jingrow.get_pg("Drip Email", drip) + _15_mins_ago = jingrow.utils.add_to_date(None, minutes=-15) + tuples = jingrow.db.sql( + f""" + SELECT + site.name + FROM + tabSite site + JOIN + `tabAccount Request` account_request + ON + site.account_request = account_request.name + WHERE + site.status = "Active" and + site.standby_for = "{welcome_email.saas_app}" and + account_request.creation > "{_15_mins_ago}" + """ + ) + sites_in_last_15_mins = [t[0] for t in tuples] + for site in sites_in_last_15_mins: + welcome_email.send(site) diff --git a/jcloud/jcloud/pagetype/drip_email/patches/set_correct_field_for_html.py b/jcloud/jcloud/pagetype/drip_email/patches/set_correct_field_for_html.py new file mode 100644 index 0000000..601f2be --- 
/dev/null +++ b/jcloud/jcloud/pagetype/drip_email/patches/set_correct_field_for_html.py @@ -0,0 +1,9 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt + +import jingrow + + +def execute(): + jingrow.reload_pagetype("Drip Email") + jingrow.db.sql("UPDATE `tabDrip Email` SET message_html = message, content_type = 'HTML'") diff --git a/jcloud/jcloud/pagetype/drip_email/templates/pre_header.html b/jcloud/jcloud/pagetype/drip_email/templates/pre_header.html new file mode 100644 index 0000000..30d7ab0 --- /dev/null +++ b/jcloud/jcloud/pagetype/drip_email/templates/pre_header.html @@ -0,0 +1,39 @@ + + {{ pre_header }} + + + +  ‌ ‌ ‌ ‌ +  ‌  ‌ ‌  + ‌ ‌ ‌ ‌  + ‌ ‌ ‌ ‌  + ‌ ‌ ‌ ‌  + ‌ ‌ ‌ ‌  + ‌ ‌ ‌ ‌  + ‌ ‌ ‌ ‌  + ‌ ‌ ‌ ‌  + ‌ ‌ ‌ ‌  + ‌ ‌ ‌ ‌  + ‌ ‌ ‌ ‌  + ‌ ‌ ‌ ‌  + ‌ ‌ ‌ ‌  + ‌ ‌ ‌ ‌  + \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/drip_email/test_drip_email.py b/jcloud/jcloud/pagetype/drip_email/test_drip_email.py new file mode 100644 index 0000000..6a270e9 --- /dev/null +++ b/jcloud/jcloud/pagetype/drip_email/test_drip_email.py @@ -0,0 +1,124 @@ +# Copyright (c) 2015, Web Notes and Contributors +# See license.txt + +from __future__ import annotations + +import unittest +from datetime import date, timedelta +from typing import TYPE_CHECKING + +import jingrow + +from jcloud.jcloud.pagetype.account_request.test_account_request import ( + create_test_account_request, +) +from jcloud.jcloud.pagetype.app.test_app import create_test_app +from jcloud.jcloud.pagetype.marketplace_app.test_marketplace_app import ( + create_test_marketplace_app, +) +from jcloud.jcloud.pagetype.site.test_site import create_test_site +from jcloud.jcloud.pagetype.site_plan_change.test_site_plan_change import create_test_plan + +if TYPE_CHECKING: + from jcloud.jcloud.pagetype.drip_email.drip_email import DripEmail + + +def create_test_drip_email( + send_after: int, saas_app: str | None = None, skip_sites_with_paid_plan: bool = False +) -> DripEmail: + drip_email = jingrow.get_pg( + { + "pagetype": "Drip Email", + "sender": "test@test.com", + "sender_name": "Test User", + "subject": "Drip Test", + "message": "Drip Top, Drop Top", + "send_after": send_after, + "saas_app": saas_app, + "skip_sites_with_paid_plan": skip_sites_with_paid_plan, + } + ).insert(ignore_if_duplicate=True) + drip_email.reload() + return drip_email + + +class TestDripEmail(unittest.TestCase): + def setUp(self) -> None: + self.trial_site_plan = create_test_plan("Site", is_trial_plan=True) + self.paid_site_plan = create_test_plan("Site", is_trial_plan=False) + + def tearDown(self): + jingrow.db.rollback() + + def test_correct_sites_are_selected_for_drip_email(self): + test_app = create_test_app() + test_marketplace_app = create_test_marketplace_app(test_app.name) + + drip_email = create_test_drip_email(0, saas_app=test_marketplace_app.name) + + site1 = create_test_site( + "site1", + standby_for=test_marketplace_app.name, + account_request=create_test_account_request( + "site1", saas=True, saas_app=test_marketplace_app.name + ).name, + ) + site1.save() + + site2 = create_test_site("site2", account_request=create_test_account_request("site2").name) + site2.save() + + create_test_site("site3") # Note: site is not created + + self.assertEqual(drip_email.sites_to_send_drip, [site1.name]) + + def test_older_site_isnt_selected(self): + drip_email = create_test_drip_email(0) + site = create_test_site("site1") + site.account_request = create_test_account_request("site1", creation=date.today() - timedelta(1)).name + 
site.save() + self.assertNotEqual(drip_email.sites_to_send_drip, [site.name]) + + def test_drip_emails_not_sent_to_sites_with_paid_plan_having_special_flag(self): + """ + If you enable `skip_sites_with_paid_plan` flag, drip emails should not be sent to sites with paid plan set + No matter whether they have paid for any invoice or not + """ + test_app = create_test_app() + test_marketplace_app = create_test_marketplace_app(test_app.name) + + drip_email = create_test_drip_email( + 0, saas_app=test_marketplace_app.name, skip_sites_with_paid_plan=True + ) + + site1 = create_test_site( + "site1", + standby_for=test_marketplace_app.name, + account_request=create_test_account_request( + "site1", saas=True, saas_app=test_marketplace_app.name + ).name, + plan=self.trial_site_plan.name, + ) + site1.save() + + site2 = create_test_site( + "site2", + standby_for=test_marketplace_app.name, + account_request=create_test_account_request( + "site2", saas=True, saas_app=test_marketplace_app.name + ).name, + plan=self.paid_site_plan.name, + ) + site2.save() + + site3 = create_test_site( + "site3", + standby_for=test_marketplace_app.name, + account_request=create_test_account_request( + "site3", saas=True, saas_app=test_marketplace_app.name + ).name, + plan=self.trial_site_plan.name, + ) + site3.save() + + self.assertEqual(drip_email.sites_to_send_drip, [site1.name, site3.name]) diff --git a/jcloud/jcloud/pagetype/github_webhook_log/__init__.py b/jcloud/jcloud/pagetype/github_webhook_log/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/github_webhook_log/github_webhook_log.js b/jcloud/jcloud/pagetype/github_webhook_log/github_webhook_log.js new file mode 100644 index 0000000..70c7ca9 --- /dev/null +++ b/jcloud/jcloud/pagetype/github_webhook_log/github_webhook_log.js @@ -0,0 +1,8 @@ +// Copyright (c) 2020, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('GitHub Webhook Log', { + // refresh: function(frm) { + + // } +}); diff --git a/jcloud/jcloud/pagetype/github_webhook_log/github_webhook_log.json b/jcloud/jcloud/pagetype/github_webhook_log/github_webhook_log.json new file mode 100644 index 0000000..0a5c0f1 --- /dev/null +++ b/jcloud/jcloud/pagetype/github_webhook_log/github_webhook_log.json @@ -0,0 +1,134 @@ +{ + "actions": [], + "autoname": "Prompt", + "creation": "2020-05-25 18:47:51.890465", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "event", + "column_break_2", + "signature", + "data_4", + "repository", + "repository_owner", + "column_break_7", + "git_reference_type", + "branch", + "tag", + "github_installation_id", + "data_10", + "payload" + ], + "fields": [ + { + "fieldname": "event", + "fieldtype": "Data", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Event", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "signature", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Signature", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "payload", + "fieldtype": "Code", + "label": "Payload", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "repository", + "fieldtype": "Data", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Repository", + "read_only": 1 + }, + { + "fieldname": "repository_owner", + "fieldtype": "Data", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Repository Owner", + "read_only": 1 + }, + { + "fieldname": "git_reference_type", + "fieldtype": "Select", + "label": "Git Reference Type", + "options": "tag\nbranch", 
+ "read_only": 1 + }, + { + "fieldname": "branch", + "fieldtype": "Data", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Branch", + "read_only": 1 + }, + { + "fieldname": "tag", + "fieldtype": "Data", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Tag", + "read_only": 1 + }, + { + "fieldname": "data_4", + "fieldtype": "Section Break" + }, + { + "fieldname": "data_10", + "fieldtype": "Section Break" + }, + { + "fieldname": "column_break_7", + "fieldtype": "Column Break" + }, + { + "fieldname": "column_break_2", + "fieldtype": "Column Break" + }, + { + "fieldname": "github_installation_id", + "fieldtype": "Data", + "label": "GitHub Installation ID", + "read_only": 1 + } + ], + "in_create": 1, + "links": [], + "modified": "2021-01-18 10:13:12.602898", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "GitHub Webhook Log", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/github_webhook_log/github_webhook_log.py b/jcloud/jcloud/pagetype/github_webhook_log/github_webhook_log.py new file mode 100644 index 0000000..7fcecb1 --- /dev/null +++ b/jcloud/jcloud/pagetype/github_webhook_log/github_webhook_log.py @@ -0,0 +1,248 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +import hashlib +import hmac +import json +from typing import TYPE_CHECKING, Optional + +import jingrow +from jingrow.model.document import Document +from jingrow.query_builder import Interval +from jingrow.query_builder.functions import Now + +from jcloud.utils import log_error + +if TYPE_CHECKING: + from jcloud.jcloud.pagetype.app_source.app_source import AppSource + + +class GitHubWebhookLog(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + branch: DF.Data | None + event: DF.Data + git_reference_type: DF.Literal["tag", "branch"] + github_installation_id: DF.Data | None + payload: DF.Code + repository: DF.Data | None + repository_owner: DF.Data | None + signature: DF.Data + tag: DF.Data | None + # end: auto-generated types + + def validate(self): + secret = jingrow.db.get_single_value("Jcloud Settings", "github_webhook_secret") + digest = hmac.HMAC(secret.encode(), self.payload.encode(), hashlib.sha1) + if not hmac.compare_digest(digest.hexdigest(), self.signature): + jingrow.throw("Invalid Signature") + + payload = self.get_parsed_payload() + self.github_installation_id = payload.get("installation", {}).get("id") + + repository_detail = get_repository_details_from_payload(payload) + self.repository = repository_detail["name"] + self.repository_owner = repository_detail["owner"] + + if self.event == "push": + ref_types = {"tags": "tag", "heads": "branch"} + self.git_reference_type = ref_types[payload.ref.split("/")[1]] + ref = payload.ref.split("/", 2)[2] + if self.git_reference_type == "tag": + self.tag = ref + elif self.git_reference_type == "branch": + self.branch = ref + elif self.event == "create": + self.git_reference_type = payload.ref_type + if self.git_reference_type == "tag": + self.tag = payload.ref + elif self.git_reference_type == "branch": + self.branch = payload.ref + + self.payload = json.dumps(payload, indent=4, sort_keys=True) + + def handle_events(self): + if self.event == "push": + self.handle_push_event() + elif self.event == "installation": + self.handle_installation_event() + elif self.event == "installation_repositories": + self.handle_repository_installation_event() + jingrow.db.commit() + + def handle_push_event(self): + payload = self.get_parsed_payload() + if self.git_reference_type == "branch": + self.create_app_releases(payload) + elif self.git_reference_type == "tag": + self.create_app_tag(payload) + + def handle_installation_event(self): + payload = self.get_parsed_payload() + action = payload.get("action") + if action == "created" or action == "unsuspend": + self.handle_installation_created(payload) + elif action == "deleted" or action == "suspend": + self.handle_installation_deletion(payload) + + def handle_repository_installation_event(self): + payload = self.get_parsed_payload() + if payload["action"] not in ["added", "removed"]: + return + owner = payload["installation"]["account"]["login"] + self.update_installation_ids(owner) + + for repo in payload.get("repositories_removed", []): + set_uninstalled(owner, repo["name"]) + + def handle_installation_created(self, payload): + owner = payload["installation"]["account"]["login"] + self.update_installation_ids(owner) + + def handle_installation_deletion(self, payload): + owner = payload["installation"]["account"]["login"] + repositories = payload.get("repositories", []) + + for repo in repositories: + set_uninstalled(owner, repo["name"]) + + if len(repositories) == 0: + # Set all sources as uninstalled + set_uninstalled(owner) + + def update_installation_ids(self, owner: str): + for name in get_sources(owner): + pg: "AppSource" = jingrow.get_pg("App Source", name) + if not self.should_update_app_source(pg): + continue + + self.update_app_source_installation_id(pg) + + def update_app_source_installation_id(self, pg: "AppSource"): + pg.github_installation_id = self.github_installation_id + """ + These two are assumptions, they will be resolved when + 
`pg.create_release` is called. + + It is not called here, because it requires polling GitHub + which if the repository owner has several apps gets us + rate limited. + """ + pg.uninstalled = False + pg.last_github_poll_failed = False + pg.db_update() + + def should_update_app_source(self, pg: "AppSource"): + if pg.uninstalled or pg.last_github_poll_failed: + return True + + return pg.github_installation_id != self.github_installation_id + + def get_parsed_payload(self): + return jingrow.parse_json(self.payload) + + def create_app_releases(self, payload): + sources = jingrow.db.get_all( + "App Source", + filters={ + "branch": self.branch, + "repository": self.repository, + "repository_owner": self.repository_owner, + "enabled": 1, + }, + fields=["name", "app"], + ) + + commit = payload.get("head_commit", {}) + if len(sources) == 0 or not commit or not commit.get("id"): + return + + for source in sources: + try: + create_app_release(source.name, source.app, commit) + except Exception: + log_error("App Release Creation Error", payload=payload, pg=self) + + def create_app_tag(self, payload): + commit = payload.get("head_commit", {}) + if not commit or not commit.get("id"): + return + + tag = jingrow.get_pg( + { + "pagetype": "App Tag", + "tag": self.tag, + "hash": commit.get("id"), + "timestamp": commit.get("timestamp"), + "repository": self.repository, + "repository_owner": self.repository_owner, + "github_installation_id": self.github_installation_id, + } + ) + + try: + tag.insert() + except Exception: + log_error("App Tag Creation Error", payload=payload, pg=self) + + @staticmethod + def clear_old_logs(days=30): + table = jingrow.qb.PageType("GitHub Webhook Log") + jingrow.db.delete(table, filters=(table.creation < (Now() - Interval(days=days)))) + + +def set_uninstalled(owner: str, repository: Optional[str] = None): + for name in get_sources(owner, repository): + jingrow.db.set_value("App Source", name, "uninstalled", True) + + +def get_sources(owner: str, repository: Optional[str] = None) -> "list[str]": + filters = {"repository_owner": owner} + if repository: + filters["repository"] = repository + + return jingrow.db.get_all( + "App Source", + filters=filters, + pluck="name", + ) + + +def get_repository_details_from_payload(payload: dict): + r = payload.get("repository", {}) + repo = r.get("name") + owner = r.get("owner", {}).get("login") + + repos = payload.get("repositories_added", []) + if not repo and len(repos) == 1: + repo = repos[0].get("name") + + if not owner and repos: + owner = repos[0].get("full_name", "").split("/")[0] or None + + if not owner: + owner = payload.get("installation", {}).get("account", {}).get("login") + + return dict(name=repo, owner=owner) + + +def create_app_release(source: str, app: str, commit: dict): + release = jingrow.get_pg( + { + "pagetype": "App Release", + "app": app, + "source": source, + "hash": commit.get("id"), + "message": commit.get("message", "MESSAGE NOT FOUND"), + "author": commit.get("author", {}).get("name", "AUTHOR NOT FOUND"), + } + ) + release.insert(ignore_permissions=True) diff --git a/jcloud/jcloud/pagetype/github_webhook_log/test_github_webhook_log.py b/jcloud/jcloud/pagetype/github_webhook_log/test_github_webhook_log.py new file mode 100644 index 0000000..855da87 --- /dev/null +++ b/jcloud/jcloud/pagetype/github_webhook_log/test_github_webhook_log.py @@ -0,0 +1,9 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# See license.txt + +import unittest + + +class TestGitHubWebhookLog(unittest.TestCase): + pass diff --git 
a/jcloud/jcloud/pagetype/incident/__init__.py b/jcloud/jcloud/pagetype/incident/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/incident/incident.js b/jcloud/jcloud/pagetype/incident/incident.js new file mode 100644 index 0000000..18a0b7a --- /dev/null +++ b/jcloud/jcloud/pagetype/incident/incident.js @@ -0,0 +1,23 @@ +// Copyright (c) 2023, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Incident', { + refresh(frm) { + [[__('Ignore Incidents on Server'), 'ignore_for_server']].forEach( + ([label, method, condition]) => { + if (typeof condition === 'undefined' || condition) { + frm.add_custom_button( + label, + () => { + jingrow.confirm( + `Are you sure you want to ${label.toLowerCase()} this site?`, + () => frm.call(method).then((r) => frm.refresh()), + ); + }, + __('Actions'), + ); + } + }, + ); + }, +}); diff --git a/jcloud/jcloud/pagetype/incident/incident.json b/jcloud/jcloud/pagetype/incident/incident.json new file mode 100644 index 0000000..03c88f6 --- /dev/null +++ b/jcloud/jcloud/pagetype/incident/incident.json @@ -0,0 +1,235 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2023-10-16 18:45:05.744563", + "default_view": "List", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "alerts_tab", + "phone_call", + "alert", + "status", + "type", + "subtype", + "acknowledged_by", + "column_break_smnd", + "server", + "resource_type", + "resource", + "cluster", + "resolved_by", + "section_break_kjey", + "subject", + "description", + "preliminary_investigation_section", + "likely_cause", + "column_break_jrzi", + "suggestions", + "preventive_suggestions", + "section_break_bjjy", + "updates", + "updates_tab", + "alerts", + "section_break_aevb", + "column_break_rbwa", + "route", + "sms_sent", + "show_in_website" + ], + "fields": [ + { + "fieldname": "server", + "fieldtype": "Link", + "label": "Server", + "options": "Server" + }, + { + "fieldname": "cluster", + "fieldtype": "Link", + "label": "Cluster", + "options": "Cluster" + }, + { + "fieldname": "column_break_smnd", + "fieldtype": "Column Break" + }, + { + "default": "Validating", + "fieldname": "status", + "fieldtype": "Select", + "label": "Status", + "options": "Validating\nConfirmed\nAcknowledged\nInvestigating\nResolved\nAuto-Resolved\nJcloud-Resolved" + }, + { + "default": "Database Down", + "fieldname": "type", + "fieldtype": "Select", + "label": "Type", + "options": "Database Down\nServer Down\nProxy Down" + }, + { + "fieldname": "section_break_bjjy", + "fieldtype": "Section Break" + }, + { + "fieldname": "alerts", + "fieldtype": "Table", + "label": "Alerts", + "options": "Incident Alerts" + }, + { + "default": "0", + "fieldname": "sms_sent", + "fieldtype": "Check", + "label": "SMS Sent" + }, + { + "fieldname": "alerts_tab", + "fieldtype": "Tab Break", + "label": "Overview" + }, + { + "fieldname": "updates_tab", + "fieldtype": "Tab Break", + "label": "Alerts" + }, + { + "fieldname": "acknowledged_by", + "fieldtype": "Link", + "label": "Acknowledged By", + "mandatory_depends_on": "eval: pg.status==\"Acknowledged\";", + "options": "User" + }, + { + "fieldname": "column_break_rbwa", + "fieldtype": "Column Break" + }, + { + "fieldname": "section_break_aevb", + "fieldtype": "Section Break" + }, + { + "fieldname": "updates", + "fieldtype": "Table", + "label": "Updates", + "options": "Incident Updates" + }, + { + "fieldname": "route", + "fieldtype": "Data", + "label": "Route" + }, + { + "default": "0", + 
"fieldname": "show_in_website", + "fieldtype": "Check", + "label": "Show in Website" + }, + { + "fieldname": "section_break_kjey", + "fieldtype": "Section Break", + "label": "Details" + }, + { + "fieldname": "subject", + "fieldtype": "Data", + "label": "Subject" + }, + { + "fieldname": "description", + "fieldtype": "Text Editor", + "label": "Description" + }, + { + "fieldname": "resolved_by", + "fieldtype": "Link", + "label": "Resolved By", + "options": "User" + }, + { + "default": "1", + "fieldname": "phone_call", + "fieldtype": "Check", + "label": "Phone Call" + }, + { + "fieldname": "alert", + "fieldtype": "Link", + "label": "Alert", + "options": "Prometheus Alert Rule" + }, + { + "fieldname": "resource_type", + "fieldtype": "Link", + "label": "Resource Type", + "options": "PageType" + }, + { + "fieldname": "resource", + "fieldtype": "Dynamic Link", + "label": "Resource", + "options": "resource_type" + }, + { + "fieldname": "subtype", + "fieldtype": "Select", + "label": "Subtype", + "options": "High CPU: user\nHigh CPU: iowait\nDisk full" + }, + { + "fieldname": "preliminary_investigation_section", + "fieldtype": "Section Break", + "label": "Preliminary Investigation" + }, + { + "fieldname": "likely_cause", + "fieldtype": "Text", + "label": "Likely Causes" + }, + { + "fieldname": "column_break_jrzi", + "fieldtype": "Column Break" + }, + { + "fieldname": "suggestions", + "fieldtype": "Table", + "label": "Corrective Suggestions", + "options": "Incident Suggestion" + }, + { + "fieldname": "preventive_suggestions", + "fieldtype": "Table", + "label": "Preventive Suggestions", + "options": "Incident Suggestion" + } + ], + "has_web_view": 1, + "index_web_pages_for_search": 1, + "is_published_field": "show_in_website", + "links": [], + "modified": "2025-01-17 18:37:16.040363", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Incident", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "route": "incidents", + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/incident/incident.py b/jcloud/jcloud/pagetype/incident/incident.py new file mode 100644 index 0000000..3cb08f1 --- /dev/null +++ b/jcloud/jcloud/pagetype/incident/incident.py @@ -0,0 +1,677 @@ +# Copyright (c) 2023, JINGROW +# For license information, please see license.txt + +from __future__ import annotations + +from base64 import b64encode +from datetime import timedelta +from functools import cached_property +from typing import TYPE_CHECKING + +import jingrow +from jingrow.types.DF import Phone +from jingrow.utils import cint +from jingrow.utils.background_jobs import enqueue_pg +from jingrow.utils.synchronization import filelock +from jingrow.website.website_generator import WebsiteGenerator +from playwright.sync_api import Page, sync_playwright +from tenacity import RetryError, retry, stop_after_attempt, wait_fixed +from tenacity.retry import retry_if_not_result +from twilio.base.exceptions import TwilioRestException + +from jcloud.api.server import prometheus_query +from jcloud.telegram_utils import Telegram +from jcloud.utils import log_error + +if TYPE_CHECKING: + from twilio.rest.api.v2010.account.call import CallInstance + + from jcloud.jcloud.pagetype.incident_settings.incident_settings import IncidentSettings + from 
jcloud.jcloud.pagetype.incident_settings_self_hosted_user.incident_settings_self_hosted_user import ( + IncidentSettingsSelfHostedUser, + ) + from jcloud.jcloud.pagetype.incident_settings_user.incident_settings_user import ( + IncidentSettingsUser, + ) + from jcloud.jcloud.pagetype.monitor_server.monitor_server import MonitorServer + from jcloud.jcloud.pagetype.jcloud_settings.jcloud_settings import JcloudSettings + +INCIDENT_ALERT = "Sites Down" # TODO: make it a field or child table somewhere # +INCIDENT_SCOPE = ( + "server" # can be bench, cluster, server, etc. Not site, minor code changes required for that +) + +DAY_HOURS = range(9, 18) +CONFIRMATION_THRESHOLD_SECONDS_DAY = 5 * 60 # 5 minutes;time after which humans are called +CONFIRMATION_THRESHOLD_SECONDS_NIGHT = ( + 10 * 60 # 10 minutes; time after which humans are called +) +CALL_THRESHOLD_SECONDS_DAY = 0 # 0 minutes;time after which humans are called +CALL_THRESHOLD_SECONDS_NIGHT = ( + 15 * 60 # 15 minutes; time after confirmation after which humans are called +) +CALL_REPEAT_INTERVAL_DAY = 15 * 60 +CALL_REPEAT_INTERVAL_NIGHT = 20 * 60 +PAST_ALERT_COVER_MINUTES = 15 # to cover alerts that fired before/triggered the incident + + +class Incident(WebsiteGenerator): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + from jcloud.jcloud.pagetype.incident_alerts.incident_alerts import IncidentAlerts + from jcloud.jcloud.pagetype.incident_suggestion.incident_suggestion import IncidentSuggestion + from jcloud.jcloud.pagetype.incident_updates.incident_updates import IncidentUpdates + + acknowledged_by: DF.Link | None + alert: DF.Link | None + alerts: DF.Table[IncidentAlerts] + cluster: DF.Link | None + description: DF.TextEditor | None + likely_cause: DF.Text | None + phone_call: DF.Check + preventive_suggestions: DF.Table[IncidentSuggestion] + resolved_by: DF.Link | None + resource: DF.DynamicLink | None + resource_type: DF.Link | None + route: DF.Data | None + server: DF.Link | None + show_in_website: DF.Check + sms_sent: DF.Check + status: DF.Literal[ + "Validating", + "Confirmed", + "Acknowledged", + "Investigating", + "Resolved", + "Auto-Resolved", + "Jcloud-Resolved", + ] + subject: DF.Data | None + subtype: DF.Literal["High CPU: user", "High CPU: iowait", "Disk full"] + suggestions: DF.Table[IncidentSuggestion] + type: DF.Literal["Database Down", "Server Down", "Proxy Down"] + updates: DF.Table[IncidentUpdates] + # end: auto-generated types + + def validate(self): + if not hasattr(self, "phone_call") and self.global_phone_call_enabled: + self.phone_call = True + + @property + def global_phone_call_enabled(self) -> bool: + return bool(jingrow.db.get_single_value("Incident Settings", "phone_call_alerts", cache=True)) + + @property + def global_email_alerts_enabled(self) -> bool: + return bool(jingrow.db.get_single_value("Incident Settings", "email_alerts", cache=True)) + + def after_insert(self): + self.send_sms_via_twilio() + self.send_email_notification() + + def on_update(self): + if self.has_value_changed("status"): + self.send_email_notification() + + def vcpu(self, server_type, server_name): + vm_name = jingrow.db.get_value(server_type, server_name, "virtual_machine") + return int( + jingrow.db.get_value("Virtual Machine", vm_name, "vcpu") or 16 + ) # 16 as DO and scaleway servers have high CPU; Add a CPU field everywhere later + + @cached_property + def database_server(self): + return 
str(jingrow.db.get_value("Server", self.server, "database_server")) + + @cached_property + def proxy_server(self): + return str(jingrow.db.get_value("Server", self.server, "proxy_server")) + + def get_load(self, name) -> float: + timespan = get_confirmation_threshold_duration() + load = prometheus_query( + f"""avg_over_time(node_load5{{instance="{name}", job="node"}}[{timespan}s])""", + lambda x: x, + "Asia/Kolkata", + timespan, + timespan + 1, + )["datasets"] + if load == []: + ret = -1 # no response + else: + ret = load[0]["values"][-1] + self.add_description(f"{name} load avg(5m): {ret if ret != -1 else 'No data'}") + return ret + + def check_high_load(self, resource_type: str, resource: str): + load = self.get_load(resource) + if load < 0: # no response, likely down + return resource_type, resource + if load > 3 * self.vcpu(resource_type, resource): + return resource_type, resource + return False, False + + def identify_affected_resource(self): + """ + Identify the affected resource and set the resource field + """ + + for resource_type, resource in [ + ("Database Server", self.database_server), + ("Server", self.server), + ("Proxy Server", self.proxy_server), + ]: + if self.check_high_load(resource_type, resource) != (False, False): + self.resource_type = resource_type + self.resource = resource + return + + def confirm(self): + self.status = "Confirmed" + self.identify_affected_resource() # assume 1 resource; Occam's razor + self.identify_problem() + self.take_grafana_screenshots() + self.save() + + def get_cpu_state(self, resource: str): + timespan = get_confirmation_threshold_duration() + cpu_info = prometheus_query( + f"""avg by (mode)(rate(node_cpu_seconds_total{{instance="{resource}", job="node"}}[{timespan}s])) * 100""", + lambda x: x["mode"], + "Asia/Kolkata", + timespan, + timespan + 1, + )["datasets"] + mode_cpus = {x["name"]: x["values"][-1] for x in cpu_info} or { + "user": -1, + "idle": -1, + "softirq": -1, + "iowait": -1, + } # no info; + max_mode = max(mode_cpus, key=mode_cpus.get) + max_cpu = mode_cpus[max_mode] + self.add_description(f"CPU Usage: {max_mode} {max_cpu if max_cpu > 0 else 'No data'}") + return max_mode, mode_cpus[max_mode] + + def add_description(self, description): + if not self.description: + self.description = "" + self.description += "
<br>" + description + "<br>
" + + def add_corrective_suggestion(self, suggestion): + self.append( + "corrective_suggestions", + { + "suggestion": suggestion, + }, + ) + + def add_preventive_suggestion(self, suggestion): + self.append( + "preventive_suggestions", + { + "suggestion": suggestion, + }, + ) + + def update_user_db_issue(self): + self.subtype = "High CPU: user" + self.likely_causes = "Likely slow queries or many queries." + self.add_corrective_suggestion("Kill long running queries") + self.add_preventive_suggestion("Contact user to reduce queries") + + def update_high_io_db_issue(self): + self.subtype = "High CPU: iowait" + self.likely_causes = "Not enough memory" + self.add_corrective_suggestion("Reboot Server") + self.add_preventive_suggestion("Upgrade database server for more memory") + + def categorize_db_issues(self, cpu_state): + self.type = "Database Down" + if cpu_state == "user": + self.update_user_db_issue() + elif cpu_state == "iowait": + self.update_high_io_db_issue() + + def update_user_server_issue(self): + pass + + def update_high_io_server_issue(self): + pass + + def categorize_server_issues(self, cpu_state): + self.type = "Server Down" + if cpu_state == "user": + self.update_user_server_issue() + elif cpu_state == "iowait": + self.update_high_io_server_issue() + + def identify_problem(self): + if not self.resource: + return + # TODO: Try random shit if resource isn't identified + # Eg: Check mysql up/ docker up/ container up + # Ping site for error code to guess more accurately + # 500 would mean mysql down or bug in app/config + # 502 would mean server/bench down + # 504 overloaded workers + + state, percent = self.get_cpu_state(self.resource) + if state == "idle" or percent < 70: + return + + if self.resource_type == "Database Server": + self.categorize_db_issues(state) + elif self.resource_type == "Server": + self.categorize_server_issues(state) + + # TODO: categorize proxy issues # + + @property + def other_resource(self): + if self.resource_type == "Database Server": + return str(self.server) + if self.resource_type == "Server": + return str(jingrow.db.get_value("Server", self.resource, "database_server")) + return None + + def add_node_exporter_screenshot(self, page: Page, instance: str | None): + if not instance: + return + + page.goto( + f"https://{self.monitor_server.name}{self.monitor_server.node_exporter_dashboard_path}&refresh=5m&var-DS_PROMETHEUS=Prometheus&var-job=node&var-node={instance}&from=now-1h&to=now" + ) + page.wait_for_load_state("networkidle") + + image = b64encode(page.screenshot()).decode("ascii") + self.add_description(f'grafana-image') + + @cached_property + def monitor_server(self) -> MonitorServer: + jcloud_settings: JcloudSettings = jingrow.get_cached_pg("Jcloud Settings") + if not (monitor_url := jcloud_settings.monitor_server): + jingrow.throw("Monitor Server not set in Jcloud Settings") + return jingrow.get_cached_pg("Monitor Server", monitor_url) + + def get_grafana_auth_header(self): + username = str(self.monitor_server.grafana_username) + password = str(self.monitor_server.get_password("grafana_password")) + token = b64encode(f"{username}:{password}".encode()).decode("ascii") + return f"Basic {token}" + + @filelock("grafana_screenshots") # prevent 100 chromes from opening + def take_grafana_screenshots(self): + if not jingrow.db.get_single_value("Incident Settings", "grafana_screenshots"): + return + with sync_playwright() as p: + browser = p.chromium.launch(headless=True, channel="chromium") + page = browser.new_page() + 
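# The monitor server's Grafana is reached with HTTP basic auth, so the Authorization header is attached to every request from this headless page before the node-exporter dashboards are loaded and screenshotted. +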
page.set_extra_http_headers({"Authorization": self.get_grafana_auth_header()}) + + self.add_node_exporter_screenshot(page, self.resource or self.server) + self.add_node_exporter_screenshot(page, self.other_resource) + + self.save() + + @jingrow.whitelist() + def ignore_for_server(self): + """ + Ignore incidents on server (Don't call) + """ + jingrow.db.set_value("Server", self.server, "ignore_incidents_since", jingrow.utils.now_datetime()) + + def call_humans(self): + enqueue_pg( + self.pagetype, + self.name, + "_call_humans", + queue="default", + timeout=1800, + enqueue_after_commit=True, + at_front=True, + job_id=f"call_humans:{self.name}", + deduplicate=True, + ) + + def get_humans( + self, + ): + """ + Returns a list of users who are in the incident team + """ + incident_settings: IncidentSettings = jingrow.get_cached_pg("Incident Settings") + users = incident_settings.users + if jingrow.db.exists("Self Hosted Server", {"server": self.server}) or jingrow.db.get_value( + "Server", self.server, "is_self_hosted" + ): + users = incident_settings.self_hosted_users + ret: list[IncidentSettingsUser | IncidentSettingsSelfHostedUser] = users + if self.status == "Acknowledged": # repeat the acknowledged user to be the first + for user in users: + if user.user == self.acknowledged_by: + ret.remove(user) + ret.insert(0, user) + return ret + + @property + def twilio_phone_number(self): + jcloud_settings: JcloudSettings = jingrow.get_cached_pg("Jcloud Settings") + return Phone(jcloud_settings.twilio_phone_number) + + @property + def twilio_client(self): + jcloud_settings: JcloudSettings = jingrow.get_cached_pg("Jcloud Settings") + try: + return jcloud_settings.twilio_client + except Exception: + log_error("Twilio Client not configured in Jcloud Settings") + jingrow.db.commit() + raise + + @retry( + retry=retry_if_not_result( + lambda result: result in ["canceled", "completed", "failed", "busy", "no-answer", "in-progress"] + ), + wait=wait_fixed(1), + stop=stop_after_attempt(30), + ) + def wait_for_pickup(self, call: CallInstance): + return call.fetch().status # will eventually be no-answer + + def notify_unable_to_reach_twilio(self): + telegram = Telegram() + telegram.send( + f"""Unable to reach Twilio for Incident in {self.server} + +Likely due to insufficient balance or incorrect credentials""", + reraise=True, + ) + + def call_human(self, human: IncidentSettingsUser | IncidentSettingsSelfHostedUser): + try: + return self.twilio_client.calls.create( + url="http://demo.twilio.com/docs/voice.xml", + to=human.phone, + from_=self.twilio_phone_number, + ) + except TwilioRestException: + self.notify_unable_to_reach_twilio() + raise + + def _call_humans(self): + if not self.phone_call or not self.global_phone_call_enabled: + return + if ( + ignore_since := jingrow.db.get_value("Server", self.server, "ignore_incidents_since") + ) and ignore_since < jingrow.utils.now_datetime(): + return + for human in self.get_humans(): + if not (call := self.call_human(human)): + return # can't twilio + acknowledged = False + status = str(call.status) + try: + status = str(self.wait_for_pickup(call)) + except RetryError: + status = "timeout" # not Twilio's status; mostly translates to no-answer + else: + if status in ["in-progress", "completed"]: # call was picked up + acknowledged = True + self.status = "Acknowledged" + self.acknowledged_by = human.user + break + finally: + self.add_acknowledgment_update(human, acknowledged=acknowledged, call_status=status) + + def send_sms_via_twilio(self): + """ + Sends an SMS to the 
members in the Incident team + Uses Twilio for sending the SMS. + Fetches all the Numbers and makes it a generator object for memory efficiency and + Runs them through a loop since Twilio Requires a single API call for + Sending one SMS to one number + Ref: https://support.twilio.com/hc/en-us/articles/223181548-Can-I-set-up-one-API-call-to-send-messages-to-a-list-of-people- + """ + domain = jingrow.db.get_value("Jcloud Settings", None, "domain") + incident_link = f"{domain}{self.get_url()}" + + message_body = f"""New Incident {self.name} Reported + +Hosted on: {self.server} + +Incident URL: {incident_link}""" + for human in self.get_humans(): + self.twilio_client.messages.create( + to=human.phone, from_=self.twilio_phone_number, body=message_body + ) + self.sms_sent = 1 + self.save() + + def send_email_notification(self): + if not self.global_email_alerts_enabled: + return + + if self.status == "Investigating": + return + + # Notifications are only meaningful for incidents that are linked to a server and a team + team = jingrow.db.get_value("Server", self.server, "team") + if (not self.server) or (not team): + return + try: + subject = self.get_email_subject() + message = self.get_email_message() + jingrow.sendmail( + recipients=[jingrow.db.get_value("Team", team, "notify_email")], + subject=subject, + template="incident", + args={ + "message": message, + "link": f"dashboard/servers/{self.server}/analytics/", + }, + now=True, + ) + except Exception: + # Swallow the exception to avoid breaking the Incident creation + log_error("Incident Notification Email Failed") + + def get_email_subject(self): + title = str(jingrow.db.get_value("Server", self.server, "title")) + name = title.removesuffix(" - Application") or self.server + return f"Incident on {name} - {self.alert}" + + def get_email_message(self): + acknowledged_by = "An engineer" + if self.acknowledged_by: + acknowledged_by = jingrow.db.get_value("User", self.acknowledged_by, "first_name") + return { + "Validating": "We are noticing some issues with sites on your server. We are giving it a few minutes to confirm before escalating this incident to our engineers.", + "Auto-Resolved": "Your sites are now up! This incident has been auto-resolved. We will keep monitoring your sites for any further issues.", + "Confirmed": "We are still noticing issues with your sites. We are escalating this incident to our engineers.", + "Acknowledged": f"{acknowledged_by} from our team has acknowledged the incident and is actively investigating. Please allow them some time to diagnose and address the issue.", + "Resolved": f"Your sites are now up! {acknowledged_by} has resolved this incident. 
We will keep monitoring your sites for any further issues", + }[self.status] + + def add_acknowledgment_update( + self, + human: IncidentSettingsUser | IncidentSettingsSelfHostedUser, + call_status: str | None = None, + acknowledged=False, + ): + """ + Adds a new update to the Incident Document + """ + if acknowledged: + update_note = f"Acknowledged by {human.user}" + else: + update_note = f"Acknowledgement failed for {human.user}" + if call_status: + update_note += f" with call status {call_status}" + self.append( + "updates", + { + "update_note": update_note, + "update_time": jingrow.utils.jingrow.utils.now(), + }, + ) + self.save() + + def set_acknowledgement(self, acknowledged_by): + """ + Sets the Incident status to Acknowledged + """ + self.status = "Acknowledged" + self.acknowledged_by = acknowledged_by + self.save() + + @property + def incident_scope(self): + return getattr(self, INCIDENT_SCOPE) + + def get_last_alert_status_for_each_group(self): + return jingrow.db.sql_list( + f""" +select + last_alert_per_group.status +from + ( + select + name, + status, + group_key, + modified, + ROW_NUMBER() OVER ( + PARTITION BY + `group_key` + ORDER BY + `modified` DESC + ) AS rank + from + `tabAlertmanager Webhook Log` + where + modified >= "{self.creation - timedelta(minutes=PAST_ALERT_COVER_MINUTES)}" + and group_key like "%%{self.incident_scope}%%" + ) last_alert_per_group +where + last_alert_per_group.rank = 1 + """ + ) # status of the sites down in each bench + + def check_resolved(self): + if "Firing" in self.get_last_alert_status_for_each_group(): + # all should be "resolved" for auto-resolve + return + if self.status == "Validating": + self.status = "Auto-Resolved" + else: + self.status = "Resolved" + self.save() + + @property + def time_to_call_for_help(self) -> bool: + return self.status == "Confirmed" and jingrow.utils.now_datetime() - self.creation > timedelta( + seconds=get_confirmation_threshold_duration() + get_call_threshold_duration() + ) + + @property + def time_to_call_for_help_again(self) -> bool: + return self.status == "Acknowledged" and jingrow.utils.now_datetime() - self.modified > timedelta( + seconds=get_call_repeat_interval() + ) + + +def get_confirmation_threshold_duration(): + if jingrow.utils.now_datetime().hour in DAY_HOURS: + return ( + cint(jingrow.db.get_value("Incident Settings", None, "confirmation_threshold_day")) + or CONFIRMATION_THRESHOLD_SECONDS_DAY + ) + return ( + cint(jingrow.db.get_value("Incident Settings", None, "confirmation_threshold_night")) + or CONFIRMATION_THRESHOLD_SECONDS_NIGHT + ) + + +def get_call_threshold_duration(): + if jingrow.utils.now_datetime().hour in DAY_HOURS: + return ( + cint(jingrow.db.get_value("Incident Settings", None, "call_threshold_day")) + or CALL_THRESHOLD_SECONDS_DAY + ) + return ( + cint(jingrow.db.get_value("Incident Settings", None, "call_threshold_night")) + or CALL_THRESHOLD_SECONDS_NIGHT + ) + + +def get_call_repeat_interval(): + if jingrow.utils.now_datetime().hour in DAY_HOURS: + return ( + cint(jingrow.db.get_value("Incident Settings", None, "call_repeat_interval_day")) + or CALL_REPEAT_INTERVAL_DAY + ) + return ( + cint(jingrow.db.get_value("Incident Settings", None, "call_repeat_interval_night")) + or CALL_REPEAT_INTERVAL_NIGHT + ) + + +def validate_incidents(): + validating_incidents = jingrow.get_all( + "Incident", + filters={ + "status": "Validating", + }, + fields=["name", "creation"], + ) + for incident_dict in validating_incidents: + if jingrow.utils.now_datetime() - incident_dict.creation > 
timedelta( + seconds=get_confirmation_threshold_duration() + ): + incident = Incident("Incident", incident_dict.name) + incident.confirm() + + +def resolve_incidents(): + ongoing_incidents = jingrow.get_all( + "Incident", + filters={ + "status": ("in", ["Validating", "Confirmed", "Acknowledged"]), + }, + pluck="name", + ) + for incident_name in ongoing_incidents: + incident = Incident("Incident", incident_name) + incident.check_resolved() + if incident.time_to_call_for_help or incident.time_to_call_for_help_again: + incident.call_humans() + + +def notify_ignored_servers(): + servers = jingrow.qb.PageType("Server") + if not ( + ignored_servers := jingrow.qb.from_(servers) + .select(servers.name, servers.ignore_incidents_since) + .where(servers.status == "Active") + .where(servers.ignore_incidents_since.isnotnull()) + .run(as_dict=True) + ): + return + + message = "The following servers are being ignored for incidents:\n\n" + for server in ignored_servers: + message += f"{server.name} since {jingrow.utils.pretty_date(server.ignore_incidents_since)}\n" + message += "\n@adityahase @balamurali27 @saurabh6790\n" + telegram = Telegram() + telegram.send(message) + + +def on_pagetype_update(): + jingrow.db.add_index("Incident", ["alert", "server", "status"]) diff --git a/jcloud/jcloud/pagetype/incident/templates/incident.html b/jcloud/jcloud/pagetype/incident/templates/incident.html new file mode 100644 index 0000000..de77f57 --- /dev/null +++ b/jcloud/jcloud/pagetype/incident/templates/incident.html @@ -0,0 +1,30 @@ +{% extends "templates/web.html" %} + +{% block page_content %} +
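{#
  Illustrative sketch of the render context this template appears to assume:
  `title`, `status`, and `updates`, where each entry in `updates` mirrors the
  "Incident Updates" child table defined elsewhere in this diff
  (update_time: Datetime, update_note: Data). The sample values below are
  hypothetical.

  context = {
    "title": "Incident on n1.local.jingrow.dev",
    "status": "Acknowledged",
    "updates": [
      {"update_time": "2023-10-17 18:01:33", "update_note": "Acknowledged by ops@example.com"},
    ],
  }
#}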
+<div>
+  <h1>{{ title }}</h1>
+  <p>{{ status }}</p>
+</div>
+<table>
+  <tr>
+    <th>Time</th>
+    <th>Update</th>
+  </tr>
+  {% for update in updates %}
+  <tr>
+    <td>{{ update.update_time }}</td>
+    <td>{{ update.update_note }}</td>
+  </tr>
+  {% endfor %}
+</table>
+{% endblock %} + + \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/incident/templates/incident_row.html b/jcloud/jcloud/pagetype/incident/templates/incident_row.html new file mode 100644 index 0000000..8ef1ac6 --- /dev/null +++ b/jcloud/jcloud/pagetype/incident/templates/incident_row.html @@ -0,0 +1,4 @@ + + diff --git a/jcloud/jcloud/pagetype/incident/test_incident.py b/jcloud/jcloud/pagetype/incident/test_incident.py new file mode 100644 index 0000000..5c3bb76 --- /dev/null +++ b/jcloud/jcloud/pagetype/incident/test_incident.py @@ -0,0 +1,446 @@ +# Copyright (c) 2023, JINGROW +# See license.txt + +from contextlib import suppress +from datetime import datetime, timedelta +from unittest.mock import Mock, patch + +import jingrow +import zoneinfo +from jingrow.tests.utils import JingrowTestCase +from twilio.base.exceptions import TwilioRestException + +from jcloud.jcloud.pagetype.agent_job.agent_job import AgentJob +from jcloud.jcloud.pagetype.alertmanager_webhook_log.alertmanager_webhook_log import ( + AlertmanagerWebhookLog, +) +from jcloud.jcloud.pagetype.alertmanager_webhook_log.test_alertmanager_webhook_log import ( + create_test_alertmanager_webhook_log, +) +from jcloud.jcloud.pagetype.incident.incident import ( + CALL_REPEAT_INTERVAL_NIGHT, + CALL_THRESHOLD_SECONDS_NIGHT, + CONFIRMATION_THRESHOLD_SECONDS_NIGHT, + Incident, + resolve_incidents, + validate_incidents, +) +from jcloud.jcloud.pagetype.prometheus_alert_rule.test_prometheus_alert_rule import ( + create_test_prometheus_alert_rule, +) +from jcloud.jcloud.pagetype.site.test_site import create_test_site +from jcloud.jcloud.pagetype.team.test_team import create_test_jcloud_admin_team +from jcloud.telegram_utils import Telegram +from jcloud.utils.test import foreground_enqueue_pg + + +class MockTwilioCallInstance: + def __init__(self, sid="test", status="queued"): + self.sid = sid + self.status = status + + def fetch(self): + return self + + +class MockTwilioCallList: + def __init__(self, status="queued", *args, **kwargs): + self.status = status + + def create(self, *args, **kwargs): + return MockTwilioCallInstance(status=self.status) + + +class MockTwilioMessageInstance: + def __init__(self, *args, **kwargs): + pass + + +class MockTwilioMessageList: + def __init__(self, *args, **kwargs): + pass + + def create(self, *args, **kwargs): + return MockTwilioMessageInstance() + + +class MockTwilioClient: + def __init__(self, *args, **kwargs): + pass + + @property + def calls(self): + return MockTwilioCallList() + + @property + def messages(self): + return MockTwilioMessageList() + + +@patch( + "jcloud.jcloud.pagetype.alertmanager_webhook_log.alertmanager_webhook_log.enqueue_pg", + new=foreground_enqueue_pg, +) +@patch.object(AlertmanagerWebhookLog, "send_telegram_notification", new=Mock()) +@patch.object(AlertmanagerWebhookLog, "react", new=Mock()) +@patch("jcloud.jcloud.pagetype.incident.incident.jingrow.db.commit", new=Mock()) +@patch.object(AgentJob, "enqueue_http_request", new=Mock()) +@patch("jcloud.jcloud.pagetype.site.site._change_dns_record", new=Mock()) +@patch("jcloud.jcloud.pagetype.jcloud_settings.jcloud_settings.Client", new=MockTwilioClient) +@patch("jcloud.jcloud.pagetype.incident.incident.enqueue_pg", new=foreground_enqueue_pg) +class TestIncident(JingrowTestCase): + def setUp(self): + self.from_ = "+911234567892" + jingrow.db.set_value("Jcloud Settings", None, "twilio_account_sid", "test") + jingrow.db.set_value("Jcloud Settings", None, "twilio_api_key_sid", "test") + jingrow.db.set_value("Jcloud 
Settings", None, "twilio_api_key_secret", "test") + jingrow.db.set_value("Jcloud Settings", None, "twilio_phone_number", self.from_) + + self._create_test_incident_settings() + + def tearDown(self): + jingrow.db.rollback() + + def _create_test_incident_settings(self): + user1 = create_test_jcloud_admin_team().user + user2 = create_test_jcloud_admin_team().user + self.test_phno_1 = "+911234567890" + self.test_phno_2 = "+911234567891" + jingrow.get_pg( + { + "pagetype": "Incident Settings", + "users": [ + { + "user": user1, + "phone": self.test_phno_1, + }, + { + "user": user2, + "phone": self.test_phno_2, + }, + ], + } + ).insert() + + @patch("tenacity.nap.time", new=Mock()) # no sleep + @patch.object( + MockTwilioCallList, + "create", + wraps=MockTwilioCallList("busy").create, + ) + def test_incident_creation_places_phone_call_to_all_humans_in_incident_team_if_no_one_picks_up( + self, mock_calls_create: Mock + ): + jingrow.get_pg( + { + "pagetype": "Incident", + "alertname": "Test Alert", + } + ).insert().call_humans() + self.assertEqual(mock_calls_create.call_count, 2) + mock_calls_create.assert_any_call( + from_=self.from_, + to=self.test_phno_1, + url="http://demo.twilio.com/docs/voice.xml", + ) + mock_calls_create.assert_any_call( + from_=self.from_, + to=self.test_phno_2, + url="http://demo.twilio.com/docs/voice.xml", + ) + + @patch("tenacity.nap.time", new=Mock()) # no sleep + @patch.object(MockTwilioCallList, "create", wraps=MockTwilioCallList("completed").create) + def test_incident_calls_only_one_person_if_first_person_picks_up(self, mock_calls_create: Mock): + jingrow.get_pg( + { + "pagetype": "Incident", + "alertname": "Test Alert", + } + ).insert().call_humans() + self.assertEqual(mock_calls_create.call_count, 1) + + @patch("tenacity.nap.time", new=Mock()) # no sleep + @patch.object(MockTwilioCallList, "create", wraps=MockTwilioCallList("completed").create) + def test_incident_calls_stop_for_in_progress_state(self, mock_calls_create): + incident = jingrow.get_pg( + { + "pagetype": "Incident", + "alertname": "Test Alert", + } + ).insert() + incident.call_humans() + self.assertEqual(mock_calls_create.call_count, 1) + incident.reload() + self.assertEqual(len(incident.updates), 1) + + @patch("tenacity.nap.time", new=Mock()) # no sleep + @patch.object(MockTwilioCallList, "create", wraps=MockTwilioCallList("ringing").create) + def test_incident_calls_next_person_after_retry_limit(self, mock_calls_create): + jingrow.get_pg( + { + "pagetype": "Incident", + "alertname": "Test Alert", + } + ).insert().call_humans() + self.assertEqual(mock_calls_create.call_count, 2) + + @patch("jcloud.jcloud.pagetype.incident.incident.Incident.wait_for_pickup", new=Mock()) + def test_incident_gets_created_on_alert_that_meets_conditions(self): + incident_count = jingrow.db.count("Incident") + create_test_alertmanager_webhook_log() + self.assertEqual(jingrow.db.count("Incident") - incident_count, 1) + + def test_incident_not_created_when_sites_very_less_than_scope_is_down(self): + """1 out of 3 sites on server down""" + incident_count_before = jingrow.db.count("Incident") + site = create_test_site() + create_test_site(server=site.server) + create_test_site(server=site.server) + create_test_alertmanager_webhook_log(site=site) + self.assertEqual(jingrow.db.count("Incident"), incident_count_before) + + def test_incident_created_when_sites_within_scope_is_down(self): + """3 out of 3 sites on server down""" + incident_count_before = jingrow.db.count("Incident") + site = create_test_site() + site2 = 
create_test_site(server=site.server) + site3 = create_test_site(server=site.server) + create_test_alertmanager_webhook_log(site=site) + create_test_alertmanager_webhook_log(site=site2) + create_test_alertmanager_webhook_log(site=site3) + self.assertEqual(jingrow.db.count("Incident") - incident_count_before, 1) + + @patch("tenacity.nap.time", new=Mock()) # no sleep + def test_call_event_creates_acknowledgement_update(self): + with patch.object(MockTwilioCallList, "create", new=MockTwilioCallList("completed").create): + incident = jingrow.get_pg( + { + "pagetype": "Incident", + "alertname": "Test Alert", + } + ).insert() + incident.call_humans() + incident.reload() + self.assertEqual(incident.status, "Acknowledged") + self.assertEqual(len(incident.updates), 1) + with patch.object(MockTwilioCallList, "create", new=MockTwilioCallList("no-answer").create): + incident = jingrow.get_pg( + { + "pagetype": "Incident", + "alertname": "Test Alert", + } + ).insert() + incident.call_humans() + incident.reload() + self.assertEqual(len(incident.updates), 2) + + @patch("tenacity.nap.time", new=Mock()) # no sleep + @patch.object(MockTwilioCallList, "create", wraps=MockTwilioCallList("completed").create) + def test_global_phone_call_alerts_disabled_wont_create_phone_calls(self, mock_calls_create): + jingrow.db.set_value("Incident Settings", None, "phone_call_alerts", 0) + jingrow.get_pg( + { + "pagetype": "Incident", + "alertname": "Test Alert", + } + ).insert().call_humans() + mock_calls_create.assert_not_called() + jingrow.get_pg( + { + "pagetype": "Incident", + "alertname": "Test Alert", + "phone_call": False, + } + ).insert().call_humans() + mock_calls_create.assert_not_called() + jingrow.db.set_value("Incident Settings", None, "phone_call_alerts", 1) + jingrow.get_pg( + { + "pagetype": "Incident", + "alertname": "Test Alert", + "phone_call": False, + } + ).insert().call_humans() + mock_calls_create.assert_not_called() + + def test_duplicate_incidents_arent_created_for_same_alert(self): + incident_count_before = jingrow.db.count("Incident") + site = create_test_site() + site2 = create_test_site(server=site.server) + create_test_alertmanager_webhook_log(site=site) + create_test_alertmanager_webhook_log(site=site2) + self.assertEqual(jingrow.db.count("Incident") - 1, incident_count_before) + site3 = create_test_site() # new server + create_test_alertmanager_webhook_log(site=site3) + self.assertEqual(jingrow.db.count("Incident") - 2, incident_count_before) + + @patch.object( + MockTwilioMessageList, + "create", + wraps=MockTwilioMessageList().create, + ) + def test_incident_creation_sends_text_message(self, mock_messages_create: Mock): + jingrow.get_pg( + { + "pagetype": "Incident", + "alertname": "Test Alert", + } + ).insert() + self.assertEqual(mock_messages_create.call_count, 2) + + def test_incident_gets_auto_resolved_when_resolved_alerts_fire(self): + site = create_test_site() + alert = create_test_prometheus_alert_rule() + create_test_alertmanager_webhook_log(site=site, alert=alert, status="firing") + incident = jingrow.get_last_pg("Incident") + self.assertEqual(incident.status, "Validating") + create_test_alertmanager_webhook_log(site=site, alert=alert, status="resolved") + resolve_incidents() + incident.reload() + self.assertEqual(incident.status, "Auto-Resolved") + + def test_incident_does_not_auto_resolve_when_other_alerts_are_still_firing(self): + site = create_test_site() + site2 = create_test_site(server=site.server) + alert = create_test_prometheus_alert_rule() + 
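        # Note on the behaviour exercised below (per check_resolved and
        # get_last_alert_status_for_each_group in incident.py above): resolve_incidents()
        # inspects the latest "Alertmanager Webhook Log" per group_key within the
        # incident's scope and only moves the incident to Auto-Resolved once none of
        # those latest entries is still "Firing". Hence the alerts are resolved one
        # site at a time and the status is expected to stay "Validating" until the
        # last firing group clears.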
create_test_alertmanager_webhook_log(site=site, alert=alert, status="firing") # 50% sites down + incident = jingrow.get_last_pg("Incident") + self.assertEqual(incident.status, "Validating") + create_test_alertmanager_webhook_log(site=site2, status="firing") # other site down, nothing resolved + resolve_incidents() + incident.reload() + self.assertEqual(incident.status, "Validating") + create_test_alertmanager_webhook_log( + site=site2, status="resolved" + ) # other site resolved, first site still down + resolve_incidents() + incident.reload() + self.assertEqual(incident.status, "Validating") + create_test_alertmanager_webhook_log(site=site, status="resolved") + resolve_incidents() + incident.reload() + self.assertEqual(incident.status, "Auto-Resolved") + + def test_threshold_field_is_checked_before_calling(self): + create_test_alertmanager_webhook_log() + incident = jingrow.get_last_pg("Incident") + incident.db_set("creation", jingrow.utils.add_to_date(jingrow.utils.now(), minutes=-1)) + validate_incidents() + incident.reload() + self.assertEqual(incident.status, "Validating") # default min threshold is 5 mins + incident.db_set("creation", jingrow.utils.add_to_date(jingrow.utils.now(), minutes=-17)) + validate_incidents() + incident.reload() + self.assertEqual(incident.status, "Confirmed") + incident.db_set("status", "Validating") + incident.db_set("creation", jingrow.utils.add_to_date(jingrow.utils.now(), minutes=-19)) + jingrow.db.set_value("Incident Settings", None, "confirmation_threshold_day", str(21 * 60)) + jingrow.db.set_value("Incident Settings", None, "confirmation_threshold_night", str(21 * 60)) + validate_incidents() + incident.reload() + self.assertEqual(incident.status, "Validating") + + @patch.object(MockTwilioCallList, "create", wraps=MockTwilioCallList("completed").create) + def test_calls_repeated_for_acknowledged_incidents(self, mock_calls_create): + create_test_alertmanager_webhook_log() + incident = jingrow.get_last_pg("Incident") + incident.db_set("status", "Acknowledged") + resolve_incidents() + mock_calls_create.assert_not_called() + incident.reload() # datetime conversion + incident.db_set( + "modified", + incident.modified - timedelta(seconds=CALL_REPEAT_INTERVAL_NIGHT + 10), + update_modified=False, + ) # assume night interval is longer + resolve_incidents() + mock_calls_create.assert_called_once() + + def test_repeat_call_calls_acknowledging_person_first(self): + create_test_alertmanager_webhook_log( + creation=jingrow.utils.add_to_date( + jingrow.utils.now(), minutes=-CONFIRMATION_THRESHOLD_SECONDS_NIGHT + ) + ) + incident = jingrow.get_last_pg("Incident") + incident.db_set("status", "Confirmed") + incident.db_set( + "creation", + incident.creation + - timedelta(seconds=CONFIRMATION_THRESHOLD_SECONDS_NIGHT + CALL_THRESHOLD_SECONDS_NIGHT + 10), + ) + with patch.object( + MockTwilioCallList, + "create", + side_effect=[ + MockTwilioCallList("busy").create(), + MockTwilioCallList("completed").create(), + ], + ) as mock_calls_create: + resolve_incidents() # second guy picks up + incident.reload() + incident.db_set( + "modified", + incident.modified - timedelta(seconds=CALL_REPEAT_INTERVAL_NIGHT + 10), + update_modified=False, + ) + with patch.object( + MockTwilioCallList, "create", wraps=MockTwilioCallList("completed").create + ) as mock_calls_create: + resolve_incidents() + mock_calls_create.assert_called_with( + to=self.test_phno_2, from_=self.from_, url="http://demo.twilio.com/docs/voice.xml" + ) + + @patch.object(Telegram, "send") + def 
test_telegram_message_is_sent_when_unable_to_reach_twilio(self, mock_telegram_send): + create_test_alertmanager_webhook_log() + incident = jingrow.get_last_pg("Incident") + with patch.object( + MockTwilioCallList, "create", side_effect=TwilioRestException("test", 500) + ), suppress(TwilioRestException): + incident.call_humans() + mock_telegram_send.assert_called_once() + + def get_5_min_load_avg_prometheus_response(self, load_avg: float): + return { + "datasets": [ + { + "name": { + "__name__": "node_load5", + "cluster": "Default", + "instance": "n1.local.jingrow.dev", + "job": "node", + }, + "values": [load_avg], + } + ], + "labels": [ + datetime(2025, 1, 17, 12, 40, 41, 241000, tzinfo=zoneinfo.ZoneInfo(key="Asia/Kolkata")), + ], + } + + def test_high_load_avg_on_resource_makes_it_affected(self): + create_test_alertmanager_webhook_log() + incident: Incident = jingrow.get_last_pg("Incident") + with patch( + "jcloud.jcloud.pagetype.incident.incident.prometheus_query", + side_effect=[ + self.get_5_min_load_avg_prometheus_response(2.0), + self.get_5_min_load_avg_prometheus_response(32.0), + self.get_5_min_load_avg_prometheus_response(2.0), + ], + ): + incident.identify_affected_resource() + self.assertEqual(incident.resource, incident.server) + self.assertEqual(incident.resource_type, "Server") + + def test_no_response_from_monitor_on_resource_makes_it_affected(self): + create_test_alertmanager_webhook_log() + incident: Incident = jingrow.get_last_pg("Incident") + incident.identify_affected_resource() + self.assertEqual( + incident.resource, jingrow.get_value("Server", incident.server, "database_server") + ) # database is checked first because history + self.assertEqual(incident.resource_type, "Database Server") diff --git a/jcloud/jcloud/pagetype/incident_alerts/__init__.py b/jcloud/jcloud/pagetype/incident_alerts/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/incident_alerts/incident_alerts.json b/jcloud/jcloud/pagetype/incident_alerts/incident_alerts.json new file mode 100644 index 0000000..cb15c68 --- /dev/null +++ b/jcloud/jcloud/pagetype/incident_alerts/incident_alerts.json @@ -0,0 +1,48 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2023-10-17 13:25:42.597749", + "default_view": "List", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "alert", + "alert_type", + "combined_alerts" + ], + "fields": [ + { + "fieldname": "alert", + "fieldtype": "Link", + "label": "Alert", + "options": "Alertmanager Webhook Log" + }, + { + "fetch_from": "alert.combined_alerts", + "fieldname": "combined_alerts", + "fieldtype": "Data", + "label": "Combined Alerts", + "read_only": 1 + }, + { + "fetch_from": "alert.alert", + "fieldname": "alert_type", + "fieldtype": "Data", + "label": "Alert Type", + "read_only": 1 + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2023-10-18 10:40:59.417560", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Incident Alerts", + "owner": "Administrator", + "permissions": [], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/incident_alerts/incident_alerts.py b/jcloud/jcloud/pagetype/incident_alerts/incident_alerts.py new file mode 100644 index 0000000..4f7356a --- /dev/null +++ b/jcloud/jcloud/pagetype/incident_alerts/incident_alerts.py @@ -0,0 +1,25 @@ +# Copyright (c) 2023, JINGROW +# For license information, please see license.txt + +# 
import jingrow +from jingrow.model.document import Document + + +class IncidentAlerts(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + alert: DF.Link | None + alert_type: DF.Data | None + combined_alerts: DF.Data | None + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/incident_settings/__init__.py b/jcloud/jcloud/pagetype/incident_settings/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/incident_settings/incident_settings.js b/jcloud/jcloud/pagetype/incident_settings/incident_settings.js new file mode 100644 index 0000000..dd18f6f --- /dev/null +++ b/jcloud/jcloud/pagetype/incident_settings/incident_settings.js @@ -0,0 +1,8 @@ +// Copyright (c) 2023, JINGROW +// For license information, please see license.txt + +// jingrow.ui.form.on("Incident Settings", { +// refresh(frm) { + +// }, +// }); diff --git a/jcloud/jcloud/pagetype/incident_settings/incident_settings.json b/jcloud/jcloud/pagetype/incident_settings/incident_settings.json new file mode 100644 index 0000000..41a970d --- /dev/null +++ b/jcloud/jcloud/pagetype/incident_settings/incident_settings.json @@ -0,0 +1,135 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2023-12-14 09:23:55.912233", + "default_view": "List", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "users", + "self_hosted_users", + "section_break_rnxb", + "enable_incident_detection", + "phone_call_alerts", + "email_alerts", + "grafana_screenshots", + "column_break_ehby", + "confirmation_threshold_day", + "call_threshold_day", + "call_repeat_interval_day", + "column_break_voyg", + "confirmation_threshold_night", + "call_threshold_night", + "call_repeat_interval_night" + ], + "fields": [ + { + "fieldname": "users", + "fieldtype": "Table", + "label": "Users", + "options": "Incident Settings User" + }, + { + "default": "1", + "fieldname": "phone_call_alerts", + "fieldtype": "Check", + "label": "Phone Call Alerts" + }, + { + "default": "1", + "fieldname": "enable_incident_detection", + "fieldtype": "Check", + "label": "Enable Incident Detection" + }, + { + "fieldname": "self_hosted_users", + "fieldtype": "Table", + "label": "Self Hosted Users", + "options": "Incident Settings Self Hosted User" + }, + { + "fieldname": "section_break_rnxb", + "fieldtype": "Section Break" + }, + { + "fieldname": "column_break_voyg", + "fieldtype": "Column Break" + }, + { + "fieldname": "confirmation_threshold_day", + "fieldtype": "Duration", + "hide_days": 1, + "label": "Confirmation Threshold Day" + }, + { + "fieldname": "confirmation_threshold_night", + "fieldtype": "Duration", + "hide_days": 1, + "label": "Confirmation Threshold Night" + }, + { + "fieldname": "call_threshold_day", + "fieldtype": "Duration", + "hide_days": 1, + "label": "Call Threshold Day" + }, + { + "fieldname": "call_threshold_night", + "fieldtype": "Duration", + "hide_days": 1, + "label": "Call Threshold Night" + }, + { + "fieldname": "call_repeat_interval_day", + "fieldtype": "Duration", + "hide_days": 1, + "label": "Call Repeat Interval Day" + }, + { + "fieldname": "call_repeat_interval_night", + "fieldtype": "Duration", + "hide_days": 1, + "label": "Call Repeat Interval Night" + }, + { + "fieldname": "column_break_ehby", + "fieldtype": "Column Break" + }, + { + "default": "0", 
+ "fieldname": "email_alerts", + "fieldtype": "Check", + "label": "Email Alerts" + }, + { + "default": "0", + "fieldname": "grafana_screenshots", + "fieldtype": "Check", + "label": "Grafana Screenshots" + } + ], + "index_web_pages_for_search": 1, + "issingle": 1, + "links": [], + "modified": "2025-01-24 11:07:55.944210", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Incident Settings", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "print": 1, + "read": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/incident_settings/incident_settings.py b/jcloud/jcloud/pagetype/incident_settings/incident_settings.py new file mode 100644 index 0000000..8d8ee60 --- /dev/null +++ b/jcloud/jcloud/pagetype/incident_settings/incident_settings.py @@ -0,0 +1,38 @@ +# Copyright (c) 2023, JINGROW +# For license information, please see license.txt + +# import jingrow +from __future__ import annotations + +from jingrow.model.document import Document + + +class IncidentSettings(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + from jcloud.jcloud.pagetype.incident_settings_self_hosted_user.incident_settings_self_hosted_user import ( + IncidentSettingsSelfHostedUser, + ) + from jcloud.jcloud.pagetype.incident_settings_user.incident_settings_user import IncidentSettingsUser + + call_repeat_interval_day: DF.Duration | None + call_repeat_interval_night: DF.Duration | None + call_threshold_day: DF.Duration | None + call_threshold_night: DF.Duration | None + confirmation_threshold_day: DF.Duration | None + confirmation_threshold_night: DF.Duration | None + email_alerts: DF.Check + enable_incident_detection: DF.Check + grafana_screenshots: DF.Check + phone_call_alerts: DF.Check + self_hosted_users: DF.Table[IncidentSettingsSelfHostedUser] + users: DF.Table[IncidentSettingsUser] + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/incident_settings/test_incident_settings.py b/jcloud/jcloud/pagetype/incident_settings/test_incident_settings.py new file mode 100644 index 0000000..140ae47 --- /dev/null +++ b/jcloud/jcloud/pagetype/incident_settings/test_incident_settings.py @@ -0,0 +1,9 @@ +# Copyright (c) 2023, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests.utils import JingrowTestCase + + +class TestIncidentSettings(JingrowTestCase): + pass diff --git a/jcloud/jcloud/pagetype/incident_settings_self_hosted_user/__init__.py b/jcloud/jcloud/pagetype/incident_settings_self_hosted_user/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/incident_settings_self_hosted_user/incident_settings_self_hosted_user.json b/jcloud/jcloud/pagetype/incident_settings_self_hosted_user/incident_settings_self_hosted_user.json new file mode 100644 index 0000000..8c01c1a --- /dev/null +++ b/jcloud/jcloud/pagetype/incident_settings_self_hosted_user/incident_settings_self_hosted_user.json @@ -0,0 +1,48 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2024-01-11 19:05:21.521739", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "user", + "column_break_ggho", + "phone" + ], + "fields": [ + { + "fieldname": "user", + "fieldtype": "Link", + 
"in_list_view": 1, + "label": "User", + "options": "User", + "reqd": 1 + }, + { + "fetch_from": "user.phone", + "fetch_if_empty": 1, + "fieldname": "phone", + "fieldtype": "Phone", + "in_list_view": 1, + "label": "Phone", + "reqd": 1 + }, + { + "fieldname": "column_break_ggho", + "fieldtype": "Column Break" + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2024-01-11 19:09:17.571549", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Incident Settings Self Hosted User", + "owner": "Administrator", + "permissions": [], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/incident_settings_self_hosted_user/incident_settings_self_hosted_user.py b/jcloud/jcloud/pagetype/incident_settings_self_hosted_user/incident_settings_self_hosted_user.py new file mode 100644 index 0000000..42bf6fd --- /dev/null +++ b/jcloud/jcloud/pagetype/incident_settings_self_hosted_user/incident_settings_self_hosted_user.py @@ -0,0 +1,24 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class IncidentSettingsSelfHostedUser(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + phone: DF.Phone + user: DF.Link + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/incident_settings_user/__init__.py b/jcloud/jcloud/pagetype/incident_settings_user/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/incident_settings_user/incident_settings_user.json b/jcloud/jcloud/pagetype/incident_settings_user/incident_settings_user.json new file mode 100644 index 0000000..85a2113 --- /dev/null +++ b/jcloud/jcloud/pagetype/incident_settings_user/incident_settings_user.json @@ -0,0 +1,49 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2023-12-14 09:27:19.932797", + "default_view": "List", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "user", + "column_break_fmmd", + "phone" + ], + "fields": [ + { + "fieldname": "user", + "fieldtype": "Link", + "in_list_view": 1, + "label": "User", + "options": "User", + "reqd": 1 + }, + { + "fieldname": "column_break_fmmd", + "fieldtype": "Column Break" + }, + { + "fetch_from": "user.phone", + "fetch_if_empty": 1, + "fieldname": "phone", + "fieldtype": "Phone", + "in_list_view": 1, + "label": "Phone", + "reqd": 1 + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2024-01-11 19:09:25.518329", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Incident Settings User", + "owner": "Administrator", + "permissions": [], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/incident_settings_user/incident_settings_user.py b/jcloud/jcloud/pagetype/incident_settings_user/incident_settings_user.py new file mode 100644 index 0000000..9f912c7 --- /dev/null +++ b/jcloud/jcloud/pagetype/incident_settings_user/incident_settings_user.py @@ -0,0 +1,24 @@ +# Copyright (c) 2023, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class 
IncidentSettingsUser(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + phone: DF.Phone + user: DF.Link + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/incident_suggestion/__init__.py b/jcloud/jcloud/pagetype/incident_suggestion/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/incident_suggestion/incident_suggestion.json b/jcloud/jcloud/pagetype/incident_suggestion/incident_suggestion.json new file mode 100644 index 0000000..4a9653a --- /dev/null +++ b/jcloud/jcloud/pagetype/incident_suggestion/incident_suggestion.json @@ -0,0 +1,44 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2025-01-17 18:18:35.669380", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "title", + "method_name", + "apply" + ], + "fields": [ + { + "fieldname": "title", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Title" + }, + { + "fieldname": "apply", + "fieldtype": "Button", + "in_list_view": 1, + "label": "Apply" + }, + { + "fieldname": "method_name", + "fieldtype": "Data", + "label": "Method Name" + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2025-01-23 22:23:23.513166", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Incident Suggestion", + "owner": "Administrator", + "permissions": [], + "sort_field": "creation", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/incident_suggestion/incident_suggestion.py b/jcloud/jcloud/pagetype/incident_suggestion/incident_suggestion.py new file mode 100644 index 0000000..99396e2 --- /dev/null +++ b/jcloud/jcloud/pagetype/incident_suggestion/incident_suggestion.py @@ -0,0 +1,26 @@ +# Copyright (c) 2025, JINGROW +# For license information, please see license.txt + +# import jingrow +from __future__ import annotations + +from jingrow.model.document import Document + + +class IncidentSuggestion(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + method_name: DF.Data | None + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + title: DF.Data | None + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/incident_updates/__init__.py b/jcloud/jcloud/pagetype/incident_updates/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/incident_updates/incident_updates.json b/jcloud/jcloud/pagetype/incident_updates/incident_updates.json new file mode 100644 index 0000000..da7f8f2 --- /dev/null +++ b/jcloud/jcloud/pagetype/incident_updates/incident_updates.json @@ -0,0 +1,41 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2023-10-17 18:01:33.787818", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "update_note", + "update_time" + ], + "fields": [ + { + "columns": 6, + "fieldname": "update_note", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Update Note" + }, + { + "columns": 2, + "default": "now", + "fieldname": "update_time", + "fieldtype": "Datetime", + "in_list_view": 1, + "label": "Update Time" + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2023-12-22 07:17:00.449557", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Incident Updates", + "owner": "Administrator", + "permissions": [], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/incident_updates/incident_updates.py b/jcloud/jcloud/pagetype/incident_updates/incident_updates.py new file mode 100644 index 0000000..1fda771 --- /dev/null +++ b/jcloud/jcloud/pagetype/incident_updates/incident_updates.py @@ -0,0 +1,24 @@ +# Copyright (c) 2023, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class IncidentUpdates(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + update_note: DF.Data | None + update_time: DF.Datetime | None + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/invoice/__init__.py b/jcloud/jcloud/pagetype/invoice/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/invoice/fixtures/stripe_payment_intent_succeeded_webhook.json b/jcloud/jcloud/pagetype/invoice/fixtures/stripe_payment_intent_succeeded_webhook.json new file mode 100644 index 0000000..dfe0f13 --- /dev/null +++ b/jcloud/jcloud/pagetype/invoice/fixtures/stripe_payment_intent_succeeded_webhook.json @@ -0,0 +1,166 @@ +{ + "api_version": "2020-03-02", + "cmd": "jcloud.jcloud.pagetype.stripe_webhook_log.stripe_webhook_log.stripe_webhook_handler", + "created": 1606748869, + "data": { + "object": { + "amount": 90000, + "amount_capturable": 0, + "amount_received": 90000, + "application": null, + "application_fee_amount": null, + "canceled_at": null, + "cancellation_reason": null, + "capture_method": "automatic", + "charges": { + "data": [ + { + "amount": 90000, + "amount_captured": 90000, + "amount_refunded": 0, + "application": null, + "application_fee": null, + "application_fee_amount": null, + "balance_transaction": "txn_1HtDyjGjnxV0XKmrzExVjDUA", + "billing_details": { + "address": { + "city": null, + "country": null, + "line1": null, + "line2": null, + "postal_code": null, + "state": null + }, + "email": null, + "name": null, + "phone": null + }, + "calculated_statement_descriptor": "Stripe", + "captured": true, + "created": 1606748869, + "currency": "cny", + "customer": "cus_H3L4w6RXJPKLQs", + "description": null, + "destination": null, + "dispute": null, + "disputed": false, + "failure_code": null, + "failure_message": null, + "fraud_details": {}, + "id": "ch_1HtDyjGjnxV0XKmrM13MbJf2", + "invoice": null, + "livemode": false, + "metadata": { + "gst": "144.36", + "payment_for": "prepaid_credits" + }, + "object": "charge", + "on_behalf_of": null, + "order": null, + "outcome": { + "network_status": "approved_by_network", + "reason": null, + "risk_level": "normal", + "risk_score": 59, + "seller_message": "Payment complete.", + "type": "authorized" + }, + "paid": true, + "payment_intent": "pi_1HtDybGjnxV0XKmrq0F7ktU2", + "payment_method": "pm_1HtDyiGjnxV0XKmrj2LbRgZs", + "payment_method_details": { + "card": { + "brand": "visa", + "checks": { + "address_line1_check": null, + "address_postal_code_check": null, + "cvc_check": "pass" + }, + "country": "US", + "exp_month": 2, + "exp_year": 2022, + "fingerprint": "rbydJcL7HczjZDc9", + "funding": "credit", + "installments": null, + "last4": "4242", + "network": "visa", + "three_d_secure": null, + "wallet": null + }, + "type": "card" + }, + "receipt_email": null, + "receipt_number": null, + "receipt_url": "https://pay.stripe.com/receipts/acct_1GSLsTGjnxV0XKmr/ch_1HtDyjGjnxV0XKmrM13MbJf2/rcpt_IUCNHB5rR5pULHIsx4A73o50XriQTEw", + "refunded": false, + "refunds": { + "data": [], + "has_more": false, + "object": "list", + "total_count": 0, + "url": "/v1/charges/ch_1HtDyjGjnxV0XKmrM13MbJf2/refunds" + }, + "review": null, + "shipping": null, + "source": null, + "source_transfer": null, + "statement_descriptor": null, + "statement_descriptor_suffix": null, + "status": "succeeded", + "transfer_data": null, + "transfer_group": null + } + ], + "has_more": false, + "object": "list", + "total_count": 1, + "url": 
"/v1/charges?payment_intent=pi_1HtDybGjnxV0XKmrq0F7ktU2" + }, + "client_secret": "pi_1HtDybGjnxV0XKmrq0F7ktU2_secret_zS2HILsiJlMa1SdWcrTWzfZgl", + "confirmation_method": "automatic", + "created": 1606748861, + "currency": "cny", + "customer": "cus_H3L4w6RXJPKLQs", + "description": null, + "id": "pi_1HtDybGjnxV0XKmrq0F7ktU2", + "invoice": null, + "last_payment_error": null, + "livemode": false, + "metadata": { + "gst": "144.36", + "payment_for": "prepaid_credits" + }, + "next_action": null, + "object": "payment_intent", + "on_behalf_of": null, + "payment_method": "pm_1HtDyiGjnxV0XKmrj2LbRgZs", + "payment_method_options": { + "card": { + "installments": null, + "network": null, + "request_three_d_secure": "automatic" + } + }, + "payment_method_types": ["card"], + "receipt_email": null, + "review": null, + "setup_future_usage": null, + "shipping": null, + "source": null, + "statement_descriptor": null, + "statement_descriptor_suffix": null, + "status": "succeeded", + "transfer_data": null, + "transfer_group": null + } + }, + "id": "evt_1HtDykGjnxV0XKmrfG08iI3Z", + "livemode": false, + "object": "event", + "pending_webhooks": 2, + "request": { + "id": "req_Vw5ippANYMNBSZ", + "idempotency_key": null + }, + "type": "payment_intent.succeeded" +} diff --git a/jcloud/jcloud/pagetype/invoice/invoice.js b/jcloud/jcloud/pagetype/invoice/invoice.js new file mode 100644 index 0000000..31ed64b --- /dev/null +++ b/jcloud/jcloud/pagetype/invoice/invoice.js @@ -0,0 +1,181 @@ +// Copyright (c) 2020, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Invoice', { + refresh: function (frm) { + if (frm.pg.stripe_invoice_id) { + frm.add_web_link( + `https://dashboard.stripe.com/invoices/${frm.pg.stripe_invoice_id}`, + 'View Stripe Invoice', + ); + } + if (frm.pg.jingrow_invoice) { + frm.add_web_link( + `https://jingrow.com/app/sales-invoice/${frm.pg.jingrow_invoice}`, + 'View Jingrow Invoice', + ); + } + + if (frm.pg.jingrow_partner_order) { + frm.add_web_link( + `https://jingrow.com/app/partner-order/${frm.pg.jingrow_partner_order}`, + 'View Jingrow Partner Order', + ); + } + + if (frm.pg.status == 'Paid' && !frm.pg.jingrow_invoice) { + let btn = frm.add_custom_button('Create Invoice on jingrow.com', () => { + frm + .call({ + pg: frm.pg, + method: 'create_invoice_on_jingrowio', + btn, + }) + .then((r) => { + if (r.message) { + jingrow.msgprint( + `Sales Invoice ${r.message} created successfully.`, + ); + } + frm.refresh(); + }); + }); + } + + if (frm.pg.status == 'Paid' && frm.pg.stripe_invoice_id) { + frm.add_custom_button('Refund Invoice', () => { + let d = new jingrow.ui.Dialog({ + title: 'Refund Invoice', + fields: [ + { + label: 'Reason', + fieldname: 'reason', + fieldtype: 'Data', + }, + ], + primary_action({ reason }) { + if (!reason) { + jingrow.msgprint('Please enter a reason for the refund.'); + return; + } + d.hide(); + jingrow + .call({ + pg: frm.pg, + method: 'refund', + args: { + reason, + }, + btn: d.get_primary_btn(), + }) + .then((r) => { + if (r.message) { + jingrow.msgprint('Refunded successfully.'); + d.hide(); + } + frm.refresh(); + }); + }, + }); + d.show(); + }); + } + + if (frm.pg.status == 'Invoice Created') { + let btn = frm.add_custom_button( + 'Finalize Invoice', + () => { + jingrow.confirm( + "This action will finalize the Stripe Invoice and charge the customer's card. 
Continue?", + () => { + frm + .call({ + pg: frm.pg, + method: 'finalize_stripe_invoice', + btn, + }) + .then((r) => frm.refresh()); + }, + ); + }, + 'Stripe Invoice', + ); + } + + if (frm.pg.stripe_invoice_url) { + let btn = frm.add_custom_button( + 'Refresh Payment Link', + () => { + frm + .call({ + pg: frm.pg, + method: 'refresh_stripe_payment_link', + btn, + }) + .then((r) => { + frm.refresh(); + jingrow.utils.copy_to_clipboard(r.message); + jingrow.msgprint({ + title: 'Stripe Payment Link Updated', + indicator: 'green', + message: 'The Link has been copied to the clipboard.', + }); + }); + }, + 'Stripe Invoice', + ); + } + + if (frm.pg.docstatus == 1 && frm.pg.stripe_invoice_id) { + let btn = frm.add_custom_button( + 'Change Status', + () => { + let d = new jingrow.ui.Dialog({ + title: 'Change Stripe Invoice Status', + fields: [ + { + label: 'Status', + fieldname: 'status', + fieldtype: 'Select', + options: ['Paid', 'Uncollectible', 'Void'], + }, + ], + primary_action({ status }) { + frm + .call({ + pg: frm.pg, + method: 'change_stripe_invoice_status', + args: { + status, + }, + btn, + }) + .then((r) => frm.refresh()); + }, + }); + d.show(); + }, + 'Stripe Invoice', + ); + } + + if (frm.pg.docstatus === 0) { + let btn = frm.add_custom_button('Finalize Invoice', () => + jingrow.confirm( + 'This action will apply credits (if applicable) and generate a Stripe invoice if the amount due is greater than 0. ' + + 'If a Stripe invoice was generated already, it will be voided and a new one will be generated. Continue?', + () => + frm + .call({ + pg: frm.pg, + method: 'finalize_invoice', + btn, + }) + .then(() => { + frm.refresh(); + }), + ), + ); + } + }, +}); diff --git a/jcloud/jcloud/pagetype/invoice/invoice.json b/jcloud/jcloud/pagetype/invoice/invoice.json new file mode 100644 index 0000000..93343f2 --- /dev/null +++ b/jcloud/jcloud/pagetype/invoice/invoice.json @@ -0,0 +1,591 @@ +{ + "actions": [], + "autoname": "format:INV-{YYYY}-{#####}", + "creation": "2020-07-17 18:05:24.317258", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "team", + "customer_name", + "customer_email", + "currency", + "partner_email", + "jingrow_partnership_date", + "customer_partnership_date", + "billing_email", + "column_break_4", + "status", + "refund_reason", + "marketplace", + "type", + "period_start", + "period_end", + "due_date", + "amended_from", + "section_break_8", + "items", + "discounts_section", + "discount_note", + "column_break_mqqa", + "total_before_discount", + "total_discount_amount", + "section_break_10", + "payment_date", + "payment_attempt_count", + "payment_attempt_date", + "next_payment_attempt_date", + "payment_mode", + "write_off_amount", + "column_break_15", + "total", + "total_before_tax", + "applied_credits", + "free_credits", + "amount_due", + "gst", + "amount_due_with_tax", + "amount_paid", + "section_break_15", + "credit_allocations", + "stripe_billing_section", + "stripe_invoice_id", + "stripe_payment_intent_id", + "stripe_invoice_url", + "jingrow_invoice", + "invoice_pdf", + "jingrow_partner_order", + "column_break_32", + "transaction_amount", + "transaction_net", + "transaction_fee", + "exchange_rate", + "transaction_fee_details", + "razorpay_billing_section", + "razorpay_order_id", + "razorpay_payment_record", + "column_break_44", + "razorpay_payment_id", + "razorpay_payment_method", + "mpesa_billing_section", + "mpesa_payment_record", + "mpesa_receipt_number", + "mpesa_invoice", + "mpesa_invoice_pdf", + "column_break_ouox", + 
"mpesa_request_id", + "mpesa_merchant_id", + "section_break_47", + "discounts" + ], + "fields": [ + { + "fieldname": "team", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Team", + "options": "Team", + "reqd": 1, + "search_index": 1 + }, + { + "fieldname": "customer_name", + "fieldtype": "Data", + "label": "Customer Name" + }, + { + "fetch_from": "team.user", + "fieldname": "customer_email", + "fieldtype": "Data", + "label": "Customer Email", + "options": "Email" + }, + { + "fieldname": "due_date", + "fieldtype": "Date", + "label": "Due Date" + }, + { + "fieldname": "items", + "fieldtype": "Table", + "label": "Items", + "options": "Invoice Item" + }, + { + "allow_on_submit": 1, + "fieldname": "status", + "fieldtype": "Select", + "label": "Status", + "no_copy": 1, + "options": "Draft\nInvoice Created\nUnpaid\nPaid\nRefunded\nUncollectible\nCollected\nEmpty" + }, + { + "fieldname": "total", + "fieldtype": "Currency", + "in_list_view": 1, + "label": "Total", + "no_copy": 1, + "options": "currency" + }, + { + "fieldname": "amount_due", + "fieldtype": "Currency", + "label": "Amount Due", + "no_copy": 1, + "options": "currency" + }, + { + "allow_on_submit": 1, + "fieldname": "amount_paid", + "fieldtype": "Currency", + "label": "Amount Paid", + "no_copy": 1, + "options": "currency" + }, + { + "fieldname": "amended_from", + "fieldtype": "Link", + "label": "Amended From", + "no_copy": 1, + "options": "Invoice", + "print_hide": 1, + "read_only": 1 + }, + { + "fieldname": "column_break_4", + "fieldtype": "Column Break" + }, + { + "fieldname": "section_break_8", + "fieldtype": "Section Break" + }, + { + "fieldname": "section_break_10", + "fieldtype": "Section Break", + "hide_border": 1 + }, + { + "fieldname": "section_break_15", + "fieldtype": "Section Break", + "hide_border": 1 + }, + { + "fieldname": "stripe_billing_section", + "fieldtype": "Section Break", + "label": "Stripe Billing" + }, + { + "fieldname": "stripe_invoice_id", + "fieldtype": "Data", + "label": "Stripe Invoice ID", + "no_copy": 1 + }, + { + "allow_on_submit": 1, + "fieldname": "stripe_invoice_url", + "fieldtype": "Text", + "label": "Stripe Invoice URL", + "no_copy": 1, + "read_only": 1 + }, + { + "fetch_from": "team.currency", + "fieldname": "currency", + "fieldtype": "Link", + "in_standard_filter": 1, + "label": "Currency", + "options": "Currency" + }, + { + "fieldname": "column_break_15", + "fieldtype": "Column Break" + }, + { + "fieldname": "period_start", + "fieldtype": "Date", + "label": "Period Start" + }, + { + "fieldname": "period_end", + "fieldtype": "Date", + "label": "Period End" + }, + { + "allow_on_submit": 1, + "fieldname": "payment_date", + "fieldtype": "Date", + "label": "Payment Date", + "no_copy": 1 + }, + { + "allow_on_submit": 1, + "default": "0", + "fieldname": "payment_attempt_count", + "fieldtype": "Int", + "label": "Payment Attempt Count", + "no_copy": 1 + }, + { + "allow_on_submit": 1, + "fieldname": "payment_attempt_date", + "fieldtype": "Date", + "label": "Payment Attempt Date", + "no_copy": 1 + }, + { + "default": "0", + "fieldname": "applied_credits", + "fieldtype": "Currency", + "label": "Applied Credits", + "no_copy": 1, + "options": "currency" + }, + { + "allow_on_submit": 1, + "fieldname": "jingrow_invoice", + "fieldtype": "Data", + "label": "Jingrow Invoice", + "no_copy": 1 + }, + { + "fieldname": "credit_allocations", + "fieldtype": "Table", + "label": "Credit Allocations", + "no_copy": 1, + "options": "Invoice Credit Allocation" + }, + { + "allow_on_submit": 1, 
+ "fieldname": "invoice_pdf", + "fieldtype": "Attach", + "label": "Invoice PDF", + "no_copy": 1 + }, + { + "fieldname": "column_break_32", + "fieldtype": "Column Break" + }, + { + "allow_on_submit": 1, + "fieldname": "transaction_amount", + "fieldtype": "Currency", + "label": "Transaction Amount", + "no_copy": 1, + "options": "CNY" + }, + { + "allow_on_submit": 1, + "fieldname": "transaction_net", + "fieldtype": "Currency", + "label": "Transaction Net", + "no_copy": 1, + "options": "CNY" + }, + { + "allow_on_submit": 1, + "fieldname": "transaction_fee", + "fieldtype": "Currency", + "label": "Transaction Fee", + "no_copy": 1, + "options": "CNY" + }, + { + "allow_on_submit": 1, + "fieldname": "exchange_rate", + "fieldtype": "Float", + "label": "Exchange Rate", + "no_copy": 1 + }, + { + "allow_on_submit": 1, + "fieldname": "transaction_fee_details", + "fieldtype": "Table", + "label": "Transaction Fee Details", + "no_copy": 1, + "options": "Invoice Transaction Fee" + }, + { + "default": "Subscription", + "fieldname": "type", + "fieldtype": "Select", + "label": "Type", + "options": "Subscription\nPrepaid Credits\nService\nSummary\nPartnership Fees" + }, + { + "depends_on": "eval:pg.type == 'Prepaid Credits'", + "fieldname": "stripe_payment_intent_id", + "fieldtype": "Data", + "label": "Stripe Payment Intent ID", + "no_copy": 1 + }, + { + "fieldname": "free_credits", + "fieldtype": "Currency", + "label": "Free Credits", + "options": "currency" + }, + { + "fieldname": "payment_mode", + "fieldtype": "Select", + "label": "Payment Mode", + "options": "\nCard\nPrepaid Credits\nNEFT\nPartner Credits\nPaid By Partner" + }, + { + "fieldname": "jingrow_partner_order", + "fieldtype": "Data", + "label": "Jingrow Partner Order" + }, + { + "fieldname": "razorpay_billing_section", + "fieldtype": "Section Break", + "label": "Razorpay Billing" + }, + { + "fieldname": "razorpay_order_id", + "fieldtype": "Data", + "label": "Razorpay Order ID" + }, + { + "fieldname": "column_break_44", + "fieldtype": "Column Break" + }, + { + "fieldname": "razorpay_payment_record", + "fieldtype": "Link", + "label": "Razorpay Payment Record", + "options": "Razorpay Payment Record" + }, + { + "fieldname": "razorpay_payment_method", + "fieldtype": "Data", + "label": "Razorpay Payment Method", + "read_only": 1 + }, + { + "collapsible": 1, + "fieldname": "section_break_47", + "fieldtype": "Section Break", + "label": "Discounts" + }, + { + "fieldname": "discounts", + "fieldtype": "Table", + "label": "Discounts", + "options": "Invoice Discount" + }, + { + "fieldname": "total_before_discount", + "fieldtype": "Currency", + "label": "Total Before Discount", + "options": "currency" + }, + { + "fieldname": "total_discount_amount", + "fieldtype": "Currency", + "label": "Total Discount Amount", + "options": "currency", + "read_only": 1 + }, + { + "fetch_from": "team.partner_email", + "fieldname": "partner_email", + "fieldtype": "Data", + "label": "Partner Email", + "read_only": 1 + }, + { + "fetch_from": "razorpay_payment_record.payment_id", + "fieldname": "razorpay_payment_id", + "fieldtype": "Data", + "label": "Razorpay Payment ID" + }, + { + "default": "0", + "fieldname": "marketplace", + "fieldtype": "Check", + "label": "Marketplace" + }, + { + "fetch_from": "team.partnership_date", + "fieldname": "customer_partnership_date", + "fieldtype": "Date", + "label": "Customer Partnership Date", + "read_only": 1 + }, + { + "fetch_from": "team.jingrow_partnership_date", + "fieldname": "jingrow_partnership_date", + "fieldtype": "Date", + "label": 
"Jingrow Partnership Date", + "read_only": 1 + }, + { + "fieldname": "gst", + "fieldtype": "Currency", + "label": "GST", + "options": "currency" + }, + { + "fieldname": "total_before_tax", + "fieldtype": "Currency", + "hidden": 1, + "label": "Total Before Tax", + "options": "currency" + }, + { + "fieldname": "write_off_amount", + "fieldtype": "Float", + "label": "Write-Off Amount", + "precision": "9" + }, + { + "fieldname": "amount_due_with_tax", + "fieldtype": "Currency", + "label": "Amount Due (Including Tax)", + "no_copy": 1, + "options": "currency" + }, + { + "fieldname": "discounts_section", + "fieldtype": "Section Break", + "label": "Discounts" + }, + { + "fieldname": "discount_note", + "fieldtype": "Data", + "label": "Discount Note" + }, + { + "fieldname": "column_break_mqqa", + "fieldtype": "Column Break" + }, + { + "fetch_from": "team.billing_email", + "fieldname": "billing_email", + "fieldtype": "Data", + "label": "Billing Email" + }, + { + "depends_on": "eval:pg.payment_mode == \"Card\" && pg.status == \"Unpaid\"", + "fieldname": "next_payment_attempt_date", + "fieldtype": "Date", + "label": "Next Payment Attempt Date" + }, + { + "depends_on": "eval: pg.status=='Refunded'", + "fieldname": "refund_reason", + "fieldtype": "Data", + "label": "Refund Reason" + }, + { + "fieldname": "mpesa_billing_section", + "fieldtype": "Section Break", + "label": "Mpesa Billing" + }, + { + "fieldname": "mpesa_payment_record", + "fieldtype": "Data", + "label": "Mpesa Payment Record" + }, + { + "fieldname": "mpesa_request_id", + "fieldtype": "Data", + "label": "Mpesa Request ID" + }, + { + "fieldname": "mpesa_receipt_number", + "fieldtype": "Data", + "label": "Mpesa Receipt Number" + }, + { + "fieldname": "column_break_ouox", + "fieldtype": "Column Break" + }, + { + "fieldname": "mpesa_merchant_id", + "fieldtype": "Data", + "label": "Mpesa Merchant ID" + }, + { + "fieldname": "mpesa_invoice", + "fieldtype": "Data", + "label": "Mpesa Invoice" + }, + { + "allow_on_submit": 1, + "fieldname": "mpesa_invoice_pdf", + "fieldtype": "Attach", + "label": "Mpesa Invoice PDF" + } + ], + "is_submittable": 1, + "links": [ + { + "group": "Documents", + "link_pagetype": "Usage Record", + "link_fieldname": "invoice" + }, + { + "group": "Documents", + "link_pagetype": "Balance Transaction", + "link_fieldname": "invoice" + }, + { + "group": "Documents", + "link_pagetype": "Stripe Payment Event", + "link_fieldname": "invoice" + }, + { + "group": "Webhook Logs", + "link_pagetype": "Stripe Webhook Log", + "link_fieldname": "invoice" + } + ], + "modified": "2025-02-02 19:05:05.300386", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Invoice", + "naming_rule": "Expression", + "owner": "Administrator", + "permissions": [ + { + "amend": 1, + "cancel": 1, + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "submit": 1, + "write": 1 + }, + { + "amend": 1, + "cancel": 1, + "create": 1, + "print": 1, + "read": 1, + "role": "Jcloud Admin", + "submit": 1, + "write": 1 + }, + { + "read": 1, + "role": "Jcloud Member" + }, + { + "read": 1, + "report": 1, + "role": "Site Manager" + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/invoice/invoice.py b/jcloud/jcloud/pagetype/invoice/invoice.py new file mode 100644 index 0000000..ec55be1 --- /dev/null +++ b/jcloud/jcloud/pagetype/invoice/invoice.py @@ -0,0 +1,1155 
@@ +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + +from __future__ import annotations + +import jingrow +from jingrow import _ +from jingrow.model.document import Document +from jingrow.utils import cint, flt, getdate +from jingrow.utils.data import fmt_money + +from jcloud.api.billing import get_stripe +from jcloud.api.client import dashboard_whitelist +from jcloud.utils import log_error +from jcloud.utils.billing import ( + convert_stripe_money, + get_jingrow_io_connection, + get_gateway_details, + get_partner_external_connection, +) + + +class Invoice(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + from jcloud.jcloud.pagetype.invoice_credit_allocation.invoice_credit_allocation import ( + InvoiceCreditAllocation, + ) + from jcloud.jcloud.pagetype.invoice_discount.invoice_discount import InvoiceDiscount + from jcloud.jcloud.pagetype.invoice_item.invoice_item import InvoiceItem + from jcloud.jcloud.pagetype.invoice_transaction_fee.invoice_transaction_fee import InvoiceTransactionFee + + amended_from: DF.Link | None + amount_due: DF.Currency + amount_due_with_tax: DF.Currency + amount_paid: DF.Currency + applied_credits: DF.Currency + billing_email: DF.Data | None + credit_allocations: DF.Table[InvoiceCreditAllocation] + currency: DF.Link | None + customer_email: DF.Data | None + customer_name: DF.Data | None + customer_partnership_date: DF.Date | None + discount_note: DF.Data | None + discounts: DF.Table[InvoiceDiscount] + due_date: DF.Date | None + exchange_rate: DF.Float + jingrow_invoice: DF.Data | None + jingrow_partner_order: DF.Data | None + jingrow_partnership_date: DF.Date | None + free_credits: DF.Currency + gst: DF.Currency + invoice_pdf: DF.Attach | None + items: DF.Table[InvoiceItem] + marketplace: DF.Check + mpesa_invoice: DF.Data | None + mpesa_invoice_pdf: DF.Attach | None + mpesa_merchant_id: DF.Data | None + mpesa_payment_record: DF.Data | None + mpesa_receipt_number: DF.Data | None + mpesa_request_id: DF.Data | None + next_payment_attempt_date: DF.Date | None + partner_email: DF.Data | None + payment_attempt_count: DF.Int + payment_attempt_date: DF.Date | None + payment_date: DF.Date | None + payment_mode: DF.Literal["", "Card", "Prepaid Credits", "NEFT", "Partner Credits", "Paid By Partner"] + period_end: DF.Date | None + period_start: DF.Date | None + razorpay_order_id: DF.Data | None + razorpay_payment_id: DF.Data | None + razorpay_payment_method: DF.Data | None + razorpay_payment_record: DF.Link | None + refund_reason: DF.Data | None + status: DF.Literal[ + "Draft", "Invoice Created", "Unpaid", "Paid", "Refunded", "Uncollectible", "Collected", "Empty" + ] + stripe_invoice_id: DF.Data | None + stripe_invoice_url: DF.Text | None + stripe_payment_intent_id: DF.Data | None + team: DF.Link + total: DF.Currency + total_before_discount: DF.Currency + total_before_tax: DF.Currency + total_discount_amount: DF.Currency + transaction_amount: DF.Currency + transaction_fee: DF.Currency + transaction_fee_details: DF.Table[InvoiceTransactionFee] + transaction_net: DF.Currency + type: DF.Literal["Subscription", "Prepaid Credits", "Service", "Summary", "Partnership Fees"] + write_off_amount: DF.Float + # end: auto-generated types + + dashboard_fields = ( + "period_start", + "period_end", + "team", + "items", + "currency", + "type", + "payment_mode", + "total", + "total_before_discount", + 
"total_before_tax", + "partner_email", + "amount_due", + "amount_paid", + "docstatus", + "gst", + "applied_credits", + "status", + "due_date", + "total_discount_amount", + "invoice_pdf", + "stripe_invoice_url", + "amount_due_with_tax", + "mpesa_invoice", + "mpesa_invoice_pdf", + ) + + @staticmethod + def get_list_query(query, filters=None, **list_args): + StripeWebhookLog = jingrow.qb.PageType("Stripe Webhook Log") + Invoice = jingrow.qb.PageType("Invoice") + + partner_customer = filters.get("partner_customer") + if partner_customer: + team_name = filters.get("team") + due_date = filters.get("due_date") + filters.pop("partner_customer") + query = ( + jingrow.qb.from_(Invoice) + .select(Invoice.name, Invoice.total, Invoice.amount_due, Invoice.status, Invoice.due_date) + .where( + (Invoice.team == team_name) + & (Invoice.due_date >= due_date[1]) + & (Invoice.type == "Subscription") + ) + ) + + invoices = ( + query.select(StripeWebhookLog.name.as_("stripe_payment_failed")) + .left_join(StripeWebhookLog) + .on( + (Invoice.name == StripeWebhookLog.invoice) + & (StripeWebhookLog.event_type == "payment_intent.payment_failed") + ) + .groupby(Invoice.name) + ).run(as_dict=True) + + for invoice in invoices: + if stripe_log := invoice.stripe_payment_failed: + payload, failed_payment_method = jingrow.db.get_value( + "Stripe Webhook Log", stripe_log, ["payload", "stripe_payment_method"] + ) + payload = jingrow.parse_json(payload) + invoice.stripe_payment_error = ( + payload.get("data", {}).get("object", {}).get("last_payment_error", {}).get("message") + ) + invoice.stripe_payment_failed_card = jingrow.db.get_value( + "Stripe Payment Method", failed_payment_method, "last_4" + ) + + return invoices + + def get_pg(self, pg): + pg.invoice_pdf = self.invoice_pdf or (self.currency == "USD" and self.get_pdf()) + currency = jingrow.get_value("Team", self.team, "currency") + price_field = "price_cny" if currency == "CNY" else "price_usd" + currency_symbol = "¥" if currency == "CNY" else "$" + + for item in pg["items"]: + if item.document_type in ("Server", "Database Server"): + item.document_name = jingrow.get_value(item.document_type, item.document_name, "title") + if server_plan := jingrow.get_value("Server Plan", item.plan, price_field): + item.plan = f"{currency_symbol}{server_plan}" + elif server_plan := jingrow.get_value("Server Storage Plan", item.plan, price_field): + item.plan = f"Storage Add-on {currency_symbol}{server_plan}/GB" + elif item.document_type == "Marketplace App": + item.document_name = jingrow.get_value(item.document_type, item.document_name, "title") + item.plan = ( + f"{currency_symbol}{jingrow.get_value('Marketplace App Plan', item.plan, price_field)}" + ) + + @dashboard_whitelist() + def stripe_payment_url(self): + if not self.stripe_invoice_id: + return + jingrow.response.location = self.get_stripe_payment_url() + jingrow.response.type = "redirect" + + def get_stripe_payment_url(self): + stripe_link_expired = ( + self.status == "Unpaid" and jingrow.utils.date_diff(jingrow.utils.now(), self.due_date) > 30 + ) + if stripe_link_expired: + stripe = get_stripe() + stripe_invoice = stripe.Invoice.retrieve(self.stripe_invoice_id) + url = stripe_invoice.hosted_invoice_url + else: + url = self.stripe_invoice_url + return url + + def validate(self): + self.validate_team() + self.validate_dates() + self.validate_duplicate() + self.validate_items() + self.calculate_values() + self.compute_free_credits() + + def before_submit(self): + if self.total > 0 and self.status != "Paid": + 
jingrow.throw("Invoice must be Paid to be submitted") + + def calculate_values(self): + if self.status == "Paid" and self.docstatus == 1: + # don't calculate if already invoice is paid and already submitted + return + self.calculate_total() + self.calculate_discounts() + self.calculate_amount_due() + self.apply_taxes_if_applicable() + + @jingrow.whitelist() + def finalize_invoice(self): # noqa: C901 + if self.type == "Prepaid Credits": + return + + self.calculate_values() + + if self.total == 0: + self.status = "Empty" + self.submit() + return + + team = jingrow.get_pg("Team", self.team) + if not team.enabled: + self.add_comment("Info", "Skipping finalize invoice because team is disabled") + self.save() + return + + if self.stripe_invoice_id: + # if stripe invoice is already created and paid, + # then update status and return early + stripe = get_stripe() + invoice = stripe.Invoice.retrieve(self.stripe_invoice_id) + if invoice.status == "paid": + self.status = "Paid" + self.update_transaction_details(invoice.charge) + self.submit() + self.unsuspend_sites_if_applicable() + return + + # set as unpaid by default + self.status = "Unpaid" + self.update_item_descriptions() + + if self.amount_due > 0: + self.apply_credit_balance() + + if self.amount_due == 0: + self.status = "Paid" + + if self.status == "Paid" and self.stripe_invoice_id and self.amount_paid == 0: + stripe = get_stripe() + invoice = stripe.Invoice.retrieve(self.stripe_invoice_id) + payment_intent = stripe.PaymentIntent.retrieve(invoice.payment_intent) + if payment_intent.status == "processing": + # mark the fc invoice as Paid + # if the payment intent is processing, it means the invoice cannot be voided yet + # wait for invoice to be updated and then mark it as void if payment failed + # or issue a refund if succeeded + self.save() # status is already Paid, so no need to set again + else: + self.change_stripe_invoice_status("Void") + self.add_comment( + text=( + f"Stripe Invoice {self.stripe_invoice_id} voided because payment is done via credits." + ) + ) + + self.save() + + if self.amount_due > 0: + if self.payment_mode == "Prepaid Credits": + self.add_comment( + "Comment", + "Not enough credits for this invoice. 
Change payment mode to Card to pay using Stripe.", + ) + # we shouldn't depend on payment_mode to decide whether to create stripe invoice or not + # there should be a separate field in team to decide whether to create automatic invoices or not + if self.payment_mode == "Card": + self.create_stripe_invoice() + + if self.status == "Paid": + self.submit() + self.unsuspend_sites_if_applicable() + + def unsuspend_sites_if_applicable(self): + if ( + jingrow.db.count( + "Invoice", + { + "status": "Unpaid", + "team": self.team, + "type": "Subscription", + "docstatus": ("<", 2), + }, + ) + == 0 + ): + # unsuspend sites only if all invoices are paid + team = jingrow.get_cached_pg("Team", self.team) + team.unsuspend_sites(f"Invoice {self.name} Payment Successful.") + + def calculate_total(self): + total = 0 + for item in self.items: + total += item.amount + self.total = flt(total, 2) + + def apply_taxes_if_applicable(self): + self.amount_due_with_tax = self.amount_due + self.gst = 0 + + if self.payment_mode == "Prepaid Credits": + return + + if self.currency == "CNY" and self.type == "Subscription": + gst_rate = jingrow.db.get_single_value("Jcloud Settings", "gst_percentage") + self.gst = flt(self.amount_due * gst_rate, 2) + self.amount_due_with_tax = flt(self.amount_due + self.gst, 2) + + def calculate_amount_due(self): + self.amount_due = flt(self.total - self.applied_credits, 2) + if self.amount_due < 0 and self.amount_due > -0.1: + self.write_off_amount = self.amount_due + self.amount_due = 0 + + if self.amount_due > 0 and self.amount_due < 0.1: + self.write_off_amount = self.amount_due + self.amount_due = 0 + + def on_submit(self): + self.create_invoice_on_jingrowio() + self.fetch_mpesa_invoice_pdf() + + def on_update_after_submit(self): + self.create_invoice_on_jingrowio() + self.fetch_mpesa_invoice_pdf() + + def after_insert(self): + if self.get("amended_from"): + values = { + "modified": jingrow.utils.now(), + "modified_by": jingrow.session.user, + "new_invoice": self.name, + "old_invoice": self.amended_from, + } + # link usage records of old cancelled invoice to the new amended invoice + jingrow.db.sql( + """ + UPDATE + `tabUsage Record` + SET + `invoice` = %(new_invoice)s, + `modified` = %(modified)s, + `modified_by` = %(modified_by)s + WHERE + `invoice` = %(old_invoice)s + """, + values=values, + ) + + def create_stripe_invoice(self): + if self.stripe_invoice_id: + invoice = self.get_stripe_invoice() + stripe_invoice_total = convert_stripe_money(invoice.total) + if self.amount_due_with_tax == stripe_invoice_total: + # return if an invoice with the same amount is already created + return + # if the amount is changed, void the stripe invoice and create a new one + self.change_stripe_invoice_status("Void") + formatted_amount = fmt_money(stripe_invoice_total, currency=self.currency) + self.add_comment( + text=(f"Stripe Invoice {self.stripe_invoice_id} of amount {formatted_amount} voided.") + ) + self.stripe_invoice_id = "" + self.stripe_invoice_url = "" + self.save() + + if self.amount_due_with_tax <= 0: + return + + customer_id = jingrow.db.get_value("Team", self.team, "stripe_customer_id") + amount = int(self.amount_due_with_tax * 100) + self._make_stripe_invoice(customer_id, amount) + + def _make_stripe_invoice(self, customer_id, amount): + mandate_id = self.get_mandate_id(customer_id) + try: + stripe = get_stripe() + invoice = stripe.Invoice.create( + customer=customer_id, + pending_invoice_items_behavior="exclude", + collection_method="charge_automatically", + auto_advance=True, + 
currency=self.currency.lower(), + payment_settings={"default_mandate": mandate_id}, + idempotency_key=f"invoice:{self.name}:amount:{amount}", + ) + stripe.InvoiceItem.create( + customer=customer_id, + invoice=invoice["id"], + description=self.get_stripe_invoice_item_description(), + amount=amount, + currency=self.currency.lower(), + idempotency_key=f"invoiceitem:{self.name}:amount:{amount}", + ) + self.db_set( + { + "stripe_invoice_id": invoice["id"], + "status": "Invoice Created", + }, + commit=True, + ) + self.reload() + return invoice + except Exception: + jingrow.db.rollback() + self.reload() + + # log the traceback as comment + msg = "
" + jingrow.get_traceback() + "
" + self.add_comment("Comment", _("Stripe Invoice Creation Failed") + "

" + msg) + jingrow.db.commit() + + def get_mandate_id(self, customer_id): + mandate_id = jingrow.get_value( + "Stripe Payment Method", {"team": self.team, "is_default": 1}, "stripe_mandate_id" + ) + if not mandate_id: + return "" + return mandate_id + + def find_stripe_invoice_if_not_set(self): + if self.stripe_invoice_id: + return + # if stripe invoice was created, find it and set it + # so that we avoid scenarios where Stripe Invoice was created but not set in Jingrow + stripe = get_stripe() + invoices = stripe.Invoice.list(customer=jingrow.db.get_value("Team", self.team, "stripe_customer_id")) + description = self.get_stripe_invoice_item_description() + for invoice in invoices.data: + line_items = invoice.lines.data + if line_items and line_items[0].description == description and invoice.status != "void": + self.stripe_invoice_id = invoice["id"] + self.status = "Invoice Created" + self.save() + + def get_stripe_invoice_item_description(self): + start = getdate(self.period_start) + end = getdate(self.period_end) + period_string = f"{start.strftime('%b %d')} - {end.strftime('%b %d')} {end.year}" + return f"Jingrow Subscription ({period_string})" + + @jingrow.whitelist() + def finalize_stripe_invoice(self): + stripe = get_stripe() + stripe.Invoice.finalize_invoice(self.stripe_invoice_id) + + def validate_duplicate(self): + invoice_exists = jingrow.db.exists( + "Invoice", + { + "stripe_payment_intent_id": self.stripe_payment_intent_id, + "type": "Prepaid Credits", + "name": ("!=", self.name), + }, + ) + if self.type == "Prepaid Credits" and self.stripe_payment_intent_id and invoice_exists: + jingrow.throw("Invoice with same Stripe payment intent exists", jingrow.DuplicateEntryError) + + if self.type == "Subscription" and self.period_start and self.period_end and self.is_new(): + query = ( + f"select `name` from `tabInvoice` where team = '{self.team}' and" + f" status = 'Draft' and ('{self.period_start}' between `period_start` and" + f" `period_end` or '{self.period_end}' between `period_start` and" + " `period_end`)" + ) + + intersecting_invoices = [x[0] for x in jingrow.db.sql(query, as_list=True)] + + if intersecting_invoices: + jingrow.throw( + f"There are invoices with intersecting periods:{', '.join(intersecting_invoices)}", + jingrow.DuplicateEntryError, + ) + + def validate_team(self): + team = jingrow.get_pg("Team", self.team) + + self.customer_name = team.billing_name or jingrow.utils.get_fullname(self.team) + self.customer_email = ( + jingrow.db.get_value("Communication Email", {"parent": team.user, "type": "invoices"}, ["value"]) + or team.user + ) + self.currency = team.currency + if not self.payment_mode: + self.payment_mode = team.payment_mode + if not self.currency: + jingrow.throw(f"Cannot create Invoice because Currency is not set in Team {self.team}") + + def validate_dates(self): + if not self.period_start: + return + if not self.period_end: + period_start = getdate(self.period_start) + # period ends on last day of month + self.period_end = jingrow.utils.get_last_day(period_start) + + # due date + self.due_date = self.period_end + + def update_item_descriptions(self): + for item in self.items: + if not item.description: + how_many_days = f"{cint(item.quantity)} day{'s' if item.quantity > 1 else ''}" + if item.document_type == "Site" and item.plan: + site_name = item.document_name.split(".archived")[0] + plan = jingrow.get_cached_value("Site Plan", item.plan, "plan_title") + item.description = f"{site_name} active for {how_many_days} on {plan} plan" + elif 
item.document_type in ["Server", "Database Server"]: + server_title = jingrow.get_cached_value(item.document_type, item.document_name, "title") + if item.plan == "Add-on Storage plan": + item.description = f"{server_title} Storage Add-on for {how_many_days}" + else: + item.description = f"{server_title} active for {how_many_days}" + elif item.document_type == "Marketplace App": + app_title = jingrow.get_cached_value("Marketplace App", item.document_name, "title") + item.description = f"Marketplace app {app_title} active for {how_many_days}" + else: + item.description = "Prepaid Credits" + + def add_usage_record(self, usage_record): + if self.type != "Subscription": + return + # return if this usage_record is already accounted for in an invoice + if usage_record.invoice: + return + + # return if this usage_record does not fall inside period of invoice + usage_record_date = getdate(usage_record.date) + start = getdate(self.period_start) + end = getdate(self.period_end) + if not (start <= usage_record_date <= end): + return + + invoice_item = self.get_invoice_item_for_usage_record(usage_record) + # if not found, create a new invoice item + if not invoice_item: + invoice_item = self.append( + "items", + { + "document_type": usage_record.document_type, + "document_name": usage_record.document_name, + "plan": usage_record.plan, + "quantity": 0, + "rate": usage_record.amount, + "site": usage_record.site, + }, + ) + + invoice_item.quantity = (invoice_item.quantity or 0) + 1 + + if usage_record.payout: + self.payout += usage_record.payout + + self.save() + usage_record.db_set("invoice", self.name) + + def remove_usage_record(self, usage_record): + if self.type != "Subscription": + return + # return if invoice is not in draft mode + if self.docstatus != 0: + return + + # return if this usage_record is of a different invoice + if usage_record.invoice != self.name: + return + + invoice_item = self.get_invoice_item_for_usage_record(usage_record) + if not invoice_item: + return + + if invoice_item.quantity <= 0: + return + + invoice_item.quantity -= 1 + self.save() + usage_record.db_set("invoice", None) + + def get_invoice_item_for_usage_record(self, usage_record): + invoice_item = None + for row in self.items: + conditions = ( + row.document_type == usage_record.document_type + and row.document_name == usage_record.document_name + and row.plan == usage_record.plan + and row.rate == usage_record.amount + ) + if row.document_type == "Marketplace App": + conditions = conditions and row.site == usage_record.site + if conditions: + invoice_item = row + return invoice_item + + def validate_items(self): + items_to_remove = [] + for row in self.items: + if row.quantity == 0: + items_to_remove.append(row) + else: + row.amount = flt((row.quantity * row.rate), 2) + + for item in items_to_remove: + self.remove(item) + + def compute_free_credits(self): + self.free_credits = sum([d.amount for d in self.credit_allocations if d.source == "Free Credits"]) + + def calculate_discounts(self): + for item in self.items: + if item.discount_percentage: + item.discount = flt(item.amount * (item.discount_percentage / 100), 2) + + self.total_discount_amount = sum([item.discount for item in self.items]) + sum( + [d.amount for d in self.discounts] + ) + # TODO: handle percent discount from discount table + + self.total_before_discount = self.total + self.total = flt(self.total_before_discount - self.total_discount_amount, 2) + + def on_cancel(self): + # make reverse entries for credit allocations + for transaction in 
self.credit_allocations: + pg = jingrow.get_pg( + pagetype="Balance Transaction", + team=self.team, + type="Adjustment", + source=transaction.source, + currency=transaction.currency, + amount=transaction.amount, + description=f"Reversed on cancel of Invoice {self.name}", + ) + pg.insert() + pg.submit() + + def apply_credit_balance(self): + # previously we used to cancel and re-apply credits, but it messed up the balance transaction history + # so now we only do append-only operation while applying credits + + balance = jingrow.get_cached_pg("Team", self.team).get_balance() + if balance <= 0: + return + + unallocated_balances = jingrow.db.get_all( + "Balance Transaction", + filters={ + "team": self.team, + "type": "Adjustment", + "unallocated_amount": (">", 0), + "docstatus": ("<", 2), + }, + fields=["name", "unallocated_amount", "source"], + order_by="creation desc", + ) + # sort by ascending for FIFO + unallocated_balances.reverse() + + total_allocated = 0 + due = self.amount_due + for balance in unallocated_balances: + if due == 0: + break + allocated = min(due, balance.unallocated_amount) + due -= allocated + self.append( + "credit_allocations", + { + "transaction": balance.name, + "amount": allocated, + "currency": self.currency, + "source": balance.source, + }, + ) + pg = jingrow.get_pg("Balance Transaction", balance.name) + pg.append( + "allocated_to", + {"invoice": self.name, "amount": allocated, "currency": self.currency}, + ) + pg.save() + total_allocated += allocated + + balance_transaction = jingrow.get_pg( + pagetype="Balance Transaction", + team=self.team, + type="Applied To Invoice", + amount=total_allocated * -1, + invoice=self.name, + ).insert() + balance_transaction.submit() + + self.applied_credits = sum(row.amount for row in self.credit_allocations) + self.calculate_values() + + def create_next(self): + # the next invoice's period starts after this invoice ends + next_start = jingrow.utils.add_days(self.period_end, 1) + + already_exists = jingrow.db.exists( + "Invoice", + { + "team": self.team, + "period_start": next_start, + "type": "Subscription", + }, # Adding type 'Subscription' to ensure no other type messes with this + ) + + if already_exists: + return None + + return jingrow.get_pg(pagetype="Invoice", team=self.team, period_start=next_start).insert() + + def get_pdf(self): + print_format = self.meta.default_print_format + return jingrow.utils.get_url( + f"/api/method/jingrow.utils.print_format.download_pdf?pagetype=Invoice&name={self.name}&format={print_format}&no_letterhead=0" + ) + + @jingrow.whitelist() + def create_invoice_on_jingrowio(self): # noqa: C901 + if self.flags.skip_jingrow_invoice: + return None + if self.status != "Paid": + return None + if self.amount_paid == 0: + return None + if self.jingrow_invoice or self.jingrow_partner_order or self.mpesa_receipt_number: + return None + + try: + team = jingrow.get_pg("Team", self.team) + address = jingrow.get_pg("Address", team.billing_address) if team.billing_address else None + if not address: + # don't create invoice if address is not set + return None + client = self.get_jingrowio_connection() + response = client.session.post( + f"{client.url}/api/method/create-fc-invoice", + headers=client.headers, + data={ + "team": team.as_json(), + "address": address.as_json() if address else '""', + "invoice": self.as_json(), + }, + ) + if response.ok: + res = response.json() + invoice = res.get("message") + + if invoice: + self.jingrow_invoice = invoice + self.fetch_invoice_pdf() + self.save() + return invoice + 
else: + from bs4 import BeautifulSoup + + soup = BeautifulSoup(response.text, "html.parser") + self.add_comment( + text="Failed to create invoice on jingrow.com" + "<br><br>" + str(soup.find("pre")) + ) + + log_error( + "framework.jingrow.com Invoice Creation Error", + data={"invoice": self.name, "jingrow_io_response": response.text}, + ) + except Exception: + traceback = "<pre><code>" + jingrow.get_traceback() + "</code></pre>" + self.add_comment(text="Failed to create invoice on jingrow.com" + "<br><br>
" + traceback) + + log_error( + "framework.jingrow.com Invoice Creation Error", + data={"invoice": self.name, "traceback": traceback}, + ) + + @jingrow.whitelist() + def fetch_invoice_pdf(self): + if self.jingrow_invoice: + from urllib.parse import urlencode + + client = self.get_jingrowio_connection() + print_format = jingrow.db.get_single_value("Jcloud Settings", "print_format") + params = urlencode( + { + "pagetype": "Sales Invoice", + "name": self.jingrow_invoice, + "format": print_format, + "no_letterhead": 0, + } + ) + url = client.url + "/api/method/jingrow.utils.print_format.download_pdf?" + params + + with client.session.get(url, headers=client.headers, stream=True) as r: + r.raise_for_status() + ret = jingrow.get_pg( + { + "pagetype": "File", + "attached_to_pagetype": "Invoice", + "attached_to_name": self.name, + "attached_to_field": "invoice_pdf", + "folder": "Home/Attachments", + "file_name": self.jingrow_invoice + ".pdf", + "is_private": 1, + "content": r.content, + } + ) + ret.save(ignore_permissions=True) + self.invoice_pdf = ret.file_url + + def get_jingrowio_connection(self): + if not hasattr(self, "jingrowio_connection"): + self.jingrowio_connection = get_jingrow_io_connection() + + return self.jingrowio_connection + + def update_transaction_details(self, stripe_charge=None): + if not stripe_charge: + return + stripe = get_stripe() + charge = stripe.Charge.retrieve(stripe_charge) + if charge.balance_transaction: + balance_transaction = stripe.BalanceTransaction.retrieve(charge.balance_transaction) + self.exchange_rate = balance_transaction.exchange_rate + self.transaction_amount = convert_stripe_money(balance_transaction.amount) + self.transaction_net = convert_stripe_money(balance_transaction.net) + self.transaction_fee = convert_stripe_money(balance_transaction.fee) + self.transaction_fee_details = [] + for row in balance_transaction.fee_details: + self.append( + "transaction_fee_details", + { + "description": row.description, + "amount": convert_stripe_money(row.amount), + "currency": row.currency.upper(), + }, + ) + self.save() + + def update_razorpay_transaction_details(self, payment): + if not (payment["fee"] or payment["tax"]): + return + + self.transaction_amount = convert_stripe_money(payment["amount"]) + self.transaction_net = convert_stripe_money(payment["amount"] - payment["fee"]) + self.transaction_fee = convert_stripe_money(payment["fee"]) + + charges = [ + { + "description": "GST", + "amount": convert_stripe_money(payment["tax"]), + "currency": payment["currency"], + }, + { + "description": "Razorpay Fee", + "amount": convert_stripe_money(payment["fee"] - payment["tax"]), + "currency": payment["currency"], + }, + ] + + for row in charges: + self.append( + "transaction_fee_details", + { + "description": row["description"], + "amount": row["amount"], + "currency": row["currency"].upper(), + }, + ) + + self.save() + + def fetch_mpesa_invoice_pdf(self): + if not (self.mpesa_payment_record and self.mpesa_invoice): + return + gateway_info = get_gateway_details(self.mpesa_payment_record) + client = get_partner_external_connection(gateway_info[0]) + try: + print_format = gateway_info[1] + from urllib.parse import urlencode + + params = urlencode( + { + "pagetype": "Sales Invoice", + "name": self.mpesa_invoice, + "format": print_format, + "no_letterhead": 0, + } + ) + url = f"{client.url}/api/method/jingrow.utils.print_format.download_pdf?{params}" + + with client.session.get(url, headers=client.headers, stream=True) as r: + r.raise_for_status() + file_pg = 
jingrow.get_pg( + { + "pagetype": "File", + "attached_to_pagetype": "Invoice", + "attached_to_name": self.name, + "attached_to_field": "mpesa_invoice_pdf", + "folder": "Home/Attachments", + "file_name": self.mpesa_invoice + ".pdf", + "is_private": 1, + "content": r.content, + } + ) + file_pg.save(ignore_permissions=True) + self.mpesa_invoice_pdf = file_pg.file_url + self.save(ignore_permissions=True) + + except Exception as e: + jingrow.log_error(str(e), "Error fetching Sales Invoice PDF on external site") + + @jingrow.whitelist() + def refund(self, reason): + stripe = get_stripe() + charge = None + if self.type in ["Subscription", "Service"]: + stripe_invoice = stripe.Invoice.retrieve(self.stripe_invoice_id) + charge = stripe_invoice.charge + elif self.type == "Prepaid Credits": + payment_intent = stripe.PaymentIntent.retrieve(self.stripe_payment_intent_id) + charge = payment_intent["charges"]["data"][0]["id"] + + if not charge: + jingrow.throw("Cannot refund payment because Stripe Charge not found for this invoice") + + stripe.Refund.create(charge=charge) + self.status = "Refunded" + self.refund_reason = reason + self.save() + self.add_comment(text=f"Refund reason: {reason}") + + @jingrow.whitelist() + def change_stripe_invoice_status(self, status): + stripe = get_stripe() + if status == "Paid": + stripe.Invoice.modify(self.stripe_invoice_id, paid=True) + elif status == "Uncollectible": + stripe.Invoice.mark_uncollectible(self.stripe_invoice_id) + elif status == "Void": + stripe.Invoice.void_invoice(self.stripe_invoice_id) + + @jingrow.whitelist() + def refresh_stripe_payment_link(self): + stripe = get_stripe() + stripe_invoice = stripe.Invoice.retrieve(self.stripe_invoice_id) + self.stripe_invoice_url = stripe_invoice.hosted_invoice_url + self.save() + + # Also send back the updated payment link + return self.stripe_invoice_url + + def get_stripe_invoice(self): + if not self.stripe_invoice_id: + return None + stripe = get_stripe() + return stripe.Invoice.retrieve(self.stripe_invoice_id) + + +def finalize_draft_invoices(): + """ + - Runs every hour + - Processes 500 invoices at a time + - Finalizes the invoices whose + - period ends today and time is 6PM or later + - period has ended before + """ + + today = jingrow.utils.today() + # only finalize for enabled teams + # since 'limit' returns the same set of invoices for disabled teams which are ignored + enabled_teams = jingrow.get_all("Team", {"enabled": 1}, pluck="name") + + # get draft invoices whose period has ended or ends today + invoices = jingrow.db.get_all( + "Invoice", + filters={ + "status": "Draft", + "type": "Subscription", + "period_end": ("<=", today), + "team": ("in", enabled_teams), + }, + pluck="name", + limit=500, + order_by="total desc", + ) + + current_time = jingrow.utils.get_datetime().time() + today = jingrow.utils.getdate() + for name in invoices: + invoice = jingrow.get_pg("Invoice", name) + # don't finalize if invoice ends today and time is before 6 PM + if invoice.period_end == today and current_time.hour < 18: + continue + finalize_draft_invoice(invoice) + + +def finalize_unpaid_prepaid_credit_invoices(): + """Should be run daily in contrast to `finalize_draft_invoices`, which runs hourly""" + today = jingrow.utils.today() + + # Invoices with `Prepaid Credits` or `Partner Credits` as mode and unpaid + invoices = jingrow.db.get_all( + "Invoice", + filters={ + "status": "Unpaid", + "type": "Subscription", + "period_end": ("<=", today), + "payment_mode": "Prepaid Credits", + }, + pluck="name", + ) + + 
current_time = jingrow.utils.get_datetime().time() + today = jingrow.utils.getdate() + for name in invoices: + invoice = jingrow.get_pg("Invoice", name) + # don't finalize if invoice ends today and time is before 6 PM + if invoice.period_end == today and current_time.hour < 18: + continue + finalize_draft_invoice(invoice) + + +def finalize_draft_invoice(invoice): + if isinstance(invoice, str): + invoice = jingrow.get_pg("Invoice", invoice) + + try: + invoice.finalize_invoice() + except Exception: + jingrow.db.rollback() + msg = "
" + jingrow.get_traceback() + "
" + invoice.add_comment(text="Finalize Invoice Failed" + "

" + msg) + finally: + jingrow.db.commit() # For the comment + + try: + invoice.create_next() + except Exception: + jingrow.db.rollback() + log_error("Invoice creation for next month failed", invoice=invoice.name) + + +def calculate_gst(amount): + return amount * 0.18 + + +def get_permission_query_conditions(user): + from jcloud.utils import get_current_team + + if not user: + user = jingrow.session.user + + user_type = jingrow.db.get_value("User", user, "user_type", cache=True) + if user_type == "System User": + return "" + + team = get_current_team() + + return f"(`tabInvoice`.`team` = {jingrow.db.escape(team)})" + + +def has_permission(pg, ptype, user): + from jcloud.utils import get_current_team, has_role + + if not user: + user = jingrow.session.user + + user_type = jingrow.db.get_value("User", user, "user_type", cache=True) + if user_type == "System User": + return True + + if ptype == "create": + return True + + if has_role("Jcloud Support Agent", user) and ptype == "read": + return True + + team = get_current_team(True) + team_members = [ + d.user for d in jingrow.db.get_all("Team Member", {"parenttype": "Team", "parent": pg.team}, ["user"]) + ] + if pg.team == team.name or team.user in team_members: + return True + return False + + +# M-pesa external site for webhook +def create_sales_invoice_on_external_site(transaction_response): + client = get_partner_external_connection() + try: + # Define the necessary data for the Sales Invoice creation + data = { + "customer": transaction_response.get("team"), + "posting_date": jingrow.utils.nowdate(), + "due_date": jingrow.utils.add_days(jingrow.utils.nowdate(), 30), + "items": [ + { + "item_code": "Jingrow Payment", + "qty": 1, + "rate": transaction_response.get("Amount"), + "description": "Payment for Mpesa transaction", + } + ], + "paid_amount": transaction_response.get("Amount"), + "status": "Paid", + } + + # Post to the external site's sales invoice creation API + response = client.session.post( + f"{client.url}/api/method/jingrow.client.insert", + headers=client.headers, + json={"pg": data}, + ) + + if response.ok: + res = response.json() + sales_invoice = res.get("message") + if sales_invoice: + jingrow.msgprint(_("Sales Invoice created successfully on external site.")) + return sales_invoice + else: + jingrow.throw(_("Failed to create Sales Invoice on external site.")) + except Exception as e: + jingrow.log_error(str(e), "Error creating Sales Invoice on external site") diff --git a/jcloud/jcloud/pagetype/invoice/invoice_list.js b/jcloud/jcloud/pagetype/invoice/invoice_list.js new file mode 100644 index 0000000..bfeb769 --- /dev/null +++ b/jcloud/jcloud/pagetype/invoice/invoice_list.js @@ -0,0 +1,14 @@ +// Copyright (c) 2020, JINGROW +// License: GNU General Public License v3. 
See license.txt + +jingrow.listview_settings['Invoice'] = { + get_indicator: function (pg) { + var status_color = { + Draft: 'darkgrey', + Unpaid: 'orange', + Paid: 'green', + 'Invoice Created': 'blue', + }; + return [__(pg.status), status_color[pg.status], 'status,=,' + pg.status]; + }, +}; diff --git a/jcloud/jcloud/pagetype/invoice/mark_as_uncollectible.py b/jcloud/jcloud/pagetype/invoice/mark_as_uncollectible.py new file mode 100644 index 0000000..4febe15 --- /dev/null +++ b/jcloud/jcloud/pagetype/invoice/mark_as_uncollectible.py @@ -0,0 +1,20 @@ +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + + +import jingrow + + +# this should probably be run via server scripts +def execute(): + """Mark invoices that are unpaid for more than 6 months as Uncollectible""" + six_months_ago = jingrow.utils.add_to_date(None, months=-6) + invoices = jingrow.db.get_all( + "Invoice", + filters={"due_date": ("<", six_months_ago), "status": "Unpaid", "docstatus": 0}, + ) + + for inv in invoices: + invoice = jingrow.get_pg("Invoice", inv) + invoice.status = "Uncollectible" + invoice.save() diff --git a/jcloud/jcloud/pagetype/invoice/patches/set_free_credits.py b/jcloud/jcloud/pagetype/invoice/patches/set_free_credits.py new file mode 100644 index 0000000..029cc6f --- /dev/null +++ b/jcloud/jcloud/pagetype/invoice/patches/set_free_credits.py @@ -0,0 +1,21 @@ +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + + +import jingrow +from jingrow.utils import update_progress_bar + + +def execute(): + jingrow.reload_pg("jcloud", "pagetype", "invoice") + # only apply to invoices that has credits applied + invoices = jingrow.db.get_all( + "Invoice", {"docstatus": 1, "applied_credits": (">", 0)}, pluck="name" + ) + + total_invoices = len(invoices) + for i, inv in enumerate(invoices): + update_progress_bar("Updating invoices", i, total_invoices) + invoice = jingrow.get_pg("Invoice", inv) + invoice.compute_free_credits() + invoice.db_set("free_credits", invoice.free_credits, update_modified=False) diff --git a/jcloud/jcloud/pagetype/invoice/patches/set_transaction_details.py b/jcloud/jcloud/pagetype/invoice/patches/set_transaction_details.py new file mode 100644 index 0000000..6e7c09a --- /dev/null +++ b/jcloud/jcloud/pagetype/invoice/patches/set_transaction_details.py @@ -0,0 +1,22 @@ +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +import jingrow + + +def execute(): + jingrow.reload_pg("jcloud", "pagetype", "invoice") + invoices = jingrow.db.get_all( + "Invoice", + {"status": "Paid", "docstatus": 1, "amount_paid": (">", 0), "transaction_amount": 0}, + pluck="name", + ) + for name in invoices: + print(f"Updating transaction details for {name}") + invoice = jingrow.get_pg("Invoice", name) + invoice.flags.skip_jingrow_invoice = True + updated = invoice.update_transaction_details() + if updated: + print("✅ Done") + jingrow.db.commit() diff --git a/jcloud/jcloud/pagetype/invoice/stripe_webhook_handler.py b/jcloud/jcloud/pagetype/invoice/stripe_webhook_handler.py new file mode 100644 index 0000000..6c24fbb --- /dev/null +++ b/jcloud/jcloud/pagetype/invoice/stripe_webhook_handler.py @@ -0,0 +1,88 @@ +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + +import jingrow + +from jcloud.utils import log_error + +EVENT_TYPE_MAP = { + "invoice.finalized": "Finalized", + "invoice.payment_succeeded": "Succeeded", + "invoice.payment_failed": "Failed", +} + +DISPUTE_EVENT_TYPE_MAP = { + 
"charge.dispute.created": "Created", + "charge.dispute.updated": "Updated", + "charge.dispute.closed": "Closed", +} + + +class StripeWebhookHandler: + """This class handles Stripe Invoice Webhook Events""" + + def __init__(self, webhook_log): + self.webhook_log = webhook_log + + def process(self): + if self.webhook_log.event_type in DISPUTE_EVENT_TYPE_MAP: + event = jingrow.parse_json(self.webhook_log.payload) + id = event["data"]["object"]["id"] + payment_intent = event["data"]["object"]["payment_intent"] + email = event["data"]["object"]["evidence"]["customer_email_address"] + reason = event["data"]["object"]["reason"] + status = event["data"]["object"]["status"] + + try: + jingrow.get_pg( + { + "pagetype": "Payment Dispute", + "event_type": DISPUTE_EVENT_TYPE_MAP[self.webhook_log.event_type], + "dispute_id": id, + "payment_intent": payment_intent, + "email": email, + "reason": reason, + "status": status, + } + ).insert() + except Exception: + log_error("Stripe Payment Dispute Event Error", event=event) + raise + + if self.webhook_log.event_type not in EVENT_TYPE_MAP: + return + + event = jingrow.parse_json(self.webhook_log.payload) + stripe_invoice = event["data"]["object"] + + if not jingrow.db.exists("Invoice", {"stripe_invoice_id": stripe_invoice["id"]}): + return + + self.invoice = jingrow.get_pg("Invoice", {"stripe_invoice_id": stripe_invoice["id"]}) + + event_type = self.webhook_log.event_type + payment_status = "Unpaid" + if event_type == "invoice.payment_succeeded" or ( + event_type == "invoice.finalized" and stripe_invoice["status"] == "paid" + ): + payment_status = "Paid" + + try: + jingrow.get_pg( + { + "pagetype": "Stripe Payment Event", + "invoice": self.invoice.name, + "team": self.invoice.team, + "event_type": EVENT_TYPE_MAP[event_type], + "payment_status": payment_status, + "stripe_invoice_object": jingrow.as_json(stripe_invoice), + "stripe_invoice_id": stripe_invoice["id"], + } + ).insert() + except Exception: + log_error("Stripe Payment Event Error", event=event) + raise + + +def handle_stripe_webhook_events(pg, method): + StripeWebhookHandler(webhook_log=pg).process() diff --git a/jcloud/jcloud/pagetype/invoice/test_invoice.py b/jcloud/jcloud/pagetype/invoice/test_invoice.py new file mode 100644 index 0000000..d204cef --- /dev/null +++ b/jcloud/jcloud/pagetype/invoice/test_invoice.py @@ -0,0 +1,614 @@ +# Copyright (c) 2020, JINGROW +# See license.txt + + +import unittest +from unittest.mock import Mock, patch + +import jingrow +from jingrow.utils.data import add_days, today + +from jcloud.jcloud.pagetype.team.test_team import create_test_team + +from .invoice import Invoice + + +@patch.object(Invoice, "create_invoice_on_jingrowio", new=Mock()) +class TestInvoice(unittest.TestCase): + def setUp(self): + self.team = create_test_team() + + def tearDown(self): + jingrow.db.rollback() + + def test_invoice_add_usage_record(self): + invoice = jingrow.get_pg( + pagetype="Invoice", + team=self.team.name, + period_start=today(), + period_end=add_days(today(), 10), + ).insert() + + for amount in [10, 20, 30]: + usage_record = jingrow.get_pg(pagetype="Usage Record", team=self.team.name, amount=amount) + usage_record.insert() + usage_record.submit() + + invoice.reload() + + self.assertEqual(len(invoice.items), 3) + self.assertEqual(invoice.total, 60) + + with patch.object(invoice, "create_stripe_invoice", return_value=None): + invoice.finalize_invoice() + + self.assertEqual(invoice.amount_due, 60) + + def test_invoice_cancel_usage_record(self): + invoice = jingrow.get_pg( + 
pagetype="Invoice", + team=self.team.name, + period_start=today(), + period_end=add_days(today(), 10), + ).insert() + + usage_records = [] + for amount in [10, 20, 30, 40]: + usage_record = jingrow.get_pg(pagetype="Usage Record", team=self.team.name, amount=amount) + usage_record.insert() + usage_record.submit() + usage_records.append(usage_record) + + invoice.reload() + + self.assertEqual(len(invoice.items), 4) + self.assertEqual(invoice.total, 100) + + # cancel usage record + usage_records[0].cancel() + invoice.reload() + + self.assertEqual(len(invoice.items), 3) + self.assertEqual(invoice.total, 90) + self.assertEqual(usage_records[0].invoice, None) + + def test_invoice_with_credits_less_than_total(self): + invoice = jingrow.get_pg( + pagetype="Invoice", + team=self.team.name, + period_start=today(), + period_end=add_days(today(), 10), + ).insert() + + for amount in [10, 20, 30]: + usage_record = jingrow.get_pg(pagetype="Usage Record", team=self.team.name, amount=amount) + usage_record.insert() + usage_record.submit() + + self.assertEqual(self.team.get_balance(), 0) + self.team.allocate_credit_amount(10, source="Free Credits") + self.assertEqual(self.team.get_balance(), 10) + + invoice.reload() + + with patch.object(invoice, "create_stripe_invoice", return_value=None): + try: + invoice.finalize_invoice() + except Exception as e: + self.assertEqual( + str(e), + "Not enough credits for this invoice. Change payment mode to Card to" + " pay using Stripe.", + ) + + self.assertEqual(self.team.get_balance(), 0) + self.assertEqual(invoice.total, 60) + self.assertEqual(invoice.amount_due, 50) + self.assertEqual(invoice.applied_credits, 10) + + def test_invoice_with_credits_more_than_total(self): + invoice = jingrow.get_pg( + pagetype="Invoice", + team=self.team.name, + period_start=today(), + period_end=add_days(today(), 10), + ).insert() + + for amount in [10, 20, 30]: + usage_record = jingrow.get_pg(pagetype="Usage Record", team=self.team.name, amount=amount) + usage_record.insert() + usage_record.submit() + + self.assertEqual(self.team.get_balance(), 0) + self.team.allocate_credit_amount(70, source="Free Credits") + self.assertEqual(self.team.get_balance(), 70) + + invoice.reload() + + with patch.object(invoice, "create_stripe_invoice", return_value=None): + invoice.finalize_invoice() + + self.assertEqual(self.team.get_balance(), 10) + self.assertEqual(invoice.total, 60) + self.assertEqual(invoice.amount_due, 0) + self.assertEqual(invoice.applied_credits, 60) + + def test_invoice_credit_allocation(self): + # First Invoice + # Total: 600 + # Team has 100 Free Credits and 1000 Prepaid Credits + # Invoice can be paid using credits + invoice = jingrow.get_pg( + pagetype="Invoice", + team=self.team.name, + period_start=today(), + period_end=add_days(today(), 10), + items=[{"quantity": 1, "rate": 600}], + ).insert() + + self.assertEqual(self.team.get_balance(), 0) + self.team.allocate_credit_amount(100, source="Free Credits") + self.team.allocate_credit_amount(1000, source="Prepaid Credits") + self.assertEqual(self.team.get_balance(), 1100) + invoice.reload() + + with patch.object(invoice, "create_stripe_invoice", return_value=None): + invoice.finalize_invoice() + + self.assertEqual(invoice.total, 600) + self.assertEqual(self.team.get_balance(), 1100 - 600) + self.assertEqual(invoice.amount_due, 0) + self.assertEqual(invoice.applied_credits, 600) + self.assertDictContainsSubset( + {"amount": 100, "source": "Free Credits"}, invoice.credit_allocations[0].as_dict() + ) + self.assertDictContainsSubset( 
+ {"amount": 500, "source": "Prepaid Credits"}, invoice.credit_allocations[1].as_dict() + ) + + # Second Invoice + # Total: 700 + # Team has 500 Credits left after the first invoice + # Invoice due should be 200 + invoice2 = jingrow.get_pg( + pagetype="Invoice", + team=self.team.name, + period_start=add_days(today(), 11), + items=[{"quantity": 1, "rate": 700}], + ).insert() + invoice2.reload() + + with patch.object(invoice2, "create_stripe_invoice", return_value=None): + try: + invoice2.finalize_invoice() + except Exception as e: + self.assertEqual( + str(e), + "Not enough credits for this invoice. Change payment mode to Card to" + " pay using Stripe.", + ) + + self.assertEqual(invoice2.total, 700) + self.assertEqual(invoice2.applied_credits, 500) + self.assertEqual(invoice2.amount_due, 200) + self.assertDictContainsSubset( + {"amount": 500, "source": "Prepaid Credits"}, + invoice2.credit_allocations[0].as_dict(), + ) + + def test_invoice_cancel_reverse_credit_allocation(self): + # First Invoice + # Total: 600 + # Team has 100 Free Credits and 1000 Prepaid Credits + # Invoice can be paid using credits + self.team.allocate_credit_amount(100, source="Free Credits") + self.team.allocate_credit_amount(1000, source="Prepaid Credits") + self.assertEqual(self.team.get_balance(), 1100) + + invoice = jingrow.get_pg( + pagetype="Invoice", + team=self.team.name, + period_start=today(), + period_end=add_days(today(), 10), + items=[{"quantity": 1, "rate": 600}], + ).insert() + + with patch.object(invoice, "create_stripe_invoice", return_value=None): + invoice.finalize_invoice() + + self.assertEqual(invoice.total, 600) + self.assertEqual(self.team.get_balance(), 1100 - 600) + self.assertEqual(invoice.amount_due, 0) + self.assertEqual(invoice.applied_credits, 600) + self.assertDictContainsSubset( + {"amount": 100, "source": "Free Credits"}, invoice.credit_allocations[0].as_dict() + ) + self.assertDictContainsSubset( + {"amount": 500, "source": "Prepaid Credits"}, invoice.credit_allocations[1].as_dict() + ) + + # Cancel Invoice + invoice.cancel() + # Team balance should go back to 1100 + self.assertEqual(self.team.get_balance(), 1100) + + def test_intersecting_invoices(self): + invoice1 = jingrow.get_pg( + pagetype="Invoice", + team=self.team.name, + period_start=jingrow.utils.today(), + period_end=jingrow.utils.add_days(jingrow.utils.today(), 5), + ).insert() + + invoice2 = jingrow.get_pg( + pagetype="Invoice", + team=self.team.name, + period_start=jingrow.utils.add_days(jingrow.utils.today(), 1), + period_end=jingrow.utils.add_days(jingrow.utils.today(), 6), + ) + + invoice3 = jingrow.get_pg( + pagetype="Invoice", + team=self.team.name, + period_start=jingrow.utils.today(), + period_end=jingrow.utils.add_days(jingrow.utils.today(), 5), + ) + + invoice4 = jingrow.get_pg( + pagetype="Invoice", + team=self.team.name, + period_start=jingrow.utils.add_days(jingrow.utils.today(), -2), + period_end=jingrow.utils.add_days(jingrow.utils.today(), 3), + ) + + invoice5 = jingrow.get_pg( + pagetype="Invoice", + team=self.team.name, + period_start=jingrow.utils.add_days(invoice1.period_end, 1), + ) + + self.assertRaises(jingrow.DuplicateEntryError, invoice2.insert) + self.assertRaises(jingrow.DuplicateEntryError, invoice3.insert) + self.assertRaises(jingrow.DuplicateEntryError, invoice4.insert) + + invoice5.insert() + + def test_prepaid_credits(self): + from pathlib import Path + + from jcloud.jcloud.pagetype.team.team import process_stripe_webhook + + self.team.db_set("stripe_customer_id", "cus_H3L4w6RXJPKLQs") + # 
initial balance is 0 + self.assertEqual(self.team.get_balance(), 0) + + with open( + Path(__file__).parent / "fixtures/stripe_payment_intent_succeeded_webhook.json", "r" + ) as payload: + pg = jingrow._dict({"event_type": "payment_intent.succeeded", "payload": payload.read()}) + + with patch.object(Invoice, "update_transaction_details", return_value=None): + process_stripe_webhook(pg, "") + + # balance should 755.64 after buying prepaid credits with gst applied + self.assertEqual(self.team.get_balance(), 755.64) + + def test_discount_amount(self): + invoice = jingrow.get_pg( + pagetype="Invoice", + team=self.team.name, + period_start=today(), + period_end=add_days(today(), 10), + ).insert() + + invoice.append("items", {"quantity": 1, "rate": 1000, "amount": 1000, "discount": 10}) + invoice.append("items", {"quantity": 1, "rate": 1000, "amount": 1000}) + invoice.save() + invoice.reload() + + self.assertEqual(invoice.total_before_discount, 2000) + self.assertEqual(invoice.total_discount_amount, 10) + self.assertEqual(invoice.total, 2000 - 10) + + def test_discount_percentage(self): + invoice = jingrow.get_pg( + pagetype="Invoice", + team=self.team.name, + period_start=today(), + period_end=add_days(today(), 10), + ).insert() + + invoice.append("items", {"quantity": 1, "rate": 1000, "amount": 1000, "discount_percentage": 10}) + invoice.append("items", {"quantity": 1, "rate": 1000, "amount": 1000}) + invoice.save() + invoice.reload() + self.assertEqual(invoice.items[0].discount, 100) + self.assertEqual(invoice.total_before_discount, 2000) + self.assertEqual(invoice.total_discount_amount, 100) + self.assertEqual(invoice.total, 2000 - 100) + + def test_finalize_invoice_with_total_zero(self): + invoice = jingrow.get_pg( + pagetype="Invoice", + team=self.team.name, + period_start=today(), + period_end=add_days(today(), 10), + ).insert() + + invoice.append("items", {"quantity": 1, "rate": 0, "amount": 0}) + invoice.save() + invoice.reload() + + self.assertEqual(invoice.total, 0) + + invoice.finalize_invoice() + + # After finalize + self.assertEqual(invoice.total, 0) + self.assertEqual(invoice.status, "Empty") + + def test_finalize_invoice_for_disabled_team(self): + self.team.enabled = 0 + self.team.save() + + invoice = jingrow.get_pg( + pagetype="Invoice", + team=self.team.name, + period_start=today(), + period_end=add_days(today(), 10), + ).insert() + + invoice.append("items", {"quantity": 1, "rate": 100, "amount": 100}) + invoice.save() + invoice.reload() + + invoice.finalize_invoice() + + self.assertEqual(invoice.status, "Draft") + + @patch("jcloud.api.billing.get_stripe") + def test_create_stripe_invoice_with_prepaid_credits(self, mock_stripe): + invoice = jingrow.get_pg( + pagetype="Invoice", + team=self.team.name, + type="Prepaid Credits", + period_start=today(), + period_end=add_days(today(), 10), + ).insert() + invoice.finalize_invoice() + self.assertEqual(invoice.stripe_invoice_id, None) + + def test_negative_balance_case(self): + try: + team = create_test_team("test22@example.com") + + # add 10 credits + team.allocate_credit_amount(10, source="Prepaid Credits") + # transfer 5 credits + team.allocate_credit_amount(-5, source="Transferred Credits") + team.payment_mode = "Prepaid Credits" + team.save() + + # consume 10 credits + invoice = jingrow.get_pg(pagetype="Invoice", team=team.name) + invoice.append("items", {"quantity": 1, "rate": 10, "amount": 10}) + invoice.insert() + + # finalize invoice + invoice.finalize_invoice() + self.assertTrue(invoice.status == "Unpaid") + 
self.assertTrue(invoice.amount_due > 0) + + finally: + jingrow.db.delete("User", team.user) + jingrow.db.delete("Team", team.name) + jingrow.db.delete("Invoice", invoice.name) + jingrow.db.delete("Balance Transaction", {"team": team.name}) + jingrow.db.commit() + + def test_negative_balance_case_2(self): + try: + team = create_test_team("test22@example.com") + team.allocate_credit_amount(10, source="Prepaid Credits") + + invoice = jingrow.get_pg(pagetype="Invoice", team=team.name) + invoice.append("items", {"quantity": 1, "rate": 8, "amount": 8}) + invoice.insert() + invoice.finalize_invoice() + + with self.assertRaises(jingrow.ValidationError) as err: + team.allocate_credit_amount(-5, source="Transferred Credits") + self.assertTrue("is less than" in str(err.exception)) + + finally: + jingrow.db.delete("User", team.user) + jingrow.db.delete("Team", team.name) + jingrow.db.delete("Invoice", invoice.name) + jingrow.db.delete("Balance Transaction", {"team": team.name}) + jingrow.db.commit() + + def test_negative_balance_allocation(self): + try: + team = create_test_team("test22@example.com") + team.allocate_credit_amount(10, source="Prepaid Credits") + team.allocate_credit_amount(30, source="Prepaid Credits") + + with self.assertRaises(jingrow.ValidationError) as err: + team.allocate_credit_amount(-50, source="Transferred Credits") + self.assertTrue("is less than" in str(err.exception)) + + team.allocate_credit_amount(-35, source="Transferred Credits") + self.assertEqual(team.get_balance(), 5) + transactions = jingrow.get_all( + "Balance Transaction", + filters={ + "team": team.name, + "docstatus": 1, + "unallocated_amount": (">=", 0), + "source": "Prepaid Credits", + }, + fields=["name", "unallocated_amount"], + order_by="creation asc", + ) + self.assertEqual(len(transactions), 2) + self.assertEqual(transactions[0].unallocated_amount, 0) + self.assertEqual(transactions[1].unallocated_amount, 5) + + finally: + jingrow.db.delete("User", team.user) + jingrow.db.delete("Team", team.name) + jingrow.db.delete("Balance Transaction", {"team": team.name}) + jingrow.db.commit() + + def test_settle_negative_balance(self): + # create team + # allocate -100 credits + # try to settle by adding 200 credits + # the new unallocated amount should be 100 + + try: + team = create_test_team("test22@example.com") + bt = jingrow.new_pg("Balance Transaction") + bt.team = team.name + bt.amount = -100 + bt.source = "Transferred Credits" + bt.type = "Adjustment" + bt.docstatus = 1 + bt.db_insert() + + settling_transaction = team.allocate_credit_amount(200, source="Prepaid Credits") + self.assertEqual(team.get_balance(), 100) + + settling_transaction.reload() + self.assertEqual(settling_transaction.unallocated_amount, 100) + + finally: + jingrow.db.delete("User", team.user) + jingrow.db.delete("Team", team.name) + jingrow.db.delete("Balance Transaction", {"team": team.name}) + jingrow.db.commit() + + def test_invoice_for_update_after_submit_error(self): + try: + team = create_test_team("jondoe@example.com") + team.allocate_credit_amount(10, source="Free Credits") + team.payment_mode = "Prepaid Credits" + team.save() + + invoice = jingrow.new_pg("Invoice", team=team.name) + invoice.append("items", {"quantity": 5, "rate": 0.33, "amount": 1.65}) + invoice.append("items", {"quantity": 3, "rate": 2, "amount": 6, "discount_percentage": 10}) + invoice.insert() + invoice.finalize_invoice() # finalize invoice submits the pg if invoice gets settled + self.assertEqual(invoice.status, "Paid") + + before_total = invoice.total + 
before_total_before_discount = invoice.total_before_discount + before_total_discount_amount = invoice.total_discount_amount + invoice.validate() + invoice.save() + invoice.reload() + + after_total = invoice.total + after_total_before_discount = invoice.total_before_discount + after_total_discount_amount = invoice.total_discount_amount + self.assertEqual(before_total, after_total) + self.assertEqual(before_total_before_discount, after_total_before_discount) + self.assertEqual(before_total_discount_amount, after_total_discount_amount) + finally: + jingrow.db.delete("User", team.user) + jingrow.db.delete("Team", team.name) + jingrow.db.delete("Invoice", invoice.name) + jingrow.db.commit() + + def test_tax_without_credits(self): + try: + team = create_test_team("tax_without_credits@example.com") + jingrow.db.set_single_value("Jcloud Settings", "gst_percentage", 0.18) + + invoice = jingrow.get_pg(pagetype="Invoice", team=team.name) + invoice.append("items", {"quantity": 1, "rate": 10, "amount": 10}) + invoice.insert() + + invoice.finalize_invoice() + self.assertEqual(invoice.amount_due, 10) + self.assertEqual(invoice.amount_due_with_tax, 11.8) + + finally: + jingrow.db.set_single_value("Jcloud Settings", "gst_percentage", 0) + jingrow.db.delete("User", team.user) + jingrow.db.delete("Team", team.name) + jingrow.db.delete("Balance Transaction", {"team": team.name}) + jingrow.db.commit() + + def test_tax_with_credits(self): + """Test invoice with tax when payment mode is prepaid credits""" + try: + team = create_test_team("tax_with_credits@example.com") + team.allocate_credit_amount(5, source="Prepaid Credits") + jingrow.db.set_single_value("Jcloud Settings", "gst_percentage", 0.18) + + invoice = jingrow.get_pg(pagetype="Invoice", team=team.name) + invoice.append("items", {"quantity": 1, "rate": 10, "amount": 10}) + invoice.insert() + + invoice.finalize_invoice() + self.assertEqual(invoice.total, 10) + self.assertEqual(invoice.applied_credits, 5) + self.assertEqual(invoice.amount_due, 5) + self.assertEqual(invoice.amount_due_with_tax, 5) + + finally: + jingrow.db.set_single_value("Jcloud Settings", "gst_percentage", 0) + jingrow.db.delete("User", team.user) + jingrow.db.delete("Team", team.name) + jingrow.db.delete("Balance Transaction", {"team": team.name}) + jingrow.db.commit() + + @patch.object(Invoice, "create_stripe_invoice", new=Mock()) + def test_tax_with_credits_with_card(self): + """Test invoice with tax when payment mode is card""" + try: + team = create_test_team("tax_with_credits@example.com") + team.allocate_credit_amount(5, source="Prepaid Credits") + jingrow.db.set_value("Team", team.name, "payment_mode", "Card") + # team.reload() + jingrow.db.set_single_value("Jcloud Settings", "gst_percentage", 0.18) + + invoice = jingrow.get_pg(pagetype="Invoice", team=team.name) + invoice.append("items", {"quantity": 1, "rate": 10, "amount": 10}) + invoice.insert() + + invoice.finalize_invoice() + self.assertEqual(invoice.total, 10) + self.assertEqual(invoice.applied_credits, 5) + self.assertEqual(invoice.amount_due, 5) + self.assertEqual(invoice.amount_due_with_tax, 5.9) + + finally: + jingrow.db.set_single_value("Jcloud Settings", "gst_percentage", 0) + jingrow.db.delete("User", team.user) + jingrow.db.delete("Team", team.name) + jingrow.db.delete("Balance Transaction", {"team": team.name}) + jingrow.db.commit() + + def test_tax_for_usd_accounts(self): + try: + team = create_test_team("tax_for_usd_accounts@example.com", "United States") + jingrow.db.set_single_value("Jcloud Settings", 
"gst_percentage", 0.18) + + invoice = jingrow.get_pg(pagetype="Invoice", team=team.name) + invoice.append("items", {"quantity": 1, "rate": 10, "amount": 10}) + invoice.insert() + + invoice.finalize_invoice() + self.assertEqual(invoice.total, 10) + self.assertEqual(invoice.amount_due, 10) + self.assertEqual(invoice.amount_due_with_tax, 10) + + finally: + jingrow.db.set_single_value("Jcloud Settings", "gst_percentage", 0) + jingrow.db.delete("User", team.user) + jingrow.db.delete("Team", team.name) + jingrow.db.delete("Balance Transaction", {"team": team.name}) + jingrow.db.commit() diff --git a/jcloud/jcloud/pagetype/invoice_credit_allocation/__init__.py b/jcloud/jcloud/pagetype/invoice_credit_allocation/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/invoice_credit_allocation/invoice_credit_allocation.json b/jcloud/jcloud/pagetype/invoice_credit_allocation/invoice_credit_allocation.json new file mode 100644 index 0000000..5630066 --- /dev/null +++ b/jcloud/jcloud/pagetype/invoice_credit_allocation/invoice_credit_allocation.json @@ -0,0 +1,54 @@ +{ + "actions": [], + "creation": "2020-11-12 21:19:25.038731", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "transaction", + "source", + "amount", + "currency" + ], + "fields": [ + { + "fieldname": "transaction", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Transaction", + "options": "Balance Transaction" + }, + { + "fieldname": "currency", + "fieldtype": "Link", + "label": "Currency", + "options": "Currency" + }, + { + "fieldname": "amount", + "fieldtype": "Currency", + "in_list_view": 1, + "label": "Amount", + "options": "currency" + }, + { + "fieldname": "source", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Source" + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2020-11-12 22:25:22.163211", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Invoice Credit Allocation", + "owner": "Administrator", + "permissions": [], + "quick_entry": 1, + "sort_field": "modified", + "sort_order": "DESC", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/invoice_credit_allocation/invoice_credit_allocation.py b/jcloud/jcloud/pagetype/invoice_credit_allocation/invoice_credit_allocation.py new file mode 100644 index 0000000..c4fac8e --- /dev/null +++ b/jcloud/jcloud/pagetype/invoice_credit_allocation/invoice_credit_allocation.py @@ -0,0 +1,28 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +# import jingrow +from jingrow.model.document import Document + + +class InvoiceCreditAllocation(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + amount: DF.Currency + currency: DF.Link | None + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + source: DF.Data | None + transaction: DF.Link | None + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/invoice_discount/__init__.py b/jcloud/jcloud/pagetype/invoice_discount/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/invoice_discount/invoice_discount.json b/jcloud/jcloud/pagetype/invoice_discount/invoice_discount.json new file mode 100644 index 0000000..44c3098 --- /dev/null +++ b/jcloud/jcloud/pagetype/invoice_discount/invoice_discount.json @@ -0,0 +1,78 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2022-02-23 01:00:17.427618", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "discount_type", + "based_on", + "percent", + "amount", + "via_team", + "via_items", + "note" + ], + "fields": [ + { + "default": "Flat On Total", + "fieldname": "discount_type", + "fieldtype": "Select", + "in_list_view": 1, + "label": "Discount Type", + "options": "Flat On Total" + }, + { + "fieldname": "percent", + "fieldtype": "Percent", + "in_list_view": 1, + "label": "Percent" + }, + { + "fieldname": "amount", + "fieldtype": "Currency", + "in_list_view": 1, + "label": "Amount" + }, + { + "default": "Percent", + "fieldname": "based_on", + "fieldtype": "Select", + "in_list_view": 1, + "label": "Based On", + "options": "Percent\nAmount" + }, + { + "allow_in_quick_entry": 1, + "default": "0", + "fieldname": "via_team", + "fieldtype": "Check", + "label": "Via Team" + }, + { + "default": "0", + "description": "This discount is calculated from invoice.items", + "fieldname": "via_items", + "fieldtype": "Check", + "label": "Via Items" + }, + { + "fieldname": "note", + "fieldtype": "Data", + "label": "Note" + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2023-02-15 15:00:20.433287", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Invoice Discount", + "owner": "Administrator", + "permissions": [], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/invoice_discount/invoice_discount.py b/jcloud/jcloud/pagetype/invoice_discount/invoice_discount.py new file mode 100644 index 0000000..3cc5d16 --- /dev/null +++ b/jcloud/jcloud/pagetype/invoice_discount/invoice_discount.py @@ -0,0 +1,29 @@ +# Copyright (c) 2022, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class InvoiceDiscount(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + amount: DF.Currency + based_on: DF.Literal["Percent", "Amount"] + discount_type: DF.Literal["Flat On Total"] + note: DF.Data | None + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + percent: DF.Percent + via_items: DF.Check + via_team: DF.Check + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/invoice_item/__init__.py b/jcloud/jcloud/pagetype/invoice_item/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/invoice_item/invoice_item.json b/jcloud/jcloud/pagetype/invoice_item/invoice_item.json new file mode 100644 index 0000000..a890fe3 --- /dev/null +++ b/jcloud/jcloud/pagetype/invoice_item/invoice_item.json @@ -0,0 +1,111 @@ +{ + "actions": [], + "creation": "2020-07-17 18:04:33.958085", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "document_type", + "document_name", + "plan", + "description", + "quantity", + "rate", + "amount", + "discount", + "discount_percentage", + "site", + "has_marketplace_payout_completed" + ], + "fields": [ + { + "fieldname": "description", + "fieldtype": "Data", + "label": "Description" + }, + { + "columns": 1, + "default": "1", + "fieldname": "quantity", + "fieldtype": "Float", + "in_list_view": 1, + "label": "Quantity" + }, + { + "fieldname": "rate", + "fieldtype": "Currency", + "in_list_view": 1, + "label": "Rate", + "options": "currency", + "reqd": 1 + }, + { + "fieldname": "amount", + "fieldtype": "Currency", + "in_list_view": 1, + "label": "Amount", + "options": "currency" + }, + { + "fieldname": "document_type", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Document Type", + "options": "PageType", + "search_index": 1 + }, + { + "fieldname": "document_name", + "fieldtype": "Dynamic Link", + "in_list_view": 1, + "label": "Document Name", + "options": "document_type" + }, + { + "columns": 1, + "fieldname": "plan", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Plan" + }, + { + "fieldname": "site", + "fieldtype": "Link", + "label": "Site", + "options": "Site", + "search_index": 1 + }, + { + "default": "0", + "fieldname": "has_marketplace_payout_completed", + "fieldtype": "Check", + "label": "Has Marketplace Payout Completed?" + }, + { + "default": "0", + "fieldname": "discount", + "fieldtype": "Currency", + "label": "Discount", + "options": "currency" + }, + { + "fieldname": "discount_percentage", + "fieldtype": "Percent", + "label": "Discount (%)" + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2024-11-06 20:44:24.686991", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Invoice Item", + "owner": "Administrator", + "permissions": [], + "quick_entry": 1, + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/invoice_item/invoice_item.py b/jcloud/jcloud/pagetype/invoice_item/invoice_item.py new file mode 100644 index 0000000..a126eed --- /dev/null +++ b/jcloud/jcloud/pagetype/invoice_item/invoice_item.py @@ -0,0 +1,35 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +# import jingrow +from jingrow.model.document import Document + + +class InvoiceItem(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + amount: DF.Currency + description: DF.Data | None + discount: DF.Currency + discount_percentage: DF.Percent + document_name: DF.DynamicLink | None + document_type: DF.Link | None + has_marketplace_payout_completed: DF.Check + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + plan: DF.Data | None + quantity: DF.Float + rate: DF.Currency + site: DF.Link | None + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/invoice_transaction_fee/__init__.py b/jcloud/jcloud/pagetype/invoice_transaction_fee/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/invoice_transaction_fee/invoice_transaction_fee.json b/jcloud/jcloud/pagetype/invoice_transaction_fee/invoice_transaction_fee.json new file mode 100644 index 0000000..22703df --- /dev/null +++ b/jcloud/jcloud/pagetype/invoice_transaction_fee/invoice_transaction_fee.json @@ -0,0 +1,46 @@ +{ + "actions": [], + "creation": "2020-11-24 19:46:04.697275", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "description", + "amount", + "currency" + ], + "fields": [ + { + "fieldname": "description", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Description" + }, + { + "fieldname": "amount", + "fieldtype": "Currency", + "in_list_view": 1, + "label": "Amount", + "options": "currency" + }, + { + "fieldname": "currency", + "fieldtype": "Link", + "label": "Currency", + "options": "Currency" + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2020-11-24 20:55:02.468579", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Invoice Transaction Fee", + "owner": "Administrator", + "permissions": [], + "quick_entry": 1, + "sort_field": "modified", + "sort_order": "DESC", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/invoice_transaction_fee/invoice_transaction_fee.py b/jcloud/jcloud/pagetype/invoice_transaction_fee/invoice_transaction_fee.py new file mode 100644 index 0000000..7f5eb69 --- /dev/null +++ b/jcloud/jcloud/pagetype/invoice_transaction_fee/invoice_transaction_fee.py @@ -0,0 +1,27 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +# import jingrow +from jingrow.model.document import Document + + +class InvoiceTransactionFee(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + amount: DF.Currency + currency: DF.Link | None + description: DF.Data | None + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/jcloud_feedback/__init__.py b/jcloud/jcloud/pagetype/jcloud_feedback/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/jcloud_feedback/jcloud_feedback.js b/jcloud/jcloud/pagetype/jcloud_feedback/jcloud_feedback.js new file mode 100644 index 0000000..1586b1f --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_feedback/jcloud_feedback.js @@ -0,0 +1,7 @@ +// Copyright (c) 2020, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Jcloud Feedback', { + // refresh: function(frm) { + // } +}); diff --git a/jcloud/jcloud/pagetype/jcloud_feedback/jcloud_feedback.json b/jcloud/jcloud/pagetype/jcloud_feedback/jcloud_feedback.json new file mode 100644 index 0000000..c201104 --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_feedback/jcloud_feedback.json @@ -0,0 +1,105 @@ +{ + "actions": [], + "creation": "2020-08-10 18:57:08.745042", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "team", + "message", + "route", + "team_created_on", + "currency", + "last_paid_invoice", + "column_break_vcbh", + "rating", + "note" + ], + "fields": [ + { + "fieldname": "team", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Team", + "options": "Team", + "reqd": 1 + }, + { + "fieldname": "message", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Message", + "reqd": 1 + }, + { + "fieldname": "route", + "fieldtype": "Data", + "label": "Route" + }, + { + "fieldname": "column_break_vcbh", + "fieldtype": "Column Break" + }, + { + "fieldname": "note", + "fieldtype": "Small Text", + "label": "Note" + }, + { + "fieldname": "rating", + "fieldtype": "Rating", + "label": "Rating" + }, + { + "fieldname": "team_created_on", + "fieldtype": "Date", + "label": "Team Created On" + }, + { + "fieldname": "currency", + "fieldtype": "Link", + "label": "Currency", + "options": "Currency" + }, + { + "fieldname": "last_paid_invoice", + "fieldtype": "Currency", + "label": "Last Paid Invoice" + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2025-02-03 15:48:45.990977", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Jcloud Feedback", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + }, + { + "create": 1, + "role": "Jcloud Admin" + }, + { + "create": 1, + "role": "Jcloud Member" + } + ], + "quick_entry": 1, + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "title_field": "team", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/jcloud_feedback/jcloud_feedback.py b/jcloud/jcloud/pagetype/jcloud_feedback/jcloud_feedback.py new file mode 100644 index 0000000..dfe3030 --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_feedback/jcloud_feedback.py @@ -0,0 +1,28 @@ +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt +from __future__ import annotations + +# import jingrow +from jingrow.model.document import Document + + +class JcloudFeedback(Document): + # begin: auto-generated types + # This code is auto-generated. 
Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + currency: DF.Link | None + last_paid_invoice: DF.Currency + message: DF.Data + note: DF.SmallText | None + rating: DF.Rating + route: DF.Data | None + team: DF.Link + team_created_on: DF.Date | None + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/jcloud_feedback/test_jcloud_feedback.py b/jcloud/jcloud/pagetype/jcloud_feedback/test_jcloud_feedback.py new file mode 100644 index 0000000..355edb4 --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_feedback/test_jcloud_feedback.py @@ -0,0 +1,11 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# See license.txt + + +# import jingrow +import unittest + + +class TestJcloudFeedback(unittest.TestCase): + pass diff --git a/jcloud/jcloud/pagetype/jcloud_job/__init__.py b/jcloud/jcloud/pagetype/jcloud_job/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/jcloud_job/jcloud_job.js b/jcloud/jcloud/pagetype/jcloud_job/jcloud_job.js new file mode 100644 index 0000000..b4e4b1f --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_job/jcloud_job.js @@ -0,0 +1,24 @@ +// Copyright (c) 2022, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Jcloud Job', { + refresh: function (frm) { + [ + [__('Force Continue'), 'force_continue', frm.pg.status === 'Failure'], + [__('Force Fail'), 'force_fail', frm.pg.status === 'Running'], + ].forEach(([label, method, condition]) => { + if (condition) { + frm.add_custom_button( + label, + () => { + jingrow.confirm( + `Are you sure you want to ${label.toLowerCase()}?`, + () => frm.call(method).then(() => frm.refresh()), + ); + }, + __('Actions'), + ); + } + }); + }, +}); diff --git a/jcloud/jcloud/pagetype/jcloud_job/jcloud_job.json b/jcloud/jcloud/pagetype/jcloud_job/jcloud_job.json new file mode 100644 index 0000000..9cf3052 --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_job/jcloud_job.json @@ -0,0 +1,152 @@ +{ + "actions": [], + "autoname": "autoincrement", + "creation": "2022-09-28 17:46:37.964087", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "job_type", + "status", + "column_break_3", + "start", + "end", + "duration", + "section_break_7", + "server_type", + "server", + "virtual_machine", + "column_break_11", + "arguments" + ], + "fields": [ + { + "fieldname": "job_type", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Job Type", + "options": "Jcloud Job Type", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "status", + "fieldtype": "Select", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Status", + "options": "Pending\nRunning\nSkipped\nSuccess\nFailure", + "read_only": 1, + "reqd": 1 + }, + { + "default": "{}", + "fieldname": "arguments", + "fieldtype": "Code", + "label": "Arguments", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "column_break_3", + "fieldtype": "Column Break" + }, + { + "fieldname": "start", + "fieldtype": "Datetime", + "label": "Start", + "read_only": 1 + }, + { + "fieldname": "end", + "fieldtype": "Datetime", + "label": "End", + "read_only": 1 + }, + { + "default": "0", + "fieldname": "duration", + "fieldtype": "Duration", + "in_list_view": 1, + "label": "Duration", + "read_only": 1 + }, + { + "fieldname": "section_break_7", + "fieldtype": "Section Break" + }, + { + "fieldname": "server_type", + "fieldtype": "Link", + "label": "Server Type", + "options": 
"PageType", + "read_only": 1 + }, + { + "fieldname": "server", + "fieldtype": "Dynamic Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Server", + "options": "server_type", + "read_only": 1 + }, + { + "fetch_if_empty": 1, + "fieldname": "virtual_machine", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Virtual Machine", + "options": "Virtual Machine", + "read_only": 1 + }, + { + "fieldname": "column_break_11", + "fieldtype": "Column Break" + } + ], + "in_create": 1, + "index_web_pages_for_search": 1, + "links": [ + { + "link_pagetype": "Jcloud Job Step", + "link_fieldname": "job" + } + ], + "modified": "2024-08-05 16:56:38.861601", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Jcloud Job", + "naming_rule": "Autoincrement", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + }, + { + "create": 1, + "role": "Jcloud Admin", + "write": 1 + }, + { + "create": 1, + "role": "Jcloud Member", + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/jcloud_job/jcloud_job.py b/jcloud/jcloud/pagetype/jcloud_job/jcloud_job.py new file mode 100644 index 0000000..dcac2e3 --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_job/jcloud_job.py @@ -0,0 +1,165 @@ +# Copyright (c) 2022, JINGROW +# For license information, please see license.txt + +import json + +import jingrow +from jingrow.model.document import Document + + +class JcloudJob(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + arguments: DF.Code + duration: DF.Duration | None + end: DF.Datetime | None + job_type: DF.Link + name: DF.Int | None + server: DF.DynamicLink | None + server_type: DF.Link | None + start: DF.Datetime | None + status: DF.Literal["Pending", "Running", "Skipped", "Success", "Failure"] + virtual_machine: DF.Link | None + # end: auto-generated types + + def before_insert(self): + jingrow.db.get_value(self.server_type, self.server, "status", for_update=True) + if existing_jobs := jingrow.db.get_all( + self.pagetype, + { + "status": ("in", ["Pending", "Running"]), + "server_type": self.server_type, + "server": self.server, + }, + ["job_type", "status"], + ): + jingrow.throw( + f"A {existing_jobs[0].job_type} job is already {existing_jobs[0].status}. Please wait for the same." 
+			)
+
+	def after_insert(self):
+		self.create_jcloud_job_steps()
+		self.execute()
+
+	def on_update(self):
+		self.publish_update()
+
+	def create_jcloud_job_steps(self):
+		job_type = jingrow.get_pg("Jcloud Job Type", self.job_type)
+		for step in job_type.steps:
+			pg = jingrow.get_pg(
+				{
+					"pagetype": "Jcloud Job Step",
+					"job": self.name,
+					"status": "Pending",
+					"job_type": self.job_type,
+					"step_name": step.step_name,
+					"wait_until_true": step.wait_until_true,
+				}
+			)
+			pg.insert()
+
+	def execute(self):
+		self.status = "Running"
+		self.start = jingrow.utils.now_datetime()
+		self.save()
+		self.next()
+
+	def fail(self, arguments=None):
+		self.status = "Failure"
+		pending_steps = jingrow.get_all(
+			"Jcloud Job Step", {"job": self.name, "status": "Pending"}
+		)
+		for step in pending_steps:
+			jingrow.db.set_value("Jcloud Job Step", step.name, "status", "Skipped")
+		self.end = jingrow.utils.now_datetime()
+		self.duration = (self.end - self.start).total_seconds()
+		self.save()
+
+	def succeed(self):
+		self.status = "Success"
+		self.end = jingrow.utils.now_datetime()
+		self.duration = (self.end - self.start).total_seconds()
+		self.save()
+
+	@jingrow.whitelist()
+	def next(self, arguments=None):
+		if arguments:
+			old_arguments = json.loads(self.arguments)
+			old_arguments.update(arguments)
+			self.arguments = json.dumps(old_arguments, indent=2)
+		self.status = "Running"
+		self.save()
+		next_step = self.next_step
+
+		if not next_step:
+			self.succeed()
+			return
+
+		jingrow.enqueue_pg("Jcloud Job Step", next_step, "execute", enqueue_after_commit=True)
+
+	@jingrow.whitelist()
+	def force_continue(self):
+		for step in jingrow.get_all(
+			"Jcloud Job Step",
+			{"job": self.name, "status": ("in", ("Failure", "Skipped"))},
+			pluck="name",
+		):
+			jingrow.db.set_value("Jcloud Job Step", step, "status", "Pending")
+		self.next()
+
+	@jingrow.whitelist()
+	def force_fail(self):
+		for step in jingrow.get_all(
+			"Jcloud Job Step",
+			{"job": self.name, "status": "Pending"},
+			pluck="name",
+		):
+			jingrow.db.set_value("Jcloud Job Step", step, "status", "Failure")
+		jingrow.db.set_value("Jcloud Job", self.name, "status", "Failure")
+
+	@property
+	def next_step(self):
+		return jingrow.db.get_value(
+			"Jcloud Job Step",
+			{"job": self.name, "status": "Pending"},
+			"name",
+			order_by="name asc",
+			as_dict=True,
+		)
+
+	def detail(self):
+		steps = jingrow.get_all(
+			"Jcloud Job Step",
+			filters={"job": self.name},
+			fields=["name", "step_name", "status", "start", "end", "duration"],
+			order_by="name asc",
+		)
+
+		for index, step in enumerate(steps):
+			if step.status == "Pending" and index and steps[index - 1].status == "Success":
+				step.status = "Running"
+
+		return {
+			"name": self.name,
+			"job_type": self.job_type,
+			"server": self.server,
+			"server_type": self.server_type,
+			"virtual_machine": self.virtual_machine,
+			"status": self.status,
+			"steps": steps,
+		}
+
+	def publish_update(self):
+		jingrow.publish_realtime(
+			"jcloud_job_update", pagetype=self.pagetype, docname=self.name, message=self.detail()
+		)
+
+	def on_trash(self):
+		jingrow.db.delete("Jcloud Job Step", {"job": self.name})
diff --git a/jcloud/jcloud/pagetype/jcloud_job/test_jcloud_job.py b/jcloud/jcloud/pagetype/jcloud_job/test_jcloud_job.py
new file mode 100644
index 0000000..8fb5b95
--- /dev/null
+++ b/jcloud/jcloud/pagetype/jcloud_job/test_jcloud_job.py
@@ -0,0 +1,9 @@
+# Copyright (c) 2022, JINGROW
+# See license.txt
+
+# import jingrow
+from jingrow.tests.utils import JingrowTestCase
+
+
+class TestJcloudJob(JingrowTestCase):
+	pass
diff
--git a/jcloud/jcloud/pagetype/jcloud_job_step/__init__.py b/jcloud/jcloud/pagetype/jcloud_job_step/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/jcloud_job_step/jcloud_job_step.js b/jcloud/jcloud/pagetype/jcloud_job_step/jcloud_job_step.js new file mode 100644 index 0000000..34cd277 --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_job_step/jcloud_job_step.js @@ -0,0 +1,7 @@ +// Copyright (c) 2022, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Jcloud Job Step', { + // refresh: function(frm) { + // } +}); diff --git a/jcloud/jcloud/pagetype/jcloud_job_step/jcloud_job_step.json b/jcloud/jcloud/pagetype/jcloud_job_step/jcloud_job_step.json new file mode 100644 index 0000000..739eec5 --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_job_step/jcloud_job_step.json @@ -0,0 +1,153 @@ +{ + "actions": [], + "autoname": "autoincrement", + "creation": "2022-09-28 17:46:17.371345", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "step_name", + "job", + "job_type", + "status", + "column_break_4", + "start", + "end", + "duration", + "wait_until_true", + "attempts", + "section_break_8", + "result", + "traceback" + ], + "fields": [ + { + "fieldname": "step_name", + "fieldtype": "Data", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Step Name", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "status", + "fieldtype": "Select", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Status", + "options": "Pending\nRunning\nSkipped\nSuccess\nFailure", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "job", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Job ", + "options": "Jcloud Job", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "column_break_4", + "fieldtype": "Column Break" + }, + { + "fieldname": "start", + "fieldtype": "Datetime", + "label": "Start", + "read_only": 1 + }, + { + "fieldname": "end", + "fieldtype": "Datetime", + "label": "End", + "read_only": 1 + }, + { + "default": "0", + "fieldname": "duration", + "fieldtype": "Duration", + "in_list_view": 1, + "label": "Duration", + "read_only": 1 + }, + { + "fieldname": "section_break_8", + "fieldtype": "Section Break" + }, + { + "fieldname": "result", + "fieldtype": "Code", + "label": "Result", + "read_only": 1 + }, + { + "fieldname": "traceback", + "fieldtype": "Code", + "label": "Traceback", + "read_only": 1 + }, + { + "default": "0", + "fieldname": "wait_until_true", + "fieldtype": "Check", + "label": "Wait Until True", + "read_only": 1 + }, + { + "fieldname": "attempts", + "fieldtype": "Int", + "in_list_view": 1, + "label": "Attempts", + "read_only": 1 + }, + { + "fieldname": "job_type", + "fieldtype": "Link", + "label": "Job Type", + "options": "Jcloud Job Type", + "reqd": 1 + } + ], + "in_create": 1, + "index_web_pages_for_search": 1, + "links": [], + "modified": "2024-08-05 16:56:46.525168", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Jcloud Job Step", + "naming_rule": "Autoincrement", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + }, + { + "create": 1, + "role": "Jcloud Admin", + "write": 1 + }, + { + "create": 1, + "role": "Jcloud Member", + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "title_field": "step_name", + 
"track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/jcloud_job_step/jcloud_job_step.py b/jcloud/jcloud/pagetype/jcloud_job_step/jcloud_job_step.py new file mode 100644 index 0000000..8838799 --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_job_step/jcloud_job_step.py @@ -0,0 +1,76 @@ +# Copyright (c) 2022, JINGROW +# For license information, please see license.txt + +import json + +import jingrow +from jingrow.model.document import Document +from jingrow.utils.safe_exec import safe_exec + + +class JcloudJobStep(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + attempts: DF.Int + duration: DF.Duration | None + end: DF.Datetime | None + job: DF.Link + job_type: DF.Link + name: DF.Int | None + result: DF.Code | None + start: DF.Datetime | None + status: DF.Literal["Pending", "Running", "Skipped", "Success", "Failure"] + step_name: DF.Data + traceback: DF.Code | None + wait_until_true: DF.Check + # end: auto-generated types + + @jingrow.whitelist() + def execute(self): + if not self.start: + self.start = jingrow.utils.now_datetime() + self.status = "Running" + script = jingrow.db.get_value( + "Jcloud Job Type Step", + {"parent": self.job_type, "step_name": self.step_name}, + "script", + ) + job = jingrow.get_pg("Jcloud Job", self.job) + arguments = json.loads(job.arguments) + try: + local = {"arguments": jingrow._dict(arguments), "result": None, "pg": job} + safe_exec(script, _locals=local) + result = local["result"] + + if self.wait_until_true: + self.attempts = self.attempts + 1 + if result[0]: + self.status = "Success" + elif result[1]: + self.status = "Failure" + else: + self.status = "Pending" + import time + + time.sleep(1) + else: + self.status = "Success" + self.result = str(result) + except Exception: + self.status = "Failure" + self.traceback = jingrow.get_traceback(with_context=True) + + self.end = jingrow.utils.now_datetime() + self.duration = (self.end - self.start).total_seconds() + self.save() + + if self.status == "Failure": + job.fail(local["arguments"]) + else: + job.next(local["arguments"]) diff --git a/jcloud/jcloud/pagetype/jcloud_job_step/test_jcloud_job_step.py b/jcloud/jcloud/pagetype/jcloud_job_step/test_jcloud_job_step.py new file mode 100644 index 0000000..3d6ff00 --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_job_step/test_jcloud_job_step.py @@ -0,0 +1,9 @@ +# Copyright (c) 2022, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests.utils import JingrowTestCase + + +class TestJcloudJobStep(JingrowTestCase): + pass diff --git a/jcloud/jcloud/pagetype/jcloud_job_type/__init__.py b/jcloud/jcloud/pagetype/jcloud_job_type/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/jcloud_job_type/jcloud_job_type.js b/jcloud/jcloud/pagetype/jcloud_job_type/jcloud_job_type.js new file mode 100644 index 0000000..4f69848 --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_job_type/jcloud_job_type.js @@ -0,0 +1,7 @@ +// Copyright (c) 2022, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Jcloud Job Type', { + // refresh: function(frm) { + // } +}); diff --git a/jcloud/jcloud/pagetype/jcloud_job_type/jcloud_job_type.json b/jcloud/jcloud/pagetype/jcloud_job_type/jcloud_job_type.json new file mode 100644 index 0000000..5588ab9 --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_job_type/jcloud_job_type.json @@ 
-0,0 +1,46 @@ +{ + "actions": [], + "allow_rename": 1, + "autoname": "prompt", + "creation": "2022-09-28 17:42:30.798157", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "steps" + ], + "fields": [ + { + "fieldname": "steps", + "fieldtype": "Table", + "label": "Steps", + "options": "Jcloud Job Type Step", + "reqd": 1 + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2022-09-28 17:51:52.412510", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Jcloud Job Type", + "naming_rule": "Set by user", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/jcloud_job_type/jcloud_job_type.py b/jcloud/jcloud/pagetype/jcloud_job_type/jcloud_job_type.py new file mode 100644 index 0000000..d959780 --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_job_type/jcloud_job_type.py @@ -0,0 +1,24 @@ +# Copyright (c) 2022, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class JcloudJobType(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + from jcloud.jcloud.pagetype.jcloud_job_type_step.jcloud_job_type_step import ( + JcloudJobTypeStep, + ) + + steps: DF.Table[JcloudJobTypeStep] + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/jcloud_job_type/test_jcloud_job_type.py b/jcloud/jcloud/pagetype/jcloud_job_type/test_jcloud_job_type.py new file mode 100644 index 0000000..3e9d4db --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_job_type/test_jcloud_job_type.py @@ -0,0 +1,9 @@ +# Copyright (c) 2022, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests.utils import JingrowTestCase + + +class TestJcloudJobType(JingrowTestCase): + pass diff --git a/jcloud/jcloud/pagetype/jcloud_job_type_step/__init__.py b/jcloud/jcloud/pagetype/jcloud_job_type_step/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/jcloud_job_type_step/jcloud_job_type_step.js b/jcloud/jcloud/pagetype/jcloud_job_type_step/jcloud_job_type_step.js new file mode 100644 index 0000000..73829d8 --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_job_type_step/jcloud_job_type_step.js @@ -0,0 +1,7 @@ +// Copyright (c) 2022, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Jcloud Job Type Step', { + // refresh: function(frm) { + // } +}); diff --git a/jcloud/jcloud/pagetype/jcloud_job_type_step/jcloud_job_type_step.json b/jcloud/jcloud/pagetype/jcloud_job_type_step/jcloud_job_type_step.json new file mode 100644 index 0000000..9fba649 --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_job_type_step/jcloud_job_type_step.json @@ -0,0 +1,58 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2022-09-28 17:41:09.642406", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "step_name", + "column_break_2", + "wait_until_true", + "section_break_4", + "script" + ], + "fields": [ + { + "fieldname": "step_name", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Step Name", + "reqd": 1 + }, + { + "fieldname": 
"script", + "fieldtype": "Code", + "label": "Script", + "options": "Python", + "reqd": 1 + }, + { + "fieldname": "column_break_2", + "fieldtype": "Column Break" + }, + { + "default": "0", + "fieldname": "wait_until_true", + "fieldtype": "Check", + "in_list_view": 1, + "label": "Wait Until True" + }, + { + "fieldname": "section_break_4", + "fieldtype": "Section Break" + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2022-09-30 16:13:05.783052", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Jcloud Job Type Step", + "owner": "Administrator", + "permissions": [], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/jcloud_job_type_step/jcloud_job_type_step.py b/jcloud/jcloud/pagetype/jcloud_job_type_step/jcloud_job_type_step.py new file mode 100644 index 0000000..6fa461f --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_job_type_step/jcloud_job_type_step.py @@ -0,0 +1,25 @@ +# Copyright (c) 2022, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class JcloudJobTypeStep(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + script: DF.Code + step_name: DF.Data + wait_until_true: DF.Check + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/jcloud_job_type_step/test_jcloud_job_type_step.py b/jcloud/jcloud/pagetype/jcloud_job_type_step/test_jcloud_job_type_step.py new file mode 100644 index 0000000..a0028b6 --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_job_type_step/test_jcloud_job_type_step.py @@ -0,0 +1,9 @@ +# Copyright (c) 2022, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests.utils import JingrowTestCase + + +class TestJcloudJobTypeStep(JingrowTestCase): + pass diff --git a/jcloud/jcloud/pagetype/jcloud_method_permission/__init__.py b/jcloud/jcloud/pagetype/jcloud_method_permission/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/jcloud_method_permission/jcloud_method_permission.js b/jcloud/jcloud/pagetype/jcloud_method_permission/jcloud_method_permission.js new file mode 100644 index 0000000..6fb18cd --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_method_permission/jcloud_method_permission.js @@ -0,0 +1,8 @@ +// Copyright (c) 2023, JINGROW +// For license information, please see license.txt + +// jingrow.ui.form.on("Jcloud Method Permission", { +// refresh(frm) { + +// }, +// }); diff --git a/jcloud/jcloud/pagetype/jcloud_method_permission/jcloud_method_permission.json b/jcloud/jcloud/pagetype/jcloud_method_permission/jcloud_method_permission.json new file mode 100644 index 0000000..3caf11b --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_method_permission/jcloud_method_permission.json @@ -0,0 +1,67 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2023-08-09 21:22:15.511701", + "default_view": "List", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "document_type", + "checkbox_label", + "method" + ], + "fields": [ + { + "fieldname": "document_type", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Document Type", + "options": "PageType", + "reqd": 1 + }, + { + "fieldname": "method", + "fieldtype": "Data", + 
"in_filter": 1, + "in_list_view": 1, + "in_preview": 1, + "in_standard_filter": 1, + "label": "Method", + "reqd": 1 + }, + { + "fieldname": "checkbox_label", + "fieldtype": "Data", + "in_list_view": 1, + "in_preview": 1, + "in_standard_filter": 1, + "label": "Checkbox Label", + "reqd": 1 + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2023-09-25 21:33:32.729651", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Jcloud Method Permission", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/jcloud_method_permission/jcloud_method_permission.py b/jcloud/jcloud/pagetype/jcloud_method_permission/jcloud_method_permission.py new file mode 100644 index 0000000..f29f4f0 --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_method_permission/jcloud_method_permission.py @@ -0,0 +1,39 @@ +# Copyright (c) 2023, JINGROW +# For license information, please see license.txt + +import jingrow +from jingrow.model.document import Document + + +class JcloudMethodPermission(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + checkbox_label: DF.Data + document_type: DF.Link + method: DF.Data + # end: auto-generated types + + pass + + +def available_actions(): + result = {} + doctypes = jingrow.get_all( + "Jcloud Method Permission", pluck="document_type", distinct=True + ) + + for pagetype in doctypes: + result[pagetype] = { + perm["checkbox_label"]: perm["method"] + for perm in jingrow.get_all( + "Jcloud Method Permission", {"document_type": pagetype}, ["checkbox_label", "method"] + ) + } + + return result diff --git a/jcloud/jcloud/pagetype/jcloud_method_permission/test_jcloud_method_permission.py b/jcloud/jcloud/pagetype/jcloud_method_permission/test_jcloud_method_permission.py new file mode 100644 index 0000000..704b3d7 --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_method_permission/test_jcloud_method_permission.py @@ -0,0 +1,9 @@ +# Copyright (c) 2023, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests.utils import JingrowTestCase + + +class TestJcloudMethodPermission(JingrowTestCase): + pass diff --git a/jcloud/jcloud/pagetype/jcloud_notification/__init__.py b/jcloud/jcloud/pagetype/jcloud_notification/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/jcloud_notification/jcloud_notification.js b/jcloud/jcloud/pagetype/jcloud_notification/jcloud_notification.js new file mode 100644 index 0000000..3383199 --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_notification/jcloud_notification.js @@ -0,0 +1,8 @@ +// Copyright (c) 2023, JINGROW +// For license information, please see license.txt + +// jingrow.ui.form.on("Jcloud Notification", { +// refresh(frm) { + +// }, +// }); diff --git a/jcloud/jcloud/pagetype/jcloud_notification/jcloud_notification.json b/jcloud/jcloud/pagetype/jcloud_notification/jcloud_notification.json new file mode 100644 index 0000000..ec32afc --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_notification/jcloud_notification.json @@ -0,0 +1,208 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2024-07-05 10:54:14.431684", + 
"default_view": "List", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "team", + "document_type", + "reference_pagetype", + "column_break_brxc", + "type", + "document_name", + "reference_name", + "interaction_section", + "is_actionable", + "read", + "column_break_rada", + "is_addressed", + "message_section", + "title", + "message", + "traceback", + "assistance_url", + "class" + ], + "fields": [ + { + "default": "0", + "fieldname": "read", + "fieldtype": "Check", + "label": "Read" + }, + { + "fieldname": "type", + "fieldtype": "Select", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Type", + "options": "Site Update\nSite Migrate\nVersion Upgrade\nBench Deploy\nSite Recovery\nAgent Job Failure\nDowntime/Performance", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "message", + "fieldtype": "Long Text", + "label": "Message", + "read_only": 1 + }, + { + "fieldname": "team", + "fieldtype": "Link", + "in_list_view": 1, + "label": "To Team", + "options": "Team", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "document_type", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Document Type", + "options": "PageType", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "document_name", + "fieldtype": "Dynamic Link", + "in_list_view": 1, + "label": "Document Name", + "options": "document_type", + "read_only": 1, + "reqd": 1 + }, + { + "depends_on": "eval:pg.traceback", + "fieldname": "traceback", + "fieldtype": "Code", + "label": "Traceback", + "read_only": 1 + }, + { + "fieldname": "column_break_brxc", + "fieldtype": "Column Break" + }, + { + "fieldname": "interaction_section", + "fieldtype": "Section Break", + "label": "User Interaction" + }, + { + "default": "0", + "depends_on": "eval:pg.is_actionable", + "description": "Actionable notifications can be rectified by the user. For example if a deploy breaks due to incompatible app version.", + "fieldname": "is_actionable", + "fieldtype": "Check", + "label": "Is Actionable", + "read_only": 1 + }, + { + "fieldname": "column_break_rada", + "fieldtype": "Column Break" + }, + { + "fieldname": "message_section", + "fieldtype": "Section Break", + "label": "Message" + }, + { + "depends_on": "eval:pg.title", + "fieldname": "title", + "fieldtype": "Small Text", + "label": "Title", + "read_only": 1 + }, + { + "depends_on": "eval:pg.assistance_url", + "description": "Meant to be used if the notification is actionable. The URL can point to documentation on how to resolve the issue.", + "fieldname": "assistance_url", + "fieldtype": "Data", + "label": "Assitance URL", + "read_only": 1 + }, + { + "default": "Info", + "fieldname": "class", + "fieldtype": "Select", + "label": "Class", + "options": "Info\nSuccess\nWarning\nError", + "read_only": 1 + }, + { + "default": "0", + "depends_on": "eval:pg.is_actionable", + "description": "Actionable notifications can be addressed by the user.\n\nUsed to prevent an action until the issue has been addressed. 
For example preventing further deploy until an incompatible app version has been fixed.", + "fieldname": "is_addressed", + "fieldtype": "Check", + "label": "Is Addressed", + "read_only": 1 + }, + { + "description": "For filtering notifications in the dashboard", + "fieldname": "reference_pagetype", + "fieldtype": "Link", + "hidden": 1, + "label": "Reference PageType", + "options": "PageType" + }, + { + "description": "For filtering notifications in the dashboard", + "fieldname": "reference_name", + "fieldtype": "Dynamic Link", + "hidden": 1, + "label": "Reference Name", + "options": "reference_pagetype" + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2024-10-31 09:50:25.986886", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Jcloud Notification", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + }, + { + "create": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "Jcloud Admin", + "share": 1, + "write": 1 + }, + { + "create": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "Jcloud Member", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/jcloud_notification/jcloud_notification.py b/jcloud/jcloud/pagetype/jcloud_notification/jcloud_notification.py new file mode 100644 index 0000000..c3d38ae --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_notification/jcloud_notification.py @@ -0,0 +1,119 @@ +# Copyright (c) 2023, JINGROW +# For license information, please see license.txt +from __future__ import annotations + +import jingrow +from jingrow.model.document import Document + +from jcloud.api.client import dashboard_whitelist + + +class JcloudNotification(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + assistance_url: DF.Data | None + document_name: DF.DynamicLink + document_type: DF.Link + is_actionable: DF.Check + is_addressed: DF.Check + message: DF.LongText | None + read: DF.Check + reference_pagetype: DF.Link | None + reference_name: DF.DynamicLink | None + team: DF.Link + title: DF.SmallText | None + traceback: DF.Code | None + type: DF.Literal[ + "Site Update", + "Site Migrate", + "Version Upgrade", + "Bench Deploy", + "Site Recovery", + "Agent Job Failure", + "Downtime/Performance", + ] + # end: auto-generated types + + dashboard_fields = ( + "team", + "document_type", + "class", + "type", + "document_name", + "is_actionable", + "read", + "is_addressed", + "title", + "message", + "traceback", + "assistance_url", + ) + + def after_insert(self): + if jingrow.local.dev_server: + return + + user = jingrow.db.get_value("Team", self.team, "user") + if user == "Administrator": + return + + if self.type == "Bench Deploy": + self.send_bench_deploy_failed(user) + + def send_bench_deploy_failed(self, user: str): + group_name = jingrow.db.get_value("Deploy Candidate", self.document_name, "group") + rg_title = jingrow.db.get_value("Release Group", group_name, "title") + + jingrow.sendmail( + recipients=[user], + subject=f"Bench Deploy Failed - {rg_title}", + template="bench_deploy_failure", + args={ + "message": self.title, + "link": f"dashboard/groups/{group_name}/deploys/{self.document_name}", + }, + ) + + @dashboard_whitelist() + def mark_as_addressed(self): + self.read = True + self.is_addressed = True + self.save() + jingrow.db.commit() + + @dashboard_whitelist() + def mark_as_read(self): + self.db_set("read", True) + + +def create_new_notification(team, type, document_type, document_name, message): + if not jingrow.db.exists("Jcloud Notification", {"document_name": document_name}): + if document_type == "Agent Job": + reference_pagetype = "Site" + reference_pg = jingrow.db.get_value("Agent Job", document_name, "site") + if not reference_pg: + reference_pagetype = "Server" + reference_pg = jingrow.db.get_value("Agent Job", document_name, "server") + elif document_type == "Deploy Candidate": + reference_pagetype = "Release Group" + reference_pg = jingrow.db.get_value("Deploy Candidate", document_name, "group") + + jingrow.get_pg( + { + "pagetype": "Jcloud Notification", + "team": team, + "type": type, + "document_type": document_type, + "document_name": document_name or 0, + "message": message, + "reference_pagetype": reference_pagetype, + "reference_name": reference_pg, + } + ).insert() + jingrow.publish_realtime("jcloud_notification", pagetype="Jcloud Notification", message={"team": team}) diff --git a/jcloud/jcloud/pagetype/jcloud_notification/patches/link_reference_pagetype_to_notifications.py b/jcloud/jcloud/pagetype/jcloud_notification/patches/link_reference_pagetype_to_notifications.py new file mode 100644 index 0000000..de6dfbc --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_notification/patches/link_reference_pagetype_to_notifications.py @@ -0,0 +1,33 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt + +import jingrow +from tqdm import tqdm + + +def execute(): + notifications = jingrow.db.get_all( + "Jcloud Notification", ["name", "document_type", "document_name"] + ) + for notification in tqdm(notifications): + if notification.document_type == "Agent Job": + reference_pagetype = "Site" + reference_pg = jingrow.db.get_value("Agent Job", 
notification.document_name, "site") + if not reference_pg: + reference_pagetype = "Server" + reference_pg = jingrow.db.get_value( + "Agent Job", notification.document_name, "server" + ) + + elif notification.document_type == "Deploy Candidate": + reference_pagetype = "Release Group" + reference_pg = jingrow.db.get_value( + "Deploy Candidate", notification.document_name, "group" + ) + + jingrow.db.set_value( + "Jcloud Notification", + notification.name, + {"reference_pagetype": reference_pagetype, "reference_name": reference_pg}, + update_modified=False, + ) diff --git a/jcloud/jcloud/pagetype/jcloud_notification/test_jcloud_notification.py b/jcloud/jcloud/pagetype/jcloud_notification/test_jcloud_notification.py new file mode 100644 index 0000000..8892980 --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_notification/test_jcloud_notification.py @@ -0,0 +1,53 @@ +# Copyright (c) 2023, JINGROW +# See license.txt + +import jingrow +from jingrow.tests.utils import JingrowTestCase + +from jcloud.api.notifications import get_unread_count +from jcloud.jcloud.pagetype.agent_job.agent_job import poll_pending_jobs +from jcloud.jcloud.pagetype.agent_job.test_agent_job import fake_agent_job +from jcloud.jcloud.pagetype.app.test_app import create_test_app +from jcloud.jcloud.pagetype.deploy_candidate_difference.test_deploy_candidate_difference import ( + create_test_deploy_candidate_differences, +) +from jcloud.jcloud.pagetype.release_group.test_release_group import ( + create_test_release_group, +) +from jcloud.jcloud.pagetype.site.test_site import create_test_bench, create_test_site + + +class TestJcloudNotification(JingrowTestCase): + def setUp(self): + app1 = create_test_app() # jingrow + app2 = create_test_app("app2", "App 2") + app3 = create_test_app("app3", "App 3") + self.apps = [app1, app2, app3] + + def tearDown(self): + jingrow.db.rollback() + + def test_notification_is_created_when_agent_job_fails(self): + group = create_test_release_group(self.apps) + bench1 = create_test_bench(group=group) + bench2 = create_test_bench(group=group, server=bench1.server) + + create_test_deploy_candidate_differences( + bench2.candidate + ) # for site update to be available + + site = create_test_site(bench=bench1.name) + + self.assertEqual(jingrow.db.count("Jcloud Notification"), 0) + with fake_agent_job("Update Site Pull", "Failure",), fake_agent_job( + "Recover Failed Site Update", + "Success", + ): + site.schedule_update() + poll_pending_jobs() + + notification = jingrow.get_last_pg("Jcloud Notification") + self.assertEqual(notification.type, "Site Update") + # api test is added here since it's trivial + # move to separate file if it gets more complex + self.assertEqual(get_unread_count(), 1) diff --git a/jcloud/jcloud/pagetype/jcloud_permission_group/__init__.py b/jcloud/jcloud/pagetype/jcloud_permission_group/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/jcloud_permission_group/jcloud_permission_group.js b/jcloud/jcloud/pagetype/jcloud_permission_group/jcloud_permission_group.js new file mode 100644 index 0000000..3528ff8 --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_permission_group/jcloud_permission_group.js @@ -0,0 +1,8 @@ +// Copyright (c) 2023, JINGROW +// For license information, please see license.txt + +// jingrow.ui.form.on("Jcloud Permission Group", { +// refresh(frm) { + +// }, +// }); diff --git a/jcloud/jcloud/pagetype/jcloud_permission_group/jcloud_permission_group.json 
b/jcloud/jcloud/pagetype/jcloud_permission_group/jcloud_permission_group.json new file mode 100644 index 0000000..156666a --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_permission_group/jcloud_permission_group.json @@ -0,0 +1,103 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2023-08-07 12:47:43.330395", + "default_view": "List", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "team", + "column_break_kbnh", + "title", + "section_break_rknu", + "users", + "permissions" + ], + "fields": [ + { + "fieldname": "title", + "fieldtype": "Data", + "label": "Title", + "reqd": 1 + }, + { + "fieldname": "team", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Team", + "options": "Team", + "reqd": 1 + }, + { + "fieldname": "users", + "fieldtype": "Table", + "label": "Users", + "options": "Jcloud Permission Group User" + }, + { + "fieldname": "column_break_kbnh", + "fieldtype": "Column Break" + }, + { + "fieldname": "section_break_rknu", + "fieldtype": "Section Break" + }, + { + "fieldname": "permissions", + "fieldtype": "JSON", + "label": "Permissions" + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2024-03-27 06:03:36.540752", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Jcloud Permission Group", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + }, + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "Jcloud Admin", + "share": 1, + "write": 1 + }, + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "Jcloud Member", + "share": 1, + "write": 1 + } + ], + "show_title_field_in_link": 1, + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "title_field": "title", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/jcloud_permission_group/jcloud_permission_group.py b/jcloud/jcloud/pagetype/jcloud_permission_group/jcloud_permission_group.py new file mode 100644 index 0000000..0427b80 --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_permission_group/jcloud_permission_group.py @@ -0,0 +1,357 @@ +# Copyright (c) 2023, JINGROW +# For license information, please see license.txt + +import jingrow +from jingrow.model.document import Document + +from jcloud.api.client import dashboard_whitelist + +DEFAULT_PERMISSIONS = { + "*": {"*": {"*": True}} # all doctypes # all documents # all methods +} + + +class JcloudPermissionGroup(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + from jcloud.jcloud.pagetype.jcloud_permission_group_user.jcloud_permission_group_user import ( + JcloudPermissionGroupUser, + ) + + permissions: DF.JSON | None + team: DF.Link + title: DF.Data + users: DF.Table[JcloudPermissionGroupUser] + # end: auto-generated types + + dashboard_fields = ["title", "users"] + + def get_pg(self, pg): + if pg.users: + values = { + d.name: d + for d in jingrow.db.get_all( + "User", + filters={"name": ["in", [user.user for user in pg.users]]}, + fields=["name", "full_name", "user_image"], + ) + } + pg.users = [d.as_dict() for d in pg.users] + for user in pg.users: + user.full_name = values.get(user.user, {}).get("full_name") + user.user_image = values.get(user.user, {}).get("user_image") + return pg + + def validate(self): + self.validate_permissions() + self.validate_users() + + def validate_permissions(self): + permissions = jingrow.parse_json(self.permissions) + if not permissions: + self.permissions = DEFAULT_PERMISSIONS + return + + for pagetype, pagetype_perms in permissions.items(): + if pagetype not in get_all_restrictable_doctypes() and pagetype != "*": + jingrow.throw(f"{pagetype} is not a valid pagetype.") + + if not isinstance(pagetype_perms, dict): + jingrow.throw( + f"Invalid perms for {pagetype}. Rule must be key-value pairs of document name and document perms." + ) + + for pg_name, pg_perms in pagetype_perms.items(): + if not isinstance(pg_perms, dict): + jingrow.throw( + f"Invalid perms for {pagetype} {pg_name}. Rule must be key-value pairs of method and permission." + ) + + if pagetype == "*": + continue + + restrictable_methods = get_all_restrictable_methods(pagetype) + if not restrictable_methods: + jingrow.throw(f"{pagetype} does not have any restrictable methods.") + + for method, permitted in pg_perms.items(): + if method != "*" and method not in restrictable_methods: + jingrow.throw(f"{method} is not a restrictable method of {pagetype}") + + def validate_users(self): + for user in self.users: + if user.user == "Administrator": + continue + user_belongs_to_team = jingrow.db.exists( + "Team Member", {"parent": self.team, "user": user.user} + ) + if not user_belongs_to_team: + jingrow.throw(f"{user.user} does not belong to {self.team}") + + @dashboard_whitelist() + def delete(self): + super().delete() + + @dashboard_whitelist() + def get_users(self): + user_names = [user.user for user in self.users] + if not user_names: + return [] + + return jingrow.db.get_all( + "User", + filters={"name": ["in", user_names], "enabled": 1}, + fields=[ + "name", + "first_name", + "last_name", + "full_name", + "user_image", + "name as email", + ], + ) + + @dashboard_whitelist() + def add_user(self, user): + user_belongs_to_group = self.get("users", {"user": user}) + if user_belongs_to_group: + jingrow.throw(f"{user} already belongs to {self.title}") + + user_is_team_owner = jingrow.db.exists("Team", {"name": self.team, "user": user}) + if user_is_team_owner: + jingrow.throw( + f"{user} cannot be added to {self.title} because they are the owner of {self.team}" + ) + + self.append("users", {"user": user}) + self.save() + + @dashboard_whitelist() + def remove_user(self, user): + user_belongs_to_group = self.get("users", {"user": user}) + if not user_belongs_to_group: + jingrow.throw(f"{user} does not belong to {self.name}") + + for row in self.users: + if row.user == user: + self.remove(row) + break + self.save() + + @dashboard_whitelist() + def 
get_all_document_permissions(self, pagetype: str) -> list: + """ + Get the permissions for the specified document type or all restrictable document types. + + :param pagetype: The pagetype for which permissions are to be retrieved. + :return: A list of dictionaries containing the document type, document name, and permissions for each document. + """ + from jcloud.api.client import get_list + + user = jingrow.session.user + user_belongs_to_group = self.get("users", {"user": user}) + user_is_team_owner = jingrow.db.exists("Team", {"name": self.team, "user": user}) + if not (jingrow.local.system_user() or user_belongs_to_group or user_is_team_owner): + jingrow.throw(f"{user} does not belong to {self.name}") + + if pagetype not in get_all_restrictable_doctypes(): + jingrow.throw(f"{pagetype} is not a valid restrictable pagetype.") + + restrictable_methods = get_all_restrictable_methods(pagetype) + if not restrictable_methods: + jingrow.throw(f"{pagetype} does not have any restrictable methods.") + + options = [] + fields = ["name", "title"] if pagetype != "Site" else ["name"] + docs = get_list(pagetype=pagetype, fields=fields, limit=9999) + + for pg in docs: + permitted_methods = get_permitted_methods(pagetype, pg.name, group_names=[self.name]) + pg_perms = [] + for method, label in restrictable_methods.items(): + is_permitted = method in permitted_methods + pg_perms.append( + { + "label": label, + "method": method, + "permitted": is_permitted, + } + ) + options.append( + { + "document_type": pagetype, + "document_name": pg.title or pg.name, + "permissions": pg_perms, + } + ) + + return options + + @dashboard_whitelist() + def update_permissions(self, updated_permissions): + cur_permissions = jingrow.parse_json(self.permissions) + for updated_pagetype, updated_pagetype_perms in updated_permissions.items(): + if updated_pagetype not in cur_permissions: + cur_permissions[updated_pagetype] = {} + + for updated_docname, updated_docperms in updated_pagetype_perms.items(): + if updated_docname == "*": + cur_permissions[updated_pagetype] = {"*": updated_docperms} + continue + if updated_docname not in cur_permissions[updated_pagetype]: + cur_permissions[updated_pagetype][updated_docname] = {} + + for method, permitted in updated_docperms.items(): + cur_permissions[updated_pagetype][updated_docname][method] = permitted + + self.permissions = cur_permissions + self.save() + + +def has_method_permission( + pagetype: str, name: str, method: str, group_names: list = None +): + if jingrow.local.system_user(): + return True + + user = jingrow.session.user + + if pagetype not in get_all_restrictable_doctypes(): + return True + + if method not in get_all_restrictable_methods(pagetype): + return True + + if not group_names: + group_names = get_permission_groups(user) + + if not group_names: + # user does not have any restricted permissions set in any group + return True + + if method in get_permitted_methods(pagetype, name, group_names): + return True + + return False + + +def get_permitted_methods(pagetype: str, name: str, group_names: list = None) -> list: + user = jingrow.session.user + + if pagetype not in get_all_restrictable_doctypes(): + jingrow.throw(f"{pagetype} is not a valid restrictable pagetype.") + + permissions_by_group = {} + permission_groups = group_names or get_permission_groups(user) + for group_name in set(permission_groups): + permissions_by_group[group_name] = get_method_perms_for_group( + pagetype, name, group_name + ) + + method_perms = resolve_pg_permissions(pagetype, 
permissions_by_group) + permitted_methods = [method for method, permitted in method_perms.items() if permitted] + return list(set(permitted_methods)) + + +def get_method_perms_for_group(pagetype: str, name: str, group_name: str) -> list: + permissions = jingrow.db.get_value("Jcloud Permission Group", group_name, "permissions") + + if not permissions: + # this group allows all methods of all documents + return {"*": True} + + permissions = jingrow.parse_json(permissions) + pagetype_perms = permissions.get(pagetype, None) or permissions.get("*", None) + if not pagetype_perms: + # this group allows all methods of all documents + return {"*": True} + + pg_perms = pagetype_perms.get(name, None) or pagetype_perms.get("*", None) + if not pg_perms: + # this group allows all methods of this document + return {"*": True} + + return pg_perms + + +def resolve_pg_permissions(pagetype, permissions_by_group: dict) -> dict: + """ + Permission Resolution Logic: + - if a group has *: True and another group has *: False, then all the methods are allowed + - if a group has *: True and another group has 'method': False, then that method is restricted + - if a group has 'method': True and another group has 'method': False, then that method is allowed + """ + method_perms = {} + + all_methods = get_all_restrictable_methods(pagetype) + all_restricted = {method: False for method in all_methods} + all_allowed = {method: True for method in all_methods} + + # first we parse the wildcard permissions + # if any group has *: True, then all methods are allowed + for group_name, permissions in permissions_by_group.items(): + if permissions.get("*", None) is None: + continue + if permissions.get("*", None) is True: + method_perms = all_allowed + break + if permissions.get("*", None) is False: + method_perms = all_restricted + + # now we restrict all the methods that are explicitly restricted + # so that we can allow all the methods that are explicitly allowed later + for group_name, permissions in permissions_by_group.items(): + for method, permitted in permissions.items(): + if not permitted and method != "*": + method_perms[method] = False + + # now we allow all the methods that are explicitly allowed + for group_name, permissions in permissions_by_group.items(): + for method, permitted in permissions.items(): + if permitted and method != "*": + method_perms[method] = True + + return method_perms + + +def get_all_restrictable_doctypes() -> list: + return ["Site", "Release Group"] + + +def get_all_restrictable_methods(pagetype: str) -> list: + methods = { + "Site": { + # method: label, + "get_pg": " View", # so that this comes up first in sort order + "archive": "Drop", + "migrate": "Migrate", + "activate": "Activate", + "reinstall": "Reinstall", + "deactivate": "Deactivate", + "enable_database_access": "Database", + "restore_site_from_files": "Restore", + }, + "Release Group": { + "get_pg": " View", + "restart": "Restart", + }, + } + return methods.get(pagetype, {}) + + +def get_permission_groups(user: str = None) -> list: + if not user: + user = jingrow.session.user + + return jingrow.get_all( + "Jcloud Permission Group User", + filters={"user": user}, + pluck="parent", + distinct=True, + ) diff --git a/jcloud/jcloud/pagetype/jcloud_permission_group/test_jcloud_permission_group.py b/jcloud/jcloud/pagetype/jcloud_permission_group/test_jcloud_permission_group.py new file mode 100644 index 0000000..2882868 --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_permission_group/test_jcloud_permission_group.py @@ -0,0 +1,168 @@ 
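# --- Usage sketch (added for illustration, not part of the original commit) ---
# How the rules documented in resolve_pg_permissions above combine the perms of two
# hypothetical groups for the "Site" pagetype. Group names and the expected outcome
# are assumptions based on that docstring; assumes this app is importable.
from jcloud.jcloud.pagetype.jcloud_permission_group.jcloud_permission_group import (
    resolve_pg_permissions,
)

resolved = resolve_pg_permissions(
    "Site",
    {
        "Group A": {"*": True},           # wildcard allow wins the first pass
        "Group B": {"reinstall": False},  # explicit restriction applied afterwards
    },
)
# resolved should map every restrictable Site method to True except "reinstall",
# which stays False because no group allows it explicitly.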
+# Copyright (c) 2023, JINGROW +# See license.txt + +import jingrow +from jingrow.tests.utils import JingrowTestCase + +from jcloud.jcloud.pagetype.jcloud_permission_group.jcloud_permission_group import ( + get_all_restrictable_methods, + has_method_permission, +) +from jcloud.jcloud.pagetype.team.test_team import create_test_team + + +class TestJcloudPermissionGroup(JingrowTestCase): + def setUp(self): + jingrow.set_user("Administrator") + jingrow.db.delete("Jcloud Permission Group") + self.team_user = create_user("team@example.com") + self.team = create_test_team(self.team_user.email) + self.team_member = create_user("user123@example.com") + self.team.append("team_members", {"user": self.team_member.name}) + self.team.save() + self.perm_group = create_permission_group(self.team.name) + self.perm_group2 = create_permission_group(self.team.name) + + def tearDown(self): + jingrow.set_user("Administrator") + jingrow.delete_pg("Jcloud Permission Group", self.perm_group.name, force=True) + jingrow.delete_pg("Jcloud Permission Group", self.perm_group2.name, force=True) + jingrow.delete_pg("Team", self.team.name, force=True) + jingrow.delete_pg("User", self.team_member.name, force=True) + jingrow.delete_pg("User", self.team_user.name, force=True) + jingrow.local._current_team = None + + def test_add_user(self): + self.perm_group.add_user(self.team_member.name) + perm_group_users = self.perm_group.get_users() + perm_group_user_exists = any( + self.team_member.name == pg_user.name for pg_user in perm_group_users + ) + self.assertTrue(perm_group_user_exists) + self.assertRaises( + jingrow.ValidationError, self.perm_group.add_user, self.team_member.name + ) + + def test_remove_user(self): + self.perm_group.add_user(self.team_member.name) + self.perm_group.remove_user(self.team_member.name) + perm_group_users = self.perm_group.get_users() + perm_group_user_exists = any( + self.team_member.name == pg_user.name for pg_user in perm_group_users + ) + self.assertFalse(perm_group_user_exists) + self.assertRaises( + jingrow.ValidationError, self.perm_group.remove_user, self.team_member.name + ) + + def test_update_permissions(self): + jingrow.set_user("Administrator") + self.perm_group.add_user(self.team_member.name) + self.perm_group.update_permissions({"Site": {"*": {"*": True}}}) + jingrow.set_user(self.team_member.name) + self.assertEqual(has_method_permission("Site", "site1.test", "reinstall"), True) + + jingrow.set_user("Administrator") + self.perm_group.update_permissions( + {"Site": {"site1.test": {"*": True, "reinstall": False}}} + ) + jingrow.set_user(self.team_member.name) + self.assertEqual(has_method_permission("Site", "site1.test", "reinstall"), False) + + def test_update_permissions_with_invalid_pagetype(self): + jingrow.set_user("Administrator") + self.assertRaises( + jingrow.ValidationError, + self.perm_group.update_permissions, + {"Invalid Pagetype": {"*": {"*": True}}}, + ) + + def test_update_permissions_with_invalid_method(self): + jingrow.set_user("Administrator") + self.assertRaises( + jingrow.ValidationError, + self.perm_group.update_permissions, + {"Site": {"*": {"invalid_method": True}}}, + ) + + def test_unrestricted_method_should_be_allowed(self): + jingrow.set_user("Administrator") + self.perm_group.add_user(self.team_member.name) + jingrow.set_user(self.team_member.name) + self.assertEqual(has_method_permission("Site", "site1.test", "create"), True) + + def test_most_permissive_permission_should_be_allowed(self): + jingrow.set_user("Administrator") + 
self.perm_group2.add_user(self.team_member.name) + self.perm_group2.update_permissions({"Site": {"*": {"*": False}}}) + self.perm_group.add_user(self.team_member.name) + self.perm_group.update_permissions({"Site": {"*": {"*": True}}}) + jingrow.set_user(self.team_member.name) + self.assertEqual(has_method_permission("Site", "site1.test", "reinstall"), True) + + def test_specific_permission_should_be_allowed(self): + jingrow.set_user("Administrator") + self.perm_group2.add_user(self.team_member.name) + self.perm_group2.update_permissions({"Site": {"*": {"*": False}}}) + self.perm_group.add_user(self.team_member.name) + self.perm_group.update_permissions({"Site": {"site1.test": {"reinstall": True}}}) + jingrow.set_user(self.team_member.name) + self.assertEqual(has_method_permission("Site", "site1.test", "reinstall"), True) + + def test_get_all_document_permissions(self): + # Test case 1: User belongs to the permission group + jingrow.set_user("Administrator") + self.perm_group.add_user(self.team_member.name) + self.perm_group.update_permissions({"Site": {"*": {"*": True}}}) + + site = jingrow.new_pg("Site") + site.name = "site1.test" + site.team = self.team.name + site.db_insert() + + jingrow.set_user(self.team_member.name) + jingrow.local._current_team = self.team + permissions = self.perm_group.get_all_document_permissions("Site") + self.assertEqual(len(permissions), 1) + self.assertEqual(permissions[0]["document_type"], "Site") + self.assertEqual(permissions[0]["document_name"], "site1.test") + site_restrictable_methods = get_all_restrictable_methods("Site") + self.assertEqual(len(permissions[0]["permissions"]), len(site_restrictable_methods)) + + # Test case 2: User does not belong to the permission group + jingrow.set_user("user@example.com") + self.assertRaises( + jingrow.ValidationError, self.perm_group.get_all_document_permissions, "Site" + ) + + # Test case 3: Invalid restrictable pagetype + jingrow.set_user("Administrator") + self.assertRaises( + jingrow.ValidationError, + self.perm_group.get_all_document_permissions, + "InvalidDoctype", + ) + + # Test case 4: No restrictable methods for the pagetype + self.assertRaises( + jingrow.ValidationError, self.perm_group.get_all_document_permissions, "DocType2" + ) + + +# utils +def create_permission_group(team): + pg = jingrow.new_pg("Jcloud Permission Group") + pg.title = "Test Group" + pg.team = team + pg.save() + return pg + + +def create_user(email): + if jingrow.db.exists("User", email): + return jingrow.get_pg("User", email) + user = jingrow.new_pg("User") + user.email = email + user.first_name = email.split("@")[0] + user.save() + return user diff --git a/jcloud/jcloud/pagetype/jcloud_permission_group_user/__init__.py b/jcloud/jcloud/pagetype/jcloud_permission_group_user/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/jcloud_permission_group_user/jcloud_permission_group_user.json b/jcloud/jcloud/pagetype/jcloud_permission_group_user/jcloud_permission_group_user.json new file mode 100644 index 0000000..524a6a3 --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_permission_group_user/jcloud_permission_group_user.json @@ -0,0 +1,35 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2023-08-07 13:20:37.357116", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "user" + ], + "fields": [ + { + "fieldname": "user", + "fieldtype": "Link", + "in_filter": 1, + "in_list_view": 1, + "in_preview": 1, + "in_standard_filter": 1, + "label": "User", + 
"options": "User" + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2023-08-07 13:21:16.669052", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Jcloud Permission Group User", + "owner": "Administrator", + "permissions": [], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/jcloud_permission_group_user/jcloud_permission_group_user.py b/jcloud/jcloud/pagetype/jcloud_permission_group_user/jcloud_permission_group_user.py new file mode 100644 index 0000000..5b56ac9 --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_permission_group_user/jcloud_permission_group_user.py @@ -0,0 +1,23 @@ +# Copyright (c) 2023, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class JcloudPermissionGroupUser(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + user: DF.Link | None + # end: auto-generated types + + dashboard_fields = ["user"] diff --git a/jcloud/jcloud/pagetype/jcloud_role/__init__.py b/jcloud/jcloud/pagetype/jcloud_role/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/jcloud_role/jcloud_role.js b/jcloud/jcloud/pagetype/jcloud_role/jcloud_role.js new file mode 100644 index 0000000..579b9df --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_role/jcloud_role.js @@ -0,0 +1,8 @@ +// Copyright (c) 2024, JINGROW +// For license information, please see license.txt + +// jingrow.ui.form.on("Jcloud Role", { +// refresh(frm) { + +// }, +// }); diff --git a/jcloud/jcloud/pagetype/jcloud_role/jcloud_role.json b/jcloud/jcloud/pagetype/jcloud_role/jcloud_role.json new file mode 100644 index 0000000..767fae5 --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_role/jcloud_role.json @@ -0,0 +1,163 @@ +{ + "actions": [], + "allow_rename": 1, + "autoname": "hash", + "creation": "2024-05-13 11:44:03.637522", + "pagetype": "PageType", + "engine": "InnoDB", + "field_order": [ + "title", + "column_break_qnnn", + "team", + "section_break_yvqq", + "admin_access", + "allow_billing", + "allow_apps", + "allow_partner", + "column_break_todb", + "allow_site_creation", + "allow_bench_creation", + "allow_server_creation", + "allow_webhook_configuration", + "section_break_zdiv", + "users" + ], + "fields": [ + { + "fieldname": "title", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Title", + "reqd": 1 + }, + { + "fieldname": "column_break_qnnn", + "fieldtype": "Column Break" + }, + { + "fieldname": "team", + "fieldtype": "Link", + "label": "Team", + "options": "Team", + "reqd": 1 + }, + { + "fieldname": "section_break_zdiv", + "fieldtype": "Section Break" + }, + { + "fieldname": "users", + "fieldtype": "Table", + "label": "Users", + "options": "Jcloud Role User" + }, + { + "fieldname": "section_break_yvqq", + "fieldtype": "Section Break", + "label": "Additional Permissions" + }, + { + "fieldname": "column_break_todb", + "fieldtype": "Column Break" + }, + { + "default": "0", + "fieldname": "allow_billing", + "fieldtype": "Check", + "label": "Allow Billing" + }, + { + "default": "0", + "fieldname": "allow_apps", + "fieldtype": "Check", + "label": "Allow Apps" + }, + { + "default": "0", + "fieldname": "allow_site_creation", + "fieldtype": "Check", + 
"label": "Allow Site Creation" + }, + { + "default": "0", + "fieldname": "allow_bench_creation", + "fieldtype": "Check", + "label": "Allow Bench Creation" + }, + { + "default": "0", + "fieldname": "allow_server_creation", + "fieldtype": "Check", + "label": "Allow Server Creation" + }, + { + "default": "0", + "fieldname": "allow_partner", + "fieldtype": "Check", + "label": "Allow Partner" + }, + { + "default": "0", + "fieldname": "admin_access", + "fieldtype": "Check", + "label": "Admin Access" + }, + { + "default": "0", + "fieldname": "allow_webhook_configuration", + "fieldtype": "Check", + "label": "Allow Webhook Configuration" + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2024-09-26 15:51:19.122128", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Jcloud Role", + "naming_rule": "Random", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + }, + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "Jcloud Admin", + "share": 1, + "write": 1 + }, + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "Jcloud Member", + "share": 1, + "write": 1 + } + ], + "show_title_field_in_link": 1, + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "title_field": "title" +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/jcloud_role/jcloud_role.py b/jcloud/jcloud/pagetype/jcloud_role/jcloud_role.py new file mode 100644 index 0000000..5793fb1 --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_role/jcloud_role.py @@ -0,0 +1,257 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt +from __future__ import annotations + +import jingrow +from jingrow.model.document import Document + +from jcloud.api.client import dashboard_whitelist + + +class JcloudRole(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + from jcloud.jcloud.pagetype.jcloud_role_user.jcloud_role_user import JcloudRoleUser + + admin_access: DF.Check + allow_apps: DF.Check + allow_bench_creation: DF.Check + allow_billing: DF.Check + allow_partner: DF.Check + allow_server_creation: DF.Check + allow_site_creation: DF.Check + allow_webhook_configuration: DF.Check + team: DF.Link + title: DF.Data + users: DF.Table[JcloudRoleUser] + # end: auto-generated types + + dashboard_fields = ( + "title", + "users", + "admin_access", + "allow_billing", + "allow_apps", + "allow_partner", + "allow_site_creation", + "allow_bench_creation", + "allow_server_creation", + "allow_webhook_configuration", + "team", + ) + + def before_insert(self): + if jingrow.db.exists("Jcloud Role", {"title": self.title, "team": self.team}): + jingrow.throw(f"Role with title {self.title} already exists", jingrow.DuplicateEntryError) + + if not jingrow.local.system_user() and jingrow.session.user != jingrow.db.get_value( + "Team", self.team, "user" + ): + jingrow.throw("Only the team owner can create roles") + + def validate(self): + self.set_first_role_as_admin() + self.allow_only_one_admin_role() + self.set_admin_permissions() + + def set_first_role_as_admin(self): + if not jingrow.get_all("Jcloud Role", filters={"team": self.team}): + self.admin_access = 1 + + def allow_only_one_admin_role(self): + admin_roles = jingrow.get_all( + "Jcloud Role", + filters={"team": self.team, "admin_access": 1, "name": ("!=", self.name)}, + ) + if admin_roles and self.admin_access: + jingrow.throw("There can only be one admin role per team") + + def set_admin_permissions(self): + if self.admin_access: + self.allow_apps = 1 + self.allow_billing = 1 + self.allow_partner = 1 + self.allow_site_creation = 1 + self.allow_bench_creation = 1 + self.allow_server_creation = 1 + self.allow_webhook_configuration = 1 + + @dashboard_whitelist() + def add_user(self, user): + user_exists = self.get("users", {"user": user}) + if user_exists: + jingrow.throw(f"{user} already belongs to {self.title}") + + self.append("users", {"user": user}) + self.save() + + @dashboard_whitelist() + def remove_user(self, user): + user_exists = self.get("users", {"user": user}) + if not user_exists: + jingrow.throw(f"{user} does not belong to {self.title}") + + for row in self.users: + if row.user == user: + self.remove(row) + break + self.save() + + @dashboard_whitelist() + def delete_permissions(self, permissions: list[str]) -> None: + for perm in permissions: + perm_pg = jingrow.get_pg("Jcloud Role Permission", perm) + if perm_pg.role == self.name: + perm_pg.delete() + + @dashboard_whitelist() + def delete(self) -> None: + if not jingrow.local.system_user() and jingrow.session.user != jingrow.db.get_value( + "Team", self.team, "user" + ): + jingrow.throw("Only the team owner can delete this role") + + super().delete() + + def on_trash(self) -> None: + jingrow.db.delete("Jcloud Role Permission", {"role": self.name}) + jingrow.db.delete("Account Request Jcloud Role", {"jcloud_role": self.name}) + + +def check_role_permissions(pagetype: str, name: str | None = None) -> list[str] | None: # noqa: C901 + """ + Check if the user is permitted to access the document based on the role permissions + Expects the function to throw error for `get` if no permission and return a list of permitted roles for `get_list` + Note: Empty list means no restrictions + + :param pagetype: Document type + :param name: Document name + :return: List 
of permitted roles or None + """ + from jcloud.utils import has_role + + if pagetype not in [ + "Site", + "Release Group", + "Server", + "Marketplace App", + "Jcloud Webhook", + "Jcloud Webhook Log", + "Jcloud Webhook Attempt", + ]: + return [] + + if (hasattr(jingrow.local, "system_user") and jingrow.local.system_user()) or has_role( + "Jcloud Support Agent" + ): + return [] + + JcloudRoleUser = jingrow.qb.PageType("Jcloud Role User") + JcloudRole = jingrow.qb.PageType("Jcloud Role") + query = ( + jingrow.qb.from_(JcloudRole) + .select(JcloudRole.name) + .join(JcloudRoleUser) + .on(JcloudRoleUser.parent == JcloudRole.name) + .where(JcloudRoleUser.user == jingrow.session.user) + .where(JcloudRole.team == jingrow.local.team().name) + ) + + if ( + pagetype == "Marketplace App" + and (roles := query.select(JcloudRole.allow_apps).run(as_dict=1)) + and not any(perm.allow_apps for perm in roles) + ): + # throw error if none of the user's roles allow apps + jingrow.throw("Not permitted", jingrow.PermissionError) + + elif ( + pagetype in ["Jcloud Webhook", "Jcloud Webhook Log", "Jcloud Webhook Attempt"] + and (roles := query.select(JcloudRole.allow_webhook_configuration).run(as_dict=1)) + and not any(perm.allow_webhook_configuration for perm in roles) + ): + # throw error if none of the user's roles allow webhook configuration + jingrow.throw("Not permitted", jingrow.PermissionError) + + elif pagetype in ["Site", "Release Group", "Server"]: + field = pagetype.lower().replace(" ", "_") + roles = query.select(JcloudRole.admin_access).run(as_dict=1) + + # this is an admin that can access all sites, release groups, and servers + if any(perm.admin_access for perm in roles): + return [] + + if roles: + role_names = [perm.name for perm in roles] + perms = jingrow.db.get_all( + "Jcloud Role Permission", + filters={"role": ["in", role_names], field: name}, + ) + if not perms and name: + # throw error if the user is not permitted for the document + jingrow.throw( + f"You don't have permission to access this {pagetype if pagetype != 'Release Group' else 'Bench'}", + jingrow.PermissionError, + ) + else: + return role_names + + return [] + + +def add_permission_for_newly_created_pg(pg: Document) -> None: + """ + Used to bulk insert permissions right after a site/release group/server is created + for users whose role has the create permission enabled for the respective pagetype + """ + + pagetype = pg.pagetype + if pagetype not in ["Site", "Release Group", "Server"]: + return + + role_fieldname = "" + fieldname = pagetype.lower().replace(" ", "_") + if pagetype == "Site": + role_fieldname = "allow_site_creation" + elif pagetype == "Server": + role_fieldname = "allow_server_creation" + elif pagetype == "Release Group": + role_fieldname = "allow_bench_creation" + + new_perms = [] + JcloudRole = jingrow.qb.PageType("Jcloud Role") + JcloudRoleUser = jingrow.qb.PageType("Jcloud Role User") + if roles := ( + jingrow.qb.from_(JcloudRole) + .select(JcloudRole.name) + .join(JcloudRoleUser) + .on(JcloudRoleUser.parent == JcloudRole.name) + .where(JcloudRoleUser.user == jingrow.session.user) + .where(JcloudRole.team == pg.team) + .where(JcloudRole[role_fieldname] == 1) + .run(as_dict=1, pluck="name") + ): + for role in roles: + new_perms.append( + ( + jingrow.generate_hash(length=12), + role, + pg.name, + pg.team, + jingrow.utils.now(), + jingrow.utils.now(), + ) + ) + + if new_perms: + jingrow.db.bulk_insert( + "Jcloud Role Permission", + fields=["name", "role", fieldname, "team", "creation", "modified"], + values=set(new_perms), + ) diff --git
a/jcloud/jcloud/pagetype/jcloud_role/patches/change_fields_from_enable_to_allow.py b/jcloud/jcloud/pagetype/jcloud_role/patches/change_fields_from_enable_to_allow.py new file mode 100644 index 0000000..a90d074 --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_role/patches/change_fields_from_enable_to_allow.py @@ -0,0 +1,10 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt + +import jingrow + + +def execute(): + jingrow.db.sql( + "UPDATE `tabJcloud Role` SET allow_billing = enable_billing, allow_apps = enable_apps" + ) diff --git a/jcloud/jcloud/pagetype/jcloud_role/patches/migrate_permissions.py b/jcloud/jcloud/pagetype/jcloud_role/patches/migrate_permissions.py new file mode 100644 index 0000000..addd947 --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_role/patches/migrate_permissions.py @@ -0,0 +1,62 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt + +import jingrow + +from jcloud.utils import _system_user + + +def execute(): + jingrow.local.system_user = _system_user + + teams = jingrow.get_all( + "Team", + filters={"enabled": 1}, + pluck="name", + ) + for team in teams: + migrate_group_permissions(team) + + +def migrate_group_permissions(team): + groups = jingrow.qb.get_query( + "Jcloud Permission Group", + fields=["name", "title", "team", {"users": ["user"]}], + filters={"team": team}, + ).run(as_dict=1) + + for group in groups: + old_group_permissions = jingrow.get_all( + "Jcloud User Permission", + filters={"group": group.name, "type": "Group"}, + fields=["document_type", "document_name"], + distinct=True, + ) + + if not old_group_permissions: + continue + + if jingrow.db.exists("Jcloud Role", {"title": group.title, "team": group.team}): + continue + + role = jingrow.new_pg("Jcloud Role") + role.title = group.title + role.team = team + role.enable_billing = 1 + role.enable_apps = 1 + for row in group.users: + role.append("users", {"user": row.user}) + role.insert() + + for perm in old_group_permissions: + if perm.document_type not in ["Site", "Release Group", "Server"]: + continue + fieldname = perm.document_type.lower().replace(" ", "_") + jingrow.get_pg( + { + "pagetype": "Jcloud Role Permission", + "role": role.name, + "team": team, + fieldname: perm.document_name, + } + ).insert() diff --git a/jcloud/jcloud/pagetype/jcloud_role/test_jcloud_role.py b/jcloud/jcloud/pagetype/jcloud_role/test_jcloud_role.py new file mode 100644 index 0000000..f3ce404 --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_role/test_jcloud_role.py @@ -0,0 +1,184 @@ +# Copyright (c) 2024, JINGROW +# See license.txt + +import jingrow +from jingrow.tests.utils import JingrowTestCase + +from jcloud.jcloud.pagetype.site.test_site import create_test_site +from jcloud.jcloud.pagetype.team.test_team import create_test_team + + +class TestJcloudRole(JingrowTestCase): + def setUp(self): + jingrow.set_user("Administrator") + jingrow.db.delete("Jcloud Role") + self.team_user = create_user("team@example.com") + self.team = create_test_team(self.team_user.email) + self.team_member = create_user("user123@example.com") + self.team.append("team_members", {"user": self.team_member.name}) + self.team.save() + self.admin_perm_role = create_permission_role(self.team.name) + self.perm_role = create_permission_role(self.team.name) + self.perm_role2 = create_permission_role(self.team.name) + + def tearDown(self): + jingrow.set_user("Administrator") + jingrow.delete_pg("Jcloud Role", self.perm_role.name, force=True) + jingrow.delete_pg("Jcloud Role", 
self.perm_role2.name, force=True) + jingrow.delete_pg("Team", self.team.name, force=True) + jingrow.delete_pg("User", self.team_member.name, force=True) + jingrow.delete_pg("User", self.team_user.name, force=True) + jingrow.local._current_team = None + + def test_add_user(self): + self.perm_role.add_user(self.team_member.name) + perm_role_users = get_users(self.perm_role) + perm_role_user_exists = any( + self.team_member.name == perm_role_user.user for perm_role_user in perm_role_users + ) + self.assertTrue(perm_role_user_exists) + self.assertRaises(jingrow.ValidationError, self.perm_role.add_user, self.team_member.name) + + def test_remove_user(self): + self.perm_role.add_user(self.team_member.name) + self.perm_role.remove_user(self.team_member.name) + perm_role_users = get_users(self.perm_role) + perm_role_user_exists = any( + self.team_member.name == perm_role_user.user for perm_role_user in perm_role_users + ) + self.assertFalse(perm_role_user_exists) + self.assertRaises(jingrow.ValidationError, self.perm_role.remove_user, self.team_member.name) + + def test_delete_role(self): + perm = jingrow.new_pg("Jcloud Role Permission") + perm.role = self.perm_role.name + perm.team = self.team.name + perm.save() + + self.perm_role.delete() + self.assertFalse(jingrow.db.exists("Jcloud Role", self.perm_role.name)) + self.assertFalse(jingrow.db.get_all("Jcloud Role Permission", filters={"role": self.perm_role.name})) + + def test_delete_permissions(self): + perm = jingrow.new_pg("Jcloud Role Permission") + perm.role = self.perm_role.name + perm.team = self.team.name + perm.save() + + permissions = jingrow.get_all( + "Jcloud Role Permission", filters={"role": self.perm_role.name}, pluck="name" + ) + self.perm_role.delete_permissions(permissions) + self.assertFalse(jingrow.db.get_all("Jcloud Role Permission", filters={"role": self.perm_role.name})) + + def test_get_list_with_permissions(self): + from jcloud.api.client import get_list + + jingrow.set_user("Administrator") + site1 = create_test_site(team=self.team.name) + site2 = create_test_site(team=self.team.name) + self.perm_role.add_user(self.team_user.name) + self.perm_role2.add_user(self.team_user.name) + jingrow.set_user(self.team_user.name) + + # no permissions added should show all records + self.assertCountEqual(get_list("Site"), []) + jingrow.set_user("Administrator") + perm = jingrow.new_pg("Jcloud Role Permission") + perm.role = self.perm_role.name + perm.team = self.team.name + perm.site = site1.name + perm.save() + jingrow.set_user(self.team_user.name) + + # permission for site1 added in the role + self.assertEqual(get_list("Site"), [{"name": site1.name, "bench": site1.bench}]) + + jingrow.set_user("Administrator") + perm2 = jingrow.new_pg("Jcloud Role Permission") + perm2.role = self.perm_role2.name + perm2.team = self.team.name + perm2.site = site2.name + perm2.save() + jingrow.set_user(self.team_user.name) + + # permission for site2 added in another role + self.assertCountEqual( + get_list("Site"), + [ + {"name": site1.name, "bench": site1.bench}, + {"name": site2.name, "bench": site2.bench}, + ], + ) + + def test_get_with_permissions(self): + from jcloud.api.client import get + + jingrow.set_user("Administrator") + site = create_test_site(team=self.team.name) + site2 = create_test_site(team=self.team.name) + self.perm_role.add_user(self.team_user.name) + jingrow.set_user(self.team_user.name) + + # no permissions added should throw exception for both sites + self.assertRaises(Exception, get, "Site", site.name) + 
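# --- Usage sketch (added for illustration, not part of the original commit) ---
# How a caller might consume check_role_permissions from jcloud_role.py, per its
# docstring: an empty list means no role-based restriction, a non-empty list is the
# set of permitted role names, and passing a document name raises PermissionError
# when none of the user's roles cover it. The site name here is hypothetical and a
# jingrow session/team context is assumed.
from jcloud.jcloud.pagetype.jcloud_role.jcloud_role import check_role_permissions

permitted_roles = check_role_permissions("Site")      # for list views
check_role_permissions("Site", "example-site.test")   # for a single document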
self.assertRaises(Exception, get, "Site", site2.name) + + jingrow.set_user("Administrator") + perm = jingrow.new_pg("Jcloud Role Permission") + perm.role = self.perm_role.name + perm.team = self.team.name + perm.site = site.name + perm.save() + jingrow.set_user(self.team_user.name) + + # permission for site added in the role + self.assertEqual(get("Site", site.name).name, site.name) + self.assertRaises(Exception, get, "Site", site2.name) + + def test_newly_created_sites_are_permitted_for_roles_with_allow_site_creation_and_existing_perms( + self, + ): + role = create_permission_role(self.team.name, allow_site_creation=1) + + # admin have insert perms (fw level), so adding admin as role user + role.add_user("Administrator") + role.add_user(self.team_user.name) + jingrow.set_user("Administrator") + + # creating this site to add a permission + site = create_test_site(team=self.team.name) + + jingrow.set_user(self.team_user.name) + + self.assertTrue(jingrow.db.exists("Jcloud Role Permission", {"site": site.name, "role": role.name})) + + jingrow.set_user("Administrator") + jingrow.delete_pg("Jcloud Role", role.name, force=1) + + +# utils +def create_permission_role(team, allow_site_creation=0): + import random + + pg = jingrow.new_pg("Jcloud Role") + pg.title = "Test Role" + str(random.randint(1, 1000)) + pg.team = team + pg.allow_site_creation = allow_site_creation + pg.save() + + return pg + + +def create_user(email): + if jingrow.db.exists("User", email): + return jingrow.get_pg("User", email) + user = jingrow.new_pg("User") + user.email = email + user.first_name = email.split("@")[0] + user.save() + return user + + +def get_users(role): + return role.users diff --git a/jcloud/jcloud/pagetype/jcloud_role_permission/__init__.py b/jcloud/jcloud/pagetype/jcloud_role_permission/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/jcloud_role_permission/jcloud_role_permission.js b/jcloud/jcloud/pagetype/jcloud_role_permission/jcloud_role_permission.js new file mode 100644 index 0000000..af2adc5 --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_role_permission/jcloud_role_permission.js @@ -0,0 +1,8 @@ +// Copyright (c) 2024, JINGROW +// For license information, please see license.txt + +// jingrow.ui.form.on("Jcloud Role Permission", { +// refresh(frm) { + +// }, +// }); diff --git a/jcloud/jcloud/pagetype/jcloud_role_permission/jcloud_role_permission.json b/jcloud/jcloud/pagetype/jcloud_role_permission/jcloud_role_permission.json new file mode 100644 index 0000000..ba9b6e2 --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_role_permission/jcloud_role_permission.json @@ -0,0 +1,103 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2024-05-13 11:34:13.051627", + "pagetype": "PageType", + "engine": "InnoDB", + "field_order": [ + "team", + "role", + "column_break_ayow", + "site", + "release_group", + "server" + ], + "fields": [ + { + "fieldname": "team", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Team", + "options": "Team", + "reqd": 1 + }, + { + "fieldname": "column_break_ayow", + "fieldtype": "Column Break" + }, + { + "fieldname": "role", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Role", + "options": "Jcloud Role", + "reqd": 1 + }, + { + "fieldname": "site", + "fieldtype": "Link", + "label": "Site", + "options": "Site" + }, + { + "fieldname": "release_group", + "fieldtype": "Link", + "label": "Release Group", + "options": "Release Group" + }, + { + "fieldname": "server", + "fieldtype": "Link", + "label": "Server", + "options": 
"Server" + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2024-05-20 13:03:39.066144", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Jcloud Role Permission", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + }, + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "Jcloud Admin", + "share": 1, + "write": 1 + }, + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "Jcloud Member", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/jcloud_role_permission/jcloud_role_permission.py b/jcloud/jcloud/pagetype/jcloud_role_permission/jcloud_role_permission.py new file mode 100644 index 0000000..ff9022c --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_role_permission/jcloud_role_permission.py @@ -0,0 +1,60 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt +from __future__ import annotations + +import jingrow +from jingrow.model.document import Document + +from jcloud.api.client import dashboard_whitelist + + +class JcloudRolePermission(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + release_group: DF.Link | None + role: DF.Link + server: DF.Link | None + site: DF.Link | None + team: DF.Link + # end: auto-generated types + + dashboard_fields = ("site", "release_group", "server", "role") + + def before_insert(self): + is_admin_role = jingrow.db.get_value("Jcloud Role", self.role, "admin_access") + if ( + not jingrow.local.system_user() + and jingrow.session.user != jingrow.db.get_value("Team", self.team, "user") + and not is_admin_role + ): + jingrow.throw("Only the team owner or admin can create role permissions") + + if jingrow.db.exists( + "Jcloud Role Permission", + { + "role": self.role, + "team": self.team, + "site": self.site, + "release_group": self.release_group, + "server": self.server, + }, + ): + jingrow.throw("Role Permission already exists") + + @dashboard_whitelist() + def delete(self): + is_admin_role = jingrow.db.get_value("Jcloud Role", self.role, "admin_access") + if ( + not jingrow.local.system_user() + and jingrow.session.user != jingrow.get_cached_value("Team", self.team, "user") + and not is_admin_role + ): + jingrow.throw("Only the team owner or admin can delete this role permission") + + super().delete() diff --git a/jcloud/jcloud/pagetype/jcloud_role_permission/test_jcloud_role_permission.py b/jcloud/jcloud/pagetype/jcloud_role_permission/test_jcloud_role_permission.py new file mode 100644 index 0000000..269f242 --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_role_permission/test_jcloud_role_permission.py @@ -0,0 +1,9 @@ +# Copyright (c) 2024, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests.utils import JingrowTestCase + + +class TestJcloudRolePermission(JingrowTestCase): + pass diff --git a/jcloud/jcloud/pagetype/jcloud_role_user/__init__.py b/jcloud/jcloud/pagetype/jcloud_role_user/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/jcloud_role_user/jcloud_role_user.json 
b/jcloud/jcloud/pagetype/jcloud_role_user/jcloud_role_user.json new file mode 100644 index 0000000..386c525 --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_role_user/jcloud_role_user.json @@ -0,0 +1,49 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2024-05-13 11:45:15.610737", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "user", + "full_name", + "user_image" + ], + "fields": [ + { + "fieldname": "user", + "fieldtype": "Link", + "in_list_view": 1, + "label": "User", + "options": "User", + "reqd": 1 + }, + { + "fetch_from": "user.full_name", + "fieldname": "full_name", + "fieldtype": "Data", + "label": "Full Name", + "read_only": 1 + }, + { + "fetch_from": "user.user_image", + "fieldname": "user_image", + "fieldtype": "Attach Image", + "label": "User Image", + "read_only": 1 + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2024-05-13 12:25:17.802189", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Jcloud Role User", + "owner": "Administrator", + "permissions": [], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/jcloud_role_user/jcloud_role_user.py b/jcloud/jcloud/pagetype/jcloud_role_user/jcloud_role_user.py new file mode 100644 index 0000000..db7eb4c --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_role_user/jcloud_role_user.py @@ -0,0 +1,25 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class JcloudRoleUser(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + full_name: DF.Data | None + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + user: DF.Link + user_image: DF.AttachImage | None + # end: auto-generated types + + dashboard_fields = ["user", "full_name", "user_image"] diff --git a/jcloud/jcloud/pagetype/jcloud_settings/__init__.py b/jcloud/jcloud/pagetype/jcloud_settings/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/jcloud_settings/jcloud_settings.js b/jcloud/jcloud/pagetype/jcloud_settings/jcloud_settings.js new file mode 100644 index 0000000..e0ba1f5 --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_settings/jcloud_settings.js @@ -0,0 +1,29 @@ +// Copyright (c) 2020, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Jcloud Settings', { + create_stripe_webhook(frm) { + frm.call('create_stripe_webhook'); + }, + create_github_app(frm) { + frm.call({ + method: 'get_github_app_manifest', + pg: frm.pg, + callback: (response) => { + window.location.href = response.message; + let $form = $('
', { + action: 'https://github.com/settings/apps/new', + method: 'post', + }); + $('') + .attr({ + type: 'hidden', + name: 'manifest', + value: JSON.stringify(response.message), + }) + .appendTo($form); + $form.appendTo('body').submit(); + }, + }); + }, +}); diff --git a/jcloud/jcloud/pagetype/jcloud_settings/jcloud_settings.json b/jcloud/jcloud/pagetype/jcloud_settings/jcloud_settings.json new file mode 100644 index 0000000..be76a40 --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_settings/jcloud_settings.json @@ -0,0 +1,1599 @@ +{ + "actions": [], + "creation": "2022-02-08 15:13:48.372783", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "domain", + "cluster", + "trial_sites_count", + "jcloud_trial_plan", + "column_break_2", + "bench_configuration", + "billing_tab", + "free_credits_usd", + "free_credits_cny", + "column_break_cpry", + "micro_debit_charge_usd", + "micro_debit_charge_cny", + "column_break_wrqp", + "usage_record_creation_batch_size", + "invoicing_section", + "invoicing_column", + "gst_percentage", + "column_break_qfwx", + "print_format", + "alipay_settings_section", + "alipay_server_url", + "alipay_app_id", + "alipay_return_url", + "alipay_notify_url", + "column_break_alipay", + "alipay_app_private_key", + "alipay_public_key", + "wechatpay_settings_section", + "wechatpay_appid", + "wechatpay_mchid", + "wechatpay_notify_url", + "wechatpay_cert_serial_no", + "partner_mode", + "proxy", + "timeout", + "column_break_wechatpay", + "wechatpay_apiv3_key", + "wechatpay_private_key", + "wechatpay_public_key", + "wechatpay_public_key_id", + "stripe_settings_section", + "stripe_publishable_key", + "stripe_secret_key", + "column_break_26", + "create_stripe_plans", + "stripe_product_id", + "stripe_usd_plan_id", + "stripe_cny_plan_id", + "column_break_yhwz", + "create_stripe_webhook", + "stripe_webhook_endpoint_id", + "stripe_webhook_secret", + "ngrok_auth_token", + "razorpay_settings_section", + "razorpay_key_id", + "razorpay_webhook_secret", + "column_break_123", + "razorpay_key_secret", + "jerp_authentication", + "jerp_url", + "jerp_api_key", + "jerp_api_secret", + "column_break_38", + "jingrowio_authentication_section", + "jingrow_url", + "jingrowio_api_key", + "column_break_39", + "jingrowio_api_secret", + "backups_tab", + "offsite_backups_section", + "backup_region", + "offsite_backups_provider", + "aws_s3_bucket", + "data_40", + "backup_rotation_scheme", + "column_break_35", + "offsite_backups_access_key_id", + "offsite_backups_secret_access_key", + "offsite_backups_count", + "backups_section", + "backup_interval", + "backup_offset", + "column_break_48", + "backup_limit", + "physical_backups_section", + "disable_physical_backup", + "enable_physical_restore_failover", + "physical_restore_docker_image", + "docker_tab", + "section_break_59", + "docker_registry_url", + "docker_registry_namespace", + "column_break_64", + "docker_registry_username", + "docker_registry_password", + "docker_build_section", + "suspend_builds", + "clone_directory", + "build_directory", + "build_server", + "column_break_66", + "code_server", + "code_server_password", + "use_app_cache", + "compress_app_cache", + "use_delta_builds", + "auto_update_section", + "auto_update_queue_size", + "remote_files_section", + "remote_uploads_bucket", + "remote_link_expiry", + "column_break_51", + "remote_access_key_id", + "remote_secret_access_key", + "product_documentation_section", + "publish_docs", + "storage_and_disk_limits_section", + "enforce_storage_limits", + "jerp_tab", + 
"jerp_signups_section", + "jerp_domain", + "jerp_cluster", + "jerp_plan", + "jerp_group", + "column_break_89", + "jerp_apps", + "central_migration_server", + "staging_sites_section", + "staging_plan", + "staging_expiry", + "jerp_site_pool_section", + "enable_site_pooling", + "standby_pool_size", + "column_break_95", + "standby_queue_size", + "integrations_tab", + "telegram_section", + "telegram_chat_id", + "column_break_65", + "telegram_bot_token", + "section_break_vvyh", + "aliyun_access_key_id", + "aliyun_access_secret", + "mailgun_settings_section", + "mailgun_api_key", + "root_domain", + "column_break_117", + "default_outgoing_id", + "default_outgoing_pass", + "section_break_33", + "create_github_app", + "github_app_id", + "github_app_client_id", + "github_app_client_secret", + "column_break_36", + "github_app_public_link", + "github_webhook_secret", + "github_access_token", + "section_break_41", + "column_break_tcmy", + "column_break_edst", + "github_app_private_key", + "aws_section", + "aws_access_key_id", + "column_break_agig", + "aws_secret_access_key", + "hetzner_section", + "hetzner_api_token", + "twilio_section", + "twilio_account_sid", + "twilio_api_key_sid", + "twilio_api_key_secret", + "column_break_kxuj", + "twilio_phone_number", + "spamd_section", + "enable_spam_check", + "spamd_endpoint", + "column_break_xhfy", + "spamd_api_key", + "spamd_api_secret", + "marketplace_tab", + "marketplace_settings_section", + "max_allowed_screenshots", + "threshold", + "commission", + "usd_rate", + "app_include_script", + "github_pat_token", + "plausible_column", + "plausible_url", + "plausible_site_id", + "plausible_api_key", + "infrastructure_tab", + "git_section", + "git_service_type", + "column_break_jhbn", + "git_url", + "agent_section", + "agent_repository_owner", + "agent_sentry_dsn", + "column_break_105", + "agent_github_access_token", + "branch", + "lets_encrypt_section", + "certbot_directory", + "webroot_directory", + "rsa_key_size", + "column_break_15", + "eff_registration_email", + "use_staging_ca", + "ssh_section", + "ssh_certificate_authority", + "bench_section", + "redis_cache_size", + "monitoring_section", + "monitor_server", + "monitor_token", + "jcloud_monitoring_password", + "column_break_100", + "log_server", + "telegram_alert_chat_id", + "telegram_alerts_chat_group", + "feature_flags_tab", + "verify_cards_with_micro_charge", + "enable_google_oauth", + "realtime_job_updates", + "column_break_rdlr", + "disable_auto_retry", + "disable_agent_job_deduplication", + "enable_email_pre_verification", + "section_break_jstu", + "enable_app_grouping", + "default_apps", + "partner_tab", + "partnership_fees_section", + "partnership_fee_usd", + "column_break_yxrj", + "partnership_fee_cny", + "hybrid_server_tab", + "hybrid_cluster", + "hybrid_domain", + "tls_renewal_queue_size", + "code_spaces_tab", + "spaces_domain" + ], + "fields": [ + { + "fieldname": "domain", + "fieldtype": "Link", + "label": "Domain", + "options": "Root Domain" + }, + { + "fieldname": "cluster", + "fieldtype": "Link", + "label": "Cluster", + "options": "Cluster" + }, + { + "default": "1", + "fieldname": "trial_sites_count", + "fieldtype": "Int", + "label": "Number of Sites in Trial" + }, + { + "fieldname": "column_break_2", + "fieldtype": "Column Break" + }, + { + "default": "{}", + "fieldname": "bench_configuration", + "fieldtype": "Code", + "in_list_view": 1, + "label": "Bench Confguration", + "options": "JSON", + "reqd": 1 + }, + { + "fieldname": "billing_tab", + "fieldtype": "Tab Break", + "label": "Billing" + 
}, + { + "collapsible": 1, + "fieldname": "stripe_settings_section", + "fieldtype": "Section Break", + "label": "Stripe Settings" + }, + { + "fieldname": "stripe_cny_plan_id", + "fieldtype": "Data", + "label": "Stripe CNY Plan ID", + "read_only": 1 + }, + { + "fieldname": "stripe_publishable_key", + "fieldtype": "Data", + "label": "Stripe Publishable Key" + }, + { + "fieldname": "create_stripe_plans", + "fieldtype": "Button", + "label": "Create Stripe Plans" + }, + { + "fieldname": "stripe_product_id", + "fieldtype": "Data", + "label": "Stripe Product ID", + "read_only": 1 + }, + { + "fieldname": "stripe_usd_plan_id", + "fieldtype": "Data", + "label": "Stripe USD Plan ID", + "read_only": 1 + }, + { + "fieldname": "create_stripe_webhook", + "fieldtype": "Button", + "label": "Create Stripe Webhook" + }, + { + "fieldname": "stripe_webhook_endpoint_id", + "fieldtype": "Data", + "label": "Stripe Webhook Endpoint ID", + "read_only": 1 + }, + { + "fieldname": "stripe_webhook_secret", + "fieldtype": "Data", + "label": "Stripe Webhook Secret", + "read_only": 1 + }, + { + "fieldname": "column_break_26", + "fieldtype": "Column Break" + }, + { + "fieldname": "stripe_secret_key", + "fieldtype": "Password", + "label": "Stripe Secret Key" + }, + { + "fieldname": "free_credits_cny", + "fieldtype": "Currency", + "label": "Credits on Signup (CNY)", + "options": "CNY" + }, + { + "fieldname": "free_credits_usd", + "fieldtype": "Currency", + "label": "Credits on Signup (USD)", + "options": "USD" + }, + { + "description": "Sign up on ngrok.com to get one for free", + "fieldname": "ngrok_auth_token", + "fieldtype": "Data", + "label": "Ngrok Auth Token" + }, + { + "collapsible": 1, + "fieldname": "razorpay_settings_section", + "fieldtype": "Section Break", + "label": "Razorpay Settings" + }, + { + "fieldname": "razorpay_key_id", + "fieldtype": "Data", + "label": "Razorpay Key ID" + }, + { + "fieldname": "razorpay_webhook_secret", + "fieldtype": "Data", + "label": "Razorpay Webhook Secret" + }, + { + "fieldname": "column_break_123", + "fieldtype": "Column Break" + }, + { + "fieldname": "razorpay_key_secret", + "fieldtype": "Password", + "label": "Razorpay Key Secret" + }, + { + "collapsible": 1, + "fieldname": "jerp_authentication", + "fieldtype": "Section Break", + "label": "JERP Authentication" + }, + { + "fieldname": "jerp_url", + "fieldtype": "Data", + "label": "JERP URL" + }, + { + "fieldname": "jerp_api_key", + "fieldtype": "Data", + "label": "JERP API Key" + }, + { + "fieldname": "jerp_api_secret", + "fieldtype": "Password", + "label": "JERP API Secret" + }, + { + "fieldname": "column_break_38", + "fieldtype": "Column Break" + }, + { + "collapsible": 1, + "fieldname": "jingrowio_authentication_section", + "fieldtype": "Section Break", + "label": "framework.jingrow.com Authentication" + }, + { + "fieldname": "jingrow_url", + "fieldtype": "Data", + "label": "URL" + }, + { + "fieldname": "jingrowio_api_key", + "fieldtype": "Data", + "label": "framework.jingrow.com API Key" + }, + { + "fieldname": "column_break_39", + "fieldtype": "Column Break" + }, + { + "fieldname": "jingrowio_api_secret", + "fieldtype": "Password", + "label": "framework.jingrow.com API Secret" + }, + { + "fieldname": "backups_tab", + "fieldtype": "Tab Break", + "label": "Backups" + }, + { + "fieldname": "offsite_backups_section", + "fieldtype": "Section Break", + "label": "Offsite Backups" + }, + { + "fieldname": "backup_region", + "fieldtype": "Data", + "label": "Backup Region" + }, + { + "default": "AWS S3", + "fieldname": 
"offsite_backups_provider", + "fieldtype": "Select", + "label": "Backup Provider", + "options": "AWS S3" + }, + { + "fieldname": "aws_s3_bucket", + "fieldtype": "Data", + "label": "Bucket Name" + }, + { + "fieldname": "data_40", + "fieldtype": "Data" + }, + { + "fieldname": "backup_rotation_scheme", + "fieldtype": "Select", + "label": "Backup Rotation Scheme", + "options": "FIFO\nGrandfather-father-son" + }, + { + "fieldname": "column_break_35", + "fieldtype": "Column Break" + }, + { + "fieldname": "offsite_backups_access_key_id", + "fieldtype": "Data", + "label": "Access Key ID" + }, + { + "fieldname": "offsite_backups_secret_access_key", + "fieldtype": "Password", + "label": "Secret Access Key" + }, + { + "depends_on": "eval:pg.backup_rotation_scheme==\"FIFO\"", + "description": "The max number of Offsite backups that will be retained at any given time (for each site)", + "fieldname": "offsite_backups_count", + "fieldtype": "Int", + "label": "Total Backups Count" + }, + { + "fieldname": "backups_section", + "fieldtype": "Section Break", + "label": "Backups" + }, + { + "fieldname": "backup_interval", + "fieldtype": "Int", + "label": "Backup Interval" + }, + { + "default": "0", + "fieldname": "backup_offset", + "fieldtype": "Int", + "label": "Backup Offset" + }, + { + "fieldname": "column_break_48", + "fieldtype": "Column Break" + }, + { + "description": "Number of backups to take per ScheduledBackupJob", + "fieldname": "backup_limit", + "fieldtype": "Int", + "label": "Backup Limit" + }, + { + "fieldname": "docker_tab", + "fieldtype": "Tab Break", + "label": "Docker" + }, + { + "fieldname": "section_break_59", + "fieldtype": "Section Break", + "label": "Docker Registry" + }, + { + "fieldname": "docker_registry_url", + "fieldtype": "Data", + "label": "Docker Registry URL" + }, + { + "fieldname": "docker_registry_namespace", + "fieldtype": "Data", + "label": "Docker Registry Namespace" + }, + { + "fieldname": "column_break_64", + "fieldtype": "Column Break" + }, + { + "fieldname": "docker_registry_username", + "fieldtype": "Data", + "label": "Docker Registry Username" + }, + { + "fieldname": "docker_registry_password", + "fieldtype": "Data", + "label": "Docker Registry Password" + }, + { + "collapsible": 1, + "fieldname": "docker_build_section", + "fieldtype": "Section Break", + "label": "Docker Build" + }, + { + "fieldname": "clone_directory", + "fieldtype": "Data", + "label": "Clone Directory" + }, + { + "fieldname": "build_directory", + "fieldtype": "Data", + "label": "Build Directory" + }, + { + "fieldname": "column_break_66", + "fieldtype": "Column Break" + }, + { + "fieldname": "code_server", + "fieldtype": "Data", + "label": "Code Server" + }, + { + "fieldname": "code_server_password", + "fieldtype": "Data", + "label": "Code Server Password" + }, + { + "collapsible": 1, + "fieldname": "auto_update_section", + "fieldtype": "Section Break", + "label": "Auto Update" + }, + { + "default": "4", + "fieldname": "auto_update_queue_size", + "fieldtype": "Int", + "label": "Auto Update Queue Size" + }, + { + "collapsible": 1, + "fieldname": "remote_files_section", + "fieldtype": "Section Break", + "label": "Remote Files" + }, + { + "fieldname": "remote_uploads_bucket", + "fieldtype": "Data", + "label": "Uploads Bucket Name" + }, + { + "fieldname": "remote_link_expiry", + "fieldtype": "Int", + "label": "Link Expiry" + }, + { + "fieldname": "column_break_51", + "fieldtype": "Column Break" + }, + { + "fieldname": "remote_access_key_id", + "fieldtype": "Data", + "label": "Remote Access Key ID" + }, 
+ { + "fieldname": "remote_secret_access_key", + "fieldtype": "Password", + "label": "Remote Secret Access Key" + }, + { + "collapsible": 1, + "fieldname": "product_documentation_section", + "fieldtype": "Section Break", + "label": "Product Documentation" + }, + { + "default": "0", + "fieldname": "publish_docs", + "fieldtype": "Check", + "label": "Published" + }, + { + "collapsible": 1, + "fieldname": "storage_and_disk_limits_section", + "fieldtype": "Section Break", + "label": "Storage and Disk Limits" + }, + { + "default": "0", + "description": "Setting this to true will start suspending sites that cross the Site Usages with respect to their existing plans.", + "fieldname": "enforce_storage_limits", + "fieldtype": "Check", + "label": "Enforce Storage and Disk Limits" + }, + { + "fieldname": "jerp_tab", + "fieldtype": "Tab Break", + "label": "JERP" + }, + { + "fieldname": "jerp_signups_section", + "fieldtype": "Section Break", + "label": "JERP Signups" + }, + { + "fieldname": "jerp_domain", + "fieldtype": "Link", + "label": "JERP Domain", + "options": "Root Domain" + }, + { + "fetch_from": "jerp_domain.default_cluster", + "fetch_if_empty": 1, + "fieldname": "jerp_cluster", + "fieldtype": "Link", + "label": "JERP Cluster", + "options": "Cluster" + }, + { + "fieldname": "jerp_plan", + "fieldtype": "Link", + "label": "JERP Plan", + "options": "Site Plan" + }, + { + "fieldname": "jerp_group", + "fieldtype": "Link", + "label": "JERP Group", + "options": "Release Group" + }, + { + "fieldname": "column_break_89", + "fieldtype": "Column Break" + }, + { + "fieldname": "jerp_apps", + "fieldtype": "Table", + "label": "JERP Apps", + "options": "JERP App" + }, + { + "fieldname": "central_migration_server", + "fieldtype": "Link", + "label": "Central Migration Server", + "options": "Server" + }, + { + "collapsible": 1, + "fieldname": "staging_sites_section", + "fieldtype": "Section Break", + "label": "Staging Sites" + }, + { + "fieldname": "staging_plan", + "fieldtype": "Link", + "label": "Staging Plan", + "options": "Site Plan" + }, + { + "default": "24", + "fieldname": "staging_expiry", + "fieldtype": "Int", + "label": "Staging Expiry" + }, + { + "collapsible": 1, + "fieldname": "jerp_site_pool_section", + "fieldtype": "Section Break", + "label": "JERP Site Pool" + }, + { + "default": "0", + "fieldname": "enable_site_pooling", + "fieldtype": "Check", + "label": "Enable Site Pooling" + }, + { + "default": "5", + "fieldname": "standby_pool_size", + "fieldtype": "Int", + "label": "Standby Pool Size" + }, + { + "fieldname": "column_break_95", + "fieldtype": "Column Break" + }, + { + "default": "1", + "fieldname": "standby_queue_size", + "fieldtype": "Int", + "label": "Standby Queue Size" + }, + { + "fieldname": "integrations_tab", + "fieldtype": "Tab Break", + "label": "Integrations" + }, + { + "fieldname": "telegram_section", + "fieldtype": "Section Break", + "label": "Telegram" + }, + { + "fieldname": "telegram_chat_id", + "fieldtype": "Data", + "label": "Telegram Chat ID" + }, + { + "fieldname": "column_break_65", + "fieldtype": "Column Break" + }, + { + "fieldname": "telegram_bot_token", + "fieldtype": "Data", + "label": "Telegram Bot Token" + }, + { + "fieldname": "mailgun_settings_section", + "fieldtype": "Section Break", + "label": "Mailgun" + }, + { + "fieldname": "mailgun_api_key", + "fieldtype": "Data", + "label": "Api Key" + }, + { + "fieldname": "root_domain", + "fieldtype": "Data", + "label": "Root Domain" + }, + { + "fieldname": "column_break_117", + "fieldtype": "Column Break" + }, + { + 
"fieldname": "default_outgoing_id", + "fieldtype": "Data", + "label": "Default outgoing id" + }, + { + "fieldname": "default_outgoing_pass", + "fieldtype": "Data", + "label": "Default outgoing pass" + }, + { + "collapsible": 1, + "fieldname": "section_break_33", + "fieldtype": "Section Break", + "label": "GitHub" + }, + { + "depends_on": "eval: !pg.github_app_id", + "fieldname": "create_github_app", + "fieldtype": "Button", + "label": "Create GitHub App", + "mandatory_depends_on": "eval" + }, + { + "fieldname": "github_app_id", + "fieldtype": "Data", + "label": "GitHub App ID", + "read_only": 1 + }, + { + "fieldname": "github_app_client_id", + "fieldtype": "Data", + "label": "GitHub App Client ID", + "read_only": 1 + }, + { + "fieldname": "github_app_client_secret", + "fieldtype": "Data", + "label": "GitHub App Client Secret", + "read_only": 1 + }, + { + "fieldname": "column_break_36", + "fieldtype": "Column Break" + }, + { + "fieldname": "github_app_public_link", + "fieldtype": "Data", + "label": "GitHub App Public Link", + "read_only": 1 + }, + { + "fieldname": "github_webhook_secret", + "fieldtype": "Data", + "label": "GitHub Webhook Secret", + "read_only": 1 + }, + { + "fieldname": "github_access_token", + "fieldtype": "Data", + "label": "GitHub Access Token" + }, + { + "collapsible": 1, + "fieldname": "section_break_41", + "fieldtype": "Section Break", + "hide_border": 1 + }, + { + "fieldname": "github_app_private_key", + "fieldtype": "Code", + "hidden": 1, + "label": "GitHub App Private Key", + "read_only": 1 + }, + { + "fieldname": "marketplace_tab", + "fieldtype": "Tab Break", + "label": "Marketplace" + }, + { + "fieldname": "marketplace_settings_section", + "fieldtype": "Section Break", + "label": "Marketplace Settings" + }, + { + "default": "6", + "fieldname": "max_allowed_screenshots", + "fieldtype": "Int", + "label": "Max number of Allowed Screenshots", + "non_negative": 1 + }, + { + "fieldname": "infrastructure_tab", + "fieldtype": "Tab Break", + "label": "Infrastructure" + }, + { + "fieldname": "agent_section", + "fieldtype": "Section Break", + "label": "Agent" + }, + { + "default": "jingrow", + "fieldname": "agent_repository_owner", + "fieldtype": "Data", + "label": "Agent Repository Owner" + }, + { + "fieldname": "column_break_105", + "fieldtype": "Column Break" + }, + { + "fieldname": "agent_github_access_token", + "fieldtype": "Data", + "label": "Agent GitHub Access Token" + }, + { + "fieldname": "lets_encrypt_section", + "fieldtype": "Section Break", + "label": "Let's Encrypt" + }, + { + "fieldname": "certbot_directory", + "fieldtype": "Data", + "label": "Certbot Directory", + "reqd": 1 + }, + { + "fieldname": "webroot_directory", + "fieldtype": "Data", + "label": "Webroot Directory" + }, + { + "default": "2048", + "fieldname": "rsa_key_size", + "fieldtype": "Select", + "label": "RSA Key Size", + "options": "2048\n3072\n4096", + "reqd": 1 + }, + { + "fieldname": "column_break_15", + "fieldtype": "Column Break" + }, + { + "fieldname": "eff_registration_email", + "fieldtype": "Data", + "label": "EFF Registration Email", + "reqd": 1 + }, + { + "default": "0", + "fieldname": "use_staging_ca", + "fieldtype": "Check", + "label": "Use Staging CA" + }, + { + "collapsible": 1, + "fieldname": "ssh_section", + "fieldtype": "Section Break", + "label": "SSH" + }, + { + "fieldname": "ssh_certificate_authority", + "fieldtype": "Link", + "label": "SSH Certificate Authority", + "options": "SSH Certificate Authority" + }, + { + "collapsible": 1, + "fieldname": "monitoring_section", + 
"fieldtype": "Section Break", + "label": "Monitoring" + }, + { + "fieldname": "telegram_alert_chat_id", + "fieldtype": "Data", + "label": "Telegram Alert Chat ID" + }, + { + "fieldname": "monitor_server", + "fieldtype": "Link", + "label": "Monitor Server", + "options": "Monitor Server" + }, + { + "fieldname": "column_break_100", + "fieldtype": "Column Break" + }, + { + "fieldname": "monitor_token", + "fieldtype": "Data", + "label": "Monitor Token" + }, + { + "fieldname": "log_server", + "fieldtype": "Link", + "label": "Log Server", + "options": "Log Server" + }, + { + "fieldname": "feature_flags_tab", + "fieldtype": "Tab Break", + "label": "Feature Flags" + }, + { + "default": "No", + "fieldname": "verify_cards_with_micro_charge", + "fieldtype": "Select", + "label": "Verify Cards with Micro Charge", + "options": "No\nOnly CNY\nOnly USD\nBoth CNY and USD" + }, + { + "fieldname": "threshold", + "fieldtype": "Float", + "label": "Marketplace Payout Threshold" + }, + { + "fieldname": "commission", + "fieldtype": "Float", + "label": "Marketplace Commission" + }, + { + "fieldname": "usd_rate", + "fieldtype": "Float", + "label": "USD Rate" + }, + { + "fieldname": "jcloud_monitoring_password", + "fieldtype": "Password", + "label": "Jcloud Monitoring Password" + }, + { + "description": "Adds this script to app_include_js via site config. Used for in-site billing", + "fieldname": "app_include_script", + "fieldtype": "Data", + "label": "App Include Script" + }, + { + "fieldname": "telegram_alerts_chat_group", + "fieldtype": "Link", + "label": "Telegram Alerts Chat Group", + "options": "Telegram Group" + }, + { + "default": "0", + "fieldname": "enable_google_oauth", + "fieldtype": "Check", + "label": "Enable Google Oauth " + }, + { + "fieldname": "plausible_api_key", + "fieldtype": "Password", + "label": "Plausible API Key" + }, + { + "fieldname": "plausible_column", + "fieldtype": "Column Break", + "label": "Plausible" + }, + { + "fieldname": "plausible_url", + "fieldtype": "Data", + "label": "Plausible URL" + }, + { + "fieldname": "plausible_site_id", + "fieldtype": "Data", + "label": "Plausible site id" + }, + { + "fieldname": "code_spaces_tab", + "fieldtype": "Tab Break", + "label": "Code Spaces" + }, + { + "fieldname": "spaces_domain", + "fieldtype": "Link", + "label": "Spaces Domain", + "options": "Root Domain" + }, + { + "default": "0", + "fieldname": "suspend_builds", + "fieldtype": "Check", + "label": "Suspend Builds", + "read_only": 1 + }, + { + "fieldname": "aws_section", + "fieldtype": "Section Break", + "label": "AWS" + }, + { + "fieldname": "aws_access_key_id", + "fieldtype": "Data", + "label": "AWS Access Key ID" + }, + { + "fieldname": "column_break_agig", + "fieldtype": "Column Break" + }, + { + "fieldname": "aws_secret_access_key", + "fieldtype": "Password", + "label": "AWS Secret Access Key" + }, + { + "fieldname": "twilio_section", + "fieldtype": "Section Break", + "label": "Twilio" + }, + { + "fieldname": "twilio_account_sid", + "fieldtype": "Data", + "label": "Twilio Account SID" + }, + { + "fieldname": "column_break_kxuj", + "fieldtype": "Column Break" + }, + { + "fieldname": "twilio_phone_number", + "fieldtype": "Phone", + "label": "Twilio Phone Number" + }, + { + "fieldname": "invoicing_column", + "fieldtype": "Column Break" + }, + { + "fieldname": "gst_percentage", + "fieldtype": "Float", + "label": "GST Percentage" + }, + { + "fieldname": "column_break_tcmy", + "fieldtype": "Column Break" + }, + { + "fieldname": "column_break_edst", + "fieldtype": "Column Break" + }, + { + 
"fieldname": "twilio_api_key_sid", + "fieldtype": "Data", + "label": "Twilio API Key SID" + }, + { + "fieldname": "twilio_api_key_secret", + "fieldtype": "Password", + "label": "Twilio API Key Secret" + }, + { + "fieldname": "invoicing_section", + "fieldtype": "Section Break", + "label": "Invoicing" + }, + { + "fieldname": "column_break_qfwx", + "fieldtype": "Column Break" + }, + { + "description": "Fetched from jingrow.com", + "fieldname": "print_format", + "fieldtype": "Data", + "label": "Print Format" + }, + { + "default": "0", + "description": "Uses Bench get-app cache for faster image builds. Will be set only if Bench version is 5.22.1 or later.", + "fieldname": "use_app_cache", + "fieldtype": "Check", + "label": "Use App Cache" + }, + { + "default": "0", + "depends_on": "eval: pg.use_app_cache", + "description": "Use Gzip to compress bench get-app artifacts before caching.", + "fieldname": "compress_app_cache", + "fieldtype": "Check", + "label": "Compress App Cache" + }, + { + "fieldname": "column_break_rdlr", + "fieldtype": "Column Break" + }, + { + "default": "0", + "fieldname": "realtime_job_updates", + "fieldtype": "Check", + "label": "Realtime Job Updates" + }, + { + "default": "0", + "description": "Quickens builds by fetching app changes without rebuilding app if app rebuild is not required.", + "fieldname": "use_delta_builds", + "fieldtype": "Check", + "label": "Use Delta Builds" + }, + { + "fieldname": "hybrid_server_tab", + "fieldtype": "Tab Break", + "label": "Hybrid Server" + }, + { + "fieldname": "hybrid_cluster", + "fieldtype": "Link", + "label": "Hybrid Cluster", + "options": "Cluster" + }, + { + "fieldname": "hybrid_domain", + "fieldtype": "Link", + "label": "Hybrid Domain", + "options": "Root Domain" + }, + { + "default": "0", + "fieldname": "disable_auto_retry", + "fieldtype": "Check", + "label": "Disable Auto Retry" + }, + { + "default": "1", + "fieldname": "disable_agent_job_deduplication", + "fieldtype": "Check", + "label": "Disable Agent Job Deduplication" + }, + { + "fieldname": "agent_sentry_dsn", + "fieldtype": "Data", + "label": "Agent Sentry DSN" + }, + { + "fieldname": "build_server", + "fieldtype": "Link", + "label": "Build Server", + "options": "Server" + }, + { + "default": "10", + "fieldname": "tls_renewal_queue_size", + "fieldtype": "Int", + "label": "TLS Renewal Queue Size" + }, + { + "default": "80", + "fieldname": "micro_debit_charge_cny", + "fieldtype": "Currency", + "label": "Micro Debit Charge (CNY)", + "precision": "0" + }, + { + "default": "1", + "fieldname": "micro_debit_charge_usd", + "fieldtype": "Currency", + "label": "Micro Debit Charge (USD)", + "precision": "0" + }, + { + "default": "master", + "fieldname": "branch", + "fieldtype": "Data", + "label": "Branch" + }, + { + "fieldname": "column_break_yhwz", + "fieldtype": "Column Break" + }, + { + "fieldname": "column_break_cpry", + "fieldtype": "Column Break" + }, + { + "fieldname": "column_break_wrqp", + "fieldtype": "Column Break" + }, + { + "default": "500", + "fieldname": "usage_record_creation_batch_size", + "fieldtype": "Int", + "label": "Usage Record Creation Batch Size" + }, + { + "fieldname": "hetzner_section", + "fieldtype": "Section Break", + "label": "Hetzner" + }, + { + "fieldname": "hetzner_api_token", + "fieldtype": "Password", + "label": "Hetzner API Token" + }, + { + "fieldname": "jcloud_trial_plan", + "fieldtype": "Link", + "label": "Jcloud Trial Plan", + "options": "Site Plan" + }, + { + "fieldname": "section_break_jstu", + "fieldtype": "Section Break" + }, + { + 
"default": "0", + "fieldname": "enable_app_grouping", + "fieldtype": "Check", + "label": "Enable App Grouping" + }, + { + "fieldname": "default_apps", + "fieldtype": "Table", + "label": "Default Apps", + "options": "App Group" + }, + { + "default": "0", + "fieldname": "enable_email_pre_verification", + "fieldtype": "Check", + "label": "Enable Email Pre-Verification" + }, + { + "fieldname": "bench_section", + "fieldtype": "Section Break", + "label": "Bench" + }, + { + "default": "512", + "fieldname": "redis_cache_size", + "fieldtype": "Int", + "label": "Redis Cache Size (MB)" + }, + { + "fieldname": "partner_tab", + "fieldtype": "Tab Break", + "label": "Partner" + }, + { + "fieldname": "partnership_fees_section", + "fieldtype": "Section Break", + "label": "Partnership Fees" + }, + { + "fieldname": "partnership_fee_usd", + "fieldtype": "Int", + "label": "Partnership Fee USD" + }, + { + "fieldname": "column_break_yxrj", + "fieldtype": "Column Break" + }, + { + "fieldname": "partnership_fee_cny", + "fieldtype": "Int", + "label": "Partnership Fee CNY" + }, + { + "fieldname": "github_pat_token", + "fieldtype": "Data", + "label": "Github PAT Token" + }, + { + "default": "1", + "fieldname": "disable_physical_backup", + "fieldtype": "Check", + "label": "Disable Physical Backup" + }, + { + "fieldname": "physical_backups_section", + "fieldtype": "Section Break", + "label": "Physical Backups" + }, + { + "default": "0", + "description": "If physical restore fails, it will spawn up a new database container to try logical restoration.", + "fieldname": "enable_physical_restore_failover", + "fieldtype": "Check", + "label": "Enable Physical Restore Failover" + }, + { + "fieldname": "physical_restore_docker_image", + "fieldtype": "Data", + "label": "Physical Restore Docker Image" + }, + { + "fieldname": "spamd_section", + "fieldtype": "Section Break", + "label": "Spamd" + }, + { + "default": "0", + "fieldname": "enable_spam_check", + "fieldtype": "Check", + "label": "Enable Spam Check" + }, + { + "fieldname": "spamd_endpoint", + "fieldtype": "Data", + "label": "Spamd Endpoint" + }, + { + "fieldname": "column_break_xhfy", + "fieldtype": "Column Break" + }, + { + "fieldname": "spamd_api_key", + "fieldtype": "Data", + "label": "Spamd API Key" + }, + { + "fieldname": "spamd_api_secret", + "fieldtype": "Password", + "label": "Spamd API Secret" + }, + { + "collapsible": 1, + "fieldname": "alipay_settings_section", + "fieldtype": "Section Break", + "label": "Alipay Settings" + }, + { + "default": "https://openapi.alipay.com/gateway.do", + "description": "\u652f\u4ed8\u5b9d\u7f51\u5173\u5730\u5740\uff0c\u4f8b\u5982\uff1ahttps://openapi.alipay.com/gateway.do", + "fieldname": "alipay_server_url", + "fieldtype": "Data", + "label": "Alipay Server URL" + }, + { + "description": "\u652f\u4ed8\u5b9d\u5e94\u7528ID", + "fieldname": "alipay_app_id", + "fieldtype": "Data", + "label": "Alipay App ID" + }, + { + "description": "\u652f\u4ed8\u5b8c\u6210\u540e\u7684\u8df3\u8f6c\u5730\u5740", + "fieldname": "alipay_return_url", + "fieldtype": "Data", + "label": "Alipay Return URL" + }, + { + "description": "\u652f\u4ed8\u7ed3\u679c\u901a\u77e5\u5730\u5740", + "fieldname": "alipay_notify_url", + "fieldtype": "Data", + "label": "Alipay Notify URL" + }, + { + "fieldname": "column_break_alipay", + "fieldtype": "Column Break" + }, + { + "description": "\u5e94\u7528\u79c1\u94a5\uff0c\u7528\u4e8e\u7b7e\u540d", + "fieldname": "alipay_app_private_key", + "fieldtype": "Long Text", + "label": "Alipay App Private Key" + }, + { + "description": 
"\u652f\u4ed8\u5b9d\u516c\u94a5\uff0c\u7528\u4e8e\u9a8c\u8bc1\u7b7e\u540d", + "fieldname": "alipay_public_key", + "fieldtype": "Long Text", + "label": "Alipay Public Key" + }, + { + "collapsible": 1, + "fieldname": "wechatpay_settings_section", + "fieldtype": "Section Break", + "label": "Wechatpay Settings" + }, + { + "description": "\u5fae\u4fe1\u652f\u4ed8AppID", + "fieldname": "wechatpay_appid", + "fieldtype": "Data", + "label": "Wechatpay App ID" + }, + { + "description": "\u5fae\u4fe1\u652f\u4ed8\u5546\u6237\u53f7", + "fieldname": "wechatpay_mchid", + "fieldtype": "Data", + "label": "Wechatpay Merchant ID" + }, + { + "description": "\u5fae\u4fe1\u652f\u4ed8\u7ed3\u679c\u901a\u77e5\u5730\u5740", + "fieldname": "wechatpay_notify_url", + "fieldtype": "Data", + "label": "Wechatpay Notify URL" + }, + { + "description": "\u8bc1\u4e66\u5e8f\u5217\u53f7", + "fieldname": "wechatpay_cert_serial_no", + "fieldtype": "Data", + "label": "Wechatpay Certificate Serial Number" + }, + { + "fieldname": "column_break_wechatpay", + "fieldtype": "Column Break" + }, + { + "description": "\u5fae\u4fe1\u652f\u4ed8APIv3\u5bc6\u94a5", + "fieldname": "wechatpay_apiv3_key", + "fieldtype": "Data", + "label": "Wechatpay API v3 Key" + }, + { + "description": "\u5fae\u4fe1\u652f\u4ed8\u5546\u6237\u79c1\u94a5", + "fieldname": "wechatpay_private_key", + "fieldtype": "Long Text", + "label": "Wechatpay Private Key" + }, + { + "description": "\u5fae\u4fe1\u652f\u4ed8\u5e73\u53f0\u516c\u94a5", + "fieldname": "wechatpay_public_key", + "fieldtype": "Long Text", + "label": "Wechatpay Public Key" + }, + { + "description": "\u5fae\u4fe1\u652f\u4ed8\u516c\u94a5ID", + "fieldname": "wechatpay_public_key_id", + "fieldtype": "Data", + "label": "Wechatpay Public Key ID" + }, + { + "default": "False", + "fieldname": "partner_mode", + "fieldtype": "Data", + "label": "Partner Mode" + }, + { + "default": "None", + "fieldname": "proxy", + "fieldtype": "Data", + "label": "Proxy" + }, + { + "default": "(1, 2)", + "fieldname": "timeout", + "fieldtype": "Data", + "label": "Timeout" + }, + { + "fieldname": "section_break_vvyh", + "fieldtype": "Section Break", + "label": "Aliyun SMS" + }, + { + "fieldname": "aliyun_access_key_id", + "fieldtype": "Data", + "label": " Aliyun ACCESS KEY ID" + }, + { + "fieldname": "aliyun_access_secret", + "fieldtype": "Password", + "label": "Aliyun ACCESS SECRET" + }, + { + "fieldname": "git_section", + "fieldtype": "Section Break", + "label": "Git" + }, + { + "default": "gitea", + "fieldname": "git_service_type", + "fieldtype": "Select", + "label": "Git Service Type", + "options": "gitea\ngithub" + }, + { + "fieldname": "git_url", + "fieldtype": "Data", + "label": "Git URL" + }, + { + "fieldname": "column_break_jhbn", + "fieldtype": "Column Break" + } + ], + "issingle": 1, + "links": [], + "modified": "2025-04-06 19:58:13.368427", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Jcloud Settings", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "print": 1, + "read": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "quick_entry": 1, + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/jcloud_settings/jcloud_settings.py b/jcloud/jcloud/pagetype/jcloud_settings/jcloud_settings.py new file mode 100644 index 0000000..9feb583 --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_settings/jcloud_settings.py @@ -0,0 +1,293 @@ +# Copyright 
(c) 2020, JINGROW +# For license information, please see license.txt +from __future__ import annotations + +import boto3 +import jingrow +from boto3.session import Session +from jingrow.model.document import Document +from jingrow.utils import get_url +from twilio.rest import Client + +from jcloud.api.billing import get_stripe +from jcloud.jcloud.pagetype.telegram_message.telegram_message import TelegramMessage +from jcloud.telegram_utils import Telegram + + +class JcloudSettings(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jcloud.jcloud.pagetype.app_group.app_group import AppGroup + from jcloud.jcloud.pagetype.jerp_app.jerp_app import JERPApp + from jingrow.types import DF + + agent_github_access_token: DF.Data | None + agent_repository_owner: DF.Data | None + agent_sentry_dsn: DF.Data | None + alipay_app_id: DF.Data | None + alipay_app_private_key: DF.LongText | None + alipay_notify_url: DF.Data | None + alipay_public_key: DF.LongText | None + alipay_return_url: DF.Data | None + alipay_server_url: DF.Data | None + aliyun_access_key_id: DF.Data | None + aliyun_access_secret: DF.Password | None + app_include_script: DF.Data | None + auto_update_queue_size: DF.Int + aws_access_key_id: DF.Data | None + aws_s3_bucket: DF.Data | None + aws_secret_access_key: DF.Password | None + backup_interval: DF.Int + backup_limit: DF.Int + backup_offset: DF.Int + backup_region: DF.Data | None + backup_rotation_scheme: DF.Literal["FIFO", "Grandfather-father-son"] + bench_configuration: DF.Code + branch: DF.Data | None + build_directory: DF.Data | None + build_server: DF.Link | None + central_migration_server: DF.Link | None + certbot_directory: DF.Data + clone_directory: DF.Data | None + cluster: DF.Link | None + code_server: DF.Data | None + code_server_password: DF.Data | None + commission: DF.Float + compress_app_cache: DF.Check + data_40: DF.Data | None + default_apps: DF.Table[AppGroup] + default_outgoing_id: DF.Data | None + default_outgoing_pass: DF.Data | None + disable_agent_job_deduplication: DF.Check + disable_auto_retry: DF.Check + disable_physical_backup: DF.Check + docker_registry_namespace: DF.Data | None + docker_registry_password: DF.Data | None + docker_registry_url: DF.Data | None + docker_registry_username: DF.Data | None + domain: DF.Link | None + eff_registration_email: DF.Data + enable_app_grouping: DF.Check + enable_email_pre_verification: DF.Check + enable_google_oauth: DF.Check + enable_physical_restore_failover: DF.Check + enable_site_pooling: DF.Check + enable_spam_check: DF.Check + enforce_storage_limits: DF.Check + free_credits_cny: DF.Currency + free_credits_usd: DF.Currency + git_service_type: DF.Literal["gitea", "github"] + git_url: DF.Data | None + github_access_token: DF.Data | None + github_app_client_id: DF.Data | None + github_app_client_secret: DF.Data | None + github_app_id: DF.Data | None + github_app_private_key: DF.Code | None + github_app_public_link: DF.Data | None + github_pat_token: DF.Data | None + github_webhook_secret: DF.Data | None + gst_percentage: DF.Float + hetzner_api_token: DF.Password | None + hybrid_cluster: DF.Link | None + hybrid_domain: DF.Link | None + jcloud_monitoring_password: DF.Password | None + jcloud_trial_plan: DF.Link | None + jerp_api_key: DF.Data | None + jerp_api_secret: DF.Password | None + jerp_apps: DF.Table[JERPApp] + jerp_cluster: DF.Link | None + jerp_domain: DF.Link | None + jerp_group: DF.Link 
| None + jerp_plan: DF.Link | None + jerp_url: DF.Data | None + jingrow_url: DF.Data | None + jingrowio_api_key: DF.Data | None + jingrowio_api_secret: DF.Password | None + log_server: DF.Link | None + mailgun_api_key: DF.Data | None + max_allowed_screenshots: DF.Int + micro_debit_charge_cny: DF.Currency + micro_debit_charge_usd: DF.Currency + monitor_server: DF.Link | None + monitor_token: DF.Data | None + ngrok_auth_token: DF.Data | None + offsite_backups_access_key_id: DF.Data | None + offsite_backups_count: DF.Int + offsite_backups_provider: DF.Literal["AWS S3"] + offsite_backups_secret_access_key: DF.Password | None + partner_mode: DF.Data | None + partnership_fee_cny: DF.Int + partnership_fee_usd: DF.Int + physical_restore_docker_image: DF.Data | None + plausible_api_key: DF.Password | None + plausible_site_id: DF.Data | None + plausible_url: DF.Data | None + print_format: DF.Data | None + proxy: DF.Data | None + publish_docs: DF.Check + razorpay_key_id: DF.Data | None + razorpay_key_secret: DF.Password | None + razorpay_webhook_secret: DF.Data | None + realtime_job_updates: DF.Check + redis_cache_size: DF.Int + remote_access_key_id: DF.Data | None + remote_link_expiry: DF.Int + remote_secret_access_key: DF.Password | None + remote_uploads_bucket: DF.Data | None + root_domain: DF.Data | None + rsa_key_size: DF.Literal["2048", "3072", "4096"] + spaces_domain: DF.Link | None + spamd_api_key: DF.Data | None + spamd_api_secret: DF.Password | None + spamd_endpoint: DF.Data | None + ssh_certificate_authority: DF.Link | None + staging_expiry: DF.Int + staging_plan: DF.Link | None + standby_pool_size: DF.Int + standby_queue_size: DF.Int + stripe_cny_plan_id: DF.Data | None + stripe_product_id: DF.Data | None + stripe_publishable_key: DF.Data | None + stripe_secret_key: DF.Password | None + stripe_usd_plan_id: DF.Data | None + stripe_webhook_endpoint_id: DF.Data | None + stripe_webhook_secret: DF.Data | None + suspend_builds: DF.Check + telegram_alert_chat_id: DF.Data | None + telegram_alerts_chat_group: DF.Link | None + telegram_bot_token: DF.Data | None + telegram_chat_id: DF.Data | None + threshold: DF.Float + timeout: DF.Data | None + tls_renewal_queue_size: DF.Int + trial_sites_count: DF.Int + twilio_account_sid: DF.Data | None + twilio_api_key_secret: DF.Password | None + twilio_api_key_sid: DF.Data | None + twilio_phone_number: DF.Phone | None + usage_record_creation_batch_size: DF.Int + usd_rate: DF.Float + use_app_cache: DF.Check + use_delta_builds: DF.Check + use_staging_ca: DF.Check + verify_cards_with_micro_charge: DF.Literal["No", "Only CNY", "Only USD", "Both CNY and USD"] + webroot_directory: DF.Data | None + wechatpay_apiv3_key: DF.Data | None + wechatpay_appid: DF.Data | None + wechatpay_cert_serial_no: DF.Data | None + wechatpay_mchid: DF.Data | None + wechatpay_notify_url: DF.Data | None + wechatpay_private_key: DF.LongText | None + wechatpay_public_key: DF.LongText | None + wechatpay_public_key_id: DF.Data | None + # end: auto-generated types + + dashboard_fields = ( + "partnership_fee_cny", + "partnership_fee_usd", + ) + + @jingrow.whitelist() + def create_stripe_webhook(self): + stripe = get_stripe() + url = jingrow.utils.get_url( + "/api/method/jcloud.jcloud.pagetype.stripe_webhook_log.stripe_webhook_log.stripe_webhook_handler" + ) + webhook = stripe.WebhookEndpoint.create( + url=url, + enabled_events=[ + "payment_intent.requires_action", + "payment_intent.payment_failed", + "payment_intent.succeeded", + "payment_method.attached", + "invoice.payment_action_required", + 
"invoice.payment_succeeded", + "invoice.payment_failed", + "invoice.finalized", + "mandate.updated", + "setup_intent.succeeded", + ], + ) + self.stripe_webhook_endpoint_id = webhook["id"] + self.stripe_webhook_secret = webhook["secret"] + self.flags.ignore_mandatory = True + self.save() + + @jingrow.whitelist() + def get_github_app_manifest(self): + if jingrow.conf.developer_mode: + app_name = f"Jingrow {jingrow.generate_hash(length=6).upper()}" + else: + app_name = "Jingrow" + return { + "name": app_name, + "url": "https://jingrow.cloud", + "hook_attributes": {"url": get_url("api/method/jcloud.api.github.hook")}, + "redirect_url": get_url("github/redirect"), + "description": "Managed Jingrow Hosting", + "public": True, + "default_events": ["create", "push", "release"], + "default_permissions": {"contents": "read"}, + # These keys aren't documented under the app creation from manifest + # https://docs.git.jingrow.com:3000/en/free-pro-team@latest/developers/apps/creating-a-github-app-from-a-manifest + # But are shown under app creation using url parameters + # https://docs.git.jingrow.com:3000/en/free-pro-team@latest/developers/apps/creating-a-github-app-using-url-parameters + # They seem to work. This might change later + "callback_url": get_url("github/authorize"), + "request_oauth_on_install": True, + "setup_on_update": True, + } + + @property + def boto3_offsite_backup_session(self) -> Session: + """Get new preconfigured boto3 session for offsite backup provider.""" + return Session( + aws_access_key_id=self.offsite_backups_access_key_id, + aws_secret_access_key=self.get_password( + "offsite_backups_secret_access_key", raise_exception=False + ), + region_name="ap-south-1", + ) + + @property + def boto3_iam_client(self): + return boto3.client( + "iam", + aws_access_key_id=self.aws_access_key_id, + aws_secret_access_key=self.get_password("aws_secret_access_key"), + ) + + @classmethod + def is_offsite_setup(cls): + return any( + jingrow.db.get_value( + "Jcloud Settings", + "Jcloud Settings", + ["aws_s3_bucket", "offsite_backups_access_key_id"], + ) + ) + + @property + def telegram(self): + return Telegram + + @property + def telegram_message(self): + return TelegramMessage + + @property + def twilio_client(self) -> Client: + account_sid = self.twilio_account_sid + api_key_sid = self.twilio_api_key_sid + api_key_secret = self.get_password("twilio_api_key_secret") + return Client(api_key_sid, api_key_secret, account_sid) + + def get_default_apps(self): + if hasattr(self, "enable_app_grouping") and hasattr(self, "default_apps"): # noqa + if self.enable_app_grouping: + return [app.app for app in self.default_apps] + return [] diff --git a/jcloud/jcloud/pagetype/jcloud_settings/patches/move_stripe_credentials_to_jcloud_settings.py b/jcloud/jcloud/pagetype/jcloud_settings/patches/move_stripe_credentials_to_jcloud_settings.py new file mode 100644 index 0000000..5b79fd7 --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_settings/patches/move_stripe_credentials_to_jcloud_settings.py @@ -0,0 +1,21 @@ +import jingrow + + +def execute(): + from jingrow.utils.password import get_decrypted_password, set_encrypted_password + + stripe_account = jingrow.db.get_single_value("Jcloud Settings", "stripe_account") + + # Fetch credentials from "Stripe Settings" pagetype + secret_key = get_decrypted_password("Stripe Settings", stripe_account, "secret_key") + publishable_key = jingrow.db.get_value( + "Stripe Settings", stripe_account, "publishable_key" + ) + + jingrow.reload_pagetype("Jcloud Settings") + + # Set 
credentials in Jcloud Settings + jingrow.db.set_single_value("Jcloud Settings", "stripe_publishable_key", publishable_key) + set_encrypted_password( + "Jcloud Settings", "Jcloud Settings", secret_key, "stripe_secret_key" + ) diff --git a/jcloud/jcloud/pagetype/jcloud_settings/patches/set_jcloud_monitoring_password.py b/jcloud/jcloud/pagetype/jcloud_settings/patches/set_jcloud_monitoring_password.py new file mode 100644 index 0000000..6d8157f --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_settings/patches/set_jcloud_monitoring_password.py @@ -0,0 +1,11 @@ +import jingrow + + +def execute(): + jingrow.reload_pagetype("Jcloud Settings") + settings = jingrow.get_single("Jcloud Settings") + try: + settings.get_password("jcloud_monitoring_password") + except jingrow.AuthenticationError: + settings.jcloud_monitoring_password = jingrow.generate_hash() + settings.save() diff --git a/jcloud/jcloud/pagetype/jcloud_settings/patches/set_redis_cache_size.py b/jcloud/jcloud/pagetype/jcloud_settings/patches/set_redis_cache_size.py new file mode 100644 index 0000000..167cafc --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_settings/patches/set_redis_cache_size.py @@ -0,0 +1,11 @@ +import jingrow +from jingrow.core.utils import find + + +def execute(): + jingrow.reload_pagetype("Jcloud Settings") + settings = jingrow.get_single("Jcloud Settings") + if not settings.redis_cache_size: + redis_cache_size_field = find(settings.meta.fields, lambda x: x.fieldname == "redis_cache_size") + settings.redis_cache_size = redis_cache_size_field.default + settings.save() diff --git a/jcloud/jcloud/pagetype/jcloud_settings/test_jcloud_settings.py b/jcloud/jcloud/pagetype/jcloud_settings/test_jcloud_settings.py new file mode 100644 index 0000000..64d4236 --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_settings/test_jcloud_settings.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# See license.txt + + +import unittest + +import jingrow + +from jcloud.jcloud.pagetype.cluster.test_cluster import create_test_cluster + + +def create_test_jcloud_settings(): + """Create test jcloud settings pg""" + create_test_cluster() + if not jingrow.db.exists("TLS Certificate", "*.fc.dev"): + jingrow.get_pg( + { + "pagetype": "TLS Certificate", + "name": "*.fc.dev", + "domain": "fc.dev", + "wildcard": True, + "status": "Active", + "rsa_key_size": 2048, + } + ).db_insert() + + jingrow.get_pg( + { + "pagetype": "Root Domain", + "name": "fc.dev", + "dns_provider": "AWS Route 53", + "default_cluster": "Default", + "aws_access_key_id": jingrow.mock("password"), + "aws_secret_access_key": jingrow.mock("password"), + } + ).insert(ignore_if_duplicate=True) + + settings = jingrow.get_single("Jcloud Settings") + settings.domain = "fc.dev" + settings.bench_configuration = "{}" + settings.rsa_key_size = 2048 + settings.certbot_directory = ".certbot" + settings.eff_registration_email = jingrow.mock("email") + settings.save() + return settings + + +class TestJcloudSettings(unittest.TestCase): + pass diff --git a/jcloud/jcloud/pagetype/jcloud_tag/__init__.py b/jcloud/jcloud/pagetype/jcloud_tag/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/jcloud_tag/jcloud_tag.js b/jcloud/jcloud/pagetype/jcloud_tag/jcloud_tag.js new file mode 100644 index 0000000..837e45b --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_tag/jcloud_tag.js @@ -0,0 +1,8 @@ +// Copyright (c) 2023, JINGROW +// For license information, please see license.txt + +// jingrow.ui.form.on("Jcloud Tag", { +// refresh(frm) { 
+ +// }, +// }); diff --git a/jcloud/jcloud/pagetype/jcloud_tag/jcloud_tag.json b/jcloud/jcloud/pagetype/jcloud_tag/jcloud_tag.json new file mode 100644 index 0000000..c2145ae --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_tag/jcloud_tag.json @@ -0,0 +1,85 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2023-07-02 00:06:18.990686", + "default_view": "List", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "team", + "tag", + "pagetype_name" + ], + "fields": [ + { + "fieldname": "team", + "fieldtype": "Link", + "label": "Team", + "options": "Team" + }, + { + "fieldname": "tag", + "fieldtype": "Data", + "in_list_view": 1, + "in_preview": 1, + "label": "Tag" + }, + { + "fieldname": "pagetype_name", + "fieldtype": "Link", + "label": "Pagetype Name", + "options": "PageType" + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2024-05-30 13:40:56.625943", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Jcloud Tag", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + }, + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "Jcloud Admin", + "share": 1, + "write": 1 + }, + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "Jcloud Member", + "share": 1, + "write": 1 + } + ], + "show_title_field_in_link": 1, + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "title_field": "tag" +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/jcloud_tag/jcloud_tag.py b/jcloud/jcloud/pagetype/jcloud_tag/jcloud_tag.py new file mode 100644 index 0000000..26295ec --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_tag/jcloud_tag.py @@ -0,0 +1,22 @@ +# Copyright (c) 2023, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class JcloudTag(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + pagetype_name: DF.Link | None + tag: DF.Data | None + team: DF.Link | None + # end: auto-generated types + + dashboard_fields = ["tag", "pagetype_name", "team"] diff --git a/jcloud/jcloud/pagetype/jcloud_tag/test_jcloud_tag.py b/jcloud/jcloud/pagetype/jcloud_tag/test_jcloud_tag.py new file mode 100644 index 0000000..6465e8e --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_tag/test_jcloud_tag.py @@ -0,0 +1,25 @@ +# Copyright (c) 2023, JINGROW +# See license.txt + +import jingrow +from jingrow.tests.utils import JingrowTestCase + +from jcloud.jcloud.pagetype.team.test_team import create_test_team + + +def create_and_add_test_tag(name: str, pagetype: str, tag: str = "test_tag"): + test_tag = jingrow.get_pg( + { + "pagetype": "Jcloud Tag", + "pagetype_name": pagetype, + "team": create_test_team(), + "tag": tag, + } + ).insert(ignore_permissions=True) + pg = jingrow.get_pg(pagetype, name).append("tags", {"tag": test_tag}) + pg.save() + return test_tag + + +class TestJcloudTag(JingrowTestCase): + pass diff --git a/jcloud/jcloud/pagetype/jcloud_user_permission/__init__.py b/jcloud/jcloud/pagetype/jcloud_user_permission/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/jcloud_user_permission/jcloud_user_permission.js b/jcloud/jcloud/pagetype/jcloud_user_permission/jcloud_user_permission.js new file mode 100644 index 0000000..530d28c --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_user_permission/jcloud_user_permission.js @@ -0,0 +1,8 @@ +// Copyright (c) 2023, JINGROW +// For license information, please see license.txt + +// jingrow.ui.form.on("Jcloud User Permission", { +// refresh(frm) { + +// }, +// }); diff --git a/jcloud/jcloud/pagetype/jcloud_user_permission/jcloud_user_permission.json b/jcloud/jcloud/pagetype/jcloud_user_permission/jcloud_user_permission.json new file mode 100644 index 0000000..0415ba2 --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_user_permission/jcloud_user_permission.json @@ -0,0 +1,108 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2023-08-04 15:08:48.970377", + "default_view": "List", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "type", + "user", + "group", + "document_type", + "document_name", + "action", + "config" + ], + "fields": [ + { + "depends_on": "eval: pg.type == \"User\" || pg.type == \"Config\";", + "fieldname": "user", + "fieldtype": "Link", + "in_filter": 1, + "in_list_view": 1, + "in_preview": 1, + "in_standard_filter": 1, + "label": "User", + "options": "User", + "search_index": 1 + }, + { + "depends_on": "eval: pg.type != 'Config';", + "fieldname": "document_type", + "fieldtype": "Link", + "in_filter": 1, + "in_list_view": 1, + "in_preview": 1, + "in_standard_filter": 1, + "label": "Document Type", + "options": "PageType" + }, + { + "depends_on": "eval: pg.type != 'Config';", + "fieldname": "document_name", + "fieldtype": "Dynamic Link", + "in_filter": 1, + "in_list_view": 1, + "in_preview": 1, + "in_standard_filter": 1, + "label": "Document Name", + "options": "document_type" + }, + { + "depends_on": "eval: pg.type != 'Config';", + "fieldname": "action", + "fieldtype": "Data", + "in_filter": 1, + "in_list_view": 1, + "in_preview": 1, + "in_standard_filter": 1, + "label": "Action" + }, + { + "default": "User", + "fieldname": "type", + "fieldtype": "Select", + "label": "Type", + "options": "User\nGroup\nConfig" + }, + { + "depends_on": "eval: pg.type == 
\"Group\";", + "fieldname": "group", + "fieldtype": "Link", + "label": "Group", + "options": "Jcloud Permission Group" + }, + { + "depends_on": "eval: pg.type == 'Config';", + "fieldname": "config", + "fieldtype": "JSON", + "label": "config" + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2023-12-15 15:48:28.325993", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Jcloud User Permission", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/jcloud_user_permission/jcloud_user_permission.py b/jcloud/jcloud/pagetype/jcloud_user_permission/jcloud_user_permission.py new file mode 100644 index 0000000..b30063b --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_user_permission/jcloud_user_permission.py @@ -0,0 +1,120 @@ +# Copyright (c) 2023, JINGROW +# For license information, please see license.txt + +from typing import Dict + +import jingrow +from jingrow.model.document import Document + +ALLOWED_CONFIG_PERMS = ["global", "restricted"] + + +class JcloudUserPermission(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + action: DF.Data | None + config: DF.JSON | None + document_name: DF.DynamicLink | None + document_type: DF.Link | None + group: DF.Link | None + type: DF.Literal["User", "Group", "Config"] + user: DF.Link | None + # end: auto-generated types + + def validate(self): + if self.type == "Config": + self.validate_config() + + def validate_config(self): + config = jingrow.parse_json(self.config) + if not set(config.keys()).issubset(set(ALLOWED_CONFIG_PERMS)): + jingrow.throw(f"Invalid config key. 
Allowed keys are: {format(ALLOWED_CONFIG_PERMS)}") + + +def has_user_permission(pg: str, name: str, action: str, groups: list = None): + groups = groups or [] + user = jingrow.session.user + allowed = False + + if not groups: + groups = jingrow.get_all("Jcloud Permission Group User", {"user": user}, pluck="parent") + + # part of a group with access + if jingrow.db.exists( + "Jcloud User Permission", + { + "type": "Group", + "group": ("in", groups), + "document_type": pg, + "document_name": name, + "action": action, + }, + ): + allowed = True + + # user has granular perm access + if jingrow.db.exists( + "Jcloud User Permission", + { + "type": "User", + "user": user, + "document_type": pg, + "document_name": name, + "action": action, + }, + ): + allowed = True + + # has config perm access + config = jingrow.db.get_value( + "Jcloud User Permission", {"user": user, "type": "Config"}, "config", as_dict=True + ) + if config: + allowed = check_config_perm( + jingrow.parse_json(config["config"]), pg, name, action, allowed + ) + + return allowed + + +def check_config_perm( + config: Dict, pagetype: str, name: str, action: str, allowed: bool +): + perm_types = config.keys() + + if "global" in perm_types: + allowed = has_config_perm(config["global"], pagetype, name, action, allowed, "global") + + if "restricted" in perm_types: + allowed = has_config_perm( + config["restricted"], pagetype, name, action, allowed, "restricted" + ) + + return allowed + + +def has_config_perm( + config: Dict, pagetype: str, name: str, action: str, allowed: bool, ptype: str +): + if pagetype in config.keys(): + docnames = config[pagetype].keys() + if name in docnames: + name = name + elif "*" in docnames: + name = "*" + else: + return allowed + + if action in config[pagetype][name] or "*" in config[pagetype][name]: + if ptype == "restricted": + allowed = False + elif ptype == "global": + allowed = True + + return allowed diff --git a/jcloud/jcloud/pagetype/jcloud_user_permission/test_jcloud_user_permission.py b/jcloud/jcloud/pagetype/jcloud_user_permission/test_jcloud_user_permission.py new file mode 100644 index 0000000..24a0e13 --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_user_permission/test_jcloud_user_permission.py @@ -0,0 +1,90 @@ +# Copyright (c) 2023, JINGROW +# See license.txt + +import jingrow +from jingrow.tests.utils import JingrowTestCase + +from jcloud.jcloud.pagetype.jcloud_user_permission.jcloud_user_permission import ( + has_user_permission, +) +from jcloud.jcloud.pagetype.site.test_site import create_test_site +from jcloud.jcloud.pagetype.team.test_team import create_test_team + + +class TestJcloudUserPermission(JingrowTestCase): + def setUp(self): + self.team = create_test_team() + self.site = create_test_site(subdomain="testpermsite") + + def tearDown(self): + jingrow.set_user("Administrator") + jingrow.db.rollback() + + def test_jcloud_user_permission(self): + self.assertFalse(has_user_permission("Site", self.site.name, "jcloud.api.site.login")) + + jingrow.get_pg( + pagetype="Jcloud User Permission", + type="User", + user=jingrow.session.user, + document_type="Site", + document_name=self.site.name, + action="jcloud.api.site.login", + ).insert(ignore_permissions=True) + + self.assertTrue(has_user_permission("Site", self.site.name, "jcloud.api.site.login")) + self.assertFalse( + has_user_permission("Site", self.site.name, "jcloud.api.site.migrate") + ) + + def test_jcloud_group_permission(self): + group = jingrow.get_pg( + pagetype="Jcloud Permission Group", team=self.team.name, title="Test Group" 
+ ) + group.append("users", {"user": jingrow.session.user}) + group.insert(ignore_permissions=True) + + jingrow.get_pg( + pagetype="Jcloud User Permission", + type="Group", + group=group.name, + document_type="Site", + document_name=self.site.name, + action="jcloud.api.site.overview", + ).insert(ignore_permissions=True) + + self.assertTrue( + has_user_permission( + "Site", self.site.name, "jcloud.api.site.overview", groups=[group.name] + ) + ) + self.assertFalse( + has_user_permission( + "Site", self.site.name, "jcloud.api.site.migrate", groups=[group.name] + ) + ) + + def test_jcloud_config_permission(self): + perms = { + "global": { + "Site": {"*": "jcloud.api.site.login"}, + }, + "restricted": {"Site": {"test.jingrow.dev": "jcloud.api.site.migrate"}}, + } + jingrow.get_pg( + pagetype="Jcloud User Permission", + type="Config", + config=jingrow.as_json(perms), + user=jingrow.session.user, + ).insert(ignore_permissions=True) + + self.assertTrue(has_user_permission("Site", self.site.name, "jcloud.api.site.login")) + self.assertFalse( + has_user_permission("Site", "sometest.jingrow.dev", "jcloud.api.site.restore") + ) + self.assertFalse( + has_user_permission("Site", "test.jingrow.dev", "jcloud.api.site.migrate") + ) + self.assertTrue( + has_user_permission("Site", "test.jingrow.dev", "jcloud.api.site.login") + ) diff --git a/jcloud/jcloud/pagetype/jcloud_webhook/__init__.py b/jcloud/jcloud/pagetype/jcloud_webhook/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/jcloud_webhook/jcloud_webhook.js b/jcloud/jcloud/pagetype/jcloud_webhook/jcloud_webhook.js new file mode 100644 index 0000000..119c9c4 --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_webhook/jcloud_webhook.js @@ -0,0 +1,46 @@ +// Copyright (c) 2024, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Jcloud Webhook', { + refresh(frm) { + let webhook = frm.get_pg(); + + if (!webhook.enabled) { + frm.add_custom_button( + __('Activate'), + () => { + frm.call('activate').then((r) => { + if (r.message) { + jingrow.msgprint(r.message); + } + }); + }, + __('Actions'), + ); + } else { + frm.add_custom_button( + __('Disable'), + () => { + frm.call('disable').then((r) => { + if (r.message) { + jingrow.msgprint(r.message); + } + }); + }, + __('Actions'), + ); + + frm.add_custom_button( + __('Disable and Notify'), + () => { + frm.call('disable_and_notify').then((r) => { + if (r.message) { + jingrow.msgprint(r.message); + } + }); + }, + __('Actions'), + ); + } + }, +}); diff --git a/jcloud/jcloud/pagetype/jcloud_webhook/jcloud_webhook.json b/jcloud/jcloud/pagetype/jcloud_webhook/jcloud_webhook.json new file mode 100644 index 0000000..176c6ae --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_webhook/jcloud_webhook.json @@ -0,0 +1,94 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2024-09-18 14:32:26.332089", + "pagetype": "PageType", + "engine": "InnoDB", + "field_order": [ + "enabled", + "team", + "endpoint", + "secret", + "section_break_xbfh", + "events" + ], + "fields": [ + { + "fieldname": "team", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Team", + "options": "Team", + "reqd": 1 + }, + { + "description": "Will be added in X-Webhook-Secret header of webhook request", + "fieldname": "secret", + "fieldtype": "Data", + "label": "Secret", + "not_nullable": 1 + }, + { + "fieldname": "section_break_xbfh", + "fieldtype": "Section Break" + }, + { + "fieldname": "events", + "fieldtype": "Table", + "label": "Events", + "options": "Jcloud Webhook Selected 
Event", + "reqd": 1 + }, + { + "default": "0", + "fieldname": "enabled", + "fieldtype": "Check", + "label": "Enabled", + "read_only_depends_on": "eval: !pg.enabled" + }, + { + "fieldname": "endpoint", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Endpoint", + "reqd": 1, + "search_index": 1 + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2024-09-23 15:06:57.848414", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Jcloud Webhook", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + }, + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "Jcloud Admin", + "share": 1, + "write": 1 + } + ], + "sort_field": "creation", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/jcloud_webhook/jcloud_webhook.py b/jcloud/jcloud/pagetype/jcloud_webhook/jcloud_webhook.py new file mode 100644 index 0000000..3f373b3 --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_webhook/jcloud_webhook.py @@ -0,0 +1,188 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt + +from __future__ import annotations + +import contextlib +import ipaddress +import json +from urllib.parse import urlparse + +import jingrow +import jingrow.query_builder +import jingrow.query_builder.functions +import requests +from jingrow.model.document import Document + +from jcloud.api.client import dashboard_whitelist +from jcloud.overrides import get_permission_query_conditions_for_pagetype +from jcloud.utils import is_valid_hostname + + +class JcloudWebhook(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + from jcloud.jcloud.pagetype.jcloud_webhook_selected_event.jcloud_webhook_selected_event import ( + JcloudWebhookSelectedEvent, + ) + + enabled: DF.Check + endpoint: DF.Data + events: DF.Table[JcloudWebhookSelectedEvent] + secret: DF.Data + team: DF.Link + # end: auto-generated types + + PAGETYPE = "Jcloud Webhook" + dashboard_fields = ("enabled", "endpoint", "events") + + def validate(self): + # maximum 5 webhooks per team + if self.is_new() and jingrow.db.count("Jcloud Webhook", {"team": self.team}) > 5: + jingrow.throw("You have reached the maximum number of webhooks per team") + + if self.has_value_changed("endpoint"): + self.enabled = 0 + # should have atleast one event selected + if not self.events: + jingrow.throw("At least one event should be selected") + # validate endpoint url format + self.validate_endpoint_url_format() + # check for duplicate webhooks + webhooks = jingrow.get_all( + "Jcloud Webhook", + filters={"team": self.team, "endpoint": self.endpoint, "name": ("!=", self.name)}, + pluck="name", + ) + if len(webhooks) != 0: + jingrow.throw("You have already added webhook for this endpoint") + + def validate_endpoint_url_format(self): + url = urlparse(self.endpoint) + if not url.netloc: + jingrow.throw("Endpoint should be a valid url") + + # protocol should be http or https + if url.scheme not in ["http", "https"]: + jingrow.throw("Endpoint should start with http:// or https://") + + # dont allow query params + if url.query: + jingrow.throw("Endpoint should not have query params") + + isIPAddress = False + # If endpoint target is ip address, it should be a public ip address + with contextlib.suppress(ValueError): + ip = ipaddress.ip_address(url.hostname) + isIPAddress = True + if not ip.is_global: + jingrow.throw("Endpoint address should be a public ip or domain") + + if not isIPAddress: + # domain should be a fqdn + if not is_valid_hostname(url.hostname): + jingrow.throw("Endpoint address should be a valid domain") + + # Endpoint can't be any local domain + if not jingrow.conf.developer_mode and ("localhost" in url.hostname or ".local" in url.hostname): + jingrow.throw("Endpoint can't be localhost or local domain") + + @dashboard_whitelist() + def validate_endpoint(self) -> dict: + response = "" + response_status_code = 0 + payload = {"event": "Webhook Validate", "data": {}} + try: + req = requests.post( + self.endpoint, + timeout=5, + json=payload, + headers={"X-Webhook-Secret": self.secret}, + ) + response = req.text or "" + response_status_code = req.status_code + except requests.exceptions.ConnectionError: + response = "Failed to connect to the webhook endpoint" + except requests.exceptions.SSLError: + response = "SSL Error. Please check if SSL the certificate of the webhook is valid." + except (requests.exceptions.Timeout, requests.exceptions.ConnectTimeout): + response = "Request Timeout. Please check if the webhook is reachable." + except Exception as e: + response = str(e) + + return jingrow._dict( + { + "success": response_status_code >= 200 and response_status_code < 300, + "request": json.dumps(payload, indent=2), + "response": response, + "response_status_code": response_status_code, + } + ) + + @dashboard_whitelist() + def activate(self): + result = self.validate_endpoint() + if result.get("success"): + self.enabled = 1 + self.save() + jingrow.msgprint("Webhook activated successfully") + else: + message = f"Status Code - {result.response_status_code}
Response -
{result.response}" + jingrow.throw(title="Webhook endpoint is invalid", msg=message) + + @dashboard_whitelist() + def disable(self): + self.enabled = False + self.save() + + @dashboard_whitelist() + def disable_and_notify(self): + self.disable() + email = jingrow.db.get_value("Team", self.team, "user") + if not email: + return + if jingrow.conf.developer_mode: + print(f"Emailing {email}") + print(f"{self.name} webhook has been disabled") + return + + jingrow.sendmail( + recipients=email, + subject="Important: Your Configured Webhook on Jingrow is disabled", + template="jcloud_webhook_disabled", + args={"endpoint": self.endpoint}, + now=True, + ) + + @dashboard_whitelist() + def delete(self): + jingrow.db.sql("delete from `tabJcloud Webhook Attempt` where webhook = %s", (self.name,)) + jingrow.delete_pg("Jcloud Webhook", self.name) + + +get_permission_query_conditions = get_permission_query_conditions_for_pagetype("Site") + + +def auto_disable_high_delivery_failure_webhooks(): + # In past hour, if 70% of webhook deliveries has failed, disable the webhook and notify the user + data = jingrow.db.sql( + """ +SELECT `endpoint` +FROM `tabJcloud Webhook Attempt` +WHERE `creation` >= NOW() - INTERVAL 1 HOUR +GROUP BY `endpoint` +HAVING (COUNT(CASE WHEN `status` = 'Failed' THEN 1 END) / COUNT(*)) * 100 > 70; +""", + as_dict=True, + ) + endpoints = [row.endpoint for row in data] + pg_names = jingrow.get_all("Jcloud Webhook", filters={"endpoint": ("in", endpoints)}, pluck="name") + for pg_name in pg_names: + pg = jingrow.get_pg("Jcloud Webhook", pg_name) + pg.disable_and_notify() diff --git a/jcloud/jcloud/pagetype/jcloud_webhook/test_jcloud_webhook.py b/jcloud/jcloud/pagetype/jcloud_webhook/test_jcloud_webhook.py new file mode 100644 index 0000000..35e195a --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_webhook/test_jcloud_webhook.py @@ -0,0 +1,9 @@ +# Copyright (c) 2024, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests.utils import JingrowTestCase + + +class TestJcloudWebhook(JingrowTestCase): + pass diff --git a/jcloud/jcloud/pagetype/jcloud_webhook_attempt/__init__.py b/jcloud/jcloud/pagetype/jcloud_webhook_attempt/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/jcloud_webhook_attempt/jcloud_webhook_attempt.js b/jcloud/jcloud/pagetype/jcloud_webhook_attempt/jcloud_webhook_attempt.js new file mode 100644 index 0000000..3a52120 --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_webhook_attempt/jcloud_webhook_attempt.js @@ -0,0 +1,8 @@ +// Copyright (c) 2024, JINGROW +// For license information, please see license.txt + +// jingrow.ui.form.on("Jcloud Webhook Attempt", { +// refresh(frm) { + +// }, +// }); diff --git a/jcloud/jcloud/pagetype/jcloud_webhook_attempt/jcloud_webhook_attempt.json b/jcloud/jcloud/pagetype/jcloud_webhook_attempt/jcloud_webhook_attempt.json new file mode 100644 index 0000000..bbfef62 --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_webhook_attempt/jcloud_webhook_attempt.json @@ -0,0 +1,95 @@ +{ + "actions": [], + "creation": "2024-09-19 09:33:43.941516", + "pagetype": "PageType", + "engine": "InnoDB", + "field_order": [ + "status", + "column_break_aqtt", + "timestamp", + "section_break_kvpa", + "column_break_uhnt", + "webhook", + "column_break_wpgi", + "endpoint", + "section_break_nnsg", + "response_status_code", + "response_body" + ], + "fields": [ + { + "fieldname": "webhook", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Webhook", + "options": "Jcloud Webhook", + "reqd": 1 + }, + { + "fieldname": 
"column_break_uhnt", + "fieldtype": "Column Break" + }, + { + "fieldname": "column_break_aqtt", + "fieldtype": "Column Break" + }, + { + "fieldname": "status", + "fieldtype": "Select", + "in_list_view": 1, + "label": "Status", + "options": "Sent\nFailed", + "reqd": 1 + }, + { + "fieldname": "section_break_nnsg", + "fieldtype": "Section Break" + }, + { + "fieldname": "response_status_code", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Response Status Code" + }, + { + "fieldname": "response_body", + "fieldtype": "Small Text", + "label": "Response Body", + "read_only": 1 + }, + { + "fieldname": "endpoint", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Endpoint", + "reqd": 1 + }, + { + "fieldname": "section_break_kvpa", + "fieldtype": "Section Break" + }, + { + "fieldname": "column_break_wpgi", + "fieldtype": "Column Break" + }, + { + "fieldname": "timestamp", + "fieldtype": "Datetime", + "in_list_view": 1, + "label": "Timestamp", + "reqd": 1 + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2024-09-25 10:59:44.304591", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Jcloud Webhook Attempt", + "owner": "Administrator", + "permissions": [], + "sort_field": "creation", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/jcloud_webhook_attempt/jcloud_webhook_attempt.py b/jcloud/jcloud/pagetype/jcloud_webhook_attempt/jcloud_webhook_attempt.py new file mode 100644 index 0000000..f957ca4 --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_webhook_attempt/jcloud_webhook_attempt.py @@ -0,0 +1,34 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt + +from __future__ import annotations + +import jingrow +from jingrow.model.document import Document + + +class JcloudWebhookAttempt(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + endpoint: DF.Data + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + response_body: DF.SmallText | None + response_status_code: DF.Data | None + status: DF.Literal["Sent", "Failed"] + timestamp: DF.Datetime + webhook: DF.Link + # end: auto-generated types + + +def has_permission(pg, ptype, user): + if ptype != "read": + return False + return jingrow.get_pg("Jcloud Webhook", pg.webhook).has_permission("read", user) diff --git a/jcloud/jcloud/pagetype/jcloud_webhook_attempt/test_jcloud_webhook_attempt.py b/jcloud/jcloud/pagetype/jcloud_webhook_attempt/test_jcloud_webhook_attempt.py new file mode 100644 index 0000000..811b2ee --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_webhook_attempt/test_jcloud_webhook_attempt.py @@ -0,0 +1,9 @@ +# Copyright (c) 2024, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests.utils import JingrowTestCase + + +class TestJcloudWebhookAttempt(JingrowTestCase): + pass diff --git a/jcloud/jcloud/pagetype/jcloud_webhook_event/__init__.py b/jcloud/jcloud/pagetype/jcloud_webhook_event/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/jcloud_webhook_event/jcloud_webhook_event.js b/jcloud/jcloud/pagetype/jcloud_webhook_event/jcloud_webhook_event.js new file mode 100644 index 0000000..6724995 --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_webhook_event/jcloud_webhook_event.js @@ -0,0 +1,8 @@ +// Copyright (c) 2024, JINGROW +// For license information, please see license.txt + +// jingrow.ui.form.on("Jcloud Webhook Event", { +// refresh(frm) { + +// }, +// }); diff --git a/jcloud/jcloud/pagetype/jcloud_webhook_event/jcloud_webhook_event.json b/jcloud/jcloud/pagetype/jcloud_webhook_event/jcloud_webhook_event.json new file mode 100644 index 0000000..cc1675e --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_webhook_event/jcloud_webhook_event.json @@ -0,0 +1,61 @@ +{ + "actions": [], + "allow_rename": 1, + "autoname": "field:title", + "creation": "2024-09-18 14:35:06.487107", + "pagetype": "PageType", + "engine": "InnoDB", + "field_order": [ + "enabled", + "title", + "description" + ], + "fields": [ + { + "default": "0", + "fieldname": "enabled", + "fieldtype": "Check", + "label": "Enabled" + }, + { + "fieldname": "description", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Description", + "reqd": 1 + }, + { + "fieldname": "title", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Title", + "reqd": 1, + "unique": 1 + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2024-09-18 15:21:38.898742", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Jcloud Webhook Event", + "naming_rule": "By fieldname", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "creation", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/jcloud_webhook_event/jcloud_webhook_event.py b/jcloud/jcloud/pagetype/jcloud_webhook_event/jcloud_webhook_event.py new file mode 100644 index 0000000..6a2aca8 --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_webhook_event/jcloud_webhook_event.py @@ -0,0 +1,23 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class 
JcloudWebhookEvent(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + description: DF.Data + enabled: DF.Check + title: DF.Data + # end: auto-generated types + + PAGETYPE = "Jcloud Webhook Event" + dashboard_fields = ("name", "description") diff --git a/jcloud/jcloud/pagetype/jcloud_webhook_event/test_jcloud_webhook_event.py b/jcloud/jcloud/pagetype/jcloud_webhook_event/test_jcloud_webhook_event.py new file mode 100644 index 0000000..e665db0 --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_webhook_event/test_jcloud_webhook_event.py @@ -0,0 +1,9 @@ +# Copyright (c) 2024, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests.utils import JingrowTestCase + + +class TestJcloudWebhookEvent(JingrowTestCase): + pass diff --git a/jcloud/jcloud/pagetype/jcloud_webhook_log/__init__.py b/jcloud/jcloud/pagetype/jcloud_webhook_log/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/jcloud_webhook_log/jcloud_webhook_log.js b/jcloud/jcloud/pagetype/jcloud_webhook_log/jcloud_webhook_log.js new file mode 100644 index 0000000..dbd6e78 --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_webhook_log/jcloud_webhook_log.js @@ -0,0 +1,8 @@ +// Copyright (c) 2024, JINGROW +// For license information, please see license.txt + +// jingrow.ui.form.on("Jcloud Webhook Log", { +// refresh(frm) { + +// }, +// }); diff --git a/jcloud/jcloud/pagetype/jcloud_webhook_log/jcloud_webhook_log.json b/jcloud/jcloud/pagetype/jcloud_webhook_log/jcloud_webhook_log.json new file mode 100644 index 0000000..4bc7492 --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_webhook_log/jcloud_webhook_log.json @@ -0,0 +1,114 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2024-09-18 16:34:27.110549", + "pagetype": "PageType", + "engine": "InnoDB", + "field_order": [ + "status", + "event", + "column_break_qqjj", + "team", + "section_break_vyzs", + "retries", + "column_break_btzb", + "next_retry_at", + "section_break_wqvc", + "attempts", + "section_break_bkbk", + "request_payload" + ], + "fields": [ + { + "fieldname": "status", + "fieldtype": "Select", + "in_list_view": 1, + "label": "Status", + "options": "Pending\nQueued\nSent\nPartially Sent\nFailed", + "reqd": 1 + }, + { + "fieldname": "event", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Event", + "options": "Jcloud Webhook Event", + "reqd": 1 + }, + { + "fieldname": "column_break_qqjj", + "fieldtype": "Column Break" + }, + { + "fieldname": "team", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Team", + "options": "Team", + "reqd": 1 + }, + { + "fieldname": "section_break_bkbk", + "fieldtype": "Section Break" + }, + { + "fieldname": "section_break_wqvc", + "fieldtype": "Section Break" + }, + { + "fieldname": "section_break_vyzs", + "fieldtype": "Section Break" + }, + { + "default": "0", + "fieldname": "retries", + "fieldtype": "Int", + "label": "Retries" + }, + { + "fieldname": "column_break_btzb", + "fieldtype": "Column Break" + }, + { + "fieldname": "next_retry_at", + "fieldtype": "Datetime", + "label": "Next Retry At" + }, + { + "fieldname": "request_payload", + "fieldtype": "JSON", + "label": "Request Payload", + "reqd": 1 + }, + { + "fieldname": "attempts", + "fieldtype": "Table", + "label": "Attempts", + "options": "Jcloud Webhook Attempt" + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2024-09-25 16:11:01.102311", + 
"modified_by": "Administrator", + "module": "Jcloud", + "name": "Jcloud Webhook Log", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "creation", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/jcloud_webhook_log/jcloud_webhook_log.py b/jcloud/jcloud/pagetype/jcloud_webhook_log/jcloud_webhook_log.py new file mode 100644 index 0000000..6cc342f --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_webhook_log/jcloud_webhook_log.py @@ -0,0 +1,210 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt + +from __future__ import annotations + +import json + +import jingrow +import requests +from jingrow.model.document import Document +from jingrow.utils import add_to_date, now + +from jcloud.overrides import get_permission_query_conditions_for_pagetype + + +class JcloudWebhookLog(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + from jcloud.jcloud.pagetype.jcloud_webhook_attempt.jcloud_webhook_attempt import JcloudWebhookAttempt + + attempts: DF.Table[JcloudWebhookAttempt] + event: DF.Link + next_retry_at: DF.Datetime | None + request_payload: DF.JSON + retries: DF.Int + status: DF.Literal["Pending", "Queued", "Sent", "Partially Sent", "Failed"] + team: DF.Link + # end: auto-generated types + + def validate(self): + if not self.next_retry_at: + self.next_retry_at = jingrow.utils.now() + + def _send_webhook_call(self, webhook_name, payload, url, secret, save: bool = True) -> bool: + response = "" + response_status_code = 0 + try: + req = requests.post( + url, + json=payload, + headers={"X-Webhook-Secret": secret}, + timeout=5, + ) + response = req.text or "" + response_status_code = req.status_code + except requests.exceptions.ConnectionError: + response = "Failed to connect to the webhook endpoint" + except requests.exceptions.SSLError: + response = "SSL Error. Please check if SSL the certificate of the webhook is valid." + except (requests.exceptions.Timeout, requests.exceptions.ConnectTimeout): + response = "Request Timeout. Please check if the webhook is reachable." 
+        except Exception as e:
+            response = str(e)
+
+        sent = response_status_code >= 200 and response_status_code < 300
+
+        self.append(
+            "attempts",
+            {
+                "endpoint": url,
+                "webhook": webhook_name,
+                "status": "Sent" if sent else "Failed",
+                "response_body": response,
+                "response_status_code": response_status_code,
+                "timestamp": jingrow.utils.now(),
+            },
+        )
+        if save:
+            self.save()
+
+        return sent
+
+    def schedule_retry(self, save: bool = True):
+        self.retries = self.retries + 1
+        self.next_retry_at = add_to_date(now(), minutes=2**self.retries)
+        if save:
+            self.save()
+
+    def send(self):
+        if len(self.attempts) == 0:
+            self._process_webhook_call()
+            return
+
+        # Try failed attempts
+        self._retry_failed_attempts()
+
+    def _process_webhook_call(self):
+        try:
+            JcloudWebhookSelectedEvent = jingrow.qb.PageType("Jcloud Webhook Selected Event")
+            JcloudWebhook = jingrow.qb.PageType("Jcloud Webhook")
+            query = (
+                jingrow.qb.from_(JcloudWebhookSelectedEvent)
+                .select(JcloudWebhook.name, JcloudWebhook.endpoint, JcloudWebhook.secret)
+                .left_join(JcloudWebhook)
+                .on(JcloudWebhookSelectedEvent.parent == JcloudWebhook.name)
+                .where(JcloudWebhookSelectedEvent.event == self.event)
+                .where(JcloudWebhook.team == self.team)
+                .where(JcloudWebhook.enabled == 1)
+            )
+            webhooks = query.run(as_dict=True)
+            payload = json.loads(self.request_payload)
+            total = len(webhooks)
+            sent = 0
+            for webhook in webhooks:
+                is_sent = self._send_webhook_call(
+                    webhook.name,
+                    payload,
+                    webhook.endpoint,
+                    webhook.secret,
+                    save=False,
+                )
+                if is_sent:
+                    sent += 1
+
+            if total == 0:
+                self.status = "Sent"
+            else:
+                if sent == total:
+                    self.status = "Sent"
+                elif sent != total and sent != 0:
+                    self.status = "Partially Sent"
+                else:
+                    self.status = "Failed"
+                    self.schedule_retry(save=False)
+        except Exception:
+            self.status = "Failed"
+            self.schedule_retry(save=False)
+
+        self.save()
+
+    def _retry_failed_attempts(self):
+        webhook_call_status = jingrow._dict()
+        for record in self.attempts:
+            if record.status == "Failed" and webhook_call_status.get(record.webhook, "") != "Sent":
+                webhook_call_status[record.webhook] = "Failed"
+            if record.status == "Sent":
+                webhook_call_status[record.webhook] = "Sent"
+
+        # filter out webhooks that need to be retried
+        webhooks_to_retry = [
+            webhook for webhook in webhook_call_status if webhook_call_status[webhook] == "Failed"
+        ]
+
+        sent = 0
+        payload = json.loads(self.request_payload)
+
+        for webhook in webhooks_to_retry:
+            # look up the endpoint and secret of the webhook being retried
+            webhook_data = jingrow.get_value(
+                "Jcloud Webhook", webhook, ["endpoint", "secret"], as_dict=True
+            )
+            is_sent = self._send_webhook_call(
+                webhook,
+                payload,
+                webhook_data.endpoint,
+                webhook_data.secret,
+            )
+            if is_sent:
+                sent += 1
+
+        if len(webhooks_to_retry) == 0 or sent == len(webhooks_to_retry):
+            self.status = "Sent"
+        elif (len(webhook_call_status) - len(webhooks_to_retry) > 0) or sent > 0:
+            self.status = "Partially Sent"
+            self.schedule_retry(save=False)
+        else:
+            self.status = "Failed"
+            self.schedule_retry(save=False)
+
+        self.save()
+
+
+get_permission_query_conditions = get_permission_query_conditions_for_pagetype("Jcloud Webhook Log")
+
+
+def process():
+    records = jingrow.get_all(
+        "Jcloud Webhook Log",
+        filters={
+            "status": ["in", ["Pending", "Failed", "Partially Sent"]],
+            "retries": ["<=", 3],
+            "next_retry_at": ["<=", jingrow.utils.now()],
+        },
+        pluck="name",
+        limit=100,
+    )
+    # set status of these records to Queued
+    jingrow.db.set_value("Jcloud Webhook Log", {"name": ("in", records)}, "status", "Queued")
+    # enqueue these
records + for record in records: + jingrow.enqueue_pg( + "Jcloud Webhook Log", + record, + method="send", + queue="default", + job_id=f"jcloud_webhook_log:{record}", + deduplicate=True, + ) + + +def clean_logs_older_than_24_hours(): + names = jingrow.get_all( + "Jcloud Webhook Log", filters={"creation": ["<", jingrow.utils.add_days(None, -1)]}, pluck="name" + ) + jingrow.delete_pg("Jcloud Webhook Log", names) diff --git a/jcloud/jcloud/pagetype/jcloud_webhook_log/test_jcloud_webhook_log.py b/jcloud/jcloud/pagetype/jcloud_webhook_log/test_jcloud_webhook_log.py new file mode 100644 index 0000000..d7d1da4 --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_webhook_log/test_jcloud_webhook_log.py @@ -0,0 +1,9 @@ +# Copyright (c) 2024, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests.utils import JingrowTestCase + + +class TestJcloudWebhookLog(JingrowTestCase): + pass diff --git a/jcloud/jcloud/pagetype/jcloud_webhook_selected_event/__init__.py b/jcloud/jcloud/pagetype/jcloud_webhook_selected_event/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/jcloud_webhook_selected_event/jcloud_webhook_selected_event.json b/jcloud/jcloud/pagetype/jcloud_webhook_selected_event/jcloud_webhook_selected_event.json new file mode 100644 index 0000000..2641691 --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_webhook_selected_event/jcloud_webhook_selected_event.json @@ -0,0 +1,33 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2024-09-18 15:16:58.232474", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "event" + ], + "fields": [ + { + "fieldname": "event", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Event", + "options": "Jcloud Webhook Event", + "reqd": 1 + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2024-09-18 15:22:11.876264", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Jcloud Webhook Selected Event", + "owner": "Administrator", + "permissions": [], + "sort_field": "creation", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/jcloud_webhook_selected_event/jcloud_webhook_selected_event.py b/jcloud/jcloud/pagetype/jcloud_webhook_selected_event/jcloud_webhook_selected_event.py new file mode 100644 index 0000000..365db1d --- /dev/null +++ b/jcloud/jcloud/pagetype/jcloud_webhook_selected_event/jcloud_webhook_selected_event.py @@ -0,0 +1,23 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class JcloudWebhookSelectedEvent(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + event: DF.Link + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/jerp_app/__init__.py b/jcloud/jcloud/pagetype/jerp_app/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/jerp_app/jerp_app.json b/jcloud/jcloud/pagetype/jerp_app/jerp_app.json new file mode 100644 index 0000000..1ef7631 --- /dev/null +++ b/jcloud/jcloud/pagetype/jerp_app/jerp_app.json @@ -0,0 +1,32 @@ +{ + "actions": [], + "creation": "2021-03-31 11:08:26.860569", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "app" + ], + "fields": [ + { + "fieldname": "app", + "fieldtype": "Link", + "in_list_view": 1, + "label": "App", + "options": "App", + "reqd": 1 + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2021-03-31 11:08:26.860569", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "JERP App", + "owner": "Administrator", + "permissions": [], + "sort_field": "modified", + "sort_order": "DESC", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/jerp_app/jerp_app.py b/jcloud/jcloud/pagetype/jerp_app/jerp_app.py new file mode 100644 index 0000000..32e2adc --- /dev/null +++ b/jcloud/jcloud/pagetype/jerp_app/jerp_app.py @@ -0,0 +1,25 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + + +# import jingrow +from jingrow.model.document import Document + + +class JERPApp(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + app: DF.Link + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/jerp_consultant/__init__.py b/jcloud/jcloud/pagetype/jerp_consultant/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/jerp_consultant/jerp_consultant.js b/jcloud/jcloud/pagetype/jerp_consultant/jerp_consultant.js new file mode 100644 index 0000000..fbc5df7 --- /dev/null +++ b/jcloud/jcloud/pagetype/jerp_consultant/jerp_consultant.js @@ -0,0 +1,6 @@ +// Copyright (c) 2019, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('JERP Consultant', { + refresh: function (frm) {}, +}); diff --git a/jcloud/jcloud/pagetype/jerp_consultant/jerp_consultant.json b/jcloud/jcloud/pagetype/jerp_consultant/jerp_consultant.json new file mode 100644 index 0000000..5346dd4 --- /dev/null +++ b/jcloud/jcloud/pagetype/jerp_consultant/jerp_consultant.json @@ -0,0 +1,78 @@ +{ + "actions": [], + "allow_import": 1, + "autoname": "field:user", + "creation": "2019-01-23 14:20:56.285285", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "user", + "active", + "column_break_4", + "territories" + ], + "fields": [ + { + "default": "1", + "fieldname": "active", + "fieldtype": "Check", + "label": "Active" + }, + { + "fieldname": "column_break_4", + "fieldtype": "Column Break" + }, + { + "fieldname": "territories", + "fieldtype": "Table MultiSelect", + "label": "Territories", + "options": "JERP Consultant Region" + }, + { + "fieldname": "user", + "fieldtype": "Link", + "in_list_view": 1, + "label": "User", + "options": "User", + 
"reqd": 1, + "unique": 1 + } + ], + "links": [], + "modified": "2021-04-21 18:08:19.750497", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "JERP Consultant", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + }, + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "Consultant", + "share": 1, + "write": 1 + } + ], + "quick_entry": 1, + "sort_field": "modified", + "sort_order": "DESC", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/jerp_consultant/jerp_consultant.py b/jcloud/jcloud/pagetype/jerp_consultant/jerp_consultant.py new file mode 100644 index 0000000..4725177 --- /dev/null +++ b/jcloud/jcloud/pagetype/jerp_consultant/jerp_consultant.py @@ -0,0 +1,77 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2019, JINGROW +# For license information, please see license.txt + + +import jingrow +from jingrow.model.document import Document +from jingrow.utils import get_fullname + + +class JERPConsultant(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + from jcloud.jcloud.pagetype.jerp_consultant_region.jerp_consultant_region import ( + JERPConsultantRegion, + ) + + active: DF.Check + territories: DF.TableMultiSelect[JERPConsultantRegion] + user: DF.Link + # end: auto-generated types + + @property + def full_name(self): + return get_fullname(self.name) + + @classmethod + def list_for_region(cls, region_name: str) -> [str]: + """List ACTIVE consultants for a region.""" + return jingrow.db.sql_list( + f""" + SELECT + consultant.name + FROM + `tabJERP Consultant` consultant + JOIN + `tabJERP Consultant Region` region + ON + region.parent = consultant.name + WHERE + consultant.active = True and + region.territory = "{region_name}" + """ + ) + + @classmethod + def _get_one_for_region(cls, region_name: str) -> str: + """Get consultant for a region other than the one last allocated.""" + consultants = cls.list_for_region(region_name) + region = jingrow.get_cached_pg("Region", region_name) + try: + return consultants[consultants.index(region.last_allocated_to) + 1] + except (IndexError, ValueError): + return consultants[0] + except IndexError: + return "" + + @classmethod + def get_one_for_country(cls, country: str) -> str: + """ + Try to get next consultant for a country in round robin fashion. + + Return blank if none. 
+ """ + try: + region = jingrow.db.get_value("Country", country, "region") + jerp_consultant = cls._get_one_for_region(region) + jingrow.db.set_value("Region", region, "last_allocated_to", jerp_consultant) + return jerp_consultant + except Exception: + return "" diff --git a/jcloud/jcloud/pagetype/jerp_consultant/test_jerp_consultant.py b/jcloud/jcloud/pagetype/jerp_consultant/test_jerp_consultant.py new file mode 100644 index 0000000..c343665 --- /dev/null +++ b/jcloud/jcloud/pagetype/jerp_consultant/test_jerp_consultant.py @@ -0,0 +1,10 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2019, JINGROW +# See license.txt + + +import unittest + + +class TestJERPConsultant(unittest.TestCase): + pass diff --git a/jcloud/jcloud/pagetype/jerp_consultant_region/__init__.py b/jcloud/jcloud/pagetype/jerp_consultant_region/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/jerp_consultant_region/jerp_consultant_region.json b/jcloud/jcloud/pagetype/jerp_consultant_region/jerp_consultant_region.json new file mode 100644 index 0000000..7c57ae6 --- /dev/null +++ b/jcloud/jcloud/pagetype/jerp_consultant_region/jerp_consultant_region.json @@ -0,0 +1,32 @@ +{ + "actions": [], + "creation": "2021-04-29 20:25:56.246263", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "territory" + ], + "fields": [ + { + "fieldname": "territory", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Territory", + "options": "Region", + "reqd": 1 + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2021-04-29 20:25:56.246263", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "JERP Consultant Region", + "owner": "Administrator", + "permissions": [], + "sort_field": "modified", + "sort_order": "DESC", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/jerp_consultant_region/jerp_consultant_region.py b/jcloud/jcloud/pagetype/jerp_consultant_region/jerp_consultant_region.py new file mode 100644 index 0000000..b9ee79d --- /dev/null +++ b/jcloud/jcloud/pagetype/jerp_consultant_region/jerp_consultant_region.py @@ -0,0 +1,25 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + + +# import jingrow +from jingrow.model.document import Document + + +class JERPConsultantRegion(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + territory: DF.Link + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/jerp_site_settings/__init__.py b/jcloud/jcloud/pagetype/jerp_site_settings/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/jerp_site_settings/jerp_site_settings.js b/jcloud/jcloud/pagetype/jerp_site_settings/jerp_site_settings.js new file mode 100644 index 0000000..f341a17 --- /dev/null +++ b/jcloud/jcloud/pagetype/jerp_site_settings/jerp_site_settings.js @@ -0,0 +1,10 @@ +// Copyright (c) 2022, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('JERP Site Settings', { + refresh: function (frm) { + frm.add_custom_button(__('Open Site'), () => { + window.open(`https://jingrow.com/dashboard/sites/${frm.pg.site}`); + }); + }, +}); diff --git a/jcloud/jcloud/pagetype/jerp_site_settings/jerp_site_settings.json b/jcloud/jcloud/pagetype/jerp_site_settings/jerp_site_settings.json new file mode 100644 index 0000000..9b51be4 --- /dev/null +++ b/jcloud/jcloud/pagetype/jerp_site_settings/jerp_site_settings.json @@ -0,0 +1,101 @@ +{ + "actions": [], + "allow_rename": 1, + "autoname": "field:site", + "creation": "2022-06-03 14:33:04.239124", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "site", + "expiry", + "users", + "emails", + "space", + "column_break_6", + "plan", + "support_expiry" + ], + "fields": [ + { + "fieldname": "site", + "fieldtype": "Link", + "label": "Site", + "options": "Site", + "read_only": 1, + "reqd": 1, + "unique": 1 + }, + { + "fieldname": "expiry", + "fieldtype": "Date", + "in_list_view": 1, + "label": "Expiry", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "users", + "fieldtype": "Int", + "label": "Users", + "read_only": 1 + }, + { + "fieldname": "plan", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Plan", + "read_only": 1 + }, + { + "fieldname": "support_expiry", + "fieldtype": "Date", + "label": "Support Expiry", + "read_only": 1 + }, + { + "default": "25000", + "fieldname": "emails", + "fieldtype": "Int", + "label": "Emails", + "read_only": 1 + }, + { + "default": "25", + "fieldname": "space", + "fieldtype": "Int", + "label": "Space (in GB)", + "read_only": 1 + }, + { + "fieldname": "column_break_6", + "fieldtype": "Column Break" + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2022-06-20 06:58:05.388164", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "JERP Site Settings", + "naming_rule": "By fieldname", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/jerp_site_settings/jerp_site_settings.py b/jcloud/jcloud/pagetype/jerp_site_settings/jerp_site_settings.py new file mode 100644 index 0000000..fbd01fe --- /dev/null +++ b/jcloud/jcloud/pagetype/jerp_site_settings/jerp_site_settings.py @@ -0,0 +1,41 @@ +# Copyright (c) 2022, JINGROW +# For license information, please see license.txt + +import json + +import jingrow +from jingrow.model.document import Document + + +class JERPSiteSettings(Document): + # begin: auto-generated types + # 
This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + emails: DF.Int + expiry: DF.Date + plan: DF.Data | None + site: DF.Link + space: DF.Int + support_expiry: DF.Date | None + users: DF.Int + # end: auto-generated types + + def on_update(self): + config_keys = ("users", "expiry", "emails", "space", "current_plan") + values = (self.users, self.expiry, self.emails, self.space, self.plan) + + site = jingrow.get_pg("Site", self.site) + config = json.loads(site.config) + limits = config.get("limits", {}) + + limits.update(dict(zip(config_keys, values))) + + # remove null/empty values + limits = {k: v for k, v in limits.items() if v} + + site.update_site_config({"limits": limits}) diff --git a/jcloud/jcloud/pagetype/jerp_site_settings/test_jerp_site_settings.py b/jcloud/jcloud/pagetype/jerp_site_settings/test_jerp_site_settings.py new file mode 100644 index 0000000..47b22a3 --- /dev/null +++ b/jcloud/jcloud/pagetype/jerp_site_settings/test_jerp_site_settings.py @@ -0,0 +1,9 @@ +# Copyright (c) 2022, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests.utils import JingrowTestCase + + +class TestJERPSiteSettings(JingrowTestCase): + pass diff --git a/jcloud/jcloud/pagetype/jingrow_version/__init__.py b/jcloud/jcloud/pagetype/jingrow_version/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/jingrow_version/jingrow_version.js b/jcloud/jcloud/pagetype/jingrow_version/jingrow_version.js new file mode 100644 index 0000000..e0bcc1f --- /dev/null +++ b/jcloud/jcloud/pagetype/jingrow_version/jingrow_version.js @@ -0,0 +1,7 @@ +// Copyright (c) 2020, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Jingrow Version', { + // refresh: function(frm) { + // } +}); diff --git a/jcloud/jcloud/pagetype/jingrow_version/jingrow_version.json b/jcloud/jcloud/pagetype/jingrow_version/jingrow_version.json new file mode 100644 index 0000000..fb6a955 --- /dev/null +++ b/jcloud/jcloud/pagetype/jingrow_version/jingrow_version.json @@ -0,0 +1,99 @@ +{ + "actions": [], + "allow_rename": 1, + "autoname": "Prompt", + "creation": "2020-10-19 17:58:10.458990", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "public", + "number", + "column_break_ramy", + "default", + "status", + "dependency_table_section", + "dependencies" + ], + "fields": [ + { + "default": "1", + "fieldname": "public", + "fieldtype": "Check", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Public" + }, + { + "fieldname": "number", + "fieldtype": "Int", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Number", + "reqd": 1 + }, + { + "default": "0", + "fieldname": "default", + "fieldtype": "Check", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Default" + }, + { + "fieldname": "status", + "fieldtype": "Select", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Status", + "options": "Develop\nBeta\nStable\nEnd of Life", + "reqd": 1 + }, + { + "fieldname": "column_break_ramy", + "fieldtype": "Column Break" + }, + { + "fieldname": "dependency_table_section", + "fieldtype": "Section Break", + "label": "Dependency Table" + }, + { + "fieldname": "dependencies", + "fieldtype": "Table", + "label": "Version Dependencies", + "options": "Jingrow Version Dependency" + } + ], + "index_web_pages_for_search": 1, + "links": [ + { + "link_pagetype": "Release Group", + "link_fieldname": "version" + } 
+ ], + "modified": "2023-07-19 16:24:24.727292", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Jingrow Version", + "naming_rule": "Set by user", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/jingrow_version/jingrow_version.py b/jcloud/jcloud/pagetype/jingrow_version/jingrow_version.py new file mode 100644 index 0000000..d03d109 --- /dev/null +++ b/jcloud/jcloud/pagetype/jingrow_version/jingrow_version.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +# import jingrow +import copy + +from jingrow.model.document import Document + +DEFAULT_DEPENDENCIES = [ + {"dependency": "NVM_VERSION", "version": "0.36.0"}, + {"dependency": "NODE_VERSION", "version": "18.16.0"}, + {"dependency": "PYTHON_VERSION", "version": "3.11"}, + {"dependency": "WKHTMLTOPDF_VERSION", "version": "0.12.5"}, + {"dependency": "BENCH_VERSION", "version": "5.22.6"}, +] + + +class JingrowVersion(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + from jcloud.jcloud.pagetype.jingrow_version_dependency.jingrow_version_dependency import ( + JingrowVersionDependency, + ) + + default: DF.Check + dependencies: DF.Table[JingrowVersionDependency] + number: DF.Int + public: DF.Check + status: DF.Literal["Develop", "Beta", "Stable", "End of Life"] + # end: auto-generated types + + def before_insert(self): + self.set_dependencies() + + def set_dependencies(self): + dependencies = copy.deepcopy(DEFAULT_DEPENDENCIES) + if not hasattr(self, "dependencies") or not self.dependencies: + self.extend("dependencies", dependencies) diff --git a/jcloud/jcloud/pagetype/jingrow_version/test_jingrow_version.py b/jcloud/jcloud/pagetype/jingrow_version/test_jingrow_version.py new file mode 100644 index 0000000..2d3a3d1 --- /dev/null +++ b/jcloud/jcloud/pagetype/jingrow_version/test_jingrow_version.py @@ -0,0 +1,21 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# See license.txt + + +import unittest + +import jingrow + + +class TestJingrowVersion(unittest.TestCase): + def test_create_jingrow_version_with_default_dependencies(self): + number = 99 # version with no fixtures + jingrow_version = jingrow.get_pg( + { + "pagetype": "Jingrow Version", + "name": f"Version {number}", + "number": number, + } + ).insert() + self.assertEqual(len(jingrow_version.dependencies), 5) diff --git a/jcloud/jcloud/pagetype/jingrow_version_dependency/__init__.py b/jcloud/jcloud/pagetype/jingrow_version_dependency/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/jingrow_version_dependency/jingrow_version_dependency.json b/jcloud/jcloud/pagetype/jingrow_version_dependency/jingrow_version_dependency.json new file mode 100644 index 0000000..87c89eb --- /dev/null +++ b/jcloud/jcloud/pagetype/jingrow_version_dependency/jingrow_version_dependency.json @@ -0,0 +1,41 @@ +{ + "actions": [], + "creation": "2023-07-13 12:18:06.259601", + "default_view": "List", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + 
"dependency", + "version" + ], + "fields": [ + { + "fieldname": "dependency", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Dependency", + "reqd": 1 + }, + { + "fieldname": "version", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Version", + "reqd": 1 + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2023-07-19 15:45:52.544440", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Jingrow Version Dependency", + "owner": "Administrator", + "permissions": [], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/jingrow_version_dependency/jingrow_version_dependency.py b/jcloud/jcloud/pagetype/jingrow_version_dependency/jingrow_version_dependency.py new file mode 100644 index 0000000..b7d62e8 --- /dev/null +++ b/jcloud/jcloud/pagetype/jingrow_version_dependency/jingrow_version_dependency.py @@ -0,0 +1,24 @@ +# Copyright (c) 2023, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class JingrowVersionDependency(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + dependency: DF.Data + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + version: DF.Data + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/log_counter/__init__.py b/jcloud/jcloud/pagetype/log_counter/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/log_counter/log_counter.js b/jcloud/jcloud/pagetype/log_counter/log_counter.js new file mode 100644 index 0000000..84a92eb --- /dev/null +++ b/jcloud/jcloud/pagetype/log_counter/log_counter.js @@ -0,0 +1,8 @@ +// Copyright (c) 2024, JINGROW +// For license information, please see license.txt + +// jingrow.ui.form.on("Log Counter", { +// refresh(frm) { + +// }, +// }); diff --git a/jcloud/jcloud/pagetype/log_counter/log_counter.json b/jcloud/jcloud/pagetype/log_counter/log_counter.json new file mode 100644 index 0000000..d09a062 --- /dev/null +++ b/jcloud/jcloud/pagetype/log_counter/log_counter.json @@ -0,0 +1,94 @@ +{ + "actions": [], + "creation": "2024-04-19 11:57:50.263215", + "pagetype": "PageType", + "engine": "InnoDB", + "field_order": [ + "logtype", + "groupby", + "column_break_nyqx", + "date", + "section_break_epfa", + "counts", + "total" + ], + "fields": [ + { + "fieldname": "date", + "fieldtype": "Date", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Collection Date", + "read_only": 1, + "reqd": 1, + "search_index": 1 + }, + { + "fieldname": "column_break_nyqx", + "fieldtype": "Column Break" + }, + { + "fieldname": "section_break_epfa", + "fieldtype": "Section Break", + "label": "Counts" + }, + { + "fieldname": "counts", + "fieldtype": "JSON", + "label": "Counts", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "total", + "fieldtype": "Int", + "in_list_view": 1, + "label": "Total", + "non_negative": 1, + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "groupby", + "fieldtype": "Data", + "label": "Group By", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "logtype", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Log Type", + "options": "PageType", + "read_only": 1, + "reqd": 1, + "search_index": 1 + } + ], + "index_web_pages_for_search": 
1, + "links": [], + "modified": "2024-04-19 13:17:13.153028", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Log Counter", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/log_counter/log_counter.py b/jcloud/jcloud/pagetype/log_counter/log_counter.py new file mode 100644 index 0000000..1f45ca7 --- /dev/null +++ b/jcloud/jcloud/pagetype/log_counter/log_counter.py @@ -0,0 +1,140 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt + +import datetime +import json +from typing import Optional, TypedDict + +import jingrow +import jingrow.utils +from jingrow.model.document import Document +from jingrow.query_builder import PageType +from jingrow.query_builder.functions import Count +from pypika import Order + +# PageType: groupby +RECORD_FOR: dict[str, str] = { + "Error Log": "method", +} + +Counts = TypedDict( + "Counts", + { + "counts": dict[str, int], + "date": datetime.date, + "total": int, + }, +) + + +class LogCounter(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + counts: DF.JSON + date: DF.Date + groupby: DF.Data + logtype: DF.Link + total: DF.Int + # end: auto-generated types + + def autoname(self): + self.name = get_name(self.logtype, self.date) + + +def record_counts(): + date = jingrow.utils.now_datetime().date() - datetime.timedelta(days=1) + for pagetype, groupby in RECORD_FOR.items(): + record_for_date(pagetype, groupby, date) + jingrow.db.commit() + + +def record_for_date( + pagetype: str = "Error Log", + groupby: str = "method", + date: Optional[datetime.date] = None, +): + counts = get_counts( + pagetype, + groupby, + date, + ) + name = get_name(pagetype, counts["date"]) + counts_json = json.dumps(counts["counts"], indent=2) + + # Update counts if name value exists + if jingrow.db.exists("Log Counter", name): + jingrow.db.set_value("Log Counter", name, "counts", counts_json) + jingrow.db.set_value("Log Counter", name, "total", counts["total"]) + return + + lc = jingrow.get_pg( + { + "pagetype": "Log Counter", + "logtype": pagetype, + "groupby": groupby, + "counts": counts_json, + "total": counts["total"], + "date": counts["date"], + } + ) + lc.insert() + + +def get_counts( + pagetype: str = "Error Log", + groupby: str = "method", + date: Optional[datetime.date] = None, +) -> Counts: + date_to = date if date else jingrow.utils.now_datetime().date() + date_from = date_to - datetime.timedelta(days=1) + + table = PageType(pagetype) + column = table[groupby] + + q = jingrow.qb.from_(table) + q = q.select(column, Count("*", alias="count")) + q = q.where(table.creation[date_from:date_to]) + q = q.groupby(column) + q = q.orderby("count", order=Order.desc) + r = q.run() + + counts = {c[0]: c[1] for c in r} + total = sum(c[1] for c in r) + return dict(counts=counts, date=date_to, total=total) + + +def get_name(pagetype: str, date: datetime.date): + dt_stub = pagetype.lower().replace(" ", "_") + date_iso = date.isoformat().replace("-", "_") + return f"{dt_stub}-{date_iso}" + + +def top_k( + k: int = 5, + log_type: str = "Error Log", + since: Optional[datetime.date] = None, +): + if not 
since: + since = jingrow.utils.now_datetime().date() - datetime.timedelta(days=30) + + res = jingrow.get_all( + "Log Counter", + fields=["total", "date", "counts"], + filters={ + "logtype": log_type, + "creation": [">", since], + }, + ) + for r in res: + counts = json.loads(r["counts"]) + counts = [{"error": k, "count": i} for k, i in counts.items()] + counts.sort(key=lambda x: x["count"], reverse=True) + r["counts"] = counts[:k] + + return res diff --git a/jcloud/jcloud/pagetype/log_counter/test_log_counter.py b/jcloud/jcloud/pagetype/log_counter/test_log_counter.py new file mode 100644 index 0000000..90aa827 --- /dev/null +++ b/jcloud/jcloud/pagetype/log_counter/test_log_counter.py @@ -0,0 +1,9 @@ +# Copyright (c) 2024, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests.utils import JingrowTestCase + + +class TestLogCounter(JingrowTestCase): + pass diff --git a/jcloud/jcloud/pagetype/log_server/__init__.py b/jcloud/jcloud/pagetype/log_server/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/log_server/log_server.js b/jcloud/jcloud/pagetype/log_server/log_server.js new file mode 100644 index 0000000..a6183ad --- /dev/null +++ b/jcloud/jcloud/pagetype/log_server/log_server.js @@ -0,0 +1,60 @@ +// Copyright (c) 2021, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Log Server', { + refresh: function (frm) { + [ + [__('Ping Agent'), 'ping_agent', false, frm.pg.is_server_setup], + [__('Ping Ansible'), 'ping_ansible', true], + [__('Ping Ansible Unprepared'), 'ping_ansible_unprepared', true], + [__('Update Agent'), 'update_agent', true, frm.pg.is_server_setup], + [__('Prepare Server'), 'prepare_server', true, !frm.pg.is_server_setup], + [__('Setup Server'), 'setup_server', true, !frm.pg.is_server_setup], + [__('Archive'), 'archive', true, frm.pg.provider === 'AWS EC2'], + [ + __('Fetch Keys'), + 'fetch_keys', + false, + frm.pg.is_server_setup && + (!frm.pg.jingrow_public_key || !frm.pg.root_public_key), + ], + [ + __('Show Kibana Password'), + 'show_kibana_password', + false, + frm.pg.is_server_setup, + ], + [__('Update TLS Certificate'), 'update_tls_certificate', true], + ].forEach(([label, method, confirm, condition]) => { + if (typeof condition === 'undefined' || condition) { + frm.add_custom_button( + label, + () => { + if (confirm) { + jingrow.confirm( + `Are you sure you want to ${label.toLowerCase()}?`, + () => + frm.call(method).then((r) => { + if (r.message) { + jingrow.msgprint(r.message); + } else { + frm.refresh(); + } + }), + ); + } else { + frm.call(method).then((r) => { + if (r.message) { + jingrow.msgprint(r.message); + } else { + frm.refresh(); + } + }); + } + }, + __('Actions'), + ); + } + }); + }, +}); diff --git a/jcloud/jcloud/pagetype/log_server/log_server.json b/jcloud/jcloud/pagetype/log_server/log_server.json new file mode 100644 index 0000000..8aa5d90 --- /dev/null +++ b/jcloud/jcloud/pagetype/log_server/log_server.json @@ -0,0 +1,224 @@ +{ + "actions": [], + "creation": "2021-05-11 20:38:48.560155", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "status", + "hostname", + "domain", + "column_break_4", + "cluster", + "provider", + "virtual_machine", + "is_server_setup", + "networking_section", + "ip", + "column_break_9", + "private_ip", + "private_mac_address", + "private_vlan_id", + "agent_section", + "agent_password", + "ssh_section", + "jingrow_user_password", + "jingrow_public_key", + "column_break_20", + "root_public_key", + 
"monitoring_section", + "monitoring_password", + "kibana_section", + "kibana_password" + ], + "fields": [ + { + "default": "Pending", + "fieldname": "status", + "fieldtype": "Select", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Status", + "options": "Pending\nInstalling\nActive\nBroken\nArchived", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "hostname", + "fieldtype": "Data", + "label": "Hostname", + "reqd": 1, + "set_only_once": 1 + }, + { + "fieldname": "domain", + "fieldtype": "Link", + "label": "Domain", + "options": "Root Domain", + "read_only": 1, + "set_only_once": 1 + }, + { + "fieldname": "column_break_4", + "fieldtype": "Column Break" + }, + { + "default": "Generic", + "fieldname": "provider", + "fieldtype": "Select", + "label": "Provider", + "options": "Generic\nScaleway\nAWS EC2\nOCI", + "set_only_once": 1 + }, + { + "default": "0", + "fieldname": "is_server_setup", + "fieldtype": "Check", + "label": "Server Setup", + "read_only": 1 + }, + { + "fieldname": "networking_section", + "fieldtype": "Section Break", + "label": "Networking" + }, + { + "fetch_from": "virtual_machine.public_ip_address", + "fieldname": "ip", + "fieldtype": "Data", + "in_list_view": 1, + "label": "IP", + "set_only_once": 1 + }, + { + "fieldname": "column_break_9", + "fieldtype": "Column Break" + }, + { + "fetch_from": "virtual_machine.private_ip_address", + "fieldname": "private_ip", + "fieldtype": "Data", + "label": "Private IP", + "reqd": 1, + "set_only_once": 1 + }, + { + "depends_on": "eval: pg.provider === \"Scaleway\"", + "fieldname": "private_mac_address", + "fieldtype": "Data", + "label": "Private Mac Address", + "mandatory_depends_on": "eval: pg.provider === \"Scaleway\"", + "set_only_once": 1 + }, + { + "depends_on": "eval: pg.provider === \"Scaleway\"", + "fieldname": "private_vlan_id", + "fieldtype": "Data", + "label": "Private VLAN ID", + "mandatory_depends_on": "eval: pg.provider === \"Scaleway\"", + "set_only_once": 1 + }, + { + "fieldname": "agent_section", + "fieldtype": "Section Break", + "label": "Agent" + }, + { + "fieldname": "agent_password", + "fieldtype": "Password", + "label": "Agent Password", + "set_only_once": 1 + }, + { + "fieldname": "ssh_section", + "fieldtype": "Section Break", + "label": "SSH" + }, + { + "fieldname": "jingrow_user_password", + "fieldtype": "Password", + "label": "Jingrow User Password", + "set_only_once": 1 + }, + { + "fieldname": "jingrow_public_key", + "fieldtype": "Code", + "label": "Jingrow Public Key", + "read_only": 1 + }, + { + "fieldname": "column_break_20", + "fieldtype": "Column Break" + }, + { + "fieldname": "root_public_key", + "fieldtype": "Code", + "label": "Root Public Key", + "read_only": 1 + }, + { + "fieldname": "monitoring_section", + "fieldtype": "Section Break", + "label": "Monitoring" + }, + { + "fieldname": "monitoring_password", + "fieldtype": "Password", + "label": "Monitoring Password", + "set_only_once": 1 + }, + { + "fieldname": "kibana_section", + "fieldtype": "Section Break", + "label": "Kibana" + }, + { + "fieldname": "kibana_password", + "fieldtype": "Password", + "label": "Kibana Password" + }, + { + "depends_on": "eval:pg.provider === \"AWS EC2\"", + "fieldname": "virtual_machine", + "fieldtype": "Link", + "label": "Virtual Machine", + "mandatory_depends_on": "eval:pg.provider === \"AWS EC2\"", + "options": "Virtual Machine" + }, + { + "fieldname": "cluster", + "fieldtype": "Link", + "label": "Cluster", + "options": "Cluster" + } + ], + "links": [ + { + "link_pagetype": "Ansible Play", + 
"link_fieldname": "server" + } + ], + "modified": "2023-12-13 15:09:14.473225", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Log Server", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/log_server/log_server.py b/jcloud/jcloud/pagetype/log_server/log_server.py new file mode 100644 index 0000000..8e183f7 --- /dev/null +++ b/jcloud/jcloud/pagetype/log_server/log_server.py @@ -0,0 +1,111 @@ +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + +import jingrow + +from jcloud.jcloud.pagetype.server.server import BaseServer +from jcloud.runner import Ansible +from jcloud.utils import log_error + + +class LogServer(BaseServer): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + agent_password: DF.Password | None + cluster: DF.Link | None + domain: DF.Link | None + jingrow_public_key: DF.Code | None + jingrow_user_password: DF.Password | None + hostname: DF.Data + ip: DF.Data | None + is_server_setup: DF.Check + kibana_password: DF.Password | None + monitoring_password: DF.Password | None + private_ip: DF.Data + private_mac_address: DF.Data | None + private_vlan_id: DF.Data | None + provider: DF.Literal["Generic", "Scaleway", "AWS EC2", "OCI"] + root_public_key: DF.Code | None + status: DF.Literal["Pending", "Installing", "Active", "Broken", "Archived"] + virtual_machine: DF.Link | None + # end: auto-generated types + + def validate(self): + self.validate_agent_password() + self.validate_monitoring_password() + self.validate_kibana_password() + + def validate_monitoring_password(self): + if not self.monitoring_password: + self.monitoring_password = jingrow.generate_hash() + + def validate_kibana_password(self): + if not self.kibana_password: + self.kibana_password = jingrow.generate_hash() + + def _setup_server(self): + agent_password = self.get_password("agent_password") + agent_repository_url = self.get_agent_repository_url() + kibana_password = self.get_password("kibana_password") + monitoring_password = self.get_password("monitoring_password") + certificate_name = jingrow.db.get_value( + "TLS Certificate", {"wildcard": True, "domain": self.domain}, "name" + ) + certificate = jingrow.get_pg("TLS Certificate", certificate_name) + try: + ansible = Ansible( + playbook="log.yml", + server=self, + variables={ + "server": self.name, + "workers": 1, + "domain": self.domain, + "log_server": self.name, + "agent_password": agent_password, + "agent_repository_url": agent_repository_url, + "kibana_password": kibana_password, + "monitoring_password": monitoring_password, + "private_ip": self.private_ip, + "certificate_private_key": certificate.private_key, + "certificate_full_chain": certificate.full_chain, + "certificate_intermediate_chain": certificate.intermediate_chain, + }, + ) + play = ansible.run() + self.reload() + if play.status == "Success": + self.status = "Active" + self.is_server_setup = True + else: + self.status = "Broken" + except Exception: + self.status = "Broken" + log_error("Log Server Setup Exception", server=self.as_dict()) + self.save() + + @jingrow.whitelist() + def 
show_kibana_password(self): + return self.get_password("kibana_password") + + @jingrow.whitelist() + def install_elasticsearch_exporter(self): + jingrow.enqueue_pg( + self.pagetype, + self.name, + "_install_elasticsearch_exporter", + queue="long", + timeout=1200, + ) + + def _install_elasticsearch_exporter(self): + try: + ansible = Ansible(playbook="elasticsearch_exporter.yml", server=self) + ansible.run() + except Exception: + log_error("Elasticsearch Exporter Install Exception", server=self.as_dict()) diff --git a/jcloud/jcloud/pagetype/log_server/test_log_server.py b/jcloud/jcloud/pagetype/log_server/test_log_server.py new file mode 100644 index 0000000..87f0a3f --- /dev/null +++ b/jcloud/jcloud/pagetype/log_server/test_log_server.py @@ -0,0 +1,9 @@ +# Copyright (c) 2021, JINGROW +# See license.txt + +# import jingrow +import unittest + + +class TestLogServer(unittest.TestCase): + pass diff --git a/jcloud/jcloud/pagetype/mail_log/__init__.py b/jcloud/jcloud/pagetype/mail_log/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/mail_log/mail_log.js b/jcloud/jcloud/pagetype/mail_log/mail_log.js new file mode 100644 index 0000000..c50d12d --- /dev/null +++ b/jcloud/jcloud/pagetype/mail_log/mail_log.js @@ -0,0 +1,7 @@ +// Copyright (c) 2021, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Mail Log', { + // refresh: function(frm) { + // } +}); diff --git a/jcloud/jcloud/pagetype/mail_log/mail_log.json b/jcloud/jcloud/pagetype/mail_log/mail_log.json new file mode 100644 index 0000000..2d049e5 --- /dev/null +++ b/jcloud/jcloud/pagetype/mail_log/mail_log.json @@ -0,0 +1,135 @@ +{ + "actions": [], + "allow_rename": 1, + "autoname": "QML.#####", + "creation": "2021-10-24 09:26:16.759715", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "unique_token", + "message_id", + "status", + "column_break_drnq", + "site", + "subscription_key", + "column_break_erbe", + "sender", + "recipient", + "date", + "section_break_uslz", + "message", + "log" + ], + "fields": [ + { + "fieldname": "site", + "fieldtype": "Data", + "label": "Site", + "read_only": 1 + }, + { + "fieldname": "sender", + "fieldtype": "Data", + "ignore_xss_filter": 1, + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Sender", + "read_only": 1 + }, + { + "default": "Pending", + "fieldname": "status", + "fieldtype": "Data", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Status", + "read_only": 1 + }, + { + "fieldname": "recipient", + "fieldtype": "Data", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Recipient", + "read_only": 1 + }, + { + "fieldname": "log", + "fieldtype": "Code", + "label": "log", + "options": "JSON", + "read_only": 1 + }, + { + "default": "Today", + "fieldname": "date", + "fieldtype": "Date", + "label": "Date", + "read_only": 1 + }, + { + "fieldname": "unique_token", + "fieldtype": "Data", + "hidden": 1, + "label": "Unique Token", + "unique": 1 + }, + { + "fieldname": "message_id", + "fieldtype": "Data", + "label": "Message Id", + "read_only": 1 + }, + { + "fieldname": "subscription_key", + "fieldtype": "Data", + "label": "Subscription Key", + "read_only": 1 + }, + { + "fieldname": "message", + "fieldtype": "Code", + "label": "Message", + "read_only": 1 + }, + { + "fieldname": "column_break_drnq", + "fieldtype": "Column Break" + }, + { + "fieldname": "column_break_erbe", + "fieldtype": "Column Break" + }, + { + "fieldname": "section_break_uslz", + "fieldtype": "Section Break" 
+ } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2024-06-02 16:54:57.203891", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Mail Log", + "naming_rule": "Expression (old style)", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/mail_log/mail_log.py b/jcloud/jcloud/pagetype/mail_log/mail_log.py new file mode 100644 index 0000000..7a27051 --- /dev/null +++ b/jcloud/jcloud/pagetype/mail_log/mail_log.py @@ -0,0 +1,35 @@ +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt +from __future__ import annotations + +import jingrow +from jingrow.model.document import Document + + +class MailLog(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + date: DF.Date | None + log: DF.Code | None + message: DF.Code | None + message_id: DF.Data | None + recipient: DF.Data | None + sender: DF.Data | None + site: DF.Data | None + status: DF.Data | None + subscription_key: DF.Data | None + unique_token: DF.Data | None + # end: auto-generated types + + pass + + +def on_pagetype_update(): + jingrow.db.add_index("Mail Log", ["site", "status"]) + jingrow.db.add_index("Mail Log", ["site", "creation"]) diff --git a/jcloud/jcloud/pagetype/mail_log/test_mail_log.py b/jcloud/jcloud/pagetype/mail_log/test_mail_log.py new file mode 100644 index 0000000..d56954d --- /dev/null +++ b/jcloud/jcloud/pagetype/mail_log/test_mail_log.py @@ -0,0 +1,9 @@ +# Copyright (c) 2021, JINGROW +# See license.txt + +# import jingrow +import unittest + + +class TestMailLog(unittest.TestCase): + pass diff --git a/jcloud/jcloud/pagetype/mail_setup/__init__.py b/jcloud/jcloud/pagetype/mail_setup/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/mail_setup/mail_setup.js b/jcloud/jcloud/pagetype/mail_setup/mail_setup.js new file mode 100644 index 0000000..969482c --- /dev/null +++ b/jcloud/jcloud/pagetype/mail_setup/mail_setup.js @@ -0,0 +1,7 @@ +// Copyright (c) 2022, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Mail Setup', { + // refresh: function(frm) { + // } +}); diff --git a/jcloud/jcloud/pagetype/mail_setup/mail_setup.json b/jcloud/jcloud/pagetype/mail_setup/mail_setup.json new file mode 100644 index 0000000..4dc796c --- /dev/null +++ b/jcloud/jcloud/pagetype/mail_setup/mail_setup.json @@ -0,0 +1,51 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2022-02-28 16:42:45.716872", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "site", + "is_complete" + ], + "fields": [ + { + "fieldname": "site", + "fieldtype": "Link", + "label": "Site", + "options": "Site", + "unique": 1 + }, + { + "default": "0", + "fieldname": "is_complete", + "fieldtype": "Check", + "label": "is complete" + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2022-02-28 19:15:00.462629", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Mail Setup", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 
1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/mail_setup/mail_setup.py b/jcloud/jcloud/pagetype/mail_setup/mail_setup.py new file mode 100644 index 0000000..900df00 --- /dev/null +++ b/jcloud/jcloud/pagetype/mail_setup/mail_setup.py @@ -0,0 +1,21 @@ +# Copyright (c) 2022, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class MailSetup(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + is_complete: DF.Check + site: DF.Link | None + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/mail_setup/test_mail_setup.py b/jcloud/jcloud/pagetype/mail_setup/test_mail_setup.py new file mode 100644 index 0000000..d2e6b2b --- /dev/null +++ b/jcloud/jcloud/pagetype/mail_setup/test_mail_setup.py @@ -0,0 +1,9 @@ +# Copyright (c) 2022, JINGROW +# See license.txt + +# import jingrow +import unittest + + +class TestMailSetup(unittest.TestCase): + pass diff --git a/jcloud/jcloud/pagetype/malware_scan/__init__.py b/jcloud/jcloud/pagetype/malware_scan/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/malware_scan/malware_scan.js b/jcloud/jcloud/pagetype/malware_scan/malware_scan.js new file mode 100644 index 0000000..5d5f503 --- /dev/null +++ b/jcloud/jcloud/pagetype/malware_scan/malware_scan.js @@ -0,0 +1,14 @@ +// Copyright (c) 2022, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Malware Scan', { + refresh: function (frm) { + if (['Installed', 'Failure', 'Success'].includes(frm.pg.status)) { + frm.add_custom_button(__('Start'), () => { + jingrow.confirm('Are you sure you want to scan?', () => + frm.call('start'), + ); + }); + } + }, +}); diff --git a/jcloud/jcloud/pagetype/malware_scan/malware_scan.json b/jcloud/jcloud/pagetype/malware_scan/malware_scan.json new file mode 100644 index 0000000..bb5bd90 --- /dev/null +++ b/jcloud/jcloud/pagetype/malware_scan/malware_scan.json @@ -0,0 +1,78 @@ +{ + "actions": [], + "allow_rename": 1, + "autoname": "hash", + "creation": "2022-05-22 02:41:30.828632", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "server_type", + "server", + "column_break_6", + "play", + "status" + ], + "fields": [ + { + "fieldname": "server_type", + "fieldtype": "Select", + "label": "Server Type", + "options": "Server\nDatabase Server\nProxy Server", + "reqd": 1 + }, + { + "fieldname": "server", + "fieldtype": "Dynamic Link", + "in_list_view": 1, + "label": "Server", + "options": "server_type", + "reqd": 1 + }, + { + "fieldname": "play", + "fieldtype": "Link", + "label": "Play", + "options": "Ansible Play", + "read_only": 1 + }, + { + "fieldname": "column_break_6", + "fieldtype": "Column Break" + }, + { + "allow_in_quick_entry": 1, + "fieldname": "status", + "fieldtype": "Select", + "label": "Status", + "options": "Pending\nInstalled\nNot Installed\nRunning\nClean\nInfected\nFailure", + "reqd": 1 + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2023-11-22 17:24:35.104095", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Malware Scan", + "naming_rule": "Random", + "owner": "Administrator", + 
"permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "track_seen": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/malware_scan/malware_scan.py b/jcloud/jcloud/pagetype/malware_scan/malware_scan.py new file mode 100644 index 0000000..4f0791d --- /dev/null +++ b/jcloud/jcloud/pagetype/malware_scan/malware_scan.py @@ -0,0 +1,120 @@ +# Copyright (c) 2022, JINGROW +# For license information, please see license.txt + +import jingrow +from jingrow.model.document import Document + +from jcloud.runner import Ansible +from jcloud.utils import log_error + + +class MalwareScan(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + play: DF.Link | None + server: DF.DynamicLink + server_type: DF.Literal["Server", "Database Server", "Proxy Server"] + status: DF.Literal[ + "Pending", "Installed", "Not Installed", "Running", "Clean", "Infected", "Failure" + ] + # end: auto-generated types + + def after_insert(self): + self.check_clamav() + + def check_clamav(self): + self.status = "Pending" + self.save() + jingrow.db.commit() + jingrow.enqueue_pg(self.pagetype, self.name, "_check_clamav", queue="default") + + def _check_clamav(self): + try: + ansible = Ansible( + playbook="pkg_exists.yml", + server=jingrow.get_pg(self.server_type, self.server), + variables={"pkg": "clamav"}, + ) + self.reload() + self.play = ansible.play + self.status = "Running" + self.save() + jingrow.db.commit() + play = ansible.run() + if play.status == "Success": + self.status = "Installed" + else: + self.status = "Not Installed" + except Exception: + log_error("ClamAV Install Exception", scan=self.as_dict()) + self.status = "Failure" + self.save() + + @jingrow.whitelist() + def start(self): + self.status = "Pending" + self.save() + jingrow.db.commit() + jingrow.enqueue_pg(self.pagetype, self.name, "_start", queue="long", timeout=32000) + + def _start(self): + try: + ansible = Ansible( + playbook="malware_scan.yml", + server=jingrow.get_pg(self.server_type, self.server), + ) + self.reload() + self.play = ansible.play + self.status = "Running" + self.save() + jingrow.db.commit() + play = ansible.run() + if play.status == "Success": + self.succeed() + else: + self.fail() + except Exception: + log_error("Malware Scan Exception", scan=self.as_dict()) + self.fail() + self.save() + + def succeed(self): + output = jingrow.db.get_value( + "Ansible Task", {"task": "Scan home directory", "play": self.play}, "output" + ) + if "Infected files:" in output: + if "Infected files: 0" in output: + self.status = "Clean" + else: + self.status = "Infected" + self.send_infected_alert() + return + self.fail() + + def send_infected_alert(self): + domain = jingrow.get_value("Jcloud Settings", "Jcloud Settings", "domain") + message = f""" +Malware Scan for *{self.server}* found malware, review. + +[Malware Scan]({domain}{self.get_url()}) +""" + self.send_alert(message) + + def fail(self): + self.status = "Failure" + domain = jingrow.get_value("Jcloud Settings", "Jcloud Settings", "domain") + message = f""" +Malware Scan for *{self.server}* failed. 
+ +[Malware Scan]({domain}{self.get_url()}) +""" + self.send_alert(message) + + def send_alert(self, message): + pass diff --git a/jcloud/jcloud/pagetype/malware_scan/test_malware_scan.py b/jcloud/jcloud/pagetype/malware_scan/test_malware_scan.py new file mode 100644 index 0000000..9f3dc81 --- /dev/null +++ b/jcloud/jcloud/pagetype/malware_scan/test_malware_scan.py @@ -0,0 +1,9 @@ +# Copyright (c) 2022, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests.utils import JingrowTestCase + + +class TestMalwareScan(JingrowTestCase): + pass diff --git a/jcloud/jcloud/pagetype/managed_database_service/__init__.py b/jcloud/jcloud/pagetype/managed_database_service/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/managed_database_service/managed_database_service.js b/jcloud/jcloud/pagetype/managed_database_service/managed_database_service.js new file mode 100644 index 0000000..2c8f6be --- /dev/null +++ b/jcloud/jcloud/pagetype/managed_database_service/managed_database_service.js @@ -0,0 +1,16 @@ +// Copyright (c) 2024, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Managed Database Service', { + refresh(frm) { + let command = `mysql -h ${frm.pg.name} -p -u ${frm.pg.database_root_user} -P ${frm.pg.port}`; + frm.add_custom_button('Console Access', () => { + jingrow.msgprint(`
${command}
`); + }); + frm.add_custom_button('Show Root Password', () => { + frm.call('show_root_password').then((r) => { + jingrow.msgprint(`
${r.message}
`);
+			});
+		});
+	},
+});
diff --git a/jcloud/jcloud/pagetype/managed_database_service/managed_database_service.json b/jcloud/jcloud/pagetype/managed_database_service/managed_database_service.json
new file mode 100644
index 0000000..f867274
--- /dev/null
+++ b/jcloud/jcloud/pagetype/managed_database_service/managed_database_service.json
@@ -0,0 +1,103 @@
+{
+ "actions": [],
+ "allow_rename": 1,
+ "autoname": "field:database_host",
+ "creation": "2024-05-17 16:35:50.107617",
+ "pagetype": "PageType",
+ "engine": "InnoDB",
+ "field_order": [
+  "database_section",
+  "service_provider",
+  "database_host",
+  "database_root_user",
+  "column_break_dboe",
+  "root_user_password",
+  "port",
+  "team_section",
+  "team"
+ ],
+ "fields": [
+  {
+   "fieldname": "service_provider",
+   "fieldtype": "Select",
+   "in_list_view": 1,
+   "label": "Service Provider",
+   "options": "AWS RDS",
+   "reqd": 1
+  },
+  {
+   "fieldname": "database_host",
+   "fieldtype": "Data",
+   "in_list_view": 1,
+   "label": "Database Host",
+   "reqd": 1,
+   "unique": 1
+  },
+  {
+   "fieldname": "column_break_dboe",
+   "fieldtype": "Column Break"
+  },
+  {
+   "default": "3306",
+   "fieldname": "port",
+   "fieldtype": "Data",
+   "label": "Port"
+  },
+  {
+   "fieldname": "database_section",
+   "fieldtype": "Section Break",
+   "label": "Database "
+  },
+  {
+   "fieldname": "team_section",
+   "fieldtype": "Section Break",
+   "label": "Team"
+  },
+  {
+   "fieldname": "team",
+   "fieldtype": "Link",
+   "label": "Team",
+   "options": "Team",
+   "reqd": 1
+  },
+  {
+   "fieldname": "database_root_user",
+   "fieldtype": "Data",
+   "in_list_view": 1,
+   "label": "Database Root User",
+   "reqd": 1
+  },
+  {
+   "fieldname": "root_user_password",
+   "fieldtype": "Password",
+   "in_list_view": 1,
+   "label": "Root User Password",
+   "reqd": 1
+  }
+ ],
+ "index_web_pages_for_search": 1,
+ "links": [],
+ "modified": "2024-05-29 19:11:14.644480",
+ "modified_by": "Administrator",
+ "module": "Jcloud",
+ "name": "Managed Database Service",
+ "naming_rule": "By fieldname",
+ "owner": "Administrator",
+ "permissions": [
+  {
+   "create": 1,
+   "delete": 1,
+   "email": 1,
+   "export": 1,
+   "print": 1,
+   "read": 1,
+   "report": 1,
+   "role": "System Manager",
+   "share": 1,
+   "write": 1
+  }
+ ],
+ "sort_field": "modified",
+ "sort_order": "DESC",
+ "states": []
+}
\ No newline at end of file
diff --git a/jcloud/jcloud/pagetype/managed_database_service/managed_database_service.py b/jcloud/jcloud/pagetype/managed_database_service/managed_database_service.py
new file mode 100644
index 0000000..57e9905
--- /dev/null
+++ b/jcloud/jcloud/pagetype/managed_database_service/managed_database_service.py
@@ -0,0 +1,31 @@
+# Copyright (c) 2024, JINGROW
+# For license information, please see license.txt
+from __future__ import annotations
+
+import jingrow
+from jingrow.model.document import Document
+
+
+class ManagedDatabaseService(Document):
+	# begin: auto-generated types
+	# This code is auto-generated. Do not modify anything in this block.
+
+	from typing import TYPE_CHECKING
+
+	if TYPE_CHECKING:
+		from jingrow.types import DF
+
+		database_host: DF.Data
+		database_root_user: DF.Data
+		port: DF.Data | None
+		root_user_password: DF.Password
+		service_provider: DF.Literal["AWS RDS"]
+		team: DF.Link
+	# end: auto-generated types
+
+	@jingrow.whitelist()
+	def show_root_password(self):
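+		# Only System Managers may view the decrypted root password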
+		jingrow.only_for("System Manager")
+		return self.get_password("root_user_password")
diff --git a/jcloud/jcloud/pagetype/managed_database_service/test_managed_database_service.py b/jcloud/jcloud/pagetype/managed_database_service/test_managed_database_service.py
new file mode 100644
index 0000000..d076b3b
--- /dev/null
+++ b/jcloud/jcloud/pagetype/managed_database_service/test_managed_database_service.py
@@ -0,0 +1,9 @@
+# Copyright (c) 2024, JINGROW
+# See license.txt
+
+# import jingrow
+from jingrow.tests.utils import JingrowTestCase
+
+
+class TestManagedDatabaseService(JingrowTestCase):
+	pass
diff --git a/jcloud/jcloud/pagetype/mariadb_stalk/__init__.py b/jcloud/jcloud/pagetype/mariadb_stalk/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/jcloud/jcloud/pagetype/mariadb_stalk/mariadb_stalk.js b/jcloud/jcloud/pagetype/mariadb_stalk/mariadb_stalk.js
new file mode 100644
index 0000000..935cd54
--- /dev/null
+++ b/jcloud/jcloud/pagetype/mariadb_stalk/mariadb_stalk.js
@@ -0,0 +1,8 @@
+// Copyright (c) 2023, JINGROW
+// For license information, please see license.txt
+
+// jingrow.ui.form.on("MariaDB Stalk", {
+// 	refresh(frm) {
+
+// 	},
+// });
diff --git a/jcloud/jcloud/pagetype/mariadb_stalk/mariadb_stalk.json b/jcloud/jcloud/pagetype/mariadb_stalk/mariadb_stalk.json
new file mode 100644
index 0000000..aaa71c1
--- /dev/null
+++ b/jcloud/jcloud/pagetype/mariadb_stalk/mariadb_stalk.json
@@ -0,0 +1,75 @@
+{
+ "actions": [],
+ "allow_rename": 1,
+ "creation": "2023-12-21 16:47:05.329972",
+ "pagetype": "PageType",
+ "engine": "InnoDB",
+ "field_order": [
+  "server",
+  "column_break_apst",
+  "timestamp",
+  "section_break_bqjv",
+  "diagnostics"
+ ],
+ "fields": [
+  {
+   "fieldname": "server",
+   "fieldtype": "Link",
+   "in_filter": 1,
+   "in_list_view": 1,
+   "in_standard_filter": 1,
+   "label": "Server",
+   "options": "Database Server",
+   "read_only": 1,
+   "search_index": 1
+  },
+  {
+   "fieldname": "column_break_apst",
+   "fieldtype": "Column Break"
+  },
+  {
+   "fieldname": "timestamp",
+   "fieldtype": "Datetime",
+   "in_list_view": 1,
+   "in_standard_filter": 1,
+   "label": "Timestamp",
+   "read_only": 1,
+   "search_index": 1
+  },
+  {
+   "fieldname": "section_break_bqjv",
+   "fieldtype": "Section Break"
+  },
+  {
+   "fieldname": "diagnostics",
+   "fieldtype": "Table",
+   "label": "Diagnostics",
+   "options": "MariaDB Stalk Diagnostic",
+   "read_only": 1
+  }
+ ],
+ "index_web_pages_for_search": 1,
+ "links": [],
+ "modified": "2023-12-21 18:55:52.236854",
+ "modified_by": "Administrator",
+ "module": "Jcloud",
+ "name": "MariaDB Stalk",
+ "owner": "Administrator",
+ "permissions": [
+  {
+   "create": 1,
+   "delete": 1,
+   "email": 1,
+   "export": 1,
+   "print": 1,
+   "read": 1,
+   "report": 1,
+   "role": "System Manager",
+   "share": 1,
+   "write": 1
+  }
+ ],
+ "sort_field": "modified",
+ "sort_order": "DESC",
+ "states": []
+}
\ No newline at end of file
diff --git a/jcloud/jcloud/pagetype/mariadb_stalk/mariadb_stalk.py b/jcloud/jcloud/pagetype/mariadb_stalk/mariadb_stalk.py
new file mode 100644
index 0000000..ad51ba3
--- /dev/null
+++ b/jcloud/jcloud/pagetype/mariadb_stalk/mariadb_stalk.py
@@ -0,0 +1,102 @@
+# Copyright (c) 2023, JINGROW
+# For license information, please see license.txt
+
+import gzip
+from datetime import datetime
+
+import jingrow
+from jingrow.model.document import Document
+from jingrow.query_builder import Interval
+from jingrow.query_builder.functions import Now
+from jingrow.utils import add_to_date, convert_utc_to_system_timezone, now_datetime
+
+from jcloud.utils import log_error
+
+
+class MariaDBStalk(Document):
+	# begin: auto-generated types
+	# This code is auto-generated. Do not modify anything in this block.
+
+	from typing import TYPE_CHECKING
+
+	if TYPE_CHECKING:
+		from jingrow.types import DF
+
+		from jcloud.jcloud.pagetype.mariadb_stalk_diagnostic.mariadb_stalk_diagnostic import (
+			MariaDBStalkDiagnostic,
+		)
+
+		diagnostics: DF.Table[MariaDBStalkDiagnostic]
+		server: DF.Link | None
+		timestamp: DF.Datetime | None
+	# end: auto-generated types
+
+	@staticmethod
+	def clear_old_logs(days=30):
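+		# Archive stalks older than "days" as gzipped JSON files, then delete the documents permanently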
+		table = jingrow.qb.PageType("MariaDB Stalk")
+		stalks = jingrow.db.get_values(
+			table, filters=table.creation < (Now() - Interval(days=days))
+		)
+		for stalk in stalks:
+			try:
+				stalk = jingrow.get_pg("MariaDB Stalk", stalk)
+				stalk.create_json_gz_file()
+				stalk.delete(delete_permanently=True)
+				jingrow.db.commit()
+			except Exception:
+				log_error("MariaDB Stalk Delete Error")
+				jingrow.db.rollback()
+
+	def create_json_gz_file(self):
+		filename = f"mariadb-stalk-{self.server}-{self.timestamp}.json.gz"
+		encoded = jingrow.safe_encode(self.as_json())
+		compressed = gzip.compress(encoded)
+		if jingrow.db.exists("File", {"file_name": filename}):
+			return
+		file = jingrow.get_pg(
+			{
+				"pagetype": "File",
+				"file_name": filename,
+				"content": compressed,
+				"is_private": True,
+			}
+		)
+		file.insert()
+
+
+def fetch_stalks():
+	for server in jingrow.get_all(
+		"Database Server", {"status": "Active", "is_stalk_setup": True}, pluck="name"
+	):
+		jingrow.enqueue(
+			"jcloud.jcloud.pagetype.mariadb_stalk.mariadb_stalk.fetch_server_stalks",
+			server=server,
+			job_id=f"fetch_mariadb_stalk:{server}",
+		)
+
+
+def fetch_server_stalks(server):
+	server = jingrow.get_cached_pg("Database Server", server)
+	for stalk in server.get_stalks():
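+		# Stalk timestamps are UTC ISO strings; convert to system time and drop tzinfo before comparing and storing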
+		timestamp = convert_utc_to_system_timezone(
+			datetime.fromisoformat(stalk["timestamp"])
+		).replace(tzinfo=None)
+		# To avoid fetching incomplete stalks, wait for 5 minutes
+		if not now_datetime() > add_to_date(timestamp, minutes=5):
+			continue
+		# Don't fetch old stalks
+		if now_datetime() > add_to_date(timestamp, days=15):
+			continue
+		if jingrow.db.exists("MariaDB Stalk", {"server": server.name, "timestamp": timestamp}):
+			continue
+		try:
+			pg = jingrow.new_pg("MariaDB Stalk")
+			pg.server = server.name
+			pg.timestamp = timestamp
+			for diagnostic in server.get_stalk(stalk["name"]):
+				pg.append("diagnostics", diagnostic)
+			pg.insert()
+			jingrow.db.commit()
+		except Exception:
+			log_error("MariaDB Stalk Error", server=server, stalk=stalk)
+			jingrow.db.rollback()
diff --git a/jcloud/jcloud/pagetype/mariadb_stalk/test_mariadb_stalk.py b/jcloud/jcloud/pagetype/mariadb_stalk/test_mariadb_stalk.py
new file mode 100644
index 0000000..35ca149
--- /dev/null
+++ b/jcloud/jcloud/pagetype/mariadb_stalk/test_mariadb_stalk.py
@@ -0,0 +1,9 @@
+# Copyright (c) 2023, JINGROW
+# See license.txt
+
+# import jingrow
+from jingrow.tests.utils import JingrowTestCase
+
+
+class TestMariaDBStalk(JingrowTestCase):
+	pass
diff --git a/jcloud/jcloud/pagetype/mariadb_stalk_diagnostic/__init__.py b/jcloud/jcloud/pagetype/mariadb_stalk_diagnostic/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/jcloud/jcloud/pagetype/mariadb_stalk_diagnostic/mariadb_stalk_diagnostic.json b/jcloud/jcloud/pagetype/mariadb_stalk_diagnostic/mariadb_stalk_diagnostic.json
new file mode 100644
index 0000000..214b94d
--- /dev/null
+++ b/jcloud/jcloud/pagetype/mariadb_stalk_diagnostic/mariadb_stalk_diagnostic.json
@@ -0,0 +1,41 @@
+{
+ "actions": [],
+ "allow_rename": 1,
+ "creation": "2023-12-21 16:57:50.050145",
+ "pagetype": "PageType",
+ "engine": "InnoDB",
+ "field_order": [
+  "type",
+  "output"
+ ],
+ "fields": [
+  {
+   "fieldname": "type",
+   "fieldtype": "Data",
+   "in_filter": 1,
+   "in_list_view": 1,
+   "in_standard_filter": 1,
+   "label": "Type",
+   "read_only": 1,
+   "search_index": 1
+  },
+  {
+   "fieldname": "output",
+   "fieldtype": "Code",
+   "label": "Output",
+   "read_only": 1
+  }
+ ],
+ "index_web_pages_for_search": 1,
+ "istable": 1,
+ "links": [],
+ "modified": "2023-12-21 18:57:47.641855",
+ "modified_by": "Administrator",
+ "module": "Jcloud",
+ "name": "MariaDB Stalk Diagnostic",
+ "owner": "Administrator",
+ "permissions": [],
+ "sort_field": "modified",
+ "sort_order": "DESC",
+ "states": []
+}
\ No newline at end of file
diff --git a/jcloud/jcloud/pagetype/mariadb_stalk_diagnostic/mariadb_stalk_diagnostic.py b/jcloud/jcloud/pagetype/mariadb_stalk_diagnostic/mariadb_stalk_diagnostic.py
new file mode 100644
index 0000000..21d0544
--- /dev/null
+++ b/jcloud/jcloud/pagetype/mariadb_stalk_diagnostic/mariadb_stalk_diagnostic.py
@@ -0,0 +1,24 @@
+# Copyright (c) 2023, JINGROW
+# For license information, please see license.txt
+
+# import jingrow
+from jingrow.model.document import Document
+
+
+class MariaDBStalkDiagnostic(Document):
+	# begin: auto-generated types
+	# This code is auto-generated. Do not modify anything in this block.
+
+	from typing import TYPE_CHECKING
+
+	if TYPE_CHECKING:
+		from jingrow.types import DF
+
+		output: DF.Code | None
+		parent: DF.Data
+		parentfield: DF.Data
+		parenttype: DF.Data
+		type: DF.Data | None
+	# end: auto-generated types
+
+	pass
diff --git a/jcloud/jcloud/pagetype/mariadb_variable/__init__.py b/jcloud/jcloud/pagetype/mariadb_variable/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/jcloud/jcloud/pagetype/mariadb_variable/mariadb_variable.js b/jcloud/jcloud/pagetype/mariadb_variable/mariadb_variable.js
new file mode 100644
index 0000000..0b1c94b
--- /dev/null
+++ b/jcloud/jcloud/pagetype/mariadb_variable/mariadb_variable.js
@@ -0,0 +1,26 @@
+// Copyright (c) 2023, JINGROW
+// For license information, please see license.txt
+
+jingrow.ui.form.on('MariaDB Variable', {
+	refresh(frm) {
+		let root = 'https://mariadb.com/kb/en/';
+		frm.add_web_link(
+			`${root}${frm.pg.pg_section}-system-variables/#${frm.pg.name}`,
+			__('Check MariaDB Documentation'),
+		);
+		frm.add_custom_button(__('Set on all servers'), () => {
+			jingrow.confirm(
+				`Are you sure you want to set this variable on all servers?
+								If the variable is not dynamic, MariaDB will be restarted`,
+				() =>
+					frm.call('set_on_all_servers').then((r) => {
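+						// If the call returns a message, show it; otherwise just refresh the form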
+						if (r.message) {
+							jingrow.msgprint(r.message);
+						} else {
+							frm.refresh();
+						}
+					}),
+			);
+		});
+	},
+});
diff --git a/jcloud/jcloud/pagetype/mariadb_variable/mariadb_variable.json b/jcloud/jcloud/pagetype/mariadb_variable/mariadb_variable.json
new file mode 100644
index 0000000..22df947
--- /dev/null
+++ b/jcloud/jcloud/pagetype/mariadb_variable/mariadb_variable.json
@@ -0,0 +1,91 @@
+{
+ "actions": [],
+ "allow_import": 1,
+ "allow_rename": 1,
+ "autoname": "prompt",
+ "creation": "2023-04-29 00:00:12.372588",
+ "default_view": "List",
+ "pagetype": "PageType",
+ "editable_grid": 1,
+ "engine": "InnoDB",
+ "field_order": [
+  "dynamic",
+  "datatype",
+  "pg_section",
+  "skippable",
+  "column_break_yrfg",
+  "default_value",
+  "set_on_new_servers"
+ ],
+ "fields": [
+  {
+   "default": "0",
+   "fieldname": "dynamic",
+   "fieldtype": "Check",
+   "in_list_view": 1,
+   "label": "Dynamic",
+   "reqd": 1
+  },
+  {
+   "fieldname": "datatype",
+   "fieldtype": "Select",
+   "label": "Datatype",
+   "options": "Int\nFloat\nStr",
+   "reqd": 1
+  },
+  {
+   "fieldname": "default_value",
+   "fieldtype": "Data",
+   "label": "Default Value"
+  },
+  {
+   "default": "0",
+   "fieldname": "skippable",
+   "fieldtype": "Check",
+   "label": "Skippable"
+  },
+  {
+   "default": "server",
+   "fieldname": "pg_section",
+   "fieldtype": "Select",
+   "label": "Pg Section",
+   "options": "server\nreplication-and-binary-log\ninnodb",
+   "reqd": 1
+  },
+  {
+   "fieldname": "column_break_yrfg",
+   "fieldtype": "Column Break"
+  },
+  {
+   "default": "0",
+   "fieldname": "set_on_new_servers",
+   "fieldtype": "Check",
+   "label": "Set on new servers"
+  }
+ ],
+ "index_web_pages_for_search": 1,
+ "links": [],
+ "modified": "2024-07-03 07:14:39.263413",
+ "modified_by": "Administrator",
+ "module": "Jcloud",
+ "name": "MariaDB Variable",
+ "naming_rule": "Set by user",
+ "owner": "Administrator",
+ "permissions": [
+  {
+   "create": 1,
+   "delete": 1,
+   "email": 1,
+   "export": 1,
+   "print": 1,
+   "read": 1,
+   "report": 1,
+   "role": "System Manager",
+   "share": 1,
+   "write": 1
+  }
+ ],
+ "sort_field": "modified",
+ "sort_order": "DESC",
+ "states": []
+}
\ No newline at end of file
diff --git a/jcloud/jcloud/pagetype/mariadb_variable/mariadb_variable.py b/jcloud/jcloud/pagetype/mariadb_variable/mariadb_variable.py
new file mode 100644
index 0000000..1255336
--- /dev/null
+++ b/jcloud/jcloud/pagetype/mariadb_variable/mariadb_variable.py
@@ -0,0 +1,51 @@
+# Copyright (c) 2023, JINGROW
+# For license information, please see license.txt
+
+import jingrow
+from jingrow.model.document import Document
+
+from jcloud.jcloud.pagetype.database_server.database_server import DatabaseServer
+
+
+class MariaDBVariable(Document):
+	# begin: auto-generated types
+	# This code is auto-generated. Do not modify anything in this block.
+
+	from typing import TYPE_CHECKING
+
+	if TYPE_CHECKING:
+		from jingrow.types import DF
+
+		datatype: DF.Literal["Int", "Float", "Str"]
+		default_value: DF.Data | None
+		pg_section: DF.Literal["server", "replication-and-binary-log", "innodb"]
+		dynamic: DF.Check
+		set_on_new_servers: DF.Check
+		skippable: DF.Check
+	# end: auto-generated types
+
+	def get_default_value(self):
+		if not (value := self.default_value):
+			jingrow.throw("Default Value is required")
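+		# Cast the stored string to the declared datatype, e.g. "Int" with "5120" -> 5120; "Str" values pass through unchanged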
+		match self.datatype:
+			case "Int":
+				return int(value)
+			case "Float":
+				return float(value)
+		return value
+
+	@jingrow.whitelist()
+	def set_on_all_servers(self):
+		value = self.get_default_value()
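+		# Apply to every active, non self-hosted database server; the value goes into the datatype-specific field (value_int / value_float / value_str)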
+		servers = jingrow.get_all(
+			"Database Server", {"status": "Active", "is_self_hosted": False}, pluck="name"
+		)
+		for server_name in servers:
+			server: DatabaseServer = jingrow.get_pg("Database Server", server_name)
+			server.add_mariadb_variable(self.name, f"value_{self.datatype.lower()}", value)
+
+	@jingrow.whitelist()
+	def set_on_server(self, server_name):
+		value = self.get_default_value()
+		server: DatabaseServer = jingrow.get_pg("Database Server", server_name)
+		server.add_mariadb_variable(self.name, f"value_{self.datatype.lower()}", value)
diff --git a/jcloud/jcloud/pagetype/mariadb_variable/test_mariadb_variable.py b/jcloud/jcloud/pagetype/mariadb_variable/test_mariadb_variable.py
new file mode 100644
index 0000000..b489956
--- /dev/null
+++ b/jcloud/jcloud/pagetype/mariadb_variable/test_mariadb_variable.py
@@ -0,0 +1,67 @@
+# Copyright (c) 2023, JINGROW
+# See license.txt
+
+import jingrow
+from jingrow.tests.utils import JingrowTestCase
+
+from jcloud.jcloud.pagetype.database_server.test_database_server import (
+	create_test_database_server,
+)
+
+
+class TestMariaDBVariable(JingrowTestCase):
+	def tearDown(self):
+		jingrow.db.rollback()
+
+	def test_set_on_all_servers_sets_on_all_servers(self):
+		db_1 = create_test_database_server()
+		db_2 = create_test_database_server()
+		db_1.add_mariadb_variable("tmp_disk_table_size", "value_int", 1024)
+		db_1.add_mariadb_variable("innodb_old_blocks_time", "value_str", "1000")
+
+		variable = jingrow.get_pg("MariaDB Variable", "tmp_disk_table_size")  # in fixture
+		variable.default_value = "5120"
+		variable.save()
+
+		variable.set_on_all_servers()
+		db_1.reload()
+		db_2.reload()
+		self.assertEqual(db_1.mariadb_system_variables[0].value, 5120 * 1024 * 1024)
+		self.assertEqual(db_2.mariadb_system_variables[0].value, 5120 * 1024 * 1024)
+
+		variable = jingrow.get_pg("MariaDB Variable", "innodb_old_blocks_time")
+		variable.default_value = "5000"
+		variable.save()
+
+		variable.set_on_all_servers()
+		db_1.reload()
+		db_2.reload()
+		self.assertEqual(db_1.mariadb_system_variables[1].value, "5000")
+		self.assertEqual(db_2.mariadb_system_variables[1].value, "5000")
+
+	def test_set_on_server_sets_on_one_server(self):
+		db_1 = create_test_database_server()
+		db_2 = create_test_database_server()
+		db_2.add_mariadb_variable("tmp_disk_table_size", "value_int", 1024)
+		db_1.add_mariadb_variable("tmp_disk_table_size", "value_int", 1024)
+		db_1.add_mariadb_variable("innodb_old_blocks_time", "value_str", "1000")
+
+		variable = jingrow.get_pg("MariaDB Variable", "tmp_disk_table_size")
+		variable.default_value = "5120"
+		variable.save()
+
+		variable.set_on_server(db_1.name)
+		db_1.reload()
+		db_2.reload()
+		self.assertEqual(db_1.mariadb_system_variables[0].value, 5120 * 1024 * 1024)
+		self.assertEqual(db_2.mariadb_system_variables[0].value, 1024 * 1024 * 1024)
+
+		variable = jingrow.get_pg("MariaDB Variable", "innodb_old_blocks_time")
+		variable.default_value = "5000"
+		variable.save()
+
+		variable.set_on_server(db_2.name)
+		db_1.reload()
+		db_2.reload()
+		self.assertEqual(db_1.mariadb_system_variables[1].value, "1000")
+		self.assertEqual(db_2.mariadb_system_variables[1].value, "5000")
diff --git a/jcloud/jcloud/pagetype/marketplace_app/__init__.py b/jcloud/jcloud/pagetype/marketplace_app/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/jcloud/jcloud/pagetype/marketplace_app/events.py b/jcloud/jcloud/pagetype/marketplace_app/events.py
new file mode 100644
index 0000000..5cfc741
--- /dev/null
+++ b/jcloud/jcloud/pagetype/marketplace_app/events.py
@@ -0,0 +1,47 @@
+# Copyright (c) 2024, JINGROW
+# For license information, please see license.txt
+
+import jingrow
+
+
+def auto_review_for_missing_steps():
+	for app in jingrow.get_all(
+		"Marketplace App",
+		{
+			"status": ("in", ["Draft", "Attention Required", "In Review"]),
+			"stop_auto_review": False,
+		},
+		pluck="name",
+	):
+		app_pg = jingrow.get_pg("Marketplace App", app)
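+		# Review checklist: an approval request exists, a logo is set, the placeholder description was replaced and all support links are filled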
+		release = bool(jingrow.db.exists("App Release Approval Request", {"app": app}))
+		logo = bool(app_pg.image)
+		desc = "Please add a short" not in app_pg.description
+		links = bool(
+			app_pg.website
+			and app_pg.support
+			and app_pg.documentation
+			and app_pg.privacy_policy
+			and app_pg.terms_of_service
+		)
+
+		notify_email = jingrow.db.get_value("Team", app_pg.team, "notify_email")
+		if notify_email and not (logo and desc and links and release):
+			jingrow.sendmail(
+				subject=f"Marketplace App Review: {app_pg.title}",
+				recipients=[notify_email],
+				template="marketplace_auto_review",
+				reference_pagetype="Marketplace App",
+				reference_name=app,
+				args={
+					"logo": logo,
+					"links": links,
+					"desc": desc,
+					"release": release,
+					"review_page_link": f"{jingrow.local.site}/dashboard/marketplace/apps/{app}/review",
+				},
+			)
diff --git a/jcloud/jcloud/pagetype/marketplace_app/marketplace_app.js b/jcloud/jcloud/pagetype/marketplace_app/marketplace_app.js
new file mode 100644
index 0000000..9fdbcf5
--- /dev/null
+++ b/jcloud/jcloud/pagetype/marketplace_app/marketplace_app.js
@@ -0,0 +1,11 @@
+// Copyright (c) 2020, JINGROW
+// For license information, please see license.txt
+
+jingrow.ui.form.on('Marketplace App', {
+	refresh: function (frm) {
+		frm.add_web_link(
+			`/dashboard/apps/${frm.pg.name}/`,
+			__('Open in dashboard'),
+		);
+	},
+});
diff --git a/jcloud/jcloud/pagetype/marketplace_app/marketplace_app.json b/jcloud/jcloud/pagetype/marketplace_app/marketplace_app.json
new file mode 100644
index 0000000..e162825
--- /dev/null
+++ b/jcloud/jcloud/pagetype/marketplace_app/marketplace_app.json
@@ -0,0 +1,447 @@
+{
+ "actions": [],
+ "allow_guest_to_view": 1,
+ "allow_rename": 1,
+ "creation": "2022-02-04 19:53:27.058972",
+ "pagetype": "PageType",
+ "editable_grid": 1,
+ "engine": "InnoDB",
+ "field_order": [
+  "image",
+  "app",
+  "title",
+  "column_break_3",
+  "team",
+  "route",
+  "section_break_7",
+  "jingrow_approved",
+  "subscription_type",
+  "column_break_10",
+  "categories",
+  "published",
+  "section_break_5",
+  "sources",
+  "descriptions_tab",
+  "description",
+  "app_screenshots_section",
+  "screenshots",
+  "section_break_15",
+  "long_description",
+  "links_section",
+  "website",
+  "support",
+  "documentation",
+  "column_break_16",
+  "privacy_policy",
+  "terms_of_service",
+  "saas_tab",
+  "outgoing_email",
+  "outgoing_sender_name",
+  "signup_email_template_section",
+  "message",
+  "column_break_32",
+  "signature",
+  "column_break_30",
+  "subject",
+  "poll_method",
+  "column_break_33",
+  "custom_verify_template",
+  "subscription_update_hook",
+  "site_config_section",
+  "site_config",
+  "scripts_tab",
+  "run_after_install_script",
+  "after_install_script",
+  "run_after_uninstall_script",
+  "after_uninstall_script",
+  "review_tab",
+  "stop_auto_review",
+  "review_stage",
+  "status",
+  "dashboard_tab",
+  "onboarding_related_section",
+  "show_for_site_creation",
+  "localisation_apps",
+  "section_break_tlpw",
+  "average_rating"
+ ],
+ "fields": [
+  {
+   "fieldname": "image",
+   "fieldtype": "Attach Image",
+   "hidden": 1,
+   "label": "Image"
+  },
+  {
+   "fetch_from": "app.title",
+   "fetch_if_empty": 1,
+   "fieldname": "title",
+   "fieldtype": "Data",
+   "in_list_view": 1,
+   "label": "Title",
+   "reqd": 1
+  },
+  {
+   "fieldname": "column_break_3",
+   "fieldtype": "Column Break"
+  },
+  {
+   "default": "Draft",
+   "fieldname": "status",
+   "fieldtype": "Select",
+   "in_list_view": 1,
+   "in_standard_filter": 1,
+   "label": "Status",
+   "options": "Draft\nPublished\nIn Review\nAttention Required\nRejected\nDisabled"
+  },
+  {
+   "fieldname": "section_break_5",
+   "fieldtype": "Section Break"
+  },
+  {
+   "default": "Please add a short description about your app here...",
+   "fieldname": "description",
+   "fieldtype": "Small Text",
+   "in_list_view": 1,
+   "label": "Description",
+   "reqd": 1
+  },
+  {
+   "fieldname": "long_description",
+   "fieldtype": "Text Editor",
+   "label": "Long Description"
+  },
+  {
+   "fieldname": "links_section",
+   "fieldtype": "Tab Break",
+   "label": "Support Links"
+  },
+  {
+   "fieldname": "website",
+   "fieldtype": "Data",
+   "label": "Website",
+   "options": "URL"
+  },
+  {
+   "fieldname": "support",
+   "fieldtype": "Data",
+   "label": "Support",
+   "options": "URL"
+  },
+  {
+   "fieldname": "privacy_policy",
+   "fieldtype": "Data",
+   "label": "Privacy Policy",
+   "options": "URL"
+  },
+  {
+   "fieldname": "documentation",
+   "fieldtype": "Data",
+   "label": "Documentation",
+   "options": "URL"
+  },
+  {
+   "fieldname": "terms_of_service",
+   "fieldtype": "Data",
+   "label": "Terms of Service",
+   "options": "URL"
+  },
+  {
+   "fieldname": "route",
+   "fieldtype": "Data",
+   "label": "Route"
+  },
+  {
+   "default": "0",
+   "fieldname": "published",
+   "fieldtype": "Check",
+   "hidden": 1,
+   "label": "Published",
+   "read_only": 1
+  },
+  {
+   "fieldname": "app",
+   "fieldtype": "Link",
+   "label": "App",
+   "options": "App",
+   "reqd": 1,
+   "unique": 1
+  },
+  {
+   "fieldname": "column_break_16",
+   "fieldtype": "Column Break"
+  },
+  {
+   "fieldname": "team",
+   "fieldtype": "Link",
+   "in_standard_filter": 1,
+   "label": "Team",
+   "options": "Team"
+  },
+  {
+   "fieldname": "sources",
+   "fieldtype": "Table",
+   "label": "Sources",
+   "options": "Marketplace App Version"
+  },
+  {
+   "fieldname": "app_screenshots_section",
+   "fieldtype": "Section Break",
+   "label": "App Screenshots"
+  },
+  {
+   "fieldname": "screenshots",
+   "fieldtype": "Table",
+   "label": "Screenshots",
+   "options": "Marketplace App Screenshot"
+  },
+  {
+   "fieldname": "section_break_15",
+   "fieldtype": "Section Break"
+  },
+  {
+   "fieldname": "categories",
+   "fieldtype": "Table",
+   "label": "Categories",
+   "options": "Marketplace App Categories"
+  },
+  {
+   "default": "Free",
+   "fieldname": "subscription_type",
+   "fieldtype": "Select",
+   "in_list_view": 1,
+   "in_standard_filter": 1,
+   "label": "Subscription Type",
+   "options": "Free\nPaid\nFreemium"
+  },
+  {
+   "fieldname": "signup_email_template_section",
+   "fieldtype": "Section Break",
+   "label": "Signup Email Template"
+  },
+  {
+   "default": "0",
+   "fieldname": "custom_verify_template",
+   "fieldtype": "Check",
+   "label": "Use custom verify template"
+  },
+  {
+   "fieldname": "subject",
+   "fieldtype": "Data",
+   "label": "Subject"
+  },
+  {
+   "fieldname": "message",
+   "fieldtype": "Text Editor",
+   "label": "Message"
+  },
+  {
+   "fieldname": "signature",
+   "fieldtype": "Text Editor",
+   "label": "Signature"
+  },
+  {
+   "fieldname": "poll_method",
+   "fieldtype": "Data",
+   "label": "Poll Method"
+  },
+  {
+   "fieldname": "subscription_update_hook",
+   "fieldtype": "Data",
+   "label": "Subscription Update Hook"
+  },
+  {
+   "fieldname": "saas_tab",
+   "fieldtype": "Tab Break",
+   "label": "SaaS"
+  },
+  {
+   "fieldname": "column_break_32",
+   "fieldtype": "Column Break"
+  },
+  {
+   "fieldname": "column_break_30",
+   "fieldtype": "Section Break"
+  },
+  {
+   "fieldname": "column_break_33",
+   "fieldtype": "Column Break"
+  },
+  {
+   "fieldname": "descriptions_tab",
+   "fieldtype": "Tab Break",
+   "label": "Descriptions"
+  },
+  {
+   "fieldname": "section_break_7",
+   "fieldtype": "Section Break"
+  },
+  {
+   "fieldname": "column_break_10",
+   "fieldtype": "Column Break"
+  },
+  {
+   "fieldname": "outgoing_email",
+   "fieldtype": "Data",
+   "label": "Outgoing Email"
+  },
+  {
+   "fieldname": "outgoing_sender_name",
+   "fieldtype": "Data",
+   "label": "Outgoing Sender Name"
+  },
+  {
+   "fieldname": "scripts_tab",
+   "fieldtype": "Tab Break",
+   "label": "Scripts"
+  },
+  {
+   "fieldname": "after_install_script",
+   "fieldtype": "Code",
+   "label": "After Install Script",
+   "options": "Python"
+  },
+  {
+   "fieldname": "after_uninstall_script",
+   "fieldtype": "Code",
+   "label": "After Uninstall Script",
+   "options": "Python"
+  },
+  {
+   "default": "0",
+   "fieldname": "run_after_install_script",
+   "fieldtype": "Check",
+   "label": "Rut After Install Script"
+  },
+  {
+   "default": "0",
+   "fieldname": "run_after_uninstall_script",
+   "fieldtype": "Check",
+   "label": "Run After Uninstall Script"
+  },
+  {
+   "fieldname": "review_tab",
+   "fieldtype": "Tab Break",
+   "label": "Review"
+  },
+  {
+   "default": "Not Started",
+   "fieldname": "review_stage",
+   "fieldtype": "Select",
+   "in_list_view": 1,
+   "label": "Review Stage",
+   "options": "Not Started\nDescription Missing\nLogo Missing\nApp Release Not Reviewed\nReady for Review\nReady to Publish\nRejected"
+  },
+  {
+   "default": "0",
+   "fieldname": "jingrow_approved",
+   "fieldtype": "Check",
+   "label": "Jingrow Approved"
+  },
+  {
+   "default": "0",
+   "fieldname": "stop_auto_review",
+   "fieldtype": "Check",
+   "label": "Stop Auto Review"
+  },
+  {
+   "description": "This keys are added to site config on saas signup",
+   "fieldname": "site_config",
+   "fieldtype": "JSON",
+   "label": "Site Config"
+  },
+  {
+   "fieldname": "site_config_section",
+   "fieldtype": "Section Break",
+   "label": "Site Config"
+  },
+  {
+   "fieldname": "onboarding_related_section",
+   "fieldtype": "Section Break",
+   "label": "Onboarding/Site Creation Related"
+  },
+  {
+   "default": "0",
+   "fieldname": "show_for_site_creation",
+   "fieldtype": "Check",
+   "label": "Show for site creation"
+  },
+  {
+   "fieldname": "dashboard_tab",
+   "fieldtype": "Tab Break",
+   "label": "Dashboard"
+  },
+  {
+   "fieldname": "section_break_tlpw",
+   "fieldtype": "Section Break"
+  },
+  {
+   "default": "0",
+   "fieldname": "average_rating",
+   "fieldtype": "Float",
+   "hidden": 1,
+   "label": "Average Rating",
+   "precision": "2"
+  },
+  {
+   "fieldname": "localisation_apps",
+   "fieldtype": "Table",
+   "label": "Localisation Apps",
+   "options": "Marketplace Localisation App"
+  }
+ ],
+ "has_web_view": 1,
+ "image_field": "image",
+ "index_web_pages_for_search": 1,
+ "is_published_field": "published",
+ "links": [
+  {
+   "group": "General",
+   "link_pagetype": "App Release Approval Request",
+   "link_fieldname": "marketplace_app"
+  },
+  {
+   "group": "App Subscription",
+   "link_pagetype": "Marketplace App Plan",
+   "link_fieldname": "app"
+  },
+  {
+   "group": "App Subscription",
+   "link_pagetype": "Marketplace App Subscription",
+   "link_fieldname": "app"
+  }
+ ],
+ "modified": "2025-02-13 17:16:24.333531",
+ "modified_by": "Administrator",
+ "module": "Jcloud",
+ "name": "Marketplace App",
+ "owner": "Administrator",
+ "permissions": [
+  {
+   "create": 1,
+   "delete": 1,
+   "email": 1,
+   "export": 1,
+   "print": 1,
+   "read": 1,
+   "report": 1,
+   "role": "System Manager",
+   "share": 1,
+   "write": 1
+  }
+ ],
+ "sort_field": "modified",
+ "sort_order": "DESC",
+ "states": [
+  {
+   "color": "Gray",
+   "title": "Draft"
+  },
+  {
+   "color": "Green",
+   "title": "Published"
+  }
+ ],
+ "title_field": "title",
+ "track_changes": 1
+}
\ No newline at end of file
diff --git a/jcloud/jcloud/pagetype/marketplace_app/marketplace_app.py b/jcloud/jcloud/pagetype/marketplace_app/marketplace_app.py
new file mode 100644
index 0000000..1fa81e8
--- /dev/null
+++ b/jcloud/jcloud/pagetype/marketplace_app/marketplace_app.py
@@ -0,0 +1,676 @@
+# Copyright (c) 2021, JINGROW
+# For license information, please see license.txt
+
+from __future__ import annotations
+
+from base64 import b64decode
+from typing import TYPE_CHECKING, ClassVar
+
+import jingrow
+import requests
+from jingrow.query_builder.functions import Cast_
+from jingrow.utils.caching import redis_cache
+from jingrow.utils.safe_exec import safe_exec
+from jingrow.website.utils import cleanup_page_name
+from jingrow.website.website_generator import WebsiteGenerator
+
+from jcloud.api.client import dashboard_whitelist
+from jcloud.api.github import get_access_token
+from jcloud.marketplace.pagetype.marketplace_app_plan.marketplace_app_plan import (
+	get_app_plan_features,
+)
+from jcloud.jcloud.pagetype.app.app import new_app as new_app_pg
+from jcloud.jcloud.pagetype.app_release_approval_request.app_release_approval_request import (
+	AppReleaseApprovalRequest,
+)
+from jcloud.jcloud.pagetype.marketplace_app.utils import get_rating_percentage_distribution
+from jcloud.utils import get_current_team, get_last_pg
+
+if TYPE_CHECKING:
+	from jcloud.jcloud.pagetype.site.site import Site
+
+
+class MarketplaceApp(WebsiteGenerator):
+	# begin: auto-generated types
+	# This code is auto-generated. Do not modify anything in this block.
+
+	from typing import TYPE_CHECKING
+
+	if TYPE_CHECKING:
+		from jingrow.types import DF
+
+		from jcloud.jcloud.pagetype.marketplace_app_categories.marketplace_app_categories import (
+			MarketplaceAppCategories,
+		)
+		from jcloud.jcloud.pagetype.marketplace_app_screenshot.marketplace_app_screenshot import (
+			MarketplaceAppScreenshot,
+		)
+		from jcloud.jcloud.pagetype.marketplace_app_version.marketplace_app_version import MarketplaceAppVersion
+		from jcloud.jcloud.pagetype.marketplace_localisation_app.marketplace_localisation_app import (
+			MarketplaceLocalisationApp,
+		)
+
+		after_install_script: DF.Code | None
+		after_uninstall_script: DF.Code | None
+		app: DF.Link
+		average_rating: DF.Float
+		categories: DF.Table[MarketplaceAppCategories]
+		custom_verify_template: DF.Check
+		description: DF.SmallText
+		documentation: DF.Data | None
+		jingrow_approved: DF.Check
+		image: DF.AttachImage | None
+		localisation_apps: DF.Table[MarketplaceLocalisationApp]
+		long_description: DF.TextEditor | None
+		message: DF.TextEditor | None
+		outgoing_email: DF.Data | None
+		outgoing_sender_name: DF.Data | None
+		poll_method: DF.Data | None
+		privacy_policy: DF.Data | None
+		published: DF.Check
+		review_stage: DF.Literal[
+			"Not Started",
+			"Description Missing",
+			"Logo Missing",
+			"App Release Not Reviewed",
+			"Ready for Review",
+			"Ready to Publish",
+			"Rejected",
+		]
+		route: DF.Data | None
+		run_after_install_script: DF.Check
+		run_after_uninstall_script: DF.Check
+		screenshots: DF.Table[MarketplaceAppScreenshot]
+		show_for_site_creation: DF.Check
+		signature: DF.TextEditor | None
+		site_config: DF.JSON | None
+		sources: DF.Table[MarketplaceAppVersion]
+		status: DF.Literal["Draft", "Published", "In Review", "Attention Required", "Rejected", "Disabled"]
+		stop_auto_review: DF.Check
+		subject: DF.Data | None
+		subscription_type: DF.Literal["Free", "Paid", "Freemium"]
+		subscription_update_hook: DF.Data | None
+		support: DF.Data | None
+		team: DF.Link | None
+		terms_of_service: DF.Data | None
+		title: DF.Data
+		website: DF.Data | None
+	# end: auto-generated types
+
+	dashboard_fields: ClassVar = [
+		"image",
+		"title",
+		"status",
+		"description",
+		"review_stage",
+	]
+
+	def autoname(self):
+		self.name = self.app
+
+	@dashboard_whitelist()
+	def delete(self):
+		if self.status != "Draft":
+			jingrow.throw("You can only delete an app in Draft status")
+
+		if get_current_team() != self.team:
+			jingrow.throw("You are not authorized to delete this app")
+
+		super().delete()
+
+	def on_trash(self):
+		jingrow.db.delete("Marketplace App Plan", {"app": self.name})
+		jingrow.db.delete("App Release Approval Request", {"marketplace_app": self.name})
+
+	@dashboard_whitelist()
+	def create_approval_request(self, app_release: str):
+		"""Create a new Approval Request for given `app_release`"""
+		AppReleaseApprovalRequest.create(self.app, app_release)
+
+	@dashboard_whitelist()
+	def cancel_approval_request(self, app_release: str):
+		approval_requests = jingrow.get_all(
+			"App Release Approval Request",
+			filters={"app_release": app_release},
+			pluck="name",
+			order_by="creation desc",
+		)
+
+		if len(approval_requests) == 0:
+			jingrow.throw("No approval request exists for the given app release")
+
+		jingrow.get_pg("App Release Approval Request", approval_requests[0]).cancel()
+
+	def before_insert(self):
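+		# Skip duplicate checks, App/App Source creation and the README fetch while running tests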
+		if not jingrow.flags.in_test:
+			self.check_if_duplicate()
+			self.create_app_and_source_if_needed()
+			self.long_description = jingrow.utils.md_to_html(self.fetch_readme())
+
+		self.set_route()
+
+	def set_route(self):
+		self.route = "marketplace/apps/" + cleanup_page_name(self.app)
+
+	def check_if_duplicate(self):
+		if jingrow.db.exists("Marketplace App", self.name):
+			jingrow.throw(f"App {self.name} already exists. Please contact support.")
+
+	def create_app_and_source_if_needed(self):
+		if jingrow.db.exists("App", self.app or self.name):
+			app_pg = jingrow.get_pg("App", self.app or self.name)
+		else:
+			app_pg = new_app_pg(self.name, self.title)
+
+		if not self.sources:
+			source = app_pg.add_source(
+				self.version,
+				self.repository_url,
+				self.branch,
+				self.team,
+				self.github_installation_id,
+				public=True,
+			)
+			self.app = source.app
+			self.append("sources", {"version": self.version, "source": source.name})
+
+	def validate(self):
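+		# published doubles as the website publish flag (is_published_field); keep it in sync with status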
+		self.published = self.status == "Published"
+		self.validate_sources()
+		self.validate_number_of_screenshots()
+
+	def validate_sources(self):
+		for source in self.sources:
+			app_source = jingrow.get_pg("App Source", source.source)
+
+			if app_source.app != self.app:
+				jingrow.throw(f"App Source {jingrow.bold(source.source)} does not belong to this app!")
+
+			app_source_versions = [v.version for v in app_source.versions]
+			if source.version not in app_source_versions:
+				jingrow.throw(
+					f"App Source {jingrow.bold(source.source)} does not contain"
+					f" version: {jingrow.bold(source.version)}"
+				)
+
+	def validate_number_of_screenshots(self):
+		max_allowed_screenshots = jingrow.db.get_single_value("Jcloud Settings", "max_allowed_screenshots")
+		if len(self.screenshots) > max_allowed_screenshots:
+			jingrow.throw(f"You cannot add more than {max_allowed_screenshots} screenshots for an app.")
+
+	def change_branch(self, source, version, to_branch):
+		existing_source = jingrow.db.exists(
+			"App Source",
+			{
+				"name": ("!=", self.name),
+				"app": self.app,
+				"repository_url": jingrow.db.get_value("App Source", {"name": source}, "repository_url"),
+				"branch": to_branch,
+				"team": self.team,
+			},
+		)
+		if existing_source:
+			# If a source with the target branch already exists, just add this version to its child table and reuse it
+			try:
+				source_pg = jingrow.get_pg("App Source", existing_source)
+				source_pg.append("versions", {"version": version})
+				source_pg.save()
+			except Exception:
+				pass
+
+			for row in self.sources:
+				if row.source == source:
+					row.source = existing_source
+					self.save()
+		else:
+			# if a different source with the target branch doesn't exist, update the existing source's branch
+			source_pg = jingrow.get_pg("App Source", source)
+			source_pg.branch = to_branch
+			source_pg.save()
+
+	@dashboard_whitelist()
+	def add_version(self, version, branch):
+		existing_source = jingrow.db.exists(
+			"App Source",
+			[
+				["App Source", "app", "=", self.app],
+				["App Source", "team", "=", self.team],
+				["App Source", "branch", "=", branch],
+			],
+		)
+		if existing_source:
+			# If a source with the target branch already exists, just add this version to its child table and reuse it
+			source_pg = jingrow.get_pg("App Source", existing_source)
+			try:
+				source_pg.append("versions", {"version": version})
+				source_pg.public = 1
+				source_pg.save()
+			except Exception:
+				pass
+		else:
+			# create new app source for version and branch to switch
+			source_pg = jingrow.get_pg(
+				{
+					"pagetype": "App Source",
+					"app": self.app,
+					"team": self.team,
+					"branch": branch,
+					"repository_url": jingrow.db.get_value(
+						"App Source", {"name": self.sources[0].source}, "repository_url"
+					),
+					"public": 1,
+				}
+			)
+			source_pg.append("versions", {"version": version})
+			source_pg.insert(ignore_permissions=True)
+
+		self.append("sources", {"version": version, "source": source_pg.name})
+		self.save()
+
+	@dashboard_whitelist()
+	def remove_version(self, version):
+		if self.status == "Published" and len(self.sources) == 1:
+			jingrow.throw("Failed to remove. Need at least 1 version for a published app")
+
+		for i, source in enumerate(self.sources):
+			if source.version == version:
+				# remove from marketplace app source child table
+				self.sources.pop(i)
+				self.save()
+
+				app_source = jingrow.get_cached_pg("App Source", source.source)
+				for j, source_version in enumerate(app_source.versions):
+					if source_version.version == version and len(app_source.versions) > 1:
+						# remove from app source versions child table
+						app_source.versions.pop(j)
+						app_source.save()
+						break
+				break
+
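remove_version keeps two child tables consistent: the marketplace app's own sources list and the versions list on the underlying App Source, while a published app must always retain at least one version. A standalone sketch of that invariant using plain lists and dicts (all names here are illustrative):

def remove_version(app, version):
    """Drop `version` from app["sources"]; refuse if it is the last version of a published app."""
    if app["published"] and len(app["sources"]) == 1:
        raise ValueError("Need at least 1 version for a published app")

    for i, row in enumerate(app["sources"]):
        if row["version"] == version:
            app["sources"].pop(i)
            # Only trim the shared App Source if it still keeps at least one other version.
            src_versions = row["app_source"]["versions"]
            if version in src_versions and len(src_versions) > 1:
                src_versions.remove(version)
            break


shared_source = {"versions": ["Version 14", "Version 15"]}
app = {
    "published": True,
    "sources": [
        {"version": "Version 14", "app_source": shared_source},
        {"version": "Version 15", "app_source": shared_source},
    ],
}
remove_version(app, "Version 14")
print(app["sources"], shared_source)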
+	def get_app_source(self):
+		return jingrow.get_pg("App Source", {"app": self.app})
+
+	def fetch_readme(self):
+		source = self.get_app_source()
+
+		if source.github_installation_id:
+			github_access_token = get_access_token(source.github_installation_id)
+		else:
+			github_access_token = jingrow.get_value("Jcloud Settings", None, "github_access_token")
+
+		headers = {
+			"Authorization": f"token {github_access_token}",
+		}
+		owner = source.repository_owner
+		repository = source.repository
+		branch = source.branch
+
+		readme_content = None
+		variants = ["README.md", "readme.md", "readme", "README", "Readme"]
+		for variant in variants:
+			try:
+				readme = requests.get(
+					f"http://git.jingrow.com:3000/api/v1/repos/{owner}/{repository}/contents/{variant}",
+					headers=headers,
+					params={"ref": branch},
+				).json()
+				readme_content = b64decode(readme["content"]).decode()
+				if readme_content:
+					break
+			except Exception:
+				print(jingrow.get_traceback())
+				continue
+
+		return readme_content
+
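fetch_readme simply tries a handful of filename variants against the repository contents API and base64-decodes the first hit. A self-contained sketch of the same fallback loop with requests; the API base URL, repository coordinates and token are placeholders, not real endpoints or credentials:

from base64 import b64decode

import requests

API_BASE = "https://git.example.com/api/v1"  # placeholder for a Gitea-style API root
VARIANTS = ["README.md", "readme.md", "readme", "README", "Readme"]


def fetch_readme(owner, repo, branch, token):
    headers = {"Authorization": f"token {token}"}
    for variant in VARIANTS:
        try:
            resp = requests.get(
                f"{API_BASE}/repos/{owner}/{repo}/contents/{variant}",
                headers=headers,
                params={"ref": branch},
                timeout=10,
            )
            resp.raise_for_status()
            content = b64decode(resp.json()["content"]).decode()
            if content:
                return content
        except Exception:
            # Missing variant or unexpected payload: fall through to the next candidate.
            continue
    return None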
+	def get_context(self, context):
+		context.no_cache = True
+		context.app = self
+
+		supported_versions = []
+		public_rgs = jingrow.get_all("Release Group", filters={"public": True}, fields=["version", "name"])
+
+		unique_public_rgs = {}
+		for rg in public_rgs:
+			if rg.version not in unique_public_rgs:
+				unique_public_rgs[rg.version] = rg.name
+
+		for source in self.sources:
+			if source.version not in unique_public_rgs:
+				continue
+
+			jingrow_source_name = jingrow.get_pg(
+				"Release Group App",
+				{"app": "jingrow", "parent": unique_public_rgs[source.version]},
+			).source
+			jingrow_source = jingrow.db.get_value(
+				"App Source",
+				jingrow_source_name,
+				["repository_url", "branch"],
+				as_dict=True,
+			)
+
+			app_source = jingrow.db.get_value(
+				"App Source",
+				source.source,
+				["repository_url", "branch", "public"],
+				as_dict=True,
+			)
+
+			supported_versions.append(
+				jingrow._dict(
+					{
+						"version": source.version,
+						"app_source": app_source,
+						"jingrow_source": jingrow_source,
+					}
+				)
+			)
+
+		# Sort based on version
+		supported_versions.sort(key=lambda x: x.version, reverse=True)
+
+		context.supported_versions = supported_versions
+
+		# Add publisher info
+		publisher_profile = jingrow.get_all(
+			"Marketplace Publisher Profile",
+			filters={"team": self.team},
+			fields=["display_name", "contact_email"],
+			limit=1,
+		)
+
+		if publisher_profile:
+			context.publisher_profile = publisher_profile[0]
+
+		context.no_of_installs = self.get_analytics().get("total_installs")
+		context.plans = self.get_plans()
+
+		user_reviews = self.get_user_reviews()
+		for review in user_reviews:
+			review["developer_reply"] = jingrow.get_all(
+				"Developer Review Reply",
+				filters={"review": review.name},
+				pluck="description",
+				order_by="creation asc",
+			)
+
+		ratings_summary = self.get_user_ratings_summary(user_reviews)
+
+		context.user_reviews = user_reviews
+		context.ratings_summary = ratings_summary
+
+	def get_user_reviews(self) -> list:
+		app_user_review = jingrow.qb.PageType("App User Review")
+		user = jingrow.qb.PageType("User")
+
+		query = (
+			jingrow.qb.from_(app_user_review)
+			.join(user)
+			.on(user.name == app_user_review.reviewer)
+			.select(
+				app_user_review.name,
+				app_user_review.title,
+				Cast_(5 * app_user_review.rating, "INT").as_("rating"),
+				app_user_review.review,
+				app_user_review.creation,
+				app_user_review.reviewer,
+				user.full_name.as_("user_name"),
+			)
+			.where(app_user_review.app == self.name)
+		)
+		return query.run(as_dict=True)
+
+	def get_user_ratings_summary(self, reviews: list) -> dict:
+		total_num_reviews = len(reviews)
+		avg_rating = 0.0
+
+		if len(reviews) > 0:
+			avg_rating = sum([r.rating for r in reviews]) / len(reviews)
+			avg_rating = jingrow.utils.rounded(avg_rating, 1)
+
+		rating_percentages = get_rating_percentage_distribution(reviews)
+
+		return {
+			"total_num_reviews": total_num_reviews,
+			"avg_rating": avg_rating,
+			"rating_percentages": rating_percentages,
+		}
+
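The ratings summary is an average rounded to one decimal plus an integer-percentage distribution over the five rating buckets (the same floor-division math as get_rating_percentage_distribution in utils.py). A standalone sketch, with the built-in round standing in for jingrow.utils.rounded:

def ratings_summary(ratings):
    """ratings: list of ints in 1..5; returns average and percentage per bucket."""
    total = len(ratings)
    avg = round(sum(ratings) / total, 1) if total else 0.0

    counts = {star: 0 for star in range(1, 6)}
    for r in ratings:
        counts[r] += 1
    # Integer percentages via floor division, as in get_rating_percentage_distribution.
    percentages = {star: (n * 100 // total) if total else 0 for star, n in counts.items()}
    return {"total_num_reviews": total, "avg_rating": avg, "rating_percentages": percentages}


print(ratings_summary([5, 5, 4, 1]))
# {'total_num_reviews': 4, 'avg_rating': 3.8, 'rating_percentages': {1: 25, 2: 0, 3: 0, 4: 25, 5: 50}}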
+	def get_deploy_information(self):
+		"""Return the deploy information for this marketplace app"""
+		# Checked against public Release Groups and their latest active Benches:
+		# on a release group but not on a bench yet -> "Awaiting Deploy"
+		deploy_info = {}
+
+		for source in self.sources:
+			version = source.version
+			deploy_info[version] = "Not Deployed"
+
+			release_groups = jingrow.get_all(
+				"Release Group", filters={"public": 1, "version": version}, pluck="name"
+			)
+
+			for rg_name in release_groups:
+				release_group = jingrow.get_pg("Release Group", rg_name)
+				sources_on_rg = [a.source for a in release_group.apps]
+
+				latest_active_bench = get_last_pg("Bench", filters={"status": "Active", "group": rg_name})
+
+				if latest_active_bench:
+					sources_on_bench = [a.source for a in latest_active_bench.apps]
+					if source.source in sources_on_bench:
+						# Is deployed on a bench
+						deploy_info[version] = "Deployed"
+
+				if (source.source in sources_on_rg) and (deploy_info[version] != "Deployed"):
+					# Added to release group, but not yet deployed to a bench
+					deploy_info[version] = "Awaiting Deploy"
+
+		return deploy_info
+
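Per version, the deploy state resolves to one of three values: "Deployed" when the app's source is on the latest active bench of a public release group, "Awaiting Deploy" when it is only on the release group, and "Not Deployed" otherwise. The core of that resolution as a tiny sketch (names illustrative):

def deploy_state(source, sources_on_release_groups, sources_on_latest_benches):
    """Both collection arguments hold App Source names."""
    if source in sources_on_latest_benches:
        return "Deployed"
    if source in sources_on_release_groups:
        return "Awaiting Deploy"
    return "Not Deployed"


print(deploy_state("src-1", {"src-1", "src-2"}, {"src-2"}))  # Awaiting Deploy
print(deploy_state("src-2", {"src-1", "src-2"}, {"src-2"}))  # Deployed
print(deploy_state("src-3", {"src-1", "src-2"}, {"src-2"}))  # Not Deployed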
+	def total_installs(self):
+		return jingrow.db.count("Site App", filters={"app": self.app})
+
+	def total_active_sites(self):
+		return jingrow.db.sql(
+			"""
+			SELECT
+				count(*)
+			FROM
+				tabSite site
+			LEFT JOIN
+				`tabSite App` app
+			ON
+				app.parent = site.name
+			WHERE
+				site.status = "Active" AND app.app = %s
+		""",
+			(self.app,),
+		)[0][0]
+
+	def total_active_benches(self):
+		return jingrow.db.sql(
+			"""
+			SELECT
+				count(*)
+			FROM
+				tabBench bench
+			LEFT JOIN
+				`tabBench App` app
+			ON
+				app.parent = bench.name
+			WHERE
+				bench.status = "Active" AND app.app = %s
+		""",
+			(self.app,),
+		)[0][0]
+
+	def get_payout_amount(self, status: str = "", total_for: str = "net_amount"):
+		"""Return the payout amount for this app"""
+		filters = {"recipient": self.team}
+		if status:
+			filters["status"] = status
+		payout_orders = jingrow.get_all("Payout Order", filters=filters, pluck="name")
+		payout = jingrow.get_all(
+			"Payout Order Item",
+			filters={"parent": ("in", payout_orders), "document_name": self.name},
+			fields=[
+				f"SUM(CASE WHEN currency = 'USD' THEN {total_for} ELSE 0 END) AS usd_amount",
+				f"SUM(CASE WHEN currency = 'CNY' THEN {total_for} ELSE 0 END) AS cny_amount",
+			],
+		)
+		return payout[0] if payout else {"usd_amount": 0, "cny_amount": 0}
+
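The payout query folds Payout Order Item rows into one USD total and one CNY total using conditional SUMs. The same aggregation in plain Python, assuming rows shaped like the fields used above ({'currency', 'net_amount', 'commission'}):

def payout_totals(items, total_for="net_amount"):
    totals = {"usd_amount": 0, "cny_amount": 0}
    for item in items:
        if item["currency"] == "USD":
            totals["usd_amount"] += item[total_for]
        elif item["currency"] == "CNY":
            totals["cny_amount"] += item[total_for]
    return totals


items = [
    {"currency": "USD", "net_amount": 20.0, "commission": 2.0},
    {"currency": "CNY", "net_amount": 150.0, "commission": 15.0},
    {"currency": "USD", "net_amount": 5.0, "commission": 0.5},
]
print(payout_totals(items))                          # {'usd_amount': 25.0, 'cny_amount': 150.0}
print(payout_totals(items, total_for="commission"))  # {'usd_amount': 2.5, 'cny_amount': 15.0}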
+	@dashboard_whitelist()
+	def site_installs(self):
+		site = jingrow.qb.PageType("Site")
+		site_app = jingrow.qb.PageType("Site App")
+		site_plan = jingrow.qb.PageType("Site Plan")
+		team = jingrow.qb.PageType("Team")
+
+		query = (
+			jingrow.qb.from_(site)
+			.left_join(team)
+			.on(team.name == site.team)
+			.left_outer_join(site_app)
+			.on(site.name == site_app.parent)
+			.left_outer_join(site_plan)
+			.on(site_app.plan == site_plan.name)
+			.select(site.name, site.plan, team.user)
+			.where((site.status == "Active") & (site_app.app == self.app) & (site_plan.price_usd >= 0))
+		)
+		return query.run(as_dict=True)
+
+	@dashboard_whitelist()
+	def listing_details(self):
+		return {
+			"support": self.support,
+			"website": self.website,
+			"documentation": self.documentation,
+			"privacy_policy": self.privacy_policy,
+			"terms_of_service": self.terms_of_service,
+			"description": self.description,
+			"long_description": self.long_description,
+			"screenshots": [screenshot.image for screenshot in self.screenshots],
+		}
+
+	@dashboard_whitelist()
+	def mark_app_ready_for_review(self):
+		# TODO: Start security check and auto deploy process here
+		self.review_stage = "Ready for Review"
+		self.save()
+
+	@dashboard_whitelist()
+	def update_listing(self, *args):
+		data = jingrow._dict(args[0])
+		self.title = data.get("title") or self.title
+		self.description = data.get("description")
+		self.long_description = data.get("long_description")
+		self.support = data.get("support")
+		self.website = data.get("website")
+		self.documentation = data.get("documentation")
+		self.privacy_policy = data.get("privacy_policy")
+		self.terms_of_service = data.get("terms_of_service")
+		self.save()
+
+	def get_analytics(self):
+		today = jingrow.utils.today()
+		last_week = jingrow.utils.add_days(today, -7)
+
+		return {
+			"total_installs": self.total_installs(),
+			"installs_active_sites": self.total_active_sites(),
+			"installs_active_benches": self.total_active_benches(),
+			"installs_last_week": jingrow.db.count(
+				"Site Activity",
+				{
+					"action": "Install App",
+					"reason": self.app,
+					"creation": (">=", last_week),
+				},
+			),
+			"total_payout": self.get_payout_amount(),
+			"paid_payout": self.get_payout_amount(status="Paid"),
+			"pending_payout": self.get_payout_amount(status="Draft"),
+			"commission": self.get_payout_amount(total_for="commission"),
+		}
+
+	def get_plans(self, jingrow_version: str | None = None) -> list:
+		return get_plans_for_app(self.name, jingrow_version)
+
+	def can_charge_for_subscription(self, subscription):
+		return subscription.enabled == 1 and subscription.team and subscription.team != "Administrator"
+
+
+def get_plans_for_app(
+	app_name, jingrow_version=None, include_free=True, include_disabled=False
+):  # Unused for now, might use later
+	plans = []
+	filters = {"app": app_name}
+
+	if not include_free:
+		filters["is_free"] = False
+
+	if not include_disabled:
+		filters["enabled"] = True
+
+	marketplace_app_plans = jingrow.get_all(
+		"Marketplace App Plan",
+		filters=filters,
+		fields=[
+			"name",
+			"title",
+			"enabled",
+			"price_cny",
+			"price_usd",
+		],
+	)
+
+	for app_plan in marketplace_app_plans:
+		plan_data = {}
+		plan_data.update(app_plan)
+		plan_data["features"] = get_app_plan_features(app_plan.name)
+		plans.append(plan_data)
+
+	plans.sort(key=lambda x: x["price_usd"])
+	plans.sort(key=lambda x: x["enabled"], reverse=True)  # Enabled Plans First
+
+	return plans
+
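The two consecutive sorts at the end of get_plans_for_app rely on Python's sort being stable: sorting by price first and then by enabled (descending) keeps the price order within each enabled/disabled group, so enabled plans come first, cheapest first. A quick illustration with made-up plans:

plans = [
    {"title": "Pro", "price_usd": 50, "enabled": 1},
    {"title": "Legacy", "price_usd": 5, "enabled": 0},
    {"title": "Basic", "price_usd": 10, "enabled": 1},
]

plans.sort(key=lambda x: x["price_usd"])
plans.sort(key=lambda x: x["enabled"], reverse=True)  # stable: enabled first, price order preserved

print([p["title"] for p in plans])  # ['Basic', 'Pro', 'Legacy']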
+
+def marketplace_app_hook(app=None, site: Site | None = None, op="install"):
+	if app is None:
+		site_apps = jingrow.get_all("Site App", filters={"parent": site.name}, pluck="app")
+		for app in site_apps:
+			run_script(app, site, op)
+	else:
+		run_script(app, site, op)
+
+
+def get_script_name(app, op):
+	if op == "install" and jingrow.db.get_value("Marketplace App", app, "run_after_install_script"):
+		return "after_install_script"
+
+	if op == "uninstall" and jingrow.db.get_value("Marketplace App", app, "run_after_uninstall_script"):
+		return "after_uninstall_script"
+	return ""
+
+
+def run_script(app, site: Site, op):
+	script = get_script_name(app, op)
+	if script:
+		script = jingrow.db.get_value("Marketplace App", app, script)
+		local = {"pg": site}
+		safe_exec(script, _locals=local)
+
+
+@redis_cache(ttl=60 * 60 * 24)
+def get_total_installs_by_app():
+	total_installs = jingrow.db.get_all(
+		"Site App",
+		fields=["app", "count(*) as count"],
+		group_by="app",
+		order_by=None,
+	)
+	return {installs["app"]: installs["count"] for installs in total_installs}
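get_total_installs_by_app groups Site App rows by app and counts them in SQL; the redis_cache decorator only memoizes the result for a day. The grouping itself is equivalent to a Counter over the app column, sketched here with made-up rows:

from collections import Counter

site_app_rows = [{"app": "app_a"}, {"app": "app_b"}, {"app": "app_a"}]

total_installs_by_app = dict(Counter(row["app"] for row in site_app_rows))
print(total_installs_by_app)  # {'app_a': 2, 'app_b': 1}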
diff --git a/jcloud/jcloud/pagetype/marketplace_app/patches/change_field_from_first_site_creation_to_site_creation.py b/jcloud/jcloud/pagetype/marketplace_app/patches/change_field_from_first_site_creation_to_site_creation.py
new file mode 100644
index 0000000..981ae66
--- /dev/null
+++ b/jcloud/jcloud/pagetype/marketplace_app/patches/change_field_from_first_site_creation_to_site_creation.py
@@ -0,0 +1,14 @@
+# Copyright (c) 2024, JINGROW
+# For license information, please see license.txt
+
+import jingrow
+import pymysql
+
+
+def execute():
+	try:
+		jingrow.db.sql(
+			"UPDATE `tabMarketplace App` SET show_for_site_creation = show_for_first_site_creation"
+		)
+	except pymysql.err.OperationalError:
+		pass
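The patch above is deliberately tolerant: if the old column has already been dropped, the UPDATE raises an OperationalError and the patch becomes a no-op, which keeps it safe to re-run. The same shape demonstrated with the stdlib sqlite3 module (the real patch runs against MariaDB through pymysql, so this is only an illustration of the error-handling pattern):

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE tab_app (show_for_site_creation INTEGER)")

try:
    # The source column does not exist on this schema, so the copy is skipped silently.
    conn.execute("UPDATE tab_app SET show_for_site_creation = show_for_first_site_creation")
except sqlite3.OperationalError:
    pass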
diff --git a/jcloud/jcloud/pagetype/marketplace_app/patches/convert_images_to_webp.py b/jcloud/jcloud/pagetype/marketplace_app/patches/convert_images_to_webp.py
new file mode 100644
index 0000000..4fb53d2
--- /dev/null
+++ b/jcloud/jcloud/pagetype/marketplace_app/patches/convert_images_to_webp.py
@@ -0,0 +1,61 @@
+# Copyright (c) 2024, JINGROW
+# For license information, please see license.txt
+
+from io import BytesIO
+
+import jingrow
+import requests
+from PIL import Image
+from tqdm import tqdm
+
+
+def execute():
+	IMAGE_FORMATS_TO_CONVERT = ["png", "jpeg", "jpg"]
+
+	def convert_to_webp(screenshot):
+		if screenshot.startswith("files") or screenshot.startswith("/files"):
+			image_content = jingrow.get_pg("File", {"file_url": screenshot}).get_content()
+			image = Image.open(BytesIO(image_content))
+		else:
+			# load from url
+			url = screenshot
+			response = requests.get(url, stream=True)
+			image = Image.open(response.raw)
+
+		image = image.convert("RGB")
+		filename = f"{screenshot.split('/')[-1].split('.')[0]}.webp"
+
+		# convert to bytes
+		image_bytes = BytesIO()
+		image.save(image_bytes, "webp")
+		image_bytes = image_bytes.getvalue()
+		_file = jingrow.get_pg(
+			{
+				"pagetype": "File",
+				"attached_to_field": "image",
+				"folder": "Home/Attachments",
+				"file_name": filename,
+				"is_private": 0,
+				"content": image_bytes,
+			}
+		)
+		_file.save(ignore_permissions=True)
+		return _file.file_url
+
+	marketplace_app_names = jingrow.get_all("Marketplace App", pluck="name")
+
+	for app_name in tqdm(marketplace_app_names):
+		app = jingrow.get_pg("Marketplace App", app_name)
+
+		if app.image and app.image.split(".")[-1] in IMAGE_FORMATS_TO_CONVERT:
+			app.image = convert_to_webp(app.image)
+
+		screenshots = app.screenshots
+
+		for screenshot in screenshots:
+			if screenshot.image.split(".")[-1] not in IMAGE_FORMATS_TO_CONVERT:
+				continue
+
+			screenshot.image = convert_to_webp(screenshot.image)
+
+		app.save()
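The conversion in the patch above is a few Pillow calls: open the image, flatten it to RGB, and re-encode it into an in-memory WEBP buffer. A standalone sketch that converts a local file and returns the WEBP bytes (the path in the usage note is a placeholder):

from io import BytesIO

from PIL import Image


def to_webp_bytes(path):
    """Re-encode an image file as WEBP and return the raw bytes."""
    image = Image.open(path)
    image = image.convert("RGB")  # drop alpha, as the patch does

    buffer = BytesIO()
    image.save(buffer, "webp")
    return buffer.getvalue()


# Usage (placeholder path):
# with open("screenshot.webp", "wb") as f:
#     f.write(to_webp_bytes("screenshot.png"))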
diff --git a/jcloud/jcloud/pagetype/marketplace_app/templates/marketplace_app.html b/jcloud/jcloud/pagetype/marketplace_app/templates/marketplace_app.html
new file mode 100644
index 0000000..557d468
--- /dev/null
+++ b/jcloud/jcloud/pagetype/marketplace_app/templates/marketplace_app.html
@@ -0,0 +1,296 @@
+{% extends "templates/marketplace/base.html" %}
+{%- from "templates/marketplace/macros.html" import button, link, breadcrumbs, badge_gray, badge_green,
+five_star_rating, approved_badge -%}
+
+{%- block title -%}
+{{ app.title }} - Jingrow Marketplace
+{%- endblock -%}
+
+{%- block content -%}
+
+
+
+
+ {{ breadcrumbs([ { 'label': 'Apps', 'url': '/marketplace' }, { 'label': + app.title, 'url': '' } ]) }} +
+
+
+ {{ app_image(app, 'hidden md:block') }} +
{{ app_image(app) }}
+
+

+ {{ app.title }} + {%- if app.jingrow_approved -%} +
+ {{ approved_badge() }} +
+ {%- endif -%} +

+

{{ app.description }}

+ +
+
+ +
+ {{ button('Install Now', '/dashboard/install-app/' + app.name, 'primary') }} +
+ + {{ no_of_installs | number_k_format }} {{ 'install' if no_of_installs + == 1 else 'installs' }} +
+
+
+
+
+
+
+ {{ sidebar() }} {{ main() }} +
+
+
+{%- endblock -%} + + +{% macro sidebar() %} + +{% endmacro %} + + +{% macro main() %} +
+ + {%- if app.screenshots -%} +
+ {%- for image in app.screenshots -%} + + {%- endfor -%} +
+ {%- endif -%} +
+ {{ jingrow.utils.md_to_html(app.long_description) }} +
+ + +
+
+
+

User Reviews

+ {{ button('Write a review', link='/dashboard/user-review/' + app.name) }} +
+ + {%- if (user_reviews | length) > 0 -%} +
+
+

{{ ratings_summary.avg_rating }}

+

+ {{ ratings_summary.total_num_reviews }} + {{ 'rating' if ratings_summary.total_num_reviews == 1 else 'ratings'}} +

+
+ {{ five_star_rating(ratings_summary.avg_rating) }} +
+

{{ ratings_summary.avg_rating }} out of 5

+
+ +
+ {%for i in range(5, 0, -1) %} +
+

{{i}} Star

+
+
+
+

{{ ratings_summary.rating_percentages[i] }}%

+
+ {% endfor %} +
+
+ + +
+ {% for review in user_reviews %} +
+
+

+ {{ review.title }} +

+
+ {{ five_star_rating(review.rating) }} +
+
+

+ {{ review.review }} +

+
+ {{ review.user_name }} • {{ jingrow.utils.pretty_date(review.creation) }} + • {{ link('Reply', url='/dashboard/developer-reply/' + app.name + '/' + review.name) }} +
+ {% for reply in review.developer_reply %} +
+ + + +
+ {% if reply[0] == ',' %} + {{ reply[1:] }} + {% else %} + {{ reply }} + {% endif %} + {% if reply %} +
Developer + {% endif %} +
+
+ {% endfor %} +
+
+
+ {% endfor %} +
+ {%- else -%} +
+

No reviews yet, be the first to review.

+
+ {%- endif -%} +
+
+
+{% endmacro %} + + +{% macro app_image(app, class='') %} +
+ {%- if app.image -%} + {{ app.title }} + {%- else -%} +
+ {{ app.title[0].upper() }} +
+ {%- endif -%} +
+{% endmacro %} + + +{% macro app_plans_list(plans) %} +
+ {%- for plan in plans -%} + {{ app_plan_card(plan) }} + {%- endfor -%} +
+{% endmacro %} + + +{% macro app_plan_card(plan) %} +
+
+

+ {%- if plan.is_free -%} + Free + {%- else -%} + + + ${{ jingrow.utils.rounded(plan.price_usd) }} + /mo + + {%- endif -%} +

+ +
+ {{ feature_list(plan.features) }} +
+
+ +
+ {{ button('Buy', '/dashboard/install-app/' + app.name, 'primary') }} +
+
+{% endmacro %} + + +{% macro feature_list(features) %} +
    + {%- for feature in features -%} +
  • +
    + +
    + + + +
    +
    + {{ feature }} +
  • + {%- endfor -%} +
+{% endmacro %} diff --git a/jcloud/jcloud/pagetype/marketplace_app/templates/marketplace_app_row.html b/jcloud/jcloud/pagetype/marketplace_app/templates/marketplace_app_row.html new file mode 100644 index 0000000..8ef1ac6 --- /dev/null +++ b/jcloud/jcloud/pagetype/marketplace_app/templates/marketplace_app_row.html @@ -0,0 +1,4 @@ + + diff --git a/jcloud/jcloud/pagetype/marketplace_app/test_marketplace_app.py b/jcloud/jcloud/pagetype/marketplace_app/test_marketplace_app.py new file mode 100644 index 0000000..31d28ca --- /dev/null +++ b/jcloud/jcloud/pagetype/marketplace_app/test_marketplace_app.py @@ -0,0 +1,71 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# See license.txt + +import unittest +from typing import Optional + +import jingrow + +from jcloud.jcloud.pagetype.marketplace_app.utils import ( + get_rating_percentage_distribution, + number_k_format, +) + + +def create_test_marketplace_app( + app: str, team: Optional[str] = None, sources: Optional[list[dict]] = None +): + marketplace_app = jingrow.get_pg( + { + "pagetype": "Marketplace App", + "app": app, + "description": "Test App", + "team": team, + "sources": sources, + } + ).insert(ignore_if_duplicate=True) + marketplace_app.reload() + return marketplace_app + + +class TestMarketplaceApp(unittest.TestCase): + def test_number_format_util(self): + test_cases_map = { + 0: "0", + 10: "10", + 999: "999", + 1000: "1k", + 8100: "8.1k", + 8900: "8.9k", + 8990: "9k", + 7102: "7.1k", + 10031: "10k", + 708609: "708.6k", + } + + for input_value, expected_output in test_cases_map.items(): + self.assertEqual(number_k_format(input_value), expected_output) + + def test_rating_percentage_distribution(self): + test_table = [ + { + "test_reviews": [{"rating": 4}, {"rating": 5}, {"rating": 1}], + "expected_result": {1: 33, 2: 0, 3: 0, 4: 33, 5: 33}, + }, + { + "test_reviews": [{"rating": 5}, {"rating": 5}, {"rating": 5}], + "expected_result": {1: 0, 2: 0, 3: 0, 4: 0, 5: 100}, + }, + { + "test_reviews": [], + "expected_result": {1: 0, 2: 0, 3: 0, 4: 0, 5: 0}, + }, + ] + + for test_case in test_table: + test_reviews = test_case["test_reviews"] + test_reviews = [jingrow._dict(r) for r in test_reviews] + got = get_rating_percentage_distribution(test_reviews) + + self.assertDictEqual(got, test_case["expected_result"]) diff --git a/jcloud/jcloud/pagetype/marketplace_app/utils.py b/jcloud/jcloud/pagetype/marketplace_app/utils.py new file mode 100644 index 0000000..9937c2f --- /dev/null +++ b/jcloud/jcloud/pagetype/marketplace_app/utils.py @@ -0,0 +1,42 @@ +# Copyright (c) 2021, JINGROW +from typing import Dict, List + +import jingrow + + +def number_k_format(number: int): + """Returns a '101.6k' like string representation""" + if number < 1000: + return str(number) + else: + value = jingrow.utils.rounded(number / 1000, precision=1) + + # To handle cases like 8.0, 9.0 etc. 
+ if value == (number // 1000): + value = int(value) + # To handle cases like 8999 -> 9k and not 9.0k + elif (value - 1) == (number // 1000): + value = int(value) + + return f"{value}k" + + +def get_rating_percentage_distribution(reviews: List) -> Dict: + """ + Takes a list of app reviews and returns percentage distribution for ratings 1-5 + """ + total_num_reviews = len(reviews) + rating_frequencies = dict((i, 0) for i in range(1, 6)) + + for review in reviews: + rating_frequencies[review.rating] += 1 + + if total_num_reviews > 0: + rating_percentages = dict( + (k, (v * 100 // total_num_reviews)) for k, v in rating_frequencies.items() + ) + return rating_percentages + else: + # To handle the case when no reviews are present + # All ratings are 0% + return rating_frequencies diff --git a/jcloud/jcloud/pagetype/marketplace_app_categories/__init__.py b/jcloud/jcloud/pagetype/marketplace_app_categories/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/marketplace_app_categories/marketplace_app_categories.js b/jcloud/jcloud/pagetype/marketplace_app_categories/marketplace_app_categories.js new file mode 100644 index 0000000..392ec21 --- /dev/null +++ b/jcloud/jcloud/pagetype/marketplace_app_categories/marketplace_app_categories.js @@ -0,0 +1,7 @@ +// Copyright (c) 2022, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Marketplace App Categories', { + // refresh: function(frm) { + // } +}); diff --git a/jcloud/jcloud/pagetype/marketplace_app_categories/marketplace_app_categories.json b/jcloud/jcloud/pagetype/marketplace_app_categories/marketplace_app_categories.json new file mode 100644 index 0000000..546d833 --- /dev/null +++ b/jcloud/jcloud/pagetype/marketplace_app_categories/marketplace_app_categories.json @@ -0,0 +1,32 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2022-02-22 14:07:05.005498", + "pagetype": "PageType", + "engine": "InnoDB", + "field_order": [ + "category" + ], + "fields": [ + { + "fieldname": "category", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Category", + "options": "Marketplace App Category", + "reqd": 1 + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2022-02-23 14:57:18.196374", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Marketplace App Categories", + "owner": "Administrator", + "permissions": [], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/marketplace_app_categories/marketplace_app_categories.py b/jcloud/jcloud/pagetype/marketplace_app_categories/marketplace_app_categories.py new file mode 100644 index 0000000..c2cda9c --- /dev/null +++ b/jcloud/jcloud/pagetype/marketplace_app_categories/marketplace_app_categories.py @@ -0,0 +1,23 @@ +# Copyright (c) 2022, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class MarketplaceAppCategories(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + category: DF.Link + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/marketplace_app_categories/test_marketplace_app_categories.py b/jcloud/jcloud/pagetype/marketplace_app_categories/test_marketplace_app_categories.py new file mode 100644 index 0000000..4dd0abb --- /dev/null +++ b/jcloud/jcloud/pagetype/marketplace_app_categories/test_marketplace_app_categories.py @@ -0,0 +1,9 @@ +# Copyright (c) 2022, JINGROW +# See license.txt + +# import jingrow +import unittest + + +class TestMarketplaceAppCategories(unittest.TestCase): + pass diff --git a/jcloud/jcloud/pagetype/marketplace_app_category/__init__.py b/jcloud/jcloud/pagetype/marketplace_app_category/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/marketplace_app_category/marketplace_app_category.js b/jcloud/jcloud/pagetype/marketplace_app_category/marketplace_app_category.js new file mode 100644 index 0000000..96d81eb --- /dev/null +++ b/jcloud/jcloud/pagetype/marketplace_app_category/marketplace_app_category.js @@ -0,0 +1,7 @@ +// Copyright (c) 2020, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Marketplace App Category', { + // refresh: function(frm) { + // } +}); diff --git a/jcloud/jcloud/pagetype/marketplace_app_category/marketplace_app_category.json b/jcloud/jcloud/pagetype/marketplace_app_category/marketplace_app_category.json new file mode 100644 index 0000000..f5642ef --- /dev/null +++ b/jcloud/jcloud/pagetype/marketplace_app_category/marketplace_app_category.json @@ -0,0 +1,52 @@ +{ + "actions": [], + "allow_rename": 1, + "autoname": "Prompt", + "creation": "2020-09-03 00:13:41.932937", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "description", + "slug" + ], + "fields": [ + { + "fieldname": "description", + "fieldtype": "Small Text", + "label": "Description" + }, + { + "fieldname": "slug", + "fieldtype": "Data", + "label": "Slug" + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2023-03-14 16:39:22.547107", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Marketplace App Category", + "naming_rule": "Set by user", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "quick_entry": 1, + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/marketplace_app_category/marketplace_app_category.py b/jcloud/jcloud/pagetype/marketplace_app_category/marketplace_app_category.py new file mode 100644 index 0000000..594297c --- /dev/null +++ b/jcloud/jcloud/pagetype/marketplace_app_category/marketplace_app_category.py @@ -0,0 +1,24 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +from jingrow.model.document import Document +from jingrow.website.utils import cleanup_page_name + + +class MarketplaceAppCategory(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + description: DF.SmallText | None + slug: DF.Data | None + # end: auto-generated types + + def before_insert(self): + self.slug = cleanup_page_name(self.name) diff --git a/jcloud/jcloud/pagetype/marketplace_app_category/test_marketplace_app_category.py b/jcloud/jcloud/pagetype/marketplace_app_category/test_marketplace_app_category.py new file mode 100644 index 0000000..d53e5be --- /dev/null +++ b/jcloud/jcloud/pagetype/marketplace_app_category/test_marketplace_app_category.py @@ -0,0 +1,11 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# See license.txt + + +# import jingrow +import unittest + + +class TestMarketplaceAppCategory(unittest.TestCase): + pass diff --git a/jcloud/jcloud/pagetype/marketplace_app_screenshot/marketplace_app_screenshot.json b/jcloud/jcloud/pagetype/marketplace_app_screenshot/marketplace_app_screenshot.json new file mode 100644 index 0000000..908c5ce --- /dev/null +++ b/jcloud/jcloud/pagetype/marketplace_app_screenshot/marketplace_app_screenshot.json @@ -0,0 +1,37 @@ +{ + "actions": [], + "creation": "2021-09-02 12:52:39.587791", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "image", + "caption" + ], + "fields": [ + { + "fieldname": "image", + "fieldtype": "Attach", + "in_list_view": 1, + "label": "Image" + }, + { + "fieldname": "caption", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Caption" + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2021-09-02 13:21:05.209666", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Marketplace App Screenshot", + "owner": "Administrator", + "permissions": [], + "sort_field": "modified", + "sort_order": "DESC", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/marketplace_app_screenshot/marketplace_app_screenshot.py b/jcloud/jcloud/pagetype/marketplace_app_screenshot/marketplace_app_screenshot.py new file mode 100644 index 0000000..fda3fcb --- /dev/null +++ b/jcloud/jcloud/pagetype/marketplace_app_screenshot/marketplace_app_screenshot.py @@ -0,0 +1,24 @@ +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class MarketplaceAppScreenshot(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + caption: DF.Data | None + image: DF.Attach | None + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/marketplace_app_version/__init__.py b/jcloud/jcloud/pagetype/marketplace_app_version/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/marketplace_app_version/marketplace_app_version.json b/jcloud/jcloud/pagetype/marketplace_app_version/marketplace_app_version.json new file mode 100644 index 0000000..ccfa7f9 --- /dev/null +++ b/jcloud/jcloud/pagetype/marketplace_app_version/marketplace_app_version.json @@ -0,0 +1,41 @@ +{ + "actions": [], + "creation": "2021-07-19 12:11:08.469343", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "version", + "source" + ], + "fields": [ + { + "fieldname": "version", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Version", + "options": "Jingrow Version", + "reqd": 1 + }, + { + "fieldname": "source", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Source", + "options": "App Source", + "reqd": 1 + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2021-07-19 12:11:08.469343", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Marketplace App Version", + "owner": "Administrator", + "permissions": [], + "sort_field": "modified", + "sort_order": "DESC", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/marketplace_app_version/marketplace_app_version.py b/jcloud/jcloud/pagetype/marketplace_app_version/marketplace_app_version.py new file mode 100644 index 0000000..a780b96 --- /dev/null +++ b/jcloud/jcloud/pagetype/marketplace_app_version/marketplace_app_version.py @@ -0,0 +1,24 @@ +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class MarketplaceAppVersion(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + source: DF.Link + version: DF.Link + # end: auto-generated types + + dashboard_fields = ["name", "version", "source"] diff --git a/jcloud/jcloud/pagetype/marketplace_localisation_app/__init__.py b/jcloud/jcloud/pagetype/marketplace_localisation_app/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/marketplace_localisation_app/marketplace_localisation_app.json b/jcloud/jcloud/pagetype/marketplace_localisation_app/marketplace_localisation_app.json new file mode 100644 index 0000000..7acf21b --- /dev/null +++ b/jcloud/jcloud/pagetype/marketplace_localisation_app/marketplace_localisation_app.json @@ -0,0 +1,38 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2024-09-11 22:58:01.233719", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "marketplace_app", + "country" + ], + "fields": [ + { + "fieldname": "marketplace_app", + "fieldtype": "Link", + "label": "Marketplace App", + "options": "Marketplace App" + }, + { + "fieldname": "country", + "fieldtype": "Link", + "label": "Country", + "options": "Country" + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2024-09-13 10:16:22.693868", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Marketplace Localisation App", + "owner": "Administrator", + "permissions": [], + "sort_field": "creation", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/marketplace_localisation_app/marketplace_localisation_app.py b/jcloud/jcloud/pagetype/marketplace_localisation_app/marketplace_localisation_app.py new file mode 100644 index 0000000..2e2318f --- /dev/null +++ b/jcloud/jcloud/pagetype/marketplace_localisation_app/marketplace_localisation_app.py @@ -0,0 +1,24 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class MarketplaceLocalisationApp(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + country: DF.Link | None + marketplace_app: DF.Link | None + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/module_setup_guide/__init__.py b/jcloud/jcloud/pagetype/module_setup_guide/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/module_setup_guide/module_setup_guide.js b/jcloud/jcloud/pagetype/module_setup_guide/module_setup_guide.js new file mode 100644 index 0000000..b80b72e --- /dev/null +++ b/jcloud/jcloud/pagetype/module_setup_guide/module_setup_guide.js @@ -0,0 +1,7 @@ +// Copyright (c) 2020, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Module Setup Guide', { + // refresh: function(frm) { + // } +}); diff --git a/jcloud/jcloud/pagetype/module_setup_guide/module_setup_guide.json b/jcloud/jcloud/pagetype/module_setup_guide/module_setup_guide.json new file mode 100644 index 0000000..95257fd --- /dev/null +++ b/jcloud/jcloud/pagetype/module_setup_guide/module_setup_guide.json @@ -0,0 +1,45 @@ +{ + "actions": [], + "creation": "2020-06-02 14:20:54.808451", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "enabled", + "industry", + "setup_guide" + ], + "fields": [ + { + "default": "0", + "fieldname": "enabled", + "fieldtype": "Check", + "label": "Enabled" + }, + { + "fieldname": "setup_guide", + "fieldtype": "Attach", + "in_list_view": 1, + "label": "Setup Guide" + }, + { + "fieldname": "industry", + "fieldtype": "Select", + "in_list_view": 1, + "label": "Industry", + "options": "\nManufacturing\nDistribution\nRetail\nServices\nEducation\nHealthcare\nNon Profit\nOther", + "reqd": 1 + } + ], + "istable": 1, + "links": [], + "modified": "2021-04-22 05:29:06.620370", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Module Setup Guide", + "owner": "Administrator", + "permissions": [], + "sort_field": "modified", + "sort_order": "DESC", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/module_setup_guide/module_setup_guide.py b/jcloud/jcloud/pagetype/module_setup_guide/module_setup_guide.py new file mode 100644 index 0000000..dfa6a38 --- /dev/null +++ b/jcloud/jcloud/pagetype/module_setup_guide/module_setup_guide.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +# import jingrow +from jingrow.model.document import Document + + +class ModuleSetupGuide(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + enabled: DF.Check + industry: DF.Literal[ + "", + "Manufacturing", + "Distribution", + "Retail", + "Services", + "Education", + "Healthcare", + "Non Profit", + "Other", + ] + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + setup_guide: DF.Attach | None + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/module_setup_guide/test_module_setup_guide.py b/jcloud/jcloud/pagetype/module_setup_guide/test_module_setup_guide.py new file mode 100644 index 0000000..676a20f --- /dev/null +++ b/jcloud/jcloud/pagetype/module_setup_guide/test_module_setup_guide.py @@ -0,0 +1,11 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# See license.txt + + +# import jingrow +import unittest + + +class TestModuleSetupGuide(unittest.TestCase): + pass diff --git a/jcloud/jcloud/pagetype/monitor_server/__init__.py b/jcloud/jcloud/pagetype/monitor_server/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/monitor_server/monitor_server.js b/jcloud/jcloud/pagetype/monitor_server/monitor_server.js new file mode 100644 index 0000000..3145480 --- /dev/null +++ b/jcloud/jcloud/pagetype/monitor_server/monitor_server.js @@ -0,0 +1,66 @@ +// Copyright (c) 2021, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Monitor Server', { + refresh: function (frm) { + [ + [__('Ping Agent'), 'ping_agent', false, frm.pg.is_server_setup], + [__('Ping Ansible'), 'ping_ansible', true], + [__('Ping Ansible Unprepared'), 'ping_ansible_unprepared', true], + [__('Update Agent'), 'update_agent', true, frm.pg.is_server_setup], + [__('Prepare Server'), 'prepare_server', true, !frm.pg.is_server_setup], + [__('Setup Server'), 'setup_server', true, !frm.pg.is_server_setup], + [__('Archive'), 'archive', true, frm.pg.provider === 'AWS EC2'], + [ + __('Reconfigure Monitor Server'), + 'reconfigure_monitor_server', + true, + frm.pg.is_server_setup, + ], + [ + __('Fetch Keys'), + 'fetch_keys', + false, + frm.pg.is_server_setup && + (!frm.pg.jingrow_public_key || !frm.pg.root_public_key), + ], + [ + __('Show Grafana Password'), + 'show_grafana_password', + false, + frm.pg.is_server_setup, + ], + [__('Update TLS Certificate'), 'update_tls_certificate', true], + ].forEach(([label, method, confirm, condition]) => { + if (typeof condition === 'undefined' || condition) { + frm.add_custom_button( + label, + () => { + if (confirm) { + jingrow.confirm( + `Are you sure you want to ${label.toLowerCase()}?`, + () => + frm.call(method).then((r) => { + if (r.message) { + jingrow.msgprint(r.message); + } else { + frm.refresh(); + } + }), + ); + } else { + frm.call(method).then((r) => { + if (r.message) { + jingrow.msgprint(r.message); + } else { + frm.refresh(); + } + }); + } + }, + __('Actions'), + ); + } + }); + }, +}); diff --git a/jcloud/jcloud/pagetype/monitor_server/monitor_server.json b/jcloud/jcloud/pagetype/monitor_server/monitor_server.json new file mode 100644 index 0000000..9290edd --- /dev/null +++ b/jcloud/jcloud/pagetype/monitor_server/monitor_server.json @@ -0,0 +1,263 @@ +{ + "actions": [], + "creation": "2021-03-30 16:42:06.785344", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "status", + "hostname", + "domain", + "column_break_4", + "cluster", + "provider", + "virtual_machine", + "is_server_setup", + "networking_section", + "ip", + "column_break_9", + "private_ip", + "private_mac_address", + 
"private_vlan_id", + "agent_section", + "agent_password", + "grafana_section", + "grafana_username", + "default_server", + "column_break_ilpd", + "grafana_password", + "node_exporter_dashboard_path", + "ssh_section", + "jingrow_user_password", + "jingrow_public_key", + "column_break_20", + "root_public_key", + "monitoring_section", + "monitoring_password", + "column_break_nzet", + "prometheus_data_directory" + ], + "fields": [ + { + "default": "Pending", + "fieldname": "status", + "fieldtype": "Select", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Status", + "options": "Pending\nInstalling\nActive\nBroken\nArchived", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "hostname", + "fieldtype": "Data", + "label": "Hostname", + "reqd": 1, + "set_only_once": 1 + }, + { + "fieldname": "domain", + "fieldtype": "Link", + "label": "Domain", + "options": "Root Domain", + "read_only": 1, + "set_only_once": 1 + }, + { + "fieldname": "column_break_4", + "fieldtype": "Column Break" + }, + { + "default": "Generic", + "fieldname": "provider", + "fieldtype": "Select", + "label": "Provider", + "options": "Generic\nScaleway\nAWS EC2\nOCI", + "set_only_once": 1 + }, + { + "default": "0", + "fieldname": "is_server_setup", + "fieldtype": "Check", + "label": "Server Setup", + "read_only": 1 + }, + { + "fieldname": "networking_section", + "fieldtype": "Section Break", + "label": "Networking" + }, + { + "fetch_from": "virtual_machine.public_ip_address", + "fieldname": "ip", + "fieldtype": "Data", + "in_list_view": 1, + "label": "IP", + "set_only_once": 1 + }, + { + "fieldname": "column_break_9", + "fieldtype": "Column Break" + }, + { + "fetch_from": "virtual_machine.private_ip_address", + "fieldname": "private_ip", + "fieldtype": "Data", + "label": "Private IP", + "reqd": 1, + "set_only_once": 1 + }, + { + "depends_on": "eval: pg.provider === \"Scaleway\"", + "fieldname": "private_mac_address", + "fieldtype": "Data", + "label": "Private Mac Address", + "mandatory_depends_on": "eval: pg.provider === \"Scaleway\"", + "set_only_once": 1 + }, + { + "depends_on": "eval: pg.provider === \"Scaleway\"", + "fieldname": "private_vlan_id", + "fieldtype": "Data", + "label": "Private VLAN ID", + "mandatory_depends_on": "eval: pg.provider === \"Scaleway\"", + "set_only_once": 1 + }, + { + "fieldname": "agent_section", + "fieldtype": "Section Break", + "label": "Agent" + }, + { + "fieldname": "agent_password", + "fieldtype": "Password", + "label": "Agent Password", + "set_only_once": 1 + }, + { + "fieldname": "grafana_section", + "fieldtype": "Section Break", + "label": "Grafana" + }, + { + "fieldname": "grafana_password", + "fieldtype": "Password", + "label": "Grafana Password", + "set_only_once": 1 + }, + { + "fieldname": "ssh_section", + "fieldtype": "Section Break", + "label": "SSH" + }, + { + "fieldname": "jingrow_user_password", + "fieldtype": "Password", + "label": "Jingrow User Password", + "set_only_once": 1 + }, + { + "fieldname": "jingrow_public_key", + "fieldtype": "Code", + "label": "Jingrow Public Key", + "read_only": 1 + }, + { + "fieldname": "column_break_20", + "fieldtype": "Column Break" + }, + { + "fieldname": "root_public_key", + "fieldtype": "Code", + "label": "Root Public Key", + "read_only": 1 + }, + { + "fieldname": "monitoring_section", + "fieldtype": "Section Break", + "label": "Monitoring", + "set_only_once": 1 + }, + { + "fieldname": "monitoring_password", + "fieldtype": "Password", + "label": "Monitoring Password", + "set_only_once": 1 + }, + { + "depends_on": "eval:pg.provider 
=== \"AWS EC2\"", + "fieldname": "virtual_machine", + "fieldtype": "Link", + "label": "Virtual Machine", + "mandatory_depends_on": "eval:pg.provider === \"AWS EC2\"", + "options": "Virtual Machine" + }, + { + "fieldname": "cluster", + "fieldtype": "Link", + "label": "Cluster", + "options": "Cluster" + }, + { + "fieldname": "column_break_nzet", + "fieldtype": "Column Break" + }, + { + "default": "/home/jingrow/prometheus/data", + "fieldname": "prometheus_data_directory", + "fieldtype": "Data", + "label": "Prometheus Data Directory" + }, + { + "fieldname": "grafana_username", + "fieldtype": "Data", + "label": "Grafana Username" + }, + { + "fieldname": "column_break_ilpd", + "fieldtype": "Column Break" + }, + { + "description": "This is the server that is selected by default in node exporter dashboard due to sorting. Used by incident detection to collect data.", + "fieldname": "default_server", + "fieldtype": "Data", + "label": "Default Server" + }, + { + "description": "Begin with / but don't end with /", + "fieldname": "node_exporter_dashboard_path", + "fieldtype": "Data", + "label": "Node Exporter Dashboard Path" + } + ], + "links": [ + { + "link_pagetype": "Ansible Play", + "link_fieldname": "server" + } + ], + "modified": "2025-01-24 00:35:26.093683", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Monitor Server", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/monitor_server/monitor_server.py b/jcloud/jcloud/pagetype/monitor_server/monitor_server.py new file mode 100644 index 0000000..64af11b --- /dev/null +++ b/jcloud/jcloud/pagetype/monitor_server/monitor_server.py @@ -0,0 +1,204 @@ +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt +from __future__ import annotations + +import json + +import jingrow + +from jcloud.jcloud.pagetype.server.server import BaseServer +from jcloud.runner import Ansible +from jcloud.utils import log_error + + +class MonitorServer(BaseServer): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + agent_password: DF.Password | None + cluster: DF.Link | None + default_server: DF.Data | None + domain: DF.Link | None + jingrow_public_key: DF.Code | None + jingrow_user_password: DF.Password | None + grafana_password: DF.Password | None + grafana_username: DF.Data | None + hostname: DF.Data + ip: DF.Data | None + is_server_setup: DF.Check + monitoring_password: DF.Password | None + node_exporter_dashboard_path: DF.Data | None + private_ip: DF.Data + private_mac_address: DF.Data | None + private_vlan_id: DF.Data | None + prometheus_data_directory: DF.Data | None + provider: DF.Literal["Generic", "Scaleway", "AWS EC2", "OCI"] + root_public_key: DF.Code | None + status: DF.Literal["Pending", "Installing", "Active", "Broken", "Archived"] + virtual_machine: DF.Link | None + # end: auto-generated types + + def validate(self): + self.validate_agent_password() + self.validate_grafana_password() + self.validate_monitoring_password() + + def validate_monitoring_password(self): + if not self.monitoring_password: + self.monitoring_password = jingrow.generate_hash() + + def validate_grafana_password(self): + if not self.grafana_password: + self.grafana_password = jingrow.generate_hash(length=32) + + def _setup_server(self): + agent_password = self.get_password("agent_password") + agent_repository_url = self.get_agent_repository_url() + monitoring_password = self.get_password("monitoring_password") + certificate_name = jingrow.db.get_value( + "TLS Certificate", {"wildcard": True, "domain": self.domain}, "name" + ) + certificate = jingrow.get_pg("TLS Certificate", certificate_name) + + registries = [] + for registry in jingrow.get_all("Registry Server"): + registry = jingrow.get_pg("Registry Server", registry.name) + registries.append( + { + "name": registry.name, + "monitoring_password": registry.get_password("monitoring_password"), + } + ) + + log_servers = [] + for log in jingrow.get_all("Log Server"): + log = jingrow.get_pg("Log Server", log.name) + log_servers.append( + { + "name": log.name, + "monitoring_password": log.get_password("monitoring_password"), + } + ) + + clusters = [] + for cluster in jingrow.get_all("Cluster"): + cluster = jingrow.get_pg("Cluster", cluster.name) + clusters.append( + { + "name": cluster.name, + "monitoring_password": cluster.get_password("monitoring_password"), + } + ) + jcloud_url = jingrow.utils.get_url() + settings = jingrow.get_single("Jcloud Settings") + monitor_token = settings.monitor_token + jcloud_monitoring_password = settings.get_password("jcloud_monitoring_password") + try: + ansible = Ansible( + playbook="monitor.yml", + server=self, + variables={ + "server": self.name, + "workers": 1, + "domain": self.domain, + "agent_password": agent_password, + "agent_repository_url": agent_repository_url, + "monitor": True, + "monitoring_password": monitoring_password, + "jcloud_monitoring_password": jcloud_monitoring_password, + "jcloud_app_server": jingrow.local.site, + "jcloud_db_server": f"db.{jingrow.local.site}", + "jcloud_db_replica_server": f"db2.{jingrow.local.site}" if jingrow.conf.replica_host else "", + "jcloud_url": jcloud_url, + "prometheus_data_directory": self.prometheus_data_directory, + "monitor_token": monitor_token, + "registries_json": json.dumps(registries), + "log_servers_json": json.dumps(log_servers), + "clusters_json": json.dumps(clusters), + "private_ip": self.private_ip, + "grafana_password": self.get_password("grafana_password"), + 
"certificate_private_key": certificate.private_key, + "certificate_full_chain": certificate.full_chain, + "certificate_intermediate_chain": certificate.intermediate_chain, + }, + ) + play = ansible.run() + self.reload() + if play.status == "Success": + self.status = "Active" + self.is_server_setup = True + else: + self.status = "Broken" + except Exception: + self.status = "Broken" + log_error("Monitor Server Setup Exception", server=self.as_dict()) + self.save() + + @jingrow.whitelist() + def reconfigure_monitor_server(self): + jingrow.enqueue_pg(self.pagetype, self.name, "_reconfigure_monitor_server", queue="long", timeout=1200) + + def _reconfigure_monitor_server(self): + settings = jingrow.get_single("Jcloud Settings") + jcloud_monitoring_password = settings.get_password("jcloud_monitoring_password") + monitoring_password = self.get_password("monitoring_password") + + registries = [] + for registry in jingrow.get_all("Registry Server"): + registry = jingrow.get_pg("Registry Server", registry.name) + registries.append( + { + "name": registry.name, + "monitoring_password": registry.get_password("monitoring_password"), + } + ) + + log_servers = [] + for log in jingrow.get_all("Log Server"): + log = jingrow.get_pg("Log Server", log.name) + log_servers.append( + { + "name": log.name, + "monitoring_password": log.get_password("monitoring_password"), + } + ) + + clusters = [] + for cluster in jingrow.get_all("Cluster"): + cluster = jingrow.get_pg("Cluster", cluster.name) + clusters.append( + { + "name": cluster.name, + "monitoring_password": cluster.get_password("monitoring_password"), + } + ) + + try: + ansible = Ansible( + playbook="reconfigure_monitoring.yml", + server=self, + variables={ + "server": self.name, + "monitoring_password": monitoring_password, + "jcloud_monitoring_password": jcloud_monitoring_password, + "jcloud_app_server": jingrow.local.site, + "jcloud_db_server": f"db.{jingrow.local.site}", + "jcloud_db_replica_server": f"db2.{jingrow.local.site}" if jingrow.conf.replica_host else "", + "registries_json": json.dumps(registries), + "log_servers_json": json.dumps(log_servers), + "clusters_json": json.dumps(clusters), + "grafana_password": self.get_password("grafana_password"), + }, + ) + ansible.run() + except Exception: + log_error("Monitoring Server Setup Exception", server=self.as_dict()) + + @jingrow.whitelist() + def show_grafana_password(self): + return self.get_password("grafana_password") diff --git a/jcloud/jcloud/pagetype/monitor_server/test_monitor_server.py b/jcloud/jcloud/pagetype/monitor_server/test_monitor_server.py new file mode 100644 index 0000000..12ccf5c --- /dev/null +++ b/jcloud/jcloud/pagetype/monitor_server/test_monitor_server.py @@ -0,0 +1,11 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2021, JINGROW +# See license.txt + + +# import jingrow +import unittest + + +class TestMonitorServer(unittest.TestCase): + pass diff --git a/jcloud/jcloud/pagetype/mpesa_payment_record/__init__.py b/jcloud/jcloud/pagetype/mpesa_payment_record/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/mpesa_payment_record/mpesa_payment_record.js b/jcloud/jcloud/pagetype/mpesa_payment_record/mpesa_payment_record.js new file mode 100644 index 0000000..b83c86c --- /dev/null +++ b/jcloud/jcloud/pagetype/mpesa_payment_record/mpesa_payment_record.js @@ -0,0 +1,8 @@ +// Copyright (c) 2025, JINGROW +// For license information, please see license.txt + +// jingrow.ui.form.on("Mpesa Payment Record", { +// refresh(frm) { + +// }, +// }); diff --git 
a/jcloud/jcloud/pagetype/mpesa_payment_record/mpesa_payment_record.json b/jcloud/jcloud/pagetype/mpesa_payment_record/mpesa_payment_record.json new file mode 100644 index 0000000..be3e8a4 --- /dev/null +++ b/jcloud/jcloud/pagetype/mpesa_payment_record/mpesa_payment_record.json @@ -0,0 +1,218 @@ +{ + "actions": [], + "allow_copy": 1, + "allow_import": 1, + "autoname": "MP.-.YY.-.MM.-.####", + "creation": "2025-01-18 10:49:30.186896", + "default_view": "List", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "team", + "merchant_request_id", + "transaction_id", + "transaction_type", + "transaction_time", + "grand_total", + "amount", + "amount_usd", + "mpesa_receipt_number", + "exchange_rate", + "column_break_14", + "phone_number", + "payment_partner", + "invoice_number", + "posting_date", + "posting_time", + "default_currency", + "amended_from", + "balance_transaction", + "local_invoice" + ], + "fields": [ + { + "fieldname": "team", + "fieldtype": "Link", + "label": "Team", + "options": "Team" + }, + { + "fieldname": "merchant_request_id", + "fieldtype": "Data", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Request ID", + "no_copy": 1, + "read_only": 1 + }, + { + "fieldname": "transaction_type", + "fieldtype": "Select", + "label": "Transaction Type", + "options": "\nMpesa Exjcloud\nMpesa C2B" + }, + { + "fieldname": "grand_total", + "fieldtype": "Currency", + "label": "累计 (Ksh)" + }, + { + "fieldname": "amount_usd", + "fieldtype": "Float", + "label": "Amount (USD)" + }, + { + "fieldname": "exchange_rate", + "fieldtype": "Float", + "label": "Exchange Rate", + "precision": "9" + }, + { + "fieldname": "column_break_14", + "fieldtype": "Column Break" + }, + { + "fieldname": "payment_partner", + "fieldtype": "Link", + "label": "Payment Partner", + "options": "Team" + }, + { + "fieldname": "invoice_number", + "fieldtype": "Data", + "label": "Invoice Number", + "no_copy": 1, + "read_only": 1 + }, + { + "default": "Today", + "fieldname": "posting_date", + "fieldtype": "Date", + "in_list_view": 1, + "in_preview": 1, + "in_standard_filter": 1, + "label": "Posting Date", + "no_copy": 1, + "read_only": 1 + }, + { + "default": "Now", + "fieldname": "posting_time", + "fieldtype": "Time", + "label": "Posting Time", + "no_copy": 1, + "read_only": 1 + }, + { + "default": "KES", + "fetch_from": "company.default_currency", + "fieldname": "default_currency", + "fieldtype": "Data", + "label": "Default Currency", + "read_only": 1 + }, + { + "fieldname": "amended_from", + "fieldtype": "Link", + "label": "Amended From", + "no_copy": 1, + "options": "Mpesa Payment Record", + "print_hide": 1, + "read_only": 1 + }, + { + "fieldname": "balance_transaction", + "fieldtype": "Link", + "label": "Balance Transaction", + "options": "Balance Transaction" + }, + { + "fieldname": "local_invoice", + "fieldtype": "Small Text", + "label": "Local Invoice" + }, + { + "fieldname": "transaction_id", + "fieldtype": "Data", + "label": "Transaction ID", + "no_copy": 1, + "read_only": 1 + }, + { + "fieldname": "transaction_time", + "fieldtype": "Datetime", + "label": "Transaction Time", + "no_copy": 1, + "read_only": 1 + }, + { + "fieldname": "amount", + "fieldtype": "Float", + "in_list_view": 1, + "in_preview": 1, + "in_standard_filter": 1, + "label": "Amount (Ksh)", + "no_copy": 1, + "read_only": 1 + }, + { + "fieldname": "mpesa_receipt_number", + "fieldtype": "Data", + "label": "Mpesa Receipt Number", + "no_copy": 1, + "read_only": 1 + }, + { + "fieldname": "phone_number", + 
"fieldtype": "Data", + "in_list_view": 1, + "in_preview": 1, + "in_standard_filter": 1, + "label": "Phone Number", + "no_copy": 1, + "read_only": 1 + } + ], + "in_create": 1, + "index_web_pages_for_search": 1, + "is_submittable": 1, + "links": [], + "modified": "2025-02-02 17:52:49.719724", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Mpesa Payment Record", + "naming_rule": "Expression (old style)", + "owner": "Administrator", + "permissions": [ + { + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + }, + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "Jcloud Admin", + "share": 1, + "write": 1 + } + ], + "show_preview_popup": 1, + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "title_field": "transaction_id", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/mpesa_payment_record/mpesa_payment_record.py b/jcloud/jcloud/pagetype/mpesa_payment_record/mpesa_payment_record.py new file mode 100644 index 0000000..5eb4716 --- /dev/null +++ b/jcloud/jcloud/pagetype/mpesa_payment_record/mpesa_payment_record.py @@ -0,0 +1,59 @@ +# Copyright (c) 2025, JINGROW +# For license information, please see license.txt +from __future__ import annotations + +import jingrow +from jingrow.model.document import Document + + +class MpesaPaymentRecord(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + amended_from: DF.Link | None + amount: DF.Float + amount_usd: DF.Float + balance_transaction: DF.Link | None + default_currency: DF.Data | None + exchange_rate: DF.Float + grand_total: DF.Currency + invoice_number: DF.Data | None + local_invoice: DF.SmallText | None + merchant_request_id: DF.Data | None + mpesa_receipt_number: DF.Data | None + payment_partner: DF.Link | None + phone_number: DF.Data | None + posting_date: DF.Date | None + posting_time: DF.Time | None + team: DF.Link | None + transaction_id: DF.Data | None + transaction_time: DF.Datetime | None + transaction_type: DF.Literal["", "Mpesa Exjcloud", "Mpesa C2B"] + # end: auto-generated types + + dashboard_fields = ( + "name", + "posting_date", + "amount", + "default_currency", + "local_invoice", + "amount_usd", + "payment_partner", + "exchange_rate", + "grand_total", + ) + + def before_insert(self): + self.validate_duplicate() + + def validate_duplicate(self): + if jingrow.db.exists( + "Mpesa Payment Record", + {"transaction_id": self.transaction_id, "docstatus": 1}, + ): + jingrow.throw(f"Mpesa Payment Record for transaction {self.transaction_id} already exists") diff --git a/jcloud/jcloud/pagetype/mpesa_payment_record/test_mpesa_payment_record.py b/jcloud/jcloud/pagetype/mpesa_payment_record/test_mpesa_payment_record.py new file mode 100644 index 0000000..650b4c6 --- /dev/null +++ b/jcloud/jcloud/pagetype/mpesa_payment_record/test_mpesa_payment_record.py @@ -0,0 +1,29 @@ +# Copyright (c) 2025, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests import IntegrationTestCase, UnitTestCase + +# On IntegrationTestCase, the pagetype test records and all +# link-field test record depdendencies are recursively loaded +# Use these module variables to add/remove to/from that list +EXTRA_TEST_RECORD_DEPENDENCIES = [] # eg. 
["User"] +IGNORE_TEST_RECORD_DEPENDENCIES = [] # eg. ["User"] + + +class UnitTestMpesaPaymentRecord(UnitTestCase): + """ + Unit tests for MpesaPaymentRecord. + Use this class for testing individual functions and methods. + """ + + pass + + +class IntegrationTestMpesaPaymentRecord(IntegrationTestCase): + """ + Integration tests for MpesaPaymentRecord. + Use this class for testing interactions between multiple components. + """ + + pass diff --git a/jcloud/jcloud/pagetype/mpesa_request_log/__init__.py b/jcloud/jcloud/pagetype/mpesa_request_log/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/mpesa_request_log/mpesa_request_log.js b/jcloud/jcloud/pagetype/mpesa_request_log/mpesa_request_log.js new file mode 100644 index 0000000..fe14521 --- /dev/null +++ b/jcloud/jcloud/pagetype/mpesa_request_log/mpesa_request_log.js @@ -0,0 +1,8 @@ +// Copyright (c) 2025, JINGROW +// For license information, please see license.txt + +// jingrow.ui.form.on("Mpesa Request Log", { +// refresh(frm) { + +// }, +// }); diff --git a/jcloud/jcloud/pagetype/mpesa_request_log/mpesa_request_log.json b/jcloud/jcloud/pagetype/mpesa_request_log/mpesa_request_log.json new file mode 100644 index 0000000..1c9de3d --- /dev/null +++ b/jcloud/jcloud/pagetype/mpesa_request_log/mpesa_request_log.json @@ -0,0 +1,138 @@ +{ + "actions": [], + "creation": "2025-01-18 10:47:18.786442", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "request_id", + "integration_request_service", + "is_remote_request", + "column_break_5", + "request_description", + "status", + "section_break_8", + "url", + "request_headers", + "data", + "response_section", + "output", + "error" + ], + "fields": [ + { + "fieldname": "request_id", + "fieldtype": "Data", + "label": "Request ID", + "read_only": 1 + }, + { + "fieldname": "integration_request_service", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Service", + "read_only": 1 + }, + { + "default": "0", + "fieldname": "is_remote_request", + "fieldtype": "Check", + "label": "Is Remote Request?", + "read_only": 1 + }, + { + "fieldname": "column_break_5", + "fieldtype": "Column Break" + }, + { + "fieldname": "request_description", + "fieldtype": "Data", + "label": "Request Description", + "read_only": 1 + }, + { + "default": "Queued", + "fieldname": "status", + "fieldtype": "Select", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Status", + "options": "\nQueued\nAuthorized\nCompleted\nCancelled\nFailed", + "read_only": 1 + }, + { + "fieldname": "section_break_8", + "fieldtype": "Section Break" + }, + { + "fieldname": "url", + "fieldtype": "Small Text", + "label": "URL", + "read_only": 1 + }, + { + "fieldname": "request_headers", + "fieldtype": "Code", + "label": "Request Headers", + "read_only": 1 + }, + { + "fieldname": "data", + "fieldtype": "Code", + "label": "Request Data", + "read_only": 1 + }, + { + "fieldname": "response_section", + "fieldtype": "Section Break", + "label": "Response" + }, + { + "fieldname": "output", + "fieldtype": "Code", + "label": "Output", + "read_only": 1 + }, + { + "fieldname": "error", + "fieldtype": "Code", + "label": "Error", + "read_only": 1 + } + ], + "in_create": 1, + "links": [], + "modified": "2025-02-02 18:48:36.387604", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Mpesa Request Log", + "owner": "Administrator", + "permissions": [ + { + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", 
+ "select": 1, + "share": 1 + }, + { + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "Jcloud Admin", + "share": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "title_field": "request_id", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/mpesa_request_log/mpesa_request_log.py b/jcloud/jcloud/pagetype/mpesa_request_log/mpesa_request_log.py new file mode 100644 index 0000000..3d2ed18 --- /dev/null +++ b/jcloud/jcloud/pagetype/mpesa_request_log/mpesa_request_log.py @@ -0,0 +1,29 @@ +# Copyright (c) 2025, JINGROW +# For license information, please see license.txt +from __future__ import annotations + +from jingrow.model.document import Document + + +class MpesaRequestLog(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + data: DF.Code | None + error: DF.Code | None + integration_request_service: DF.Data | None + is_remote_request: DF.Check + output: DF.Code | None + request_description: DF.Data | None + request_headers: DF.Code | None + request_id: DF.Data | None + status: DF.Literal["", "Queued", "Authorized", "Completed", "Cancelled", "Failed"] + url: DF.SmallText | None + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/mpesa_request_log/test_mpesa_request_log.py b/jcloud/jcloud/pagetype/mpesa_request_log/test_mpesa_request_log.py new file mode 100644 index 0000000..a2c07e8 --- /dev/null +++ b/jcloud/jcloud/pagetype/mpesa_request_log/test_mpesa_request_log.py @@ -0,0 +1,29 @@ +# Copyright (c) 2025, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests import IntegrationTestCase, UnitTestCase + +# On IntegrationTestCase, the pagetype test records and all +# link-field test record depdendencies are recursively loaded +# Use these module variables to add/remove to/from that list +EXTRA_TEST_RECORD_DEPENDENCIES = [] # eg. ["User"] +IGNORE_TEST_RECORD_DEPENDENCIES = [] # eg. ["User"] + + +class UnitTestMpesaRequestLog(UnitTestCase): + """ + Unit tests for MpesaRequestLog. + Use this class for testing individual functions and methods. + """ + + pass + + +class IntegrationTestMpesaRequestLog(IntegrationTestCase): + """ + Integration tests for MpesaRequestLog. + Use this class for testing interactions between multiple components. + """ + + pass diff --git a/jcloud/jcloud/pagetype/mpesa_setup/__init__.py b/jcloud/jcloud/pagetype/mpesa_setup/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/mpesa_setup/account_balance.html b/jcloud/jcloud/pagetype/mpesa_setup/account_balance.html new file mode 100644 index 0000000..6614cab --- /dev/null +++ b/jcloud/jcloud/pagetype/mpesa_setup/account_balance.html @@ -0,0 +1,27 @@ +{% if not jQuery.isEmptyObject(data) %} +
{{ __("Balance Details") }}
+ + + + + + + + + + + + {% for(const [key, value] of Object.entries(data)) { %} + + + + + + + + {% } %} + +
{{ __("Account Type") }}{{ __("Current Balance") }}{{ __("Available Balance") }}{{ __("Reserved Balance") }}{{ __("Uncleared Balance") }}
{%= key %} {%= value["current_balance"] %} {%= value["available_balance"] %} {%= value["reserved_balance"] %} {%= value["uncleared_balance"] %}
+{% else %} +

Account Balance Information Not Available.

+{% endif %} diff --git a/jcloud/jcloud/pagetype/mpesa_setup/mpesa_connector.py b/jcloud/jcloud/pagetype/mpesa_setup/mpesa_connector.py new file mode 100644 index 0000000..29dfabc --- /dev/null +++ b/jcloud/jcloud/pagetype/mpesa_setup/mpesa_connector.py @@ -0,0 +1,145 @@ +import base64 +import datetime + +import requests +from requests.auth import HTTPBasicAuth + + +class MpesaConnector: + def __init__( + self, + env="sandbox", + app_key=None, + app_secret=None, + sandbox_url="https://sandbox.safaricom.co.ke", + live_url="https://api.safaricom.co.ke", + ): + """Setup configuration for Mpesa connector and generate new access token.""" + self.env = env + self.app_key = app_key + self.app_secret = app_secret + if env == "sandbox": + self.base_url = sandbox_url + else: + self.base_url = live_url + self.authenticate() + + def authenticate(self): + """ + This method is used to fetch the access token required by Mpesa. + + Returns: + access_token (str): This token is to be used with the Bearer header for further API calls to Mpesa. + """ + authenticate_uri = "/oauth/v1/generate?grant_type=client_credentials" + authenticate_url = f"{self.base_url}{authenticate_uri}" + r = requests.get(authenticate_url, auth=HTTPBasicAuth(self.app_key, self.app_secret)) + self.authentication_token = r.json()["access_token"] + return r.json()["access_token"] + + def get_balance( + self, + initiator=None, + security_credential=None, + party_a=None, + identifier_type=None, + remarks=None, + queue_timeout_url=None, + result_url=None, + ): + """ + This method uses Mpesa's Account Balance API to to enquire the balance on a MPesa BuyGoods (Till Number). + + Args: + initiator (str): Username used to authenticate the transaction. + security_credential (str): Generate from developer portal. + command_id (str): AccountBalance. + party_a (int): Till number being queried. + identifier_type (int): Type of organization receiving the transaction. (MSISDN/Till Number/Organization short code) + remarks (str): Comments that are sent along with the transaction(maximum 100 characters). + queue_timeout_url (str): The url that handles information of timed out transactions. + result_url (str): The url that receives results from MPesa api call. + + Returns: + OriginatorConverstionID (str): The unique request ID for tracking a transaction. + ConversationID (str): The unique request ID returned by mpesa for each request made + ResponseDescription (str): Response Description message + """ + + payload = { + "Initiator": initiator, + "SecurityCredential": security_credential, + "CommandID": "AccountBalance", + "PartyA": party_a, + "IdentifierType": identifier_type, + "Remarks": remarks, + "QueueTimeOutURL": queue_timeout_url, + "ResultURL": result_url, + } + headers = { + "Authorization": f"Bearer {self.authentication_token}", + "Content-Type": "application/json", + } + saf_url = "{}{}".format(self.base_url, "/mpesa/accountbalance/v1/query") + r = requests.post(saf_url, headers=headers, json=payload) + return r.json() + + def stk_push( + self, + business_shortcode=None, + passcode=None, + amount=None, + callback_url=None, + reference_code=None, + phone_number=None, + description=None, + ): + """ + This method uses Mpesa's Exjcloud API to initiate online payment on behalf of a customer. + + Args: + business_shortcode (int): The short code of the organization. 
+ passcode (str): Get from developer portal + amount (int): The amount being transacted + callback_url (str): A CallBack URL is a valid secure URL that is used to receive notifications from MPesa API. + reference_code(str): Account Reference: This is an Alpha-Numeric parameter that is defined by your system as an Identifier of the transaction for CustomerPayBillOnline transaction type. + phone_number(int): The Mobile Number to receive the STK Pin Prompt. + description(str): This is any additional information/comment that can be sent along with the request from your system. MAX 13 characters + + Success Response: + CustomerMessage(str): Messages that customers can understand. + CheckoutRequestID(str): This is a global unique identifier of the processed checkout transaction request. + ResponseDescription(str): Describes Success or failure + MerchantRequestID(str): This is a global unique Identifier for any submitted payment request. + ResponseCode(int): 0 means success all others are error codes. e.g.404.001.03 + + Error Response: + requestId(str): This is a unique requestID for the payment request + errorCode(str): This is a predefined code that indicates the reason for request failure. + errorMessage(str): This is a predefined code that indicates the reason for request failure. + """ + + time = str(datetime.datetime.now()).split(".")[0].replace("-", "").replace(" ", "").replace(":", "") + password = f"{business_shortcode!s}{passcode!s}{time}" + encoded = base64.b64encode(bytes(password, encoding="utf8")) + payload = { + "BusinessShortCode": business_shortcode, + "Password": encoded.decode("utf-8"), + "Timestamp": time, + "Amount": amount, + "PartyA": int(phone_number), + "PartyB": reference_code, + "PhoneNumber": int(phone_number), + "CallBackURL": callback_url, + "AccountReference": reference_code, + "TransactionDesc": description, + "TransactionType": "CustomerPayBillOnline", + } + headers = { + "Authorization": f"Bearer {self.authentication_token}", + "Content-Type": "application/json", + } + + saf_url = "{}{}".format(self.base_url, "/mpesa/stkpush/v1/processrequest") + r = requests.post(saf_url, headers=headers, json=payload) + return r.json() diff --git a/jcloud/jcloud/pagetype/mpesa_setup/mpesa_setup.js b/jcloud/jcloud/pagetype/mpesa_setup/mpesa_setup.js new file mode 100644 index 0000000..7c43a46 --- /dev/null +++ b/jcloud/jcloud/pagetype/mpesa_setup/mpesa_setup.js @@ -0,0 +1,38 @@ +// Copyright (c) 2025, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Mpesa Setup', { + onload_post_render: function (frm) { + frm.events.setup_account_balance_html(frm); + }, + + refresh: function (frm) { + jingrow.realtime.on('refresh_mpesa_dashboard', function () { + frm.reload_pg(); + frm.events.setup_account_balance_html(frm); + }); + }, + + get_account_balance: function (frm) { + if (!frm.pg.initiator_name && !frm.pg.security_credential) { + jingrow.throw( + __('Please set the initiator name and the security credential'), + ); + } + jingrow.call({ + method: 'get_account_balance_info', + pg: frm.pg, + }); + }, + + setup_account_balance_html: function (frm) { + if (!frm.pg.account_balance) return; + $('div').remove('.form-dashboard-section.custom'); + frm.dashboard.add_section( + jingrow.render_template('account_balance', { + data: JSON.parse(frm.pg.account_balance), + }), + ); + frm.dashboard.show(); + }, +}); diff --git a/jcloud/jcloud/pagetype/mpesa_setup/mpesa_setup.json b/jcloud/jcloud/pagetype/mpesa_setup/mpesa_setup.json new file mode 100644 index 
0000000..ab8191f --- /dev/null +++ b/jcloud/jcloud/pagetype/mpesa_setup/mpesa_setup.json @@ -0,0 +1,143 @@ +{ + "actions": [], + "allow_rename": 1, + "autoname": "format:{mpesa_setup_id}-{api_type}", + "creation": "2025-01-17 14:42:39.157512", + "pagetype": "PageType", + "engine": "InnoDB", + "field_order": [ + "team", + "mpesa_setup_id", + "consumer_key", + "consumer_secret", + "till_number", + "api_type", + "sandbox", + "column_break_qzeb", + "transaction_limit", + "pass_key", + "initiator_name", + "business_shortcode", + "security_credential" + ], + "fields": [ + { + "fieldname": "team", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Team", + "options": "Team", + "reqd": 1 + }, + { + "fieldname": "consumer_key", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Consumer Key", + "reqd": 1 + }, + { + "fieldname": "consumer_secret", + "fieldtype": "Password", + "in_list_view": 1, + "label": "Consumer Secret", + "reqd": 1 + }, + { + "fieldname": "till_number", + "fieldtype": "Data", + "label": "Till Number", + "reqd": 1 + }, + { + "default": "Mpesa Exjcloud", + "fieldname": "api_type", + "fieldtype": "Select", + "label": "API Type", + "options": "Mpesa Exjcloud\nMpesa C2B" + }, + { + "default": "150000", + "fieldname": "transaction_limit", + "fieldtype": "Float", + "label": "Transaction Limit" + }, + { + "fieldname": "initiator_name", + "fieldtype": "Data", + "label": "Initiator Name" + }, + { + "default": "0", + "fieldname": "sandbox", + "fieldtype": "Check", + "label": "Sandbox" + }, + { + "fieldname": "business_shortcode", + "fieldtype": "Data", + "label": "Business Shortcode" + }, + { + "fieldname": "column_break_qzeb", + "fieldtype": "Column Break" + }, + { + "fieldname": "pass_key", + "fieldtype": "Password", + "label": "Pass Key", + "reqd": 1 + }, + { + "fieldname": "security_credential", + "fieldtype": "Small Text", + "label": "Security Credential", + "reqd": 1 + }, + { + "fieldname": "mpesa_setup_id", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Mpesa Setup ID", + "reqd": 1, + "unique": 1 + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2025-01-31 22:35:54.537908", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Mpesa Setup", + "naming_rule": "Expression", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + }, + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "Jcloud Admin", + "share": 1, + "write": 1 + } + ], + "sort_field": "creation", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/mpesa_setup/mpesa_setup.py b/jcloud/jcloud/pagetype/mpesa_setup/mpesa_setup.py new file mode 100644 index 0000000..b22a22c --- /dev/null +++ b/jcloud/jcloud/pagetype/mpesa_setup/mpesa_setup.py @@ -0,0 +1,32 @@ +# Copyright (c) 2025, JINGROW +# For license information, please see license.txt +from __future__ import annotations + +# import jingrow +from jingrow.model.document import Document + + +class MpesaSetup(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + api_type: DF.Literal["Mpesa Exjcloud", "Mpesa C2B"] + business_shortcode: DF.Data | None + consumer_key: DF.Data + consumer_secret: DF.Password + initiator_name: DF.Data | None + mpesa_setup_id: DF.Data + pass_key: DF.Password + sandbox: DF.Check + security_credential: DF.SmallText + team: DF.Link + till_number: DF.Data + transaction_limit: DF.Float + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/mpesa_setup/test_mpesa_setup.py b/jcloud/jcloud/pagetype/mpesa_setup/test_mpesa_setup.py new file mode 100644 index 0000000..52bc447 --- /dev/null +++ b/jcloud/jcloud/pagetype/mpesa_setup/test_mpesa_setup.py @@ -0,0 +1,29 @@ +# Copyright (c) 2025, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests import IntegrationTestCase, UnitTestCase + +# On IntegrationTestCase, the pagetype test records and all +# link-field test record depdendencies are recursively loaded +# Use these module variables to add/remove to/from that list +EXTRA_TEST_RECORD_DEPENDENCIES = [] # eg. ["User"] +IGNORE_TEST_RECORD_DEPENDENCIES = [] # eg. ["User"] + + +class UnitTestMpesaSetup(UnitTestCase): + """ + Unit tests for MpesaSetup. + Use this class for testing individual functions and methods. + """ + + pass + + +class IntegrationTestMpesaSetup(IntegrationTestCase): + """ + Integration tests for MpesaSetup. + Use this class for testing interactions between multiple components. + """ + + pass diff --git a/jcloud/jcloud/pagetype/oauth_domain_mapping/__init__.py b/jcloud/jcloud/pagetype/oauth_domain_mapping/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/oauth_domain_mapping/oauth_domain_mapping.js b/jcloud/jcloud/pagetype/oauth_domain_mapping/oauth_domain_mapping.js new file mode 100644 index 0000000..e898146 --- /dev/null +++ b/jcloud/jcloud/pagetype/oauth_domain_mapping/oauth_domain_mapping.js @@ -0,0 +1,8 @@ +// Copyright (c) 2024, JINGROW +// For license information, please see license.txt + +// jingrow.ui.form.on("OAuth Domain Mapping", { +// refresh(frm) { + +// }, +// }); diff --git a/jcloud/jcloud/pagetype/oauth_domain_mapping/oauth_domain_mapping.json b/jcloud/jcloud/pagetype/oauth_domain_mapping/oauth_domain_mapping.json new file mode 100644 index 0000000..3c2c6b4 --- /dev/null +++ b/jcloud/jcloud/pagetype/oauth_domain_mapping/oauth_domain_mapping.json @@ -0,0 +1,66 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2024-05-13 15:31:01.487795", + "pagetype": "PageType", + "engine": "InnoDB", + "field_order": [ + "social_login_key", + "provider_name", + "column_break_bfdt", + "email_domain" + ], + "fields": [ + { + "fieldname": "social_login_key", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Social Login Key", + "options": "Social Login Key", + "reqd": 1 + }, + { + "fieldname": "column_break_bfdt", + "fieldtype": "Column Break" + }, + { + "description": "Emails matching this domain will get custom oauth login", + "fieldname": "email_domain", + "fieldtype": "Data", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Email Domain" + }, + { + "fetch_from": "social_login_key.provider_name", + "fieldname": "provider_name", + "fieldtype": "Data", + "label": "Provider Name" + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2024-05-13 21:34:04.985596", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "OAuth Domain Mapping", + "owner": "Administrator", + 
"permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "creation", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/oauth_domain_mapping/oauth_domain_mapping.py b/jcloud/jcloud/pagetype/oauth_domain_mapping/oauth_domain_mapping.py new file mode 100644 index 0000000..454961e --- /dev/null +++ b/jcloud/jcloud/pagetype/oauth_domain_mapping/oauth_domain_mapping.py @@ -0,0 +1,9 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class OAuthDomainMapping(Document): + pass diff --git a/jcloud/jcloud/pagetype/oauth_domain_mapping/test_oauth_domain_mapping.py b/jcloud/jcloud/pagetype/oauth_domain_mapping/test_oauth_domain_mapping.py new file mode 100644 index 0000000..2bce4d3 --- /dev/null +++ b/jcloud/jcloud/pagetype/oauth_domain_mapping/test_oauth_domain_mapping.py @@ -0,0 +1,9 @@ +# Copyright (c) 2024, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests.utils import JingrowTestCase + + +class TestOAuthDomainMapping(JingrowTestCase): + pass diff --git a/jcloud/jcloud/pagetype/order/__init__.py b/jcloud/jcloud/pagetype/order/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/order/order.js b/jcloud/jcloud/pagetype/order/order.js new file mode 100644 index 0000000..5afde6b --- /dev/null +++ b/jcloud/jcloud/pagetype/order/order.js @@ -0,0 +1,8 @@ +// Copyright (c) 2025, JINGROW +// For license information, please see license.txt + +// jingrow.ui.form.on("Order", { +// refresh(frm) { + +// }, +// }); diff --git a/jcloud/jcloud/pagetype/order/order.json b/jcloud/jcloud/pagetype/order/order.json new file mode 100644 index 0000000..333313a --- /dev/null +++ b/jcloud/jcloud/pagetype/order/order.json @@ -0,0 +1,110 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2025-03-23 21:29:54.329381", + "pagetype": "PageType", + "engine": "InnoDB", + "field_order": [ + "title", + "order_id", + "trade_no", + "team", + "total_amount", + "payment_method", + "order_type", + "description", + "status" + ], + "fields": [ + { + "fieldname": "team", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Team", + "options": "Team" + }, + { + "fieldname": "order_id", + "fieldtype": "Data", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Order ID", + "read_only": 1, + "unique": 1 + }, + { + "fieldname": "status", + "fieldtype": "Select", + "in_list_view": 1, + "label": "Status", + "options": "\u5f85\u652f\u4ed8\n\u5df2\u652f\u4ed8\n\u4ea4\u6613\u6210\u529f\n\u5df2\u53d6\u6d88\n\u5df2\u9000\u6b3e", + "read_only": 1 + }, + { + "fieldname": "payment_method", + "fieldtype": "Select", + "in_list_view": 1, + "label": "Payment Method", + "options": "\n\u652f\u4ed8\u5b9d\n\u5fae\u4fe1\u652f\u4ed8\n\u4f59\u989d\u652f\u4ed8\n\u94f6\u884c\u8f6c\u8d26\n\u5176\u4ed6", + "read_only": 1 + }, + { + "fieldname": "trade_no", + "fieldtype": "Data", + "label": "Trade No", + "read_only": 1, + "unique": 1 + }, + { + "fieldname": "total_amount", + "fieldtype": "Float", + "label": "Total Amount", + "precision": "2" + }, + { + "fieldname": "description", + "fieldtype": "Data", + "label": "Description" + }, + { + "fieldname": "title", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Title", + "read_only": 1 + }, + { + "fieldname": "order_type", + "fieldtype": 
"Select", + "in_list_view": 1, + "label": "Order Type", + "options": "\n\u4f59\u989d\u5145\u503c\n\u65b0\u5efa\u7f51\u7ad9\n\u7f51\u7ad9\u7eed\u8d39\n\u57df\u540d\u7eed\u8d39", + "read_only": 1 + } + ], + "grid_page_length": 50, + "index_web_pages_for_search": 1, + "links": [], + "modified": "2025-03-26 03:34:52.624889", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Order", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/order/order.py b/jcloud/jcloud/pagetype/order/order.py new file mode 100644 index 0000000..816f515 --- /dev/null +++ b/jcloud/jcloud/pagetype/order/order.py @@ -0,0 +1,27 @@ +# Copyright (c) 2025, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class Order(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + description: DF.Data | None + order_id: DF.Data | None + order_type: DF.Literal["", "\u4f59\u989d\u5145\u503c", "\u65b0\u5efa\u7f51\u7ad9", "\u7f51\u7ad9\u7eed\u8d39", "\u57df\u540d\u7eed\u8d39"] + payment_method: DF.Literal["", "\u652f\u4ed8\u5b9d", "\u5fae\u4fe1\u652f\u4ed8", "\u4f59\u989d\u652f\u4ed8", "\u94f6\u884c\u8f6c\u8d26", "\u5176\u4ed6"] + status: DF.Literal["\u5f85\u652f\u4ed8", "\u5df2\u652f\u4ed8", "\u4ea4\u6613\u6210\u529f", "\u5df2\u53d6\u6d88", "\u5df2\u9000\u6b3e"] + team: DF.Link | None + title: DF.Data | None + total_amount: DF.Float + trade_no: DF.Data | None + # end: auto-generated types + pass diff --git a/jcloud/jcloud/pagetype/order/test_order.py b/jcloud/jcloud/pagetype/order/test_order.py new file mode 100644 index 0000000..cacce76 --- /dev/null +++ b/jcloud/jcloud/pagetype/order/test_order.py @@ -0,0 +1,9 @@ +# Copyright (c) 2025, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests.utils import JingrowTestCase + + +class TestOrder(JingrowTestCase): + pass diff --git a/jcloud/jcloud/pagetype/partner_lead/__init__.py b/jcloud/jcloud/pagetype/partner_lead/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/partner_lead/partner_lead.js b/jcloud/jcloud/pagetype/partner_lead/partner_lead.js new file mode 100644 index 0000000..985624e --- /dev/null +++ b/jcloud/jcloud/pagetype/partner_lead/partner_lead.js @@ -0,0 +1,7 @@ +// Copyright (c) 2021, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Partner Lead', { + // refresh: function(frm) { + // } +}); diff --git a/jcloud/jcloud/pagetype/partner_lead/partner_lead.json b/jcloud/jcloud/pagetype/partner_lead/partner_lead.json new file mode 100644 index 0000000..dde1eca --- /dev/null +++ b/jcloud/jcloud/pagetype/partner_lead/partner_lead.json @@ -0,0 +1,62 @@ +{ + "actions": [], + "allow_rename": 1, + "autoname": "format:PL-{YYYY}-{#####}", + "creation": "2021-09-30 12:57:34.909563", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "team", + "site", + "jingrow_lead" + ], + "fields": [ + { + "fieldname": "team", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Team", + "options": "Team", + "reqd": 1 + }, + { + 
"fieldname": "site", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Site", + "options": "Site" + }, + { + "fieldname": "jingrow_lead", + "fieldtype": "Data", + "label": "Jingrow Lead" + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2021-09-30 12:59:12.284003", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Partner Lead", + "naming_rule": "Expression", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "title_field": "team", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/partner_lead/partner_lead.py b/jcloud/jcloud/pagetype/partner_lead/partner_lead.py new file mode 100644 index 0000000..977359e --- /dev/null +++ b/jcloud/jcloud/pagetype/partner_lead/partner_lead.py @@ -0,0 +1,22 @@ +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class PartnerLead(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + jingrow_lead: DF.Data | None + site: DF.Link | None + team: DF.Link + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/partner_lead/test_partner_lead.py b/jcloud/jcloud/pagetype/partner_lead/test_partner_lead.py new file mode 100644 index 0000000..5c7a68c --- /dev/null +++ b/jcloud/jcloud/pagetype/partner_lead/test_partner_lead.py @@ -0,0 +1,9 @@ +# Copyright (c) 2021, JINGROW +# See license.txt + +# import jingrow +import unittest + + +class TestPartnerLead(unittest.TestCase): + pass diff --git a/jcloud/jcloud/pagetype/partner_payment_payout/__init__.py b/jcloud/jcloud/pagetype/partner_payment_payout/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/partner_payment_payout/partner_payment_payout.js b/jcloud/jcloud/pagetype/partner_payment_payout/partner_payment_payout.js new file mode 100644 index 0000000..c342f8b --- /dev/null +++ b/jcloud/jcloud/pagetype/partner_payment_payout/partner_payment_payout.js @@ -0,0 +1,37 @@ +// Copyright (c) 2025, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Partner Payment Payout', { + refresh(frm) { + if (frm.pg.docstatus == 0) { + frm.add_custom_button('Fetch Payments', () => { + jingrow.call({ + method: 'jcloud.api.local_payments.mpesa.utils.fetch_payments', + args: { + // transaction_pagetype: frm.pg.transaction_pagetype, + from_date: frm.pg.from_date, + to_date: frm.pg.to_date, + partner: frm.pg.partner, + payment_gateway: frm.pg.payment_gateway, + }, + callback: function (response) { + if (response.message) { + // Clear existing entries in transfer_items + frm.clear_table('transfer_items'); + + response.message.forEach((payment) => { + let row = frm.add_child('transfer_items'); + row.transaction_id = payment.name; + row.posting_date = payment.posting_date; + row.amount = payment.amount; + }); + + frm.refresh_field('transfer_items'); + // jingrow.msgprint("Payments fetched and added to the transfer items table."); + } + }, + }); + }); + } + }, +}); diff --git a/jcloud/jcloud/pagetype/partner_payment_payout/partner_payment_payout.json 
b/jcloud/jcloud/pagetype/partner_payment_payout/partner_payment_payout.json new file mode 100644 index 0000000..997b3dd --- /dev/null +++ b/jcloud/jcloud/pagetype/partner_payment_payout/partner_payment_payout.json @@ -0,0 +1,148 @@ +{ + "actions": [], + "allow_rename": 1, + "autoname": "format:PPT-{MM}-{#####}", + "creation": "2025-01-20 14:16:57.352757", + "pagetype": "PageType", + "engine": "InnoDB", + "field_order": [ + "section_break_l0lu", + "amended_from", + "from_date", + "partner", + "payment_gateway", + "column_break_plbi", + "to_date", + "partner_commission", + "section_break_tvae", + "transfer_items", + "section_break_qxag", + "total_amount", + "column_break_lgdh", + "commission", + "column_break_jfqp", + "net_amount" + ], + "fields": [ + { + "fieldname": "section_break_l0lu", + "fieldtype": "Section Break", + "label": "Filters" + }, + { + "fieldname": "amended_from", + "fieldtype": "Link", + "label": "Amended From", + "no_copy": 1, + "options": "Partner Payment Payout", + "print_hide": 1, + "read_only": 1, + "search_index": 1 + }, + { + "default": "Today", + "fieldname": "from_date", + "fieldtype": "Date", + "label": "From Date" + }, + { + "fieldname": "partner", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Partner", + "options": "Team", + "reqd": 1 + }, + { + "fieldname": "payment_gateway", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Payment Gateway", + "options": "Payment Gateway", + "reqd": 1 + }, + { + "fieldname": "column_break_plbi", + "fieldtype": "Column Break" + }, + { + "default": "Today", + "fieldname": "to_date", + "fieldtype": "Date", + "label": "To Date" + }, + { + "fetch_from": "partner.partner_commission", + "fieldname": "partner_commission", + "fieldtype": "Percent", + "label": "Partner Commission", + "read_only": 1 + }, + { + "fieldname": "section_break_tvae", + "fieldtype": "Section Break" + }, + { + "fieldname": "transfer_items", + "fieldtype": "Table", + "label": "Partner Payment Transfer Item", + "options": "Partner Payment Payout Item", + "reqd": 1 + }, + { + "fieldname": "section_break_qxag", + "fieldtype": "Section Break" + }, + { + "fieldname": "total_amount", + "fieldtype": "Currency", + "label": "Total Amount", + "non_negative": 1 + }, + { + "fieldname": "column_break_lgdh", + "fieldtype": "Column Break" + }, + { + "fieldname": "commission", + "fieldtype": "Currency", + "label": "Commission" + }, + { + "fieldname": "column_break_jfqp", + "fieldtype": "Column Break" + }, + { + "fieldname": "net_amount", + "fieldtype": "Currency", + "label": "Net Amount" + } + ], + "index_web_pages_for_search": 1, + "is_submittable": 1, + "links": [], + "modified": "2025-01-20 14:16:57.352757", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Partner Payment Payout", + "naming_rule": "Expression", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "submit": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/partner_payment_payout/partner_payment_payout.py b/jcloud/jcloud/pagetype/partner_payment_payout/partner_payment_payout.py new file mode 100644 index 0000000..8ab0c34 --- /dev/null +++ b/jcloud/jcloud/pagetype/partner_payment_payout/partner_payment_payout.py @@ -0,0 +1,65 @@ +# Copyright (c) 2025, JINGROW +# For license information, please see license.txt +from __future__ 
import annotations + +import jingrow +from jingrow.model.document import Document + + +class PartnerPaymentPayout(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + from jcloud.jcloud.pagetype.partner_payment_payout_item.partner_payment_payout_item import ( + PartnerPaymentPayoutItem, + ) + + amended_from: DF.Link | None + commission: DF.Currency + from_date: DF.Date | None + net_amount: DF.Currency + partner: DF.Link + partner_commission: DF.Percent + payment_gateway: DF.Link + to_date: DF.Date | None + total_amount: DF.Currency + transfer_items: DF.Table[PartnerPaymentPayoutItem] + # end: auto-generated types + + def before_save(self): + self.total_amount = sum([item.amount for item in self.transfer_items]) + self.commission = self.total_amount * (self.partner_commission / 100) + self.net_amount = self.total_amount - self.commission + for item in self.transfer_items: + item.commission_amount = item.amount * (self.partner_commission / 100) + item.net_amount = item.amount - item.commission_amount + + def on_submit(self): + transaction_names = [item.transaction_id for item in self.transfer_items] + + if transaction_names: + jingrow.db.set_value( + "Payment Partner Transaction", + {"name": ["in", transaction_names], "submitted_to_jingrow": 0}, + "submitted_to_jingrow", + 1, + ) + jingrow.db.commit() + + def on_cancel(self): + transaction_names = [item.transaction_id for item in self.transfer_items] + + # Update Payment Partner Records + if transaction_names: + jingrow.db.set_value( + "Payment Partner Transaction", + {"name": ["in", transaction_names], "submitted_to_jingrow": 1}, + "submitted_to_jingrow", + 0, + ) + jingrow.db.commit() diff --git a/jcloud/jcloud/pagetype/partner_payment_payout/test_partner_payment_payout.py b/jcloud/jcloud/pagetype/partner_payment_payout/test_partner_payment_payout.py new file mode 100644 index 0000000..45d144c --- /dev/null +++ b/jcloud/jcloud/pagetype/partner_payment_payout/test_partner_payment_payout.py @@ -0,0 +1,29 @@ +# Copyright (c) 2025, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests import IntegrationTestCase, UnitTestCase + +# On IntegrationTestCase, the pagetype test records and all +# link-field test record depdendencies are recursively loaded +# Use these module variables to add/remove to/from that list +EXTRA_TEST_RECORD_DEPENDENCIES = [] # eg. ["User"] +IGNORE_TEST_RECORD_DEPENDENCIES = [] # eg. ["User"] + + +class UnitTestPartnerPaymentPayout(UnitTestCase): + """ + Unit tests for PartnerPaymentPayout. + Use this class for testing individual functions and methods. + """ + + pass + + +class IntegrationTestPartnerPaymentPayout(IntegrationTestCase): + """ + Integration tests for PartnerPaymentPayout. + Use this class for testing interactions between multiple components. 
+ """ + + pass diff --git a/jcloud/jcloud/pagetype/partner_payment_payout_item/__init__.py b/jcloud/jcloud/pagetype/partner_payment_payout_item/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/partner_payment_payout_item/partner_payment_payout_item.json b/jcloud/jcloud/pagetype/partner_payment_payout_item/partner_payment_payout_item.json new file mode 100644 index 0000000..649b30f --- /dev/null +++ b/jcloud/jcloud/pagetype/partner_payment_payout_item/partner_payment_payout_item.json @@ -0,0 +1,81 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2025-01-20 14:16:33.396639", + "default_view": "List", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "transaction_id", + "posting_date", + "amount", + "column_break_ayfd", + "commission_amount", + "amount_in_local_currency", + "net_amount" + ], + "fields": [ + { + "fieldname": "transaction_id", + "fieldtype": "Link", + "in_list_view": 1, + "in_preview": 1, + "in_standard_filter": 1, + "label": "Transaction Id", + "options": "Payment Partner Transaction", + "reqd": 1 + }, + { + "fieldname": "posting_date", + "fieldtype": "Date", + "in_list_view": 1, + "in_preview": 1, + "in_standard_filter": 1, + "label": "Posting Date" + }, + { + "fieldname": "amount", + "fieldtype": "Currency", + "in_list_view": 1, + "in_preview": 1, + "in_standard_filter": 1, + "label": "Amount(USD)" + }, + { + "fieldname": "column_break_ayfd", + "fieldtype": "Column Break" + }, + { + "fieldname": "commission_amount", + "fieldtype": "Currency", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Commission Amount" + }, + { + "fieldname": "amount_in_local_currency", + "fieldtype": "Currency", + "label": "Amount(LC)" + }, + { + "fieldname": "net_amount", + "fieldtype": "Currency", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Net Amount" + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2025-01-20 14:16:33.396639", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Partner Payment Payout Item", + "owner": "Administrator", + "permissions": [], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/partner_payment_payout_item/partner_payment_payout_item.py b/jcloud/jcloud/pagetype/partner_payment_payout_item/partner_payment_payout_item.py new file mode 100644 index 0000000..68fded7 --- /dev/null +++ b/jcloud/jcloud/pagetype/partner_payment_payout_item/partner_payment_payout_item.py @@ -0,0 +1,29 @@ +# Copyright (c) 2025, JINGROW +# For license information, please see license.txt +from __future__ import annotations + +# import jingrow +from jingrow.model.document import Document + + +class PartnerPaymentPayoutItem(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + amount: DF.Currency + amount_in_local_currency: DF.Currency + commission_amount: DF.Currency + net_amount: DF.Currency + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + posting_date: DF.Date | None + transaction_id: DF.Link + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/payment_dispute/__init__.py b/jcloud/jcloud/pagetype/payment_dispute/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/payment_dispute/payment_dispute.js b/jcloud/jcloud/pagetype/payment_dispute/payment_dispute.js new file mode 100644 index 0000000..5b8917a --- /dev/null +++ b/jcloud/jcloud/pagetype/payment_dispute/payment_dispute.js @@ -0,0 +1,8 @@ +// Copyright (c) 2023, JINGROW +// For license information, please see license.txt + +// jingrow.ui.form.on("Payment Dispute", { +// refresh(frm) { + +// }, +// }); diff --git a/jcloud/jcloud/pagetype/payment_dispute/payment_dispute.json b/jcloud/jcloud/pagetype/payment_dispute/payment_dispute.json new file mode 100644 index 0000000..3e4ceff --- /dev/null +++ b/jcloud/jcloud/pagetype/payment_dispute/payment_dispute.json @@ -0,0 +1,86 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2023-11-30 14:26:12.167431", + "default_view": "List", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "dispute_id", + "event_type", + "payment_intent", + "column_break_crci", + "email", + "reason", + "status" + ], + "fields": [ + { + "fieldname": "payment_intent", + "fieldtype": "Data", + "label": "Payment Intent", + "read_only": 1 + }, + { + "fieldname": "email", + "fieldtype": "Data", + "in_list_view": 1, + "in_preview": 1, + "label": "Email", + "read_only": 1 + }, + { + "fieldname": "dispute_id", + "fieldtype": "Data", + "label": "Dispute Id", + "read_only": 1 + }, + { + "fieldname": "event_type", + "fieldtype": "Data", + "in_list_view": 1, + "in_preview": 1, + "label": "Event Type", + "read_only": 1 + }, + { + "fieldname": "column_break_crci", + "fieldtype": "Column Break" + }, + { + "fieldname": "reason", + "fieldtype": "Data", + "label": "Reason" + }, + { + "fieldname": "status", + "fieldtype": "Data", + "label": "Status" + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2024-09-23 12:27:14.065913", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Payment Dispute", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/payment_dispute/payment_dispute.py b/jcloud/jcloud/pagetype/payment_dispute/payment_dispute.py new file mode 100644 index 0000000..869bbb8 --- /dev/null +++ b/jcloud/jcloud/pagetype/payment_dispute/payment_dispute.py @@ -0,0 +1,40 @@ +# Copyright (c) 2023, JINGROW +# For license information, please see license.txt + +from __future__ import annotations + +# import jingrow +from jingrow.model.document import Document + +from jcloud.jcloud.pagetype.telegram_message.telegram_message import TelegramMessage + + +class PaymentDispute(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + dispute_id: DF.Data | None + email: DF.Data | None + event_type: DF.Data | None + payment_intent: DF.Data | None + reason: DF.Data | None + status: DF.Data | None + # end: auto-generated types + + def after_insert(self): + message = f""" + Dispute {self.event_type}! + + Email: {self.email} + Dispute ID: `{self.dispute_id}` + Event: `{self.event_type}` + Reason: `{self.reason}` + Status: `{self.status}` + [Payment reference on Stripe Dashboard](https://dashboard.stripe.com/payments/{self.payment_intent}) + """ + TelegramMessage.enqueue(message=message, topic="Disputes", group="Billing") diff --git a/jcloud/jcloud/pagetype/payment_dispute/test_payment_dispute.py b/jcloud/jcloud/pagetype/payment_dispute/test_payment_dispute.py new file mode 100644 index 0000000..c285cd3 --- /dev/null +++ b/jcloud/jcloud/pagetype/payment_dispute/test_payment_dispute.py @@ -0,0 +1,9 @@ +# Copyright (c) 2023, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests.utils import JingrowTestCase + + +class TestPaymentDispute(JingrowTestCase): + pass diff --git a/jcloud/jcloud/pagetype/payment_gateway/__init__.py b/jcloud/jcloud/pagetype/payment_gateway/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/payment_gateway/payment_gateway.js b/jcloud/jcloud/pagetype/payment_gateway/payment_gateway.js new file mode 100644 index 0000000..d99c5a0 --- /dev/null +++ b/jcloud/jcloud/pagetype/payment_gateway/payment_gateway.js @@ -0,0 +1,8 @@ +// Copyright (c) 2025, JINGROW +// For license information, please see license.txt + +// jingrow.ui.form.on("Payment Gateway", { +// refresh(frm) { + +// }, +// }); diff --git a/jcloud/jcloud/pagetype/payment_gateway/payment_gateway.json b/jcloud/jcloud/pagetype/payment_gateway/payment_gateway.json new file mode 100644 index 0000000..88068d4 --- /dev/null +++ b/jcloud/jcloud/pagetype/payment_gateway/payment_gateway.json @@ -0,0 +1,178 @@ +{ + "actions": [], + "autoname": "field:gateway", + "creation": "2025-01-18 10:45:52.966302", + "default_view": "List", + "pagetype": "PageType", + "engine": "InnoDB", + "field_order": [ + "team", + "column_break_kvnc", + "team_name", + "column_break_pwgv", + "currency", + "gateway", + "ui_configuration_section", + "integration_logo", + "column_break_jcag", + "gateway_settings", + "column_break_noki", + "gateway_controller", + "partner_integration_section", + "url", + "print_format", + "column_break_oefu", + "api_key", + "column_break_slwm", + "api_secret", + "taxes_section", + "taxes_and_charges" + ], + "fields": [ + { + "fieldname": "team", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Team", + "options": "Team" + }, + { + "fieldname": "column_break_kvnc", + "fieldtype": "Column Break" + }, + { + "fetch_from": "team.team_title", + "fieldname": "team_name", + "fieldtype": "Data", + "label": "Team Name", + "read_only": 1 + }, + { + "fieldname": "column_break_pwgv", + "fieldtype": "Column Break" + }, + { + "fieldname": "currency", + "fieldtype": "Link", + "label": "Currency", + "options": "Currency" + }, + { + "fieldname": "gateway", + "fieldtype": "Data", + "label": "Gateway", + "unique": 1 + }, + { + "fieldname": "ui_configuration_section", + "fieldtype": "Section Break", + "label": "UI Configuration" + }, + { + "fieldname": "integration_logo", + "fieldtype": "Attach Image", + "label": "Integration Logo" + }, + { + "fieldname": "column_break_jcag", + "fieldtype": "Column Break" + }, + { + 
"fieldname": "gateway_settings", + "fieldtype": "Link", + "label": "Gateway Settings", + "options": "PageType" + }, + { + "fieldname": "column_break_noki", + "fieldtype": "Column Break" + }, + { + "fieldname": "gateway_controller", + "fieldtype": "Dynamic Link", + "in_list_view": 1, + "label": "Gateway Controller", + "options": "gateway_settings" + }, + { + "fieldname": "partner_integration_section", + "fieldtype": "Section Break", + "label": "Partner Integration" + }, + { + "fieldname": "url", + "fieldtype": "Data", + "label": "URL" + }, + { + "fieldname": "column_break_oefu", + "fieldtype": "Column Break" + }, + { + "fieldname": "api_key", + "fieldtype": "Data", + "label": "API Key" + }, + { + "fieldname": "column_break_slwm", + "fieldtype": "Column Break" + }, + { + "fieldname": "api_secret", + "fieldtype": "Password", + "label": "API Secret" + }, + { + "fieldname": "taxes_section", + "fieldtype": "Section Break", + "label": "Taxes" + }, + { + "fieldname": "taxes_and_charges", + "fieldtype": "Percent", + "label": "Taxes and Charges" + }, + { + "fieldname": "print_format", + "fieldtype": "Data", + "label": "Print Format" + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2025-02-02 19:51:36.821998", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Payment Gateway", + "naming_rule": "By fieldname", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + }, + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "Jcloud Admin", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/payment_gateway/payment_gateway.py b/jcloud/jcloud/pagetype/payment_gateway/payment_gateway.py new file mode 100644 index 0000000..f708c50 --- /dev/null +++ b/jcloud/jcloud/pagetype/payment_gateway/payment_gateway.py @@ -0,0 +1,32 @@ +# Copyright (c) 2025, JINGROW +# For license information, please see license.txt +from __future__ import annotations + +# import jingrow +from jingrow.model.document import Document + + +class PaymentGateway(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + api_key: DF.Data | None + api_secret: DF.Password | None + currency: DF.Link | None + gateway: DF.Data | None + gateway_controller: DF.DynamicLink | None + gateway_settings: DF.Link | None + integration_logo: DF.AttachImage | None + print_format: DF.Data | None + taxes_and_charges: DF.Percent + team: DF.Link | None + team_name: DF.Data | None + url: DF.Data | None + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/payment_gateway/test_payment_gateway.py b/jcloud/jcloud/pagetype/payment_gateway/test_payment_gateway.py new file mode 100644 index 0000000..596e7ae --- /dev/null +++ b/jcloud/jcloud/pagetype/payment_gateway/test_payment_gateway.py @@ -0,0 +1,29 @@ +# Copyright (c) 2025, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests import IntegrationTestCase, UnitTestCase + +# On IntegrationTestCase, the pagetype test records and all +# link-field test record depdendencies are recursively loaded +# Use these module variables to add/remove to/from that list +EXTRA_TEST_RECORD_DEPENDENCIES = [] # eg. ["User"] +IGNORE_TEST_RECORD_DEPENDENCIES = [] # eg. ["User"] + + +class UnitTestPaymentGateway(UnitTestCase): + """ + Unit tests for PaymentGateway. + Use this class for testing individual functions and methods. + """ + + pass + + +class IntegrationTestPaymentGateway(IntegrationTestCase): + """ + Integration tests for PaymentGateway. + Use this class for testing interactions between multiple components. + """ + + pass diff --git a/jcloud/jcloud/pagetype/payment_partner_transaction/__init__.py b/jcloud/jcloud/pagetype/payment_partner_transaction/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/payment_partner_transaction/payment_partner_transaction.js b/jcloud/jcloud/pagetype/payment_partner_transaction/payment_partner_transaction.js new file mode 100644 index 0000000..fdd5ab4 --- /dev/null +++ b/jcloud/jcloud/pagetype/payment_partner_transaction/payment_partner_transaction.js @@ -0,0 +1,8 @@ +// Copyright (c) 2025, JINGROW +// For license information, please see license.txt + +// jingrow.ui.form.on("Payment Partner Transaction", { +// refresh(frm) { + +// }, +// }); diff --git a/jcloud/jcloud/pagetype/payment_partner_transaction/payment_partner_transaction.json b/jcloud/jcloud/pagetype/payment_partner_transaction/payment_partner_transaction.json new file mode 100644 index 0000000..63b00bf --- /dev/null +++ b/jcloud/jcloud/pagetype/payment_partner_transaction/payment_partner_transaction.json @@ -0,0 +1,175 @@ +{ + "actions": [], + "autoname": "PPT.-.YY.-.MM.-.####", + "creation": "2025-01-18 10:53:07.242575", + "pagetype": "PageType", + "engine": "InnoDB", + "field_order": [ + "partner_details_section", + "payment_partner", + "posting_date", + "column_break_manc", + "payment_gateway", + "column_break_ejza", + "team", + "transaction_details_section", + "amount", + "actual_amount", + "column_break_xqsh", + "currency", + "actual_currency", + "column_break_jyxx", + "exchange_rate", + "submitted_to_jingrow", + "section_break_yhqq", + "payment_transaction_details", + "section_break_7oh3", + "amended_from" + ], + "fields": [ + { + "fieldname": "partner_details_section", + "fieldtype": "Section Break", + "label": "Team Details" + }, + { + "fieldname": "payment_partner", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Payment Partner", + "options": "Team" + }, + { + "default": "Today", + 
"fieldname": "posting_date", + "fieldtype": "Date", + "label": "Posting Date" + }, + { + "fieldname": "column_break_manc", + "fieldtype": "Column Break" + }, + { + "fieldname": "payment_gateway", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Payment Gateway", + "options": "Payment Gateway" + }, + { + "fieldname": "column_break_ejza", + "fieldtype": "Column Break" + }, + { + "fieldname": "team", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Team", + "options": "Team" + }, + { + "fieldname": "transaction_details_section", + "fieldtype": "Section Break", + "label": "Transaction Details" + }, + { + "fieldname": "amount", + "fieldtype": "Currency", + "label": "Amount", + "options": "currency" + }, + { + "fieldname": "actual_amount", + "fieldtype": "Currency", + "label": "Actual Amount", + "options": "actual_currency" + }, + { + "fieldname": "column_break_xqsh", + "fieldtype": "Column Break" + }, + { + "default": "USD", + "fieldname": "currency", + "fieldtype": "Link", + "label": "Currency", + "options": "Currency" + }, + { + "fetch_from": "payment_gateway.currency", + "fieldname": "actual_currency", + "fieldtype": "Link", + "label": "Actual Currency", + "options": "Currency" + }, + { + "fieldname": "column_break_jyxx", + "fieldtype": "Column Break" + }, + { + "fieldname": "exchange_rate", + "fieldtype": "Float", + "label": "Exchange Rate" + }, + { + "default": "0", + "fieldname": "submitted_to_jingrow", + "fieldtype": "Check", + "label": "Submitted To Jingrow" + }, + { + "fieldname": "section_break_yhqq", + "fieldtype": "Section Break" + }, + { + "fieldname": "payment_transaction_details", + "fieldtype": "Code", + "label": "Payment Transaction Details", + "options": "JSON", + "read_only": 1 + }, + { + "fieldname": "section_break_7oh3", + "fieldtype": "Section Break" + }, + { + "fieldname": "amended_from", + "fieldtype": "Link", + "label": "Amended From", + "no_copy": 1, + "options": "Payment Partner Transaction", + "print_hide": 1, + "read_only": 1, + "search_index": 1 + } + ], + "index_web_pages_for_search": 1, + "is_submittable": 1, + "links": [], + "modified": "2025-02-02 17:54:48.975501", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Payment Partner Transaction", + "naming_rule": "Expression (old style)", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/payment_partner_transaction/payment_partner_transaction.py b/jcloud/jcloud/pagetype/payment_partner_transaction/payment_partner_transaction.py new file mode 100644 index 0000000..9b26824 --- /dev/null +++ b/jcloud/jcloud/pagetype/payment_partner_transaction/payment_partner_transaction.py @@ -0,0 +1,32 @@ +# Copyright (c) 2025, JINGROW +# For license information, please see license.txt +from __future__ import annotations + +# import jingrow +from jingrow.model.document import Document + + +class PaymentPartnerTransaction(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + actual_amount: DF.Currency + actual_currency: DF.Link | None + amended_from: DF.Link | None + amount: DF.Currency + currency: DF.Link | None + exchange_rate: DF.Float + payment_gateway: DF.Link | None + payment_partner: DF.Link | None + payment_transaction_details: DF.Code | None + posting_date: DF.Date | None + submitted_to_jingrow: DF.Check + team: DF.Link | None + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/payment_partner_transaction/test_payment_partner_transaction.py b/jcloud/jcloud/pagetype/payment_partner_transaction/test_payment_partner_transaction.py new file mode 100644 index 0000000..ee5a754 --- /dev/null +++ b/jcloud/jcloud/pagetype/payment_partner_transaction/test_payment_partner_transaction.py @@ -0,0 +1,29 @@ +# Copyright (c) 2025, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests import IntegrationTestCase, UnitTestCase + +# On IntegrationTestCase, the pagetype test records and all +# link-field test record depdendencies are recursively loaded +# Use these module variables to add/remove to/from that list +EXTRA_TEST_RECORD_DEPENDENCIES = [] # eg. ["User"] +IGNORE_TEST_RECORD_DEPENDENCIES = [] # eg. ["User"] + + +class UnitTestPaymentPartnerTransaction(UnitTestCase): + """ + Unit tests for PaymentPartnerTransaction. + Use this class for testing individual functions and methods. + """ + + pass + + +class IntegrationTestPaymentPartnerTransaction(IntegrationTestCase): + """ + Integration tests for PaymentPartnerTransaction. + Use this class for testing interactions between multiple components. + """ + + pass diff --git a/jcloud/jcloud/pagetype/payout_order/__init__.py b/jcloud/jcloud/pagetype/payout_order/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/payout_order/patches/change_fields_from_recipient_to_team.py b/jcloud/jcloud/pagetype/payout_order/patches/change_fields_from_recipient_to_team.py new file mode 100644 index 0000000..8a1ac87 --- /dev/null +++ b/jcloud/jcloud/pagetype/payout_order/patches/change_fields_from_recipient_to_team.py @@ -0,0 +1,8 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt + +import jingrow + + +def execute(): + jingrow.db.sql("UPDATE `tabPayout Order` SET team = recipient") diff --git a/jcloud/jcloud/pagetype/payout_order/patches/compute_total_amount.py b/jcloud/jcloud/pagetype/payout_order/patches/compute_total_amount.py new file mode 100644 index 0000000..c14e1f0 --- /dev/null +++ b/jcloud/jcloud/pagetype/payout_order/patches/compute_total_amount.py @@ -0,0 +1,28 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt + +import jingrow +from tqdm import tqdm + + +def execute(): + exchange_rate = jingrow.db.get_single_value("Jcloud Settings", "usd_rate") + payout_orders = jingrow.get_all( + "Payout Order", + {"docstatus": 0}, + ["name", "net_total_cny", "net_total_usd", "recipient_currency"], + ) + + for payout_order in tqdm(payout_orders): + total_amount = 0 + if payout_order.recipient_currency == "USD": + cny_in_usd = 0 + if payout_order.net_total_cny > 0: + cny_in_usd = payout_order.net_total_cny / exchange_rate + total_amount = payout_order.net_total_usd + cny_in_usd + elif payout_order.recipient_currency == "CNY": + total_amount = ( + payout_order.net_total_cny + payout_order.net_total_usd * exchange_rate + ) + + jingrow.db.set_value("Payout Order", payout_order.name, "total_amount", 
total_amount) diff --git a/jcloud/jcloud/pagetype/payout_order/payout_order.js b/jcloud/jcloud/pagetype/payout_order/payout_order.js new file mode 100644 index 0000000..0b165a9 --- /dev/null +++ b/jcloud/jcloud/pagetype/payout_order/payout_order.js @@ -0,0 +1,7 @@ +// Copyright (c) 2022, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Payout Order', { + // refresh: function(frm) { + // } +}); diff --git a/jcloud/jcloud/pagetype/payout_order/payout_order.json b/jcloud/jcloud/pagetype/payout_order/payout_order.json new file mode 100644 index 0000000..1b10218 --- /dev/null +++ b/jcloud/jcloud/pagetype/payout_order/payout_order.json @@ -0,0 +1,218 @@ +{ + "actions": [], + "allow_rename": 1, + "autoname": "format:PO-{YYYY}-{#####}", + "creation": "2022-07-05 23:04:55.997511", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "team", + "recipient_currency", + "period_start", + "due_date", + "status", + "column_break_4", + "mode_of_payment", + "period_end", + "jingrow_purchase_order", + "amended_from", + "section_break_8", + "type", + "column_break_10", + "notes", + "section_break_12", + "items", + "section_break_14", + "net_total_cny", + "currency_cny", + "total_amount", + "column_break_15", + "net_total_usd", + "currency_usd", + "section_break_21", + "ignore_commission" + ], + "fields": [ + { + "fieldname": "due_date", + "fieldtype": "Date", + "hidden": 1, + "label": "Due Date" + }, + { + "fieldname": "jingrow_purchase_order", + "fieldtype": "Data", + "label": "Jingrow Purchase Order" + }, + { + "fieldname": "amended_from", + "fieldtype": "Link", + "label": "Amended From", + "no_copy": 1, + "options": "Payout Order", + "print_hide": 1, + "read_only": 1 + }, + { + "fieldname": "mode_of_payment", + "fieldtype": "Select", + "label": "Mode Of Payment", + "options": "Cash\nCredits\nInternal" + }, + { + "default": "Draft", + "fieldname": "status", + "fieldtype": "Select", + "label": "Status", + "options": "Draft\nPaid\nCommissioned" + }, + { + "fieldname": "column_break_4", + "fieldtype": "Column Break" + }, + { + "fieldname": "items", + "fieldtype": "Table", + "label": "Items", + "options": "Payout Order Item" + }, + { + "fieldname": "section_break_8", + "fieldtype": "Section Break" + }, + { + "fieldname": "type", + "fieldtype": "Select", + "label": "Type", + "options": "Marketplace\nSaaS" + }, + { + "fieldname": "column_break_10", + "fieldtype": "Column Break" + }, + { + "fieldname": "notes", + "fieldtype": "Small Text", + "label": "Notes" + }, + { + "fieldname": "section_break_12", + "fieldtype": "Section Break" + }, + { + "fieldname": "net_total_cny", + "fieldtype": "Currency", + "label": "Net Total CNY", + "options": "currency_cny", + "read_only": 1 + }, + { + "fieldname": "net_total_usd", + "fieldtype": "Currency", + "label": "Net Total USD", + "options": "currency_usd", + "read_only": 1 + }, + { + "fieldname": "column_break_15", + "fieldtype": "Column Break" + }, + { + "fieldname": "section_break_14", + "fieldtype": "Section Break" + }, + { + "fieldname": "period_start", + "fieldtype": "Date", + "label": "Period Start" + }, + { + "fieldname": "period_end", + "fieldtype": "Date", + "label": "Period End" + }, + { + "fetch_from": "recipient.currency", + "fieldname": "recipient_currency", + "fieldtype": "Data", + "label": "Currency" + }, + { + "fieldname": "section_break_21", + "fieldtype": "Section Break" + }, + { + "default": "0", + "fieldname": "ignore_commission", + "fieldtype": "Check", + "label": "Ignore Commission", 
+ "options": "0" + }, + { + "default": "CNY", + "fieldname": "currency_cny", + "fieldtype": "Data", + "hidden": 1, + "label": "Currency CNY" + }, + { + "default": "USD", + "fieldname": "currency_usd", + "fieldtype": "Data", + "hidden": 1, + "label": "Currency USD" + }, + { + "fieldname": "team", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Team", + "options": "Team", + "reqd": 1 + }, + { + "fieldname": "total_amount", + "fieldtype": "Currency", + "label": "Total Amount", + "options": "recipient_currency", + "read_only": 1 + } + ], + "index_web_pages_for_search": 1, + "is_submittable": 1, + "links": [], + "modified": "2024-07-15 18:03:18.004496", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Payout Order", + "naming_rule": "Expression", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [ + { + "color": "Green", + "title": "Paid" + }, + { + "color": "Gray", + "title": "Draft" + } + ], + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/payout_order/payout_order.py b/jcloud/jcloud/pagetype/payout_order/payout_order.py new file mode 100644 index 0000000..a0841c1 --- /dev/null +++ b/jcloud/jcloud/pagetype/payout_order/payout_order.py @@ -0,0 +1,334 @@ +# Copyright (c) 2022, JINGROW +# For license information, please see license.txt + +from datetime import date +from itertools import groupby +from typing import List + +import jingrow +from jingrow.model.document import Document + +from jcloud.jcloud.pagetype.invoice_item.invoice_item import InvoiceItem +from jcloud.jcloud.pagetype.payout_order_item.payout_order_item import PayoutOrderItem +from jcloud.utils import log_error + + +class PayoutOrder(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + from jcloud.jcloud.pagetype.payout_order_item.payout_order_item import PayoutOrderItem + + amended_from: DF.Link | None + currency_cny: DF.Data | None + currency_usd: DF.Data | None + due_date: DF.Date | None + jingrow_purchase_order: DF.Data | None + ignore_commission: DF.Check + items: DF.Table[PayoutOrderItem] + mode_of_payment: DF.Literal["Cash", "Credits", "Internal"] + net_total_cny: DF.Currency + net_total_usd: DF.Currency + notes: DF.SmallText | None + period_end: DF.Date | None + period_start: DF.Date | None + recipient_currency: DF.Data | None + status: DF.Literal["Draft", "Paid", "Commissioned"] + team: DF.Link + total_amount: DF.Currency + type: DF.Literal["Marketplace", "SaaS"] + # end: auto-generated types + + dashboard_fields = [ + "period_end", + "team", + "mode_of_payment", + "net_total_cny", + "net_total_usd", + "status", + "total_amount", + "items", + ] + + @staticmethod + def get_list_query(query): + PayoutOrder = jingrow.qb.PageType("Payout Order") + query = query.where((PayoutOrder.docstatus != 2)) + return query + + def validate(self): + self.validate_items() + self.validate_net_totals() + self.compute_total_amount() + + def validate_items(self): + for row in self.items: + invoice_name = row.invoice + invoice = jingrow.db.get_value( + "Invoice", + invoice_name, + [ + "status", + "currency", + "transaction_fee", + "exchange_rate", + "amount_paid", + ], + as_dict=True, + ) + + if invoice.status != "Paid": + jingrow.throw(f"Invoice {invoice_name} is not paid yet.") + + invoice_item = get_invoice_item_for_po_item(invoice_name, row) + + # check to avoid app revenue ledger item's calculation + if not invoice_item: + return + + row.tax = row.tax or 0.0 + row.total_amount = invoice_item.amount + row.site = invoice_item.site + row.currency = invoice.currency + row.gateway_fee = 0.0 + + # validate commissions and thresholds + app_payment = ( + jingrow.get_cached_pg("Marketplace App Payment", row.document_name) + if jingrow.db.exists("Marketplace App Payment", row.document_name) + else jingrow.get_pg( + { + "pagetype": "Marketplace App Payment", + "app": row.document_name, + "team": self.team, + } + ).insert(ignore_permissions=True) + ) + + row.commission = ( + app_payment.get_commission(row.total_amount) if not self.ignore_commission else 0.0 + ) + + row.net_amount = row.total_amount - row.commission + + if row.currency == "CNY": + app_payment.total_cny += row.net_amount if row.net_amount > 0 else row.commission + else: + app_payment.total_usd += row.net_amount if row.net_amount > 0 else row.commission + + app_payment.save(ignore_permissions=True) + + def validate_net_totals(self): + self.net_total_usd = 0 + self.net_total_cny = 0 + + for row in self.items: + if row.currency == "CNY": + self.net_total_cny += row.net_amount + else: + self.net_total_usd += row.net_amount + + if self.net_total_usd <= 0 and self.net_total_cny <= 0: + self.status = "Commissioned" + + def compute_total_amount(self): + exchange_rate = jingrow.db.get_single_value("Jcloud Settings", "usd_rate") + if self.recipient_currency == "USD": + cny_in_usd = 0 + if self.net_total_cny > 0: + cny_in_usd = self.net_total_cny / exchange_rate + self.total_amount = self.net_total_usd + cny_in_usd + elif self.recipient_currency == "CNY": + self.total_amount = self.net_total_cny + (self.net_total_usd * exchange_rate) + + def before_submit(self): + if self.mode_of_payment == "Cash" and (not self.jingrow_purchase_order): + 
jingrow.throw( + "Jingrow Purchase Order is required before marking this cash payout as Paid" + ) + self.status = "Paid" + + +def get_invoice_item_for_po_item( + invoice_name: str, payout_order_item: PayoutOrderItem +) -> InvoiceItem | None: + try: + if payout_order_item.invoice_item: + item = jingrow.get_pg("Invoice Item", payout_order_item.invoice_item) + if ( + item.parent == invoice_name + and item.document_name == payout_order_item.document_name + and item.document_type == payout_order_item.document_type + and item.plan == payout_order_item.plan + and item.rate == payout_order_item.rate + ): + return item + + return jingrow.get_pg( + "Invoice Item", + { + "parent": invoice_name, + "document_name": payout_order_item.document_name, + "document_type": payout_order_item.document_type, + "plan": payout_order_item.plan, + "rate": payout_order_item.rate, + }, + ) + except jingrow.DoesNotExistError: + return None + + +def create_marketplace_payout_orders_monthly(period_start=None, period_end=None): + period_start, period_end = ( + (period_start, period_end) + if period_start and period_end + else get_current_period_boundaries() + ) + items = get_unaccounted_marketplace_invoice_items() + + # Group by teams + for app_team, items in groupby(items, key=lambda x: x["app_team"]): + try: + item_names = [i.name for i in items] + + po_exists = jingrow.db.exists( + "Payout Order", {"team": app_team, "period_end": period_end} + ) + + if not po_exists: + create_payout_order_from_invoice_item_names( + item_names, team=app_team, period_start=period_start, period_end=period_end + ) + else: + po = jingrow.get_pg("Payout Order", {"team": app_team, "period_end": period_end}) + add_invoice_items_to_po(po, item_names) + + jingrow.db.set_value( + "Invoice Item", + {"name": ("in", item_names)}, + "has_marketplace_payout_completed", + True, + ) + + if not jingrow.flags.in_test: + # Save this particular PO transaction + jingrow.db.commit() + except Exception: + jingrow.db.rollback() + log_error("Payout Order Creation Error", team=app_team, invoice_items=items) + + +def get_current_period_boundaries(): + today = jingrow.utils.today() + period_start = jingrow.utils.data.get_first_day(today) + period_end = jingrow.utils.data.get_last_day(today) + + return period_start, period_end + + +def add_invoice_items_to_po(po, invoice_item_names): + for item_name in invoice_item_names: + invoice_item = jingrow.get_pg("Invoice Item", item_name) + po.append( + "items", + { + "invoice_item": invoice_item.name, + "invoice": invoice_item.parent, + "document_type": invoice_item.document_type, + "document_name": invoice_item.document_name, + "rate": invoice_item.rate, + "plan": invoice_item.plan, + "quantity": invoice_item.quantity, + "site": invoice_item.site, + }, + ) + po.save() + + +def get_unaccounted_marketplace_invoice_items(): + # Get all marketplace app invoice items + invoice = jingrow.qb.PageType("Invoice") + invoice_item = jingrow.qb.PageType("Invoice Item") + marketplace_app = jingrow.qb.PageType("Marketplace App") + + items = ( + jingrow.qb.from_(invoice_item) + .left_join(invoice) + .on(invoice_item.parent == invoice.name) + .left_join(marketplace_app) + .on(marketplace_app.name == invoice_item.document_name) + .where(invoice.status == "Paid") + .where(invoice_item.document_type == "Marketplace App") + .where(invoice_item.has_marketplace_payout_completed == 0) + .select( + invoice_item.name, invoice_item.document_name, marketplace_app.team.as_("app_team") + ) + .distinct() + .run(as_dict=True) + ) + + return items + + 
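For reference, the per-team consolidation that compute_total_amount performs above reduces to a single conversion step. A minimal standalone sketch of that step (illustrative only, not part of this module; usd_rate is assumed to be the CNY-per-USD rate stored in Jcloud Settings, which is how the division and multiplication above treat it):

    def consolidated_total(net_cny, net_usd, recipient_currency, usd_rate):
        # Convert the "other" currency into the recipient's currency, then add the two nets,
        # mirroring PayoutOrder.compute_total_amount above.
        if recipient_currency == "USD":
            cny_in_usd = net_cny / usd_rate if net_cny > 0 else 0.0
            return net_usd + cny_in_usd
        if recipient_currency == "CNY":
            return net_cny + net_usd * usd_rate
        return 0.0

    consolidated_total(net_cny=70.0, net_usd=20.0, recipient_currency="USD", usd_rate=7.0)  # -> 30.0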
+@jingrow.whitelist() +def create_payout_order_from_invoice_items( + invoice_items: List[InvoiceItem], + team: str, + period_start: date, + period_end: date, + mode_of_payment: str = "Cash", + notes: str = "", + type: str = "Marketplace", + save: bool = True, +) -> PayoutOrder: + po = jingrow.get_pg( + { + "pagetype": "Payout Order", + "team": team, + "mode_of_payment": mode_of_payment, + "notes": notes, + "type": type, + "period_start": period_start, + "period_end": period_end, + } + ) + + for invoice_item in invoice_items: + po.append( + "items", + { + "invoice_item": invoice_item.name, + "invoice": invoice_item.parent, + "document_type": invoice_item.document_type, + "document_name": invoice_item.document_name, + "rate": invoice_item.rate, + "plan": invoice_item.plan, + "quantity": invoice_item.quantity, + "site": invoice_item.site, + }, + ) + + if save: + po.insert() + + return po + + +def create_payout_order_from_invoice_item_names(item_names, *args, **kwargs): + invoice_items = (jingrow.get_pg("Invoice Item", i) for i in item_names) + return create_payout_order_from_invoice_items(invoice_items, *args, **kwargs) + + +def create_marketplace_payout_orders(): + # ONLY RUN ON LAST DAY OF THE MONTH + today = jingrow.utils.today() + period_end = jingrow.utils.data.get_last_day(today).strftime("%Y-%m-%d") + + if today != period_end: + return + + create_marketplace_payout_orders_monthly() diff --git a/jcloud/jcloud/pagetype/payout_order/test_payout_order.py b/jcloud/jcloud/pagetype/payout_order/test_payout_order.py new file mode 100644 index 0000000..f7ea7fd --- /dev/null +++ b/jcloud/jcloud/pagetype/payout_order/test_payout_order.py @@ -0,0 +1,196 @@ +# Copyright (c) 2022, JINGROW +# See license.txt + +from unittest.mock import Mock, patch + +import jingrow +from jingrow.tests.utils import JingrowTestCase + +from jcloud.jcloud.pagetype.app.test_app import create_test_app +from jcloud.jcloud.pagetype.invoice.invoice import Invoice +from jcloud.jcloud.pagetype.marketplace_app.test_marketplace_app import ( + create_test_marketplace_app, +) +from jcloud.jcloud.pagetype.payout_order.payout_order import ( + create_marketplace_payout_orders_monthly, + create_payout_order_from_invoice_items, +) +from jcloud.jcloud.pagetype.team.test_team import create_test_team + + +@patch.object(Invoice, "create_invoice_on_jingrowio", new=Mock()) +class TestPayoutOrder(JingrowTestCase): + def tearDown(self): + jingrow.db.rollback() + + def test_net_amount_calculations_cny(self): + self.create_test_cny_invoice() + # Create a PO for this period + today = jingrow.utils.today() + period_start = jingrow.utils.data.get_first_day(today) + period_end = jingrow.utils.data.get_last_day(today) + + po = create_payout_order_from_invoice_items( + self.test_invoice.items, + self.test_team.name, + period_start=period_start, + period_end=period_end, + mode_of_payment="Internal", + save=True, + ) + + self.assertEqual(len(po.items), 1) + self.assertEqual(po.items[0].invoice, self.test_invoice.name) + self.assertEqual(po.items[0].total_amount, 40.0) + self.assertEqual(po.items[0].net_amount, 40.0) + self.assertEqual(po.items[0].currency, "CNY") + + self.assertEqual(po.net_total_cny, 40.0) + self.assertEqual(po.net_total_usd, 0) + + def test_net_amount_calculations_usd(self): + self.create_test_usd_invoice() + + # Create a PO for this period + today = jingrow.utils.today() + period_start = jingrow.utils.data.get_first_day(today) + period_end = jingrow.utils.data.get_last_day(today) + + po = create_payout_order_from_invoice_items( + 
self.test_invoice.items, + self.test_team.name, + period_start=period_start, + period_end=period_end, + mode_of_payment="Internal", + save=True, + ) + + self.assertEqual(len(po.items), 1) + self.assertEqual(po.items[0].invoice, self.test_invoice.name) + self.assertEqual(po.items[0].total_amount, 20.0) + + self.assertEqual(po.items[0].net_amount, 20.0) + self.assertEqual(po.items[0].currency, "USD") + + self.assertEqual(po.net_total_cny, 0) + self.assertEqual(po.net_total_usd, 20.0) + + def create_test_cny_invoice(self): + self.test_team = create_test_team() + + self.test_invoice = jingrow.get_pg( + pagetype="Invoice", + team=self.test_team.name, + transaction_amount=1000, + transaction_fee=200, + amount_paid=1000, + status="Paid", + ).insert() + + # create test marketplace app + test_app = create_test_app("test_app") + test_mp_app = create_test_marketplace_app(test_app.name, self.test_team.name) + + self.test_invoice.append( + "items", + { + "document_type": "Marketplace App", + "document_name": test_mp_app.name, + "rate": 20, + "plan": "CNY 100", + "quantity": 2, + }, + ) + + self.test_invoice.save() + self.test_invoice.submit() + + def create_test_usd_invoice(self): + self.test_team = create_test_team(country="United States") + + self.test_invoice = jingrow.get_pg( + pagetype="Invoice", + team=self.test_team.name, + transaction_amount=1800, + transaction_fee=1260, + amount_paid=25, + status="Paid", + exchange_rate=70, + ).insert() + + # create test marketplace app + test_app = create_test_app("test_app") + test_mp_app = create_test_marketplace_app(test_app.name, self.test_team.name) + + self.test_invoice.append( + "items", + { + "document_type": "Marketplace App", + "document_name": test_mp_app.name, + "rate": 10, + "plan": "USD 25", + "quantity": 2, + }, + ) + + self.test_invoice.save() + self.test_invoice.submit() + + def test_create_marketplace_monthly_payout_order(self): + self.create_test_usd_invoice() + + # No payout order before running the job + self.assertFalse(jingrow.db.exists("Payout Order", {"team": self.test_team.name})) + + # Run the monthly job + create_marketplace_payout_orders_monthly() + + # The Payout Order should have been created + self.assertTrue(jingrow.db.exists("Payout Order", {"team": self.test_team.name})) + + po = jingrow.get_pg("Payout Order", {"team": self.test_team.name}) + self.assertEqual(len(po.items), 1) + + # The invoice item must be marked as paid out + marked_completed = jingrow.db.get_value( + "Invoice Item", po.items[0].invoice_item, "has_marketplace_payout_completed" + ) + self.assertTrue(marked_completed) + + # Re-run should not create a new PO + # Since all items are already accounted for + create_marketplace_payout_orders_monthly() + po_count = jingrow.db.count("Payout Order", {"team": self.test_team.name}) + self.assertEqual(po_count, 1) + + def test_does_not_create_duplicate_monthly_payout_order(self): + self.create_test_usd_invoice() + + # Create a PO for this period + today = jingrow.utils.today() + period_start = jingrow.utils.data.get_first_day(today) + period_end = jingrow.utils.data.get_last_day(today) + + # No POs initially + num_payout_orders = jingrow.db.count("Payout Order", {"team": self.test_team.name}) + self.assertEqual(num_payout_orders, 0) + + po = create_payout_order_from_invoice_items( + [], self.test_team.name, period_start=period_start, period_end=period_end + ) + + create_marketplace_payout_orders_monthly() + + num_payout_orders = jingrow.db.count("Payout Order", {"team": self.test_team.name}) + 
self.assertEqual(num_payout_orders, 1) + + # The original PO must now contain the invoice item + po.reload() + self.assertEqual(len(po.items), 1) + + # The item should be the one in the invoice + # The invoice item must be marked as paid out + marked_completed = jingrow.db.get_value( + "Invoice Item", po.items[0].invoice_item, "has_marketplace_payout_completed" + ) + self.assertTrue(marked_completed) diff --git a/jcloud/jcloud/pagetype/payout_order_item/__init__.py b/jcloud/jcloud/pagetype/payout_order_item/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/payout_order_item/payout_order_item.json b/jcloud/jcloud/pagetype/payout_order_item/payout_order_item.json new file mode 100644 index 0000000..eb6e543 --- /dev/null +++ b/jcloud/jcloud/pagetype/payout_order_item/payout_order_item.json @@ -0,0 +1,143 @@ +{ + "actions": [], + "autoname": "autoincrement", + "creation": "2022-07-05 23:16:31.202419", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "invoice", + "document_type", + "document_name", + "plan", + "quantity", + "rate", + "column_break_5", + "total_amount", + "tax", + "gateway_fee", + "commission", + "section_break_10", + "net_amount", + "site", + "column_break_13", + "currency", + "invoice_item" + ], + "fields": [ + { + "fieldname": "invoice", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Invoice", + "options": "Invoice", + "reqd": 1 + }, + { + "fieldname": "document_type", + "fieldtype": "Link", + "label": "Document Type", + "options": "PageType", + "reqd": 1 + }, + { + "fieldname": "document_name", + "fieldtype": "Dynamic Link", + "in_list_view": 1, + "label": "Document Name", + "options": "document_type", + "reqd": 1 + }, + { + "fieldname": "total_amount", + "fieldtype": "Currency", + "label": "Total Amount", + "options": "currency" + }, + { + "fieldname": "tax", + "fieldtype": "Currency", + "label": "Tax", + "options": "currency" + }, + { + "fieldname": "gateway_fee", + "fieldtype": "Currency", + "label": "Gateway Fee", + "options": "currency" + }, + { + "fieldname": "net_amount", + "fieldtype": "Currency", + "in_list_view": 1, + "label": "Net Amount", + "options": "currency" + }, + { + "fieldname": "column_break_5", + "fieldtype": "Column Break" + }, + { + "fieldname": "commission", + "fieldtype": "Currency", + "label": "Commission", + "options": "currency" + }, + { + "fieldname": "section_break_10", + "fieldtype": "Section Break" + }, + { + "fieldname": "plan", + "fieldtype": "Data", + "label": "Plan" + }, + { + "fieldname": "rate", + "fieldtype": "Currency", + "label": "Rate", + "options": "currency" + }, + { + "fieldname": "currency", + "fieldtype": "Select", + "label": "Currency", + "options": "USD\nCNY" + }, + { + "fieldname": "column_break_13", + "fieldtype": "Column Break" + }, + { + "fieldname": "site", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Site", + "options": "Site" + }, + { + "fieldname": "quantity", + "fieldtype": "Float", + "label": "quantity" + }, + { + "fieldname": "invoice_item", + "fieldtype": "Link", + "label": "Invoice Item", + "options": "Invoice Item" + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2024-07-05 10:28:46.559419", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Payout Order Item", + "naming_rule": "Autoincrement", + "owner": "Administrator", + "permissions": [], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git 
a/jcloud/jcloud/pagetype/payout_order_item/payout_order_item.py b/jcloud/jcloud/pagetype/payout_order_item/payout_order_item.py new file mode 100644 index 0000000..9f91114 --- /dev/null +++ b/jcloud/jcloud/pagetype/payout_order_item/payout_order_item.py @@ -0,0 +1,37 @@ +# Copyright (c) 2022, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class PayoutOrderItem(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + commission: DF.Currency + currency: DF.Literal["USD", "CNY"] + document_name: DF.DynamicLink + document_type: DF.Link + gateway_fee: DF.Currency + invoice: DF.Link + invoice_item: DF.Link | None + name: DF.Int | None + net_amount: DF.Currency + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + plan: DF.Data | None + quantity: DF.Float + rate: DF.Currency + site: DF.Link | None + tax: DF.Currency + total_amount: DF.Currency + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/physical_backup_group/__init__.py b/jcloud/jcloud/pagetype/physical_backup_group/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/physical_backup_group/physical_backup_group.js b/jcloud/jcloud/pagetype/physical_backup_group/physical_backup_group.js new file mode 100644 index 0000000..0d35a3f --- /dev/null +++ b/jcloud/jcloud/pagetype/physical_backup_group/physical_backup_group.js @@ -0,0 +1,37 @@ +// Copyright (c) 2025, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Physical Backup Group', { + refresh(frm) { + if (frm.is_new()) { + return; + } + + [ + [__('Sync'), 'sync', false], + [__('Start / Resume'), 'trigger_next_backup', false], + [__('Set DB Sizes'), 'set_db_sizes', true], + [__('Retry Failed Backups'), 'retry_failed_backups', true], + [__('Delete Backups'), 'delete_backups', true], + [__('Activate All Sites'), 'activate_all_sites', true], + [__('Create Duplicate Group'), 'create_duplicate_group', true], + ].forEach(([label, method, grouped]) => { + frm.add_custom_button( + label, + () => { + jingrow.confirm( + `Are you sure you want to ${label.toLowerCase()}?`, + () => + frm + .call(method, { + freeze: true, + freeze_message: __('Please wait...'), + }) + .then(() => frm.refresh()), + ); + }, + grouped ? 
__('Actions') : null, + ); + }); + }, +}); diff --git a/jcloud/jcloud/pagetype/physical_backup_group/physical_backup_group.json b/jcloud/jcloud/pagetype/physical_backup_group/physical_backup_group.json new file mode 100644 index 0000000..eb3779a --- /dev/null +++ b/jcloud/jcloud/pagetype/physical_backup_group/physical_backup_group.json @@ -0,0 +1,97 @@ +{ + "actions": [], + "allow_rename": 1, + "autoname": "prompt", + "creation": "2025-02-18 10:25:00.790197", + "pagetype": "PageType", + "engine": "InnoDB", + "field_order": [ + "no_of_sites", + "column_break_bbil", + "successful_backups", + "column_break_vwuc", + "available_backups", + "column_break_ikwc", + "unavailable_backups", + "section_break_bvmf", + "site_backups" + ], + "fields": [ + { + "fieldname": "no_of_sites", + "fieldtype": "Int", + "in_list_view": 1, + "label": "No of Sites", + "read_only": 1 + }, + { + "fieldname": "column_break_bbil", + "fieldtype": "Column Break" + }, + { + "fieldname": "successful_backups", + "fieldtype": "Int", + "in_list_view": 1, + "label": "Successful Backups", + "read_only": 1 + }, + { + "fieldname": "column_break_vwuc", + "fieldtype": "Column Break" + }, + { + "fieldname": "available_backups", + "fieldtype": "Int", + "in_list_view": 1, + "label": "Available Backups", + "read_only": 1 + }, + { + "fieldname": "column_break_ikwc", + "fieldtype": "Column Break" + }, + { + "fieldname": "unavailable_backups", + "fieldtype": "Int", + "in_list_view": 1, + "label": "Unavailable Backups", + "read_only": 1 + }, + { + "fieldname": "section_break_bvmf", + "fieldtype": "Section Break" + }, + { + "fieldname": "site_backups", + "fieldtype": "Table", + "label": "Site Backups", + "options": "Physical Backup Group Site", + "reqd": 1 + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2025-02-18 15:14:19.059871", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Physical Backup Group", + "naming_rule": "Set by user", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "creation", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/physical_backup_group/physical_backup_group.py b/jcloud/jcloud/pagetype/physical_backup_group/physical_backup_group.py new file mode 100644 index 0000000..c31ebdf --- /dev/null +++ b/jcloud/jcloud/pagetype/physical_backup_group/physical_backup_group.py @@ -0,0 +1,127 @@ +# Copyright (c) 2025, JINGROW +# For license information, please see license.txt + +from __future__ import annotations + +import jingrow +from jingrow.model.document import Document + +from jcloud.agent import Agent + + +class PhysicalBackupGroup(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + from jcloud.jcloud.pagetype.physical_backup_group_site.physical_backup_group_site import ( + PhysicalBackupGroupSite, + ) + + available_backups: DF.Int + no_of_sites: DF.Int + site_backups: DF.Table[PhysicalBackupGroupSite] + successful_backups: DF.Int + unavailable_backups: DF.Int + # end: auto-generated types + + @property + def next_site_backup(self) -> PhysicalBackupGroupSite | None: + # Fetch the last one before pending + for site in self.site_backups: + if site.status == "Pending": + return site + return None + + @property + def current_site_backup(self) -> PhysicalBackupGroupSite | None: + # Fetch the last one before pending + for site in reversed(self.site_backups): + if site.status != "Pending": + return site + return None + + @jingrow.whitelist() + def sync(self): + self.no_of_sites = len(self.site_backups) + # Check site backup's status + for site in self.site_backups: + site.sync() + self.successful_backups = len([site for site in self.site_backups if site.status == "Success"]) + self.available_backups = len([site for site in self.site_backups if site.backup_available]) + self.unavailable_backups = len( + [site for site in self.site_backups if (not site.backup_available and site.status == "Success")] + ) + self.save(ignore_permissions=True) + + @jingrow.whitelist() + def set_db_sizes(self): + for site in self.site_backups: + site.set_db_size() + + @jingrow.whitelist() + def trigger_next_backup(self): + jingrow.enqueue_pg(self.pagetype, self.name, "_trigger_next_backup", queue="default", at_front=True) + jingrow.msgprint("Triggered next backup") + + def _trigger_next_backup(self): + current_site_backup = self.current_site_backup + if current_site_backup and current_site_backup.status == "Running": + return + + next_site_backup = self.next_site_backup + if not next_site_backup: + jingrow.msgprint("No more sites to backup") + return + next_site_backup.status = "Running" + next_site_backup.save(ignore_permissions=True) + jingrow.enqueue_pg( + "Physical Backup Group Site", + next_site_backup.name, + "physical_backup", + queue="default", + enqueue_after_commit=True, + ) + + @jingrow.whitelist() + def retry_failed_backups(self): + for site in self.site_backups: + if site.status == "Failure": + site.backup = None + site.backup_available = False + site.status = "Pending" + site.save(ignore_permissions=True) + + @jingrow.whitelist() + def delete_backups(self): + for site in self.site_backups: + site.delete_backup() + + @jingrow.whitelist() + def activate_all_sites(self): + for site_backup in self.site_backups: + site = jingrow.get_pg("Site", site_backup.site) + agent = Agent(site.server) + agent.activate_site(site) + + @jingrow.whitelist() + def create_duplicate_group(self): + suffix = 2 + name = self.name + "-" + str(suffix) + while jingrow.db.exists("Physical Backup Group", name): + suffix += 1 + name = self.name + "-" + str(suffix) + duplicate_group = jingrow.get_pg( + { + "pagetype": "Physical Backup Group", + "name": name, + "site_backups": [ + {"site": site_backup.site, "status": "Pending"} for site_backup in self.site_backups + ], + } + ).insert() + jingrow.msgprint("Created duplicate group - " + duplicate_group.name) diff --git a/jcloud/jcloud/pagetype/physical_backup_group/test_physical_backup_group.py b/jcloud/jcloud/pagetype/physical_backup_group/test_physical_backup_group.py new file mode 100644 index 0000000..15119bd --- /dev/null +++ 
b/jcloud/jcloud/pagetype/physical_backup_group/test_physical_backup_group.py @@ -0,0 +1,29 @@ +# Copyright (c) 2025, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests import IntegrationTestCase, UnitTestCase + +# On IntegrationTestCase, the pagetype test records and all +# link-field test record depdendencies are recursively loaded +# Use these module variables to add/remove to/from that list +EXTRA_TEST_RECORD_DEPENDENCIES = [] # eg. ["User"] +IGNORE_TEST_RECORD_DEPENDENCIES = [] # eg. ["User"] + + +class UnitTestPhysicalBackupGroup(UnitTestCase): + """ + Unit tests for PhysicalBackupGroup. + Use this class for testing individual functions and methods. + """ + + pass + + +class IntegrationTestPhysicalBackupGroup(IntegrationTestCase): + """ + Integration tests for PhysicalBackupGroup. + Use this class for testing interactions between multiple components. + """ + + pass diff --git a/jcloud/jcloud/pagetype/physical_backup_group_site/__init__.py b/jcloud/jcloud/pagetype/physical_backup_group_site/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/physical_backup_group_site/physical_backup_group_site.json b/jcloud/jcloud/pagetype/physical_backup_group_site/physical_backup_group_site.json new file mode 100644 index 0000000..405e431 --- /dev/null +++ b/jcloud/jcloud/pagetype/physical_backup_group_site/physical_backup_group_site.json @@ -0,0 +1,74 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2025-02-18 10:31:17.504134", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "site", + "db_size", + "status", + "backup_available", + "backup", + "duration_seconds" + ], + "fields": [ + { + "fieldname": "site", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Site", + "options": "Site", + "reqd": 1 + }, + { + "fieldname": "status", + "fieldtype": "Select", + "in_list_view": 1, + "label": "Status", + "options": "Pending\nRunning\nSuccess\nFailure", + "reqd": 1 + }, + { + "fieldname": "backup", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Backup", + "options": "Site Backup" + }, + { + "default": "0", + "fieldname": "backup_available", + "fieldtype": "Check", + "in_list_view": 1, + "label": "Backup Available", + "read_only": 1 + }, + { + "fieldname": "db_size", + "fieldtype": "Int", + "in_list_view": 1, + "label": "DB Size (MB)", + "read_only": 1 + }, + { + "fieldname": "duration_seconds", + "fieldtype": "Int", + "in_list_view": 1, + "label": "Duration (seconds)" + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2025-02-18 13:39:44.803273", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Physical Backup Group Site", + "owner": "Administrator", + "permissions": [], + "sort_field": "creation", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/physical_backup_group_site/physical_backup_group_site.py b/jcloud/jcloud/pagetype/physical_backup_group_site/physical_backup_group_site.py new file mode 100644 index 0000000..b8b3814 --- /dev/null +++ b/jcloud/jcloud/pagetype/physical_backup_group_site/physical_backup_group_site.py @@ -0,0 +1,115 @@ +# Copyright (c) 2025, JINGROW +# For license information, please see license.txt + +from __future__ import annotations + +import time + +import jingrow +from jingrow.exceptions import DoesNotExistError +from jingrow.model.document import Document + +from jcloud.agent import Agent + + +class PhysicalBackupGroupSite(Document): + # begin: 
auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + backup: DF.Link | None + backup_available: DF.Check + db_size: DF.Int + duration_seconds: DF.Int + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + site: DF.Link + status: DF.Literal["Pending", "Running", "Success", "Failure"] + # end: auto-generated types + + def set_db_size(self): + if self.db_size: + return + pg = jingrow.get_pg("Site", self.site) + self.db_size = pg.current_usage["database"] + self.save() + + def sync(self): + if not self.backup: + return + try: + backup = jingrow.get_pg("Site Backup", self.backup) + if backup.database_snapshot: + # sync status of snapshot + jingrow.get_pg("Virtual Disk Snapshot", backup.database_snapshot).sync() + backup.reload() + if backup.status == "Pending": + self.status = "Running" + else: + self.status = backup.status + self.backup_available = backup.files_availability == "Available" + except DoesNotExistError: + self.backup = None + self.backup_available = False + self.save() + + def physical_backup(self): + start_time = time.time() + site = jingrow.get_pg("Site", self.site) + agent = Agent(site.server) + try: + deactivate_job = agent.deactivate_site(site) + deactivate_job_status = deactivate_job.status + while True: + deactivate_job_status = jingrow.get_value("Agent Job", deactivate_job.name, "status") + jingrow.db.commit() + if deactivate_job_status in ("Success", "Failure", "Delivery Failure"): + break + time.sleep(1) + + if deactivate_job_status != "Success": + self.status = "Failure" + self.save() + return + + # backup site + backup = site.physical_backup() + self.backup = backup.name + self.save() + + backup_status = backup.status + while True: + backup_status = jingrow.get_value("Site Backup", self.backup, "status") + jingrow.db.commit() + if backup_status in ("Success", "Failure"): + break + time.sleep(5) + + if backup_status == "Success": + self.status = "Success" + else: + self.status = "Failure" + duration = time.time() - start_time + self.duration_seconds = int(duration) + self.save() + except Exception: + jingrow.log_error(title="Error while bulk physical backup") + finally: + agent.activate_site(site) + + def delete_backup(self): + if not self.backup: + return + database_snapshot = jingrow.get_value("Site Backup", self.backup, "database_snapshot") + if database_snapshot: + jingrow.get_pg("Virtual Disk Snapshot", database_snapshot).delete_snapshot() + + # def on_update(self): + # if self.has_value_changed("status") and self.status in ("Success", "Failure"): + # # trigger next backup + # jingrow.enqueue_pg("Physical Backup Group", self.parent, "_trigger_next_backup", queue="default") diff --git a/jcloud/jcloud/pagetype/physical_backup_restoration/__init__.py b/jcloud/jcloud/pagetype/physical_backup_restoration/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/physical_backup_restoration/physical_backup_restoration.js b/jcloud/jcloud/pagetype/physical_backup_restoration/physical_backup_restoration.js new file mode 100644 index 0000000..c3454ed --- /dev/null +++ b/jcloud/jcloud/pagetype/physical_backup_restoration/physical_backup_restoration.js @@ -0,0 +1,36 @@ +// Copyright (c) 2025, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Physical Backup Restoration', { + refresh(frm) { + if (frm.is_new()) { + return; + } + + [ + [__('Start'), 'execute', frm.pg.status 
=== 'Pending', false], + [__('Force Continue'), 'force_continue', true], + [__('Cleanup'), 'cleanup', frm.pg.status === 'Failure', true], + [__('Force Fail'), 'force_fail', frm.pg.status === 'Running', false], + ].forEach(([label, method, condition, grouped]) => { + if (condition) { + frm.add_custom_button( + label, + () => { + jingrow.confirm( + `Are you sure you want to ${label.toLowerCase()}?`, + () => + frm + .call(method, { + freeze: true, + freeze_message: __('Please wait...'), + }) + .then(() => frm.refresh()), + ); + }, + grouped ? __('Actions') : null, + ); + } + }); + }, +}); diff --git a/jcloud/jcloud/pagetype/physical_backup_restoration/physical_backup_restoration.json b/jcloud/jcloud/pagetype/physical_backup_restoration/physical_backup_restoration.json new file mode 100644 index 0000000..f23a0f6 --- /dev/null +++ b/jcloud/jcloud/pagetype/physical_backup_restoration/physical_backup_restoration.json @@ -0,0 +1,220 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2025-01-10 13:02:39.393157", + "pagetype": "PageType", + "engine": "InnoDB", + "field_order": [ + "site", + "status", + "site_backup", + "disk_snapshot", + "column_break_zind", + "job", + "volume", + "device", + "mount_point", + "section_break_pqgo", + "source_database", + "column_break_kaja", + "destination_database", + "destination_server", + "section_break_gthb", + "restore_specific_tables", + "tables_to_restore", + "section_break_swxv", + "start", + "column_break_xqdd", + "end", + "column_break_qosz", + "duration", + "section_break_aqam", + "steps", + "section_break_weie", + "physical_restoration_test" + ], + "fields": [ + { + "fieldname": "site", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Site", + "options": "Site", + "reqd": 1, + "set_only_once": 1 + }, + { + "fieldname": "column_break_zind", + "fieldtype": "Column Break" + }, + { + "default": "Pending", + "fieldname": "status", + "fieldtype": "Select", + "in_list_view": 1, + "label": "Status", + "options": "Pending\nRunning\nSuccess\nFailure", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "disk_snapshot", + "fieldtype": "Link", + "label": "Disk Snapshot", + "options": "Virtual Disk Snapshot", + "read_only": 1 + }, + { + "fieldname": "site_backup", + "fieldtype": "Link", + "label": "Site Backup", + "options": "Site Backup", + "reqd": 1, + "set_only_once": 1 + }, + { + "fieldname": "section_break_pqgo", + "fieldtype": "Section Break" + }, + { + "fieldname": "source_database", + "fieldtype": "Data", + "label": "Source Database", + "reqd": 1, + "set_only_once": 1 + }, + { + "fieldname": "column_break_kaja", + "fieldtype": "Column Break" + }, + { + "fieldname": "destination_database", + "fieldtype": "Data", + "label": " Destination Database", + "reqd": 1, + "set_only_once": 1 + }, + { + "fieldname": "section_break_aqam", + "fieldtype": "Section Break" + }, + { + "fieldname": "steps", + "fieldtype": "Table", + "label": "Steps", + "options": "Physical Backup Restoration Step" + }, + { + "fieldname": "destination_server", + "fieldtype": "Link", + "label": "Destination Server", + "options": "Database Server", + "reqd": 1, + "set_only_once": 1 + }, + { + "fieldname": "volume", + "fieldtype": "Data", + "label": "Volume", + "read_only": 1 + }, + { + "fieldname": "device", + "fieldtype": "Data", + "label": "Device", + "read_only": 1 + }, + { + "fieldname": "job", + "fieldtype": "Link", + "label": "Job", + "options": "Agent Job", + "read_only": 1 + }, + { + "fieldname": "section_break_swxv", + "fieldtype": "Section Break" + }, + { + "fieldname": 
"start", + "fieldtype": "Datetime", + "label": "Start", + "read_only": 1 + }, + { + "fieldname": "column_break_xqdd", + "fieldtype": "Column Break" + }, + { + "fieldname": "end", + "fieldtype": "Datetime", + "label": "End", + "read_only": 1 + }, + { + "fieldname": "column_break_qosz", + "fieldtype": "Column Break" + }, + { + "fieldname": "duration", + "fieldtype": "Duration", + "label": "Duration", + "read_only": 1 + }, + { + "fieldname": "mount_point", + "fieldtype": "Data", + "label": "Mount Point", + "read_only": 1 + }, + { + "fieldname": "section_break_gthb", + "fieldtype": "Section Break" + }, + { + "depends_on": "eval: pg.restore_specific_tables", + "fieldname": "tables_to_restore", + "fieldtype": "JSON", + "label": "Tables To Restore" + }, + { + "default": "0", + "fieldname": "restore_specific_tables", + "fieldtype": "Check", + "label": "Restore Specific Tables" + }, + { + "fieldname": "section_break_weie", + "fieldtype": "Section Break" + }, + { + "fieldname": "physical_restoration_test", + "fieldtype": "Data", + "label": "Physical Restoration Test" + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2025-02-18 17:10:29.624598", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Physical Backup Restoration", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "creation", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/physical_backup_restoration/physical_backup_restoration.py b/jcloud/jcloud/pagetype/physical_backup_restoration/physical_backup_restoration.py new file mode 100644 index 0000000..c15112d --- /dev/null +++ b/jcloud/jcloud/pagetype/physical_backup_restoration/physical_backup_restoration.py @@ -0,0 +1,737 @@ +# Copyright (c) 2025, JINGROW +# For license information, please see license.txt + +from __future__ import annotations + +import contextlib +import json +import os +import time +from enum import Enum +from typing import TYPE_CHECKING, Callable + +import jingrow +from jingrow.model.document import Document + +from jcloud.agent import Agent +from jcloud.jcloud.pagetype.ansible_console.ansible_console import AnsibleAdHoc +from jcloud.jcloud.pagetype.physical_restoration_test.physical_restoration_test import trigger_next_restoration + +if TYPE_CHECKING: + from jcloud.jcloud.pagetype.physical_backup_restoration_step.physical_backup_restoration_step import ( + PhysicalBackupRestorationStep, + ) + from jcloud.jcloud.pagetype.virtual_disk_snapshot.virtual_disk_snapshot import VirtualDiskSnapshot + from jcloud.jcloud.pagetype.virtual_machine.virtual_machine import VirtualMachine + +StepStatus = Enum("StepStatus", ["Pending", "Running", "Skipped", "Success", "Failure"]) + + +class PhysicalBackupRestoration(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + from jcloud.jcloud.pagetype.physical_backup_restoration_step.physical_backup_restoration_step import ( + PhysicalBackupRestorationStep, + ) + + destination_database: DF.Data + destination_server: DF.Link + device: DF.Data | None + disk_snapshot: DF.Link | None + duration: DF.Duration | None + end: DF.Datetime | None + job: DF.Link | None + mount_point: DF.Data | None + physical_restoration_test: DF.Data | None + restore_specific_tables: DF.Check + site: DF.Link + site_backup: DF.Link + source_database: DF.Data + start: DF.Datetime | None + status: DF.Literal["Pending", "Running", "Success", "Failure"] + steps: DF.Table[PhysicalBackupRestorationStep] + tables_to_restore: DF.JSON | None + volume: DF.Data | None + # end: auto-generated types + + @property + def virtual_machine(self) -> VirtualMachine: + """Get virtual machine of destination server.""" + return jingrow.get_pg( + "Virtual Machine", jingrow.get_value("Database Server", self.destination_server, "virtual_machine") + ) + + @property + def migration_steps(self): + Wait = True + NoWait = False + SyncStep = False + AsyncStep = True + GeneralStep = False + CleanupStep = True + methods = [ + (self.wait_for_pending_snapshot_to_be_completed, Wait, SyncStep, GeneralStep), + (self.create_volume_from_snapshot, NoWait, SyncStep, GeneralStep), + (self.wait_for_volume_to_be_available, Wait, SyncStep, GeneralStep), + (self.attach_volume_to_instance, NoWait, SyncStep, GeneralStep), + (self.create_mount_point, NoWait, SyncStep, GeneralStep), + (self.mount_volume_to_instance, NoWait, SyncStep, GeneralStep), + (self.change_permission_of_backup_directory, NoWait, SyncStep, GeneralStep), + (self.change_permission_of_database_directory, NoWait, SyncStep, GeneralStep), + (self.restore_database, Wait, AsyncStep, GeneralStep), + (self.rollback_permission_of_database_directory, NoWait, SyncStep, CleanupStep), + (self.unmount_volume_from_instance, NoWait, SyncStep, CleanupStep), + (self.delete_mount_point, NoWait, SyncStep, CleanupStep), + (self.detach_volume_from_instance, NoWait, SyncStep, CleanupStep), + (self.wait_for_volume_to_be_detached, Wait, SyncStep, CleanupStep), + (self.delete_volume, NoWait, SyncStep, CleanupStep), + ] + + steps = [] + for method, wait_for_completion, is_async, is_cleanup_step in methods: + steps.append( + { + "step": method.__pg__, + "method": method.__name__, + "wait_for_completion": wait_for_completion, + "is_async": is_async, + "is_cleanup_step": is_cleanup_step, + } + ) + return steps + + def before_insert(self): + self.validate_aws_only() + self.set_disk_snapshot() + self.validate_snapshot_region() + self.validate_snapshot_status() + self.cleanup_restorable_tables() + + def after_insert(self): + self.set_mount_point() + self.add_steps() + self.save() + + def on_update(self): + if self.has_value_changed("status") and self.status in ["Success", "Failure"]: + from jcloud.jcloud.pagetype.site_update.site_update import ( + process_physical_backup_restoration_status_update, + ) + + process_physical_backup_restoration_status_update(self.name) + + if self.physical_restoration_test: + trigger_next_restoration(self.physical_restoration_test) + + def validate_aws_only(self): + server_provider = jingrow.db.get_value("Database Server", self.destination_server, "provider") + if server_provider != "AWS EC2": + jingrow.throw("Only AWS provider is supported currently.") + + def set_disk_snapshot(self): + if not self.disk_snapshot: + 
self.disk_snapshot = jingrow.get_value("Site Backup", self.site_backup, "database_snapshot") + if not self.disk_snapshot: + jingrow.throw("Disk Snapshot is not available in site backup") + + def validate_snapshot_region(self): + snapshot_region = jingrow.db.get_value("Virtual Disk Snapshot", self.disk_snapshot, "region") + if snapshot_region != self.virtual_machine.region: + jingrow.throw("Snapshot and server should be in same region.") + + def validate_snapshot_status(self): + snapshot_status = jingrow.db.get_value("Virtual Disk Snapshot", self.disk_snapshot, "status") + if snapshot_status not in ("Pending", "Completed"): + jingrow.throw("Snapshot status should be Pending or Completed.") + + def cleanup_restorable_tables(self): + if not self.restore_specific_tables: + self.tables_to_restore = "[]" + return + + tables_to_restore = [] + with contextlib.suppress(Exception): + tables_to_restore = json.loads(self.tables_to_restore) + + # If restore_specific_tables is checked, raise error if tables_to_restore is empty + if not tables_to_restore: + jingrow.throw("You must provide at least one table to restore.") + + def set_mount_point(self): + self.mount_point = f"/mnt/{self.name}" + + def wait_for_pending_snapshot_to_be_completed(self) -> StepStatus: + """Wait for pending snapshot to be completed""" + snapshot: VirtualDiskSnapshot = jingrow.get_pg("Virtual Disk Snapshot", self.disk_snapshot) + snapshot.sync() + + if snapshot.status == "Completed": + return StepStatus.Success + if snapshot.status == "Pending": + return StepStatus.Pending + return StepStatus.Failure + + def create_volume_from_snapshot(self) -> StepStatus: + """Create volume from snapshot""" + snapshot: VirtualDiskSnapshot = jingrow.get_pg("Virtual Disk Snapshot", self.disk_snapshot) + self.volume = snapshot.create_volume( + availability_zone=self.virtual_machine.availability_zone, throughput=600, iops=3000 + ) + return StepStatus.Success + + def wait_for_volume_to_be_available(self) -> StepStatus: + """Wait for volume to be available""" + status = self.virtual_machine.get_state_of_volume(self.volume) + # https://docs.aws.amazon.com/ebs/latest/userguide/ebs-describing-volumes.html + if status == "available": + return StepStatus.Success + if status == "creating": + return StepStatus.Pending + return StepStatus.Failure + + def attach_volume_to_instance(self) -> StepStatus: + """Attach volume to instance""" + self.device = self.virtual_machine.attach_volume(self.volume) + return StepStatus.Success + + def create_mount_point(self) -> StepStatus: + """Create mount point""" + result = self.ansible_run(f"mkdir -p {self.mount_point}") + if result["status"] == "Success": + return StepStatus.Success + return StepStatus.Failure + + def mount_volume_to_instance(self) -> StepStatus: # noqa: C901 + """Mount volume to instance""" + + """ + > Find out the disk name + + If the disk name is /dev/sdg, it might be renamed to /dev/xvdg in the instance. + + Next, If the volume was created from a snapshot of root volume, the volume will have multiple partitions. 
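On NVMe-backed instances the attached volume may not show up under any /dev/sd* or /dev/xvd* name at all; it is identified by its serial instead, which is the volume ID with the dashes stripped and lowercased (for example, vol-0784b4423604486ea appears as serial "vol0784b4423604486ea" in the dummy output below). That is how disk_serial is derived in the code after this docstring.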
+ + > lsblk --json -o name,fstype,type,label,serial,size -b + + > Dummy output + + { + "blockdevices":[ + { "name":"loop0", "fstype":null, "type": "loop", "label": null, "size": 16543383 }, + { "name":"loop1", "fstype":null, "type": "loop", "label": null, "size": 16543383 }, + { "name":"loop2", "fstype":null, "type": "loop", "label": null, "size": 16543383 }, + { "name":"loop3", "fstype":null, "type": "loop", "label": null, "size": 16543383 }, + { "name":"loop4", "fstype":null, "type": "loop", "label": null, "size": 16543383 }, + { + "name":"xvda","fstype":null, "type": "disk", "label": null, "size": 4294966784 + "children":[ + { + "name":"xvda1", + "fstype":"ext4", + "type":"part", + "label":"cloudimg-rootfs", + "size": 4294966784 + }, + { + "name":"xvda14", + "fstype":null, + "type":"part", + "label":null, + "size": 123345 + }, + { + "name":"xvda15", + "fstype":"vfat", + "type":"part", + "label":"UEFI", + "size": 124553 + } + ] + }, + {"name":"nvme0n1", "fstype":null, "type":"disk", "label":null, "serial":"vol0784b4423604486ea", "size": 4294966784 + "children": [ + {"name":"nvme0n1p1", "fstype":"ext4", "type":"part", "label":"cloudimg-rootfs", "serial":null, "size": 4123906784}, + {"name":"nvme0n1p14", "fstype":null, "type":"part", "label":null, "serial":null "size": 234232}, + {"name":"nvme0n1p15", "fstype":"vfat", "type":"part", "label":"UEFI", "serial":null, "size": 124553} + ] + } + ] + } + + """ + result = self.ansible_run("lsblk --json -o name,fstype,type,label,serial,size -b") + if result["status"] != "Success": + return StepStatus.Failure + + devices_info_str: str = result["output"] + devices_info = json.loads(devices_info_str)["blockdevices"] + + disk_name = self.device.split("/")[-1] # /dev/sdf -> sdf + + # If disk name is sdf, it might be possible mounted as xvdf + # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/device_naming.html#device-name-limits + possible_disks = [disk_name, "xvd{}".format(disk_name.lstrip("sd")[-1])] + disk_serial = self.volume.replace("-", "").lower() + disk_partition_to_mount = None + + for device_info in devices_info: + if device_info["type"] not in ["disk", "part"]: + continue + + # Check for nvme disks + is_disk_found = ( + device_info["name"].startswith("nvme") and device_info.get("serial") == disk_serial + ) + # check for normal disks + if not is_disk_found: + for possible_disk in possible_disks: + if device_info["name"] == possible_disk: + is_disk_found = True + break + + # If disk is not found, then continue to next disk + if not is_disk_found: + continue + + # If the volume was created from a snapshot of data volume + # the volume will have only one partition. 
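+ # Partition selection, summarising the branches below:
+ # - a matched "part" entry            -> mount that partition directly
+ # - a matched "disk" with no children -> mount the whole disk
+ # - a matched "disk" with children    -> prefer a partition labelled
+ #   "cloudimg-rootfs" / "old-rootfs", otherwise fall back to the largest partition above 1 GB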
+ if device_info["type"] == "part": + disk_partition_to_mount = "/dev/{}".format(device_info["name"]) + break + + if device_info["type"] == "disk": + children = device_info.get("children", []) + if len(children) == 0: + # Disk doesn't have any partitions, mount the disk directly + disk_partition_to_mount = "/dev/{}".format(device_info["name"]) + else: + # Disk has multiple partitions, so find the correct partition + largest_partition_size = 1073741824 # 1GB | Disk partition should be larger than 1GB + largest_partition = None + # try to find the partition with label cloudimg-rootfs or old-rootfs + for child in children: + if child["size"] > largest_partition_size: + largest_partition_size = child["size"] + largest_partition = child["name"] + + if child["label"] == "cloudimg-rootfs" or child["label"] == "old-rootfs": + disk_partition_to_mount = "/dev/{}".format(child["name"]) + break + + # If the partitions are not labeled, try to find largest partition + if not disk_partition_to_mount and largest_partition is not None: + disk_partition_to_mount = f"/dev/{largest_partition}" + break + + if disk_partition_to_mount: + break + + if not disk_partition_to_mount: + self.log_error( + title="Not able to find disk partition to mount", + message=f"Disk name: {disk_name}, Possible disks: {possible_disks} or with serial {disk_serial}", + ) + return StepStatus.Failure + + mount_response = self.ansible_run(f"mount {disk_partition_to_mount} {self.mount_point}") + if mount_response["status"] != "Success": + return StepStatus.Failure + return StepStatus.Success + + def change_permission_of_backup_directory(self) -> StepStatus: + """Change permission of backup files""" + base_path = os.path.join(self.mount_point, "var/lib/mysql") + result = self.ansible_run(f"chmod 777 {base_path}") + if result["status"] == "Success": + db_path = os.path.join(self.mount_point, "var/lib/mysql", self.source_database) + result = self.ansible_run(f"chmod -R 777 {db_path}") + if result["status"] == "Success": + return StepStatus.Success + return StepStatus.Failure + + def change_permission_of_database_directory(self) -> StepStatus: + """Change permission of database directory""" + result = self.ansible_run(f"chmod 770 /var/lib/mysql/{self.destination_database}") + if result["status"] == "Success": + return StepStatus.Success + return StepStatus.Failure + + def restore_database(self) -> StepStatus: + """Restore database""" + if not self.job: + site = jingrow.get_pg("Site", self.site) + agent = Agent(self.destination_server, "Database Server") + self.job = agent.physical_restore_database(site, self) + return StepStatus.Pending + job_status = jingrow.db.get_value("Agent Job", self.job, "status") + if job_status in ["Undelivered", "Running", "Pending"]: + return StepStatus.Pending + if job_status == "Success": + return StepStatus.Success + return StepStatus.Failure + + def rollback_permission_of_database_directory(self) -> StepStatus: + """Rollback permission of database directory""" + + # Docs > https://mariadb.com/kb/en/specifying-permissions-for-schema-data-directories-and-tables/ + # Directory > 700 and File > 660 + + result = self.ansible_run( + f"chmod -R 660 /var/lib/mysql/{self.destination_database} && chmod 700 /var/lib/mysql/{self.destination_database} && chown -R mysql:mysql /var/lib/mysql/{self.destination_database}" + ) + if result["status"] == "Success": + return StepStatus.Success + return StepStatus.Failure + + def unmount_volume_from_instance(self) -> StepStatus: + """Unmount volume from instance""" + if 
self.get_step_status(self.mount_volume_to_instance) != StepStatus.Success.name: + return StepStatus.Success + response = self.ansible_run(f"umount {self.mount_point}") + if response["status"] != "Success": + return StepStatus.Failure + return StepStatus.Success + + def delete_mount_point(self) -> StepStatus: + """Delete mount point""" + if not self.mount_point or not self.mount_point.startswith("/mnt"): + jingrow.throw("Mount point is not valid.") + # check if mount point was created + if self.get_step_status(self.create_mount_point) != "Success": + return StepStatus.Success + response = self.ansible_run(f"rm -rf {self.mount_point}") + if response["status"] != "Success": + return StepStatus.Failure + return StepStatus.Success + + def detach_volume_from_instance(self) -> StepStatus: + """Detach volume from instance""" + # check if volume was attached + if not self.volume or self.get_step_status(self.attach_volume_to_instance) != "Success": + return StepStatus.Success + state = self.virtual_machine.get_state_of_volume(self.volume) + if state != "in-use": + return StepStatus.Success + self.virtual_machine.detach(self.volume) + return StepStatus.Success + + def wait_for_volume_to_be_detached(self) -> StepStatus: + """Wait for volume to be detached""" + if not self.volume: + return StepStatus.Success + state = self.virtual_machine.get_state_of_volume(self.volume) + if state in ["available", "deleting", "deleted"]: + return StepStatus.Success + if state == "error": + return StepStatus.Failure + return StepStatus.Pending + + def delete_volume(self) -> StepStatus: + """Delete volume""" + if ( + not self.volume + or self.get_step_status(self.create_volume_from_snapshot) != StepStatus.Success.name + ): + return StepStatus.Success + state = self.virtual_machine.get_state_of_volume(self.volume) + if state in ["deleting", "deleted"]: + return StepStatus.Success + self.virtual_machine.client().delete_volume(VolumeId=self.volume) + return StepStatus.Success + + def is_db_files_modified_during_failed_restoration(self): + if self.status != "Failure": + return False + # Check if Restore Database job has created + if not self.job: + return False + # Check if Restore Database job has failed + job_status = jingrow.db.get_value("Agent Job", self.job, "status") + if job_status == "Failure": + job_steps = jingrow.get_all( + "Agent Job Step", + filters={ + "agent_job": self.job, + }, + fields=["step_name", "status"], + order_by="creation asc", + ) + """ + [ + {'step_name': 'Validate Backup Files', 'status': 'Success'}, + {'step_name': 'Validate Connection to Target Database', 'status': 'Success'}, + {'step_name': 'Warmup MyISAM Files', 'status': 'Success'}, + {'step_name': 'Check and Fix MyISAM Table Files', 'status': 'Success'}, + {'step_name': 'Warmup InnoDB Files', 'status': 'Success'}, + {'step_name': 'Prepare Database for Restoration', 'status': 'Success'}, + {'step_name': 'Create Tables from Table Schema', 'status': 'Success'}, + {'step_name': 'Discard InnoDB Tablespaces', 'status': 'Success'}, + {'step_name': 'Copying InnoDB Table Files', 'status': 'Success'}, + {'step_name': 'Import InnoDB Tablespaces', 'status': 'Success'}, + {'step_name': 'Hold Write Lock on MyISAM Tables', 'status': 'Success'}, + {'step_name': 'Copying MyISAM Table Files', 'status': 'Success'}, + {'step_name': 'Unlock All Tables', 'status': 'Success'} + ] + """ + # Check on which step the job has failed + # Anything on after `Prepare Database for Restoration` is considered as full restoration required + first_failed_step = None + for 
step in job_steps: + if step["status"] == "Failure": + first_failed_step = step + break + if first_failed_step and first_failed_step["step_name"] in [ + "Create Tables from Table Schema", + "Discard InnoDB Tablespaces", + "Copying InnoDB Table Files", + "Import InnoDB Tablespaces", + "Hold Write Lock on MyISAM Tables", + "Copying MyISAM Table Files", + "Unlock All Tables", + ]: + return True + return False + + def get_step_status(self, step_method: Callable) -> str: + step = self.get_step_by_method(step_method.__name__) + return step.status if step else "Pending" + + def add_steps(self): + for step in self.migration_steps: + step.update({"status": "Pending"}) + self.append("steps", step) + + @jingrow.whitelist() + def execute(self): + # If restore_specific_tables was provided, but no tables are there to restore, then skip the restore + if self.restore_specific_tables: + try: + restorable_tables = json.loads(self.tables_to_restore) + except Exception: + restorable_tables = [] + if len(restorable_tables) == 0: + self.status = "Success" + for step in self.steps: + step.status = "Skipped" + self.save() + return + # Else, continue with the restoration + self.status = "Running" + self.start = jingrow.utils.now_datetime() + self.save() + self.next() + + def fail(self) -> None: + self.status = "Failure" + for step in self.steps: + if step.status == "Pending": + step.status = "Skipped" + self.end = jingrow.utils.now_datetime() + self.duration = jingrow.utils.cint((self.end - self.start).total_seconds()) + self.save() + self.cleanup() + + def finish(self) -> None: + self.status = "Success" + # If any step is failed, then mark the job as failed + for step in self.steps: + if step.status == "Failure": + self.status = "Failure" + self.end = jingrow.utils.now_datetime() + self.duration = jingrow.utils.cint((self.end - self.start).total_seconds()) + self.save() + + @jingrow.whitelist() + def next(self, ignore_version=False) -> None: + self.status = "Running" + self.save(ignore_version=ignore_version) + next_step = self.next_step + + if not next_step: + # We've executed everything + self.finish() + return + + jingrow.enqueue_pg( + self.pagetype, + self.name, + "execute_step", + step_name=next_step.name, + enqueue_after_commit=True, + at_front=True, + ) + + @jingrow.whitelist() + def cleanup(self): + is_cleanup_required = False + for step in self.steps: + # Mark the pending non-cleanup steps as skipped + if not step.is_cleanup_step and step.status == "Pending": + step.status = "Skipped" + + # Mark the cleanup steps with non-failure status as pending + if step.is_cleanup_step and step.status != "Failure": + step.status = "Pending" + is_cleanup_required = True + + if is_cleanup_required: + self.next() + + @jingrow.whitelist() + def force_continue(self) -> None: + first_failed_step: PhysicalBackupRestorationStep = None + # Mark all failed and skipped steps as pending + for step in self.steps: + if step.status in ("Failure", "Skipped"): + if not first_failed_step: + first_failed_step = step + step.status = "Pending" + + # If the job was failed in Restore Database step, then reset the job + if first_failed_step and first_failed_step.method == self.restore_database.__name__: + self.job = None + self.next() + + @jingrow.whitelist() + def force_fail(self) -> None: + # Mark all pending steps as failure + for step in self.steps: + if step.status == "Pending": + step.status = "Failure" + self.status = "Failure" + self.save() + + @property + def next_step(self) -> PhysicalBackupRestorationStep | None: + for step in 
self.steps: + if step.status == "Pending": + return step + return None + + @jingrow.whitelist() + def execute_step(self, step_name): + step = self.get_step(step_name) + + if not step.start: + step.start = jingrow.utils.now_datetime() + step.status = "Running" + ignore_version_while_saving = False + try: + result = getattr(self, step.method)() + step.status = result.name + if step.is_async and result == StepStatus.Pending: + self.save(ignore_version=True) + return + if step.wait_for_completion: + step.attempts = step.attempts + 1 + if result == StepStatus.Pending: + # Wait some time before the next run + ignore_version_while_saving = True + time.sleep(1) + except Exception: + step.status = "Failure" + step.traceback = jingrow.get_traceback(with_context=True) + + step.end = jingrow.utils.now_datetime() + step.duration = (step.end - step.start).total_seconds() + + if step.status == "Failure": + self.fail() + else: + self.next(ignore_version_while_saving) + + def get_step(self, step_name) -> PhysicalBackupRestorationStep | None: + for step in self.steps: + if step.name == step_name: + return step + return None + + def get_step_by_method(self, method_name) -> PhysicalBackupRestorationStep | None: + for step in self.steps: + if step.method == method_name: + return step + return None + + def ansible_run(self, command): + inventory = f"{self.virtual_machine.public_ip_address}," + result = AnsibleAdHoc(sources=inventory).run(command, self.name)[0] + self.add_command(command, result) + return result + + def add_command(self, command, result): + pretty_result = json.dumps(result, indent=2, sort_keys=True, default=str) + comment = f"
{command}
{pretty_result}
" + self.add_comment(text=comment) + + +def process_job_update(job): + if job.reference_pagetype != "Physical Backup Restoration": + return + + pg: PhysicalBackupRestoration = jingrow.get_pg("Physical Backup Restoration", job.reference_name) + if job.status in ["Success", "Failure", "Delivery Failure"]: + pg.next(ignore_version=True) + + +def get_physical_backup_restoration_steps(name: str) -> list[dict]: + """ + { + "title": "Step Name", + "status": "Success", + "output": "Output", + "stage": "Restore Backup" + } + """ + steps = jingrow.get_all( + "Physical Backup Restoration Step", + filters={"parent": name}, + fields=["step", "status", "name", "creation"], + order_by="idx asc", + ) + job_name = jingrow.db.get_value("Physical Backup Restoration", name, "job") + steps = [ + { + "title": step["step"], + "status": step["status"], + "output": "", + "stage": "Restore Backup", + "name": step["name"], + } + for step in steps + ] + job_steps = [] + if job_name: + job_steps = jingrow.get_all( + "Agent Job Step", + filters={"agent_job": job_name}, + fields=["output", "step_name", "status", "name"], + order_by="creation asc", + ) + if steps: + index_of_restore_database_step = None + for index, step in enumerate(steps): + if step["title"] == "Restore database": + index_of_restore_database_step = index + break + if index_of_restore_database_step is not None: + job_steps = [ + { + "title": step.get("step_name"), + "status": step.get("status"), + "output": step.get("output"), + "stage": "Restore Backup", + } + for step in job_steps + ] + steps = ( + steps[:index_of_restore_database_step] + + job_steps + + steps[index_of_restore_database_step + 1 :] + ) + return steps diff --git a/jcloud/jcloud/pagetype/physical_backup_restoration/test_physical_backup_restoration.py b/jcloud/jcloud/pagetype/physical_backup_restoration/test_physical_backup_restoration.py new file mode 100644 index 0000000..91abb71 --- /dev/null +++ b/jcloud/jcloud/pagetype/physical_backup_restoration/test_physical_backup_restoration.py @@ -0,0 +1,29 @@ +# Copyright (c) 2025, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests import IntegrationTestCase, UnitTestCase + +# On IntegrationTestCase, the pagetype test records and all +# link-field test record depdendencies are recursively loaded +# Use these module variables to add/remove to/from that list +EXTRA_TEST_RECORD_DEPENDENCIES = [] # eg. ["User"] +IGNORE_TEST_RECORD_DEPENDENCIES = [] # eg. ["User"] + + +class UnitTestPhysicalBackupRestoration(UnitTestCase): + """ + Unit tests for PhysicalBackupRestoration. + Use this class for testing individual functions and methods. + """ + + pass + + +class IntegrationTestPhysicalBackupRestoration(IntegrationTestCase): + """ + Integration tests for PhysicalBackupRestoration. + Use this class for testing interactions between multiple components. 
+ """ + + pass diff --git a/jcloud/jcloud/pagetype/physical_backup_restoration_step/__init__.py b/jcloud/jcloud/pagetype/physical_backup_restoration_step/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/physical_backup_restoration_step/physical_backup_restoration_step.json b/jcloud/jcloud/pagetype/physical_backup_restoration_step/physical_backup_restoration_step.json new file mode 100644 index 0000000..fcea290 --- /dev/null +++ b/jcloud/jcloud/pagetype/physical_backup_restoration_step/physical_backup_restoration_step.json @@ -0,0 +1,114 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2025-01-10 13:15:34.497717", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "step", + "status", + "method", + "column_break_ahqu", + "start", + "end", + "duration", + "column_break_qtmf", + "wait_for_completion", + "is_async", + "attempts", + "is_cleanup_step", + "section_break_vyao", + "traceback" + ], + "fields": [ + { + "fieldname": "status", + "fieldtype": "Select", + "in_list_view": 1, + "label": "Status", + "options": "Pending\nRunning\nSkipped\nSuccess\nFailure", + "reqd": 1 + }, + { + "default": "0", + "fieldname": "wait_for_completion", + "fieldtype": "Check", + "label": "Wait For Completion" + }, + { + "fieldname": "step", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Step", + "reqd": 1 + }, + { + "fieldname": "method", + "fieldtype": "Data", + "label": "Method", + "reqd": 1 + }, + { + "fieldname": "column_break_ahqu", + "fieldtype": "Column Break" + }, + { + "fieldname": "column_break_qtmf", + "fieldtype": "Column Break" + }, + { + "fieldname": "start", + "fieldtype": "Datetime", + "label": "Start" + }, + { + "fieldname": "end", + "fieldtype": "Datetime", + "label": "End" + }, + { + "fieldname": "duration", + "fieldtype": "Duration", + "label": "Duration" + }, + { + "fieldname": "attempts", + "fieldtype": "Int", + "label": "Attempts" + }, + { + "fieldname": "section_break_vyao", + "fieldtype": "Section Break" + }, + { + "fieldname": "traceback", + "fieldtype": "Code", + "label": "Traceback" + }, + { + "default": "0", + "fieldname": "is_async", + "fieldtype": "Check", + "label": "Is Async" + }, + { + "default": "0", + "fieldname": "is_cleanup_step", + "fieldtype": "Check", + "label": "Is Cleanup Step" + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2025-01-20 16:48:34.338658", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Physical Backup Restoration Step", + "owner": "Administrator", + "permissions": [], + "sort_field": "creation", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/physical_backup_restoration_step/physical_backup_restoration_step.py b/jcloud/jcloud/pagetype/physical_backup_restoration_step/physical_backup_restoration_step.py new file mode 100644 index 0000000..bd79d37 --- /dev/null +++ b/jcloud/jcloud/pagetype/physical_backup_restoration_step/physical_backup_restoration_step.py @@ -0,0 +1,35 @@ +# Copyright (c) 2025, JINGROW +# For license information, please see license.txt + +from __future__ import annotations + +# import jingrow +from jingrow.model.document import Document + + +class PhysicalBackupRestorationStep(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + attempts: DF.Int + duration: DF.Duration | None + end: DF.Datetime | None + is_async: DF.Check + is_cleanup_step: DF.Check + method: DF.Data + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + start: DF.Datetime | None + status: DF.Literal["Pending", "Running", "Skipped", "Success", "Failure"] + step: DF.Data + traceback: DF.Code | None + wait_for_completion: DF.Check + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/physical_restoration_test/__init__.py b/jcloud/jcloud/pagetype/physical_restoration_test/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/physical_restoration_test/physical_restoration_test.js b/jcloud/jcloud/pagetype/physical_restoration_test/physical_restoration_test.js new file mode 100644 index 0000000..5105606 --- /dev/null +++ b/jcloud/jcloud/pagetype/physical_restoration_test/physical_restoration_test.js @@ -0,0 +1,27 @@ +// Copyright (c) 2025, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Physical Restoration Test', { + refresh(frm) { + if (frm.is_new()) { + return; + } + + [ + [__('Sync'), 'sync', false], + [__('Start / Resume'), 'start', true], + [__('Reset Failed Restorations'), 'reset_failed_restorations', true], + ].forEach(([label, method, grouped]) => { + frm.add_custom_button( + label, + () => { + jingrow.confirm( + `Are you sure you want to ${label.toLowerCase()}?`, + () => frm.call(method).then(() => frm.refresh()), + ); + }, + grouped ? __('Actions') : null, + ); + }); + }, +}); diff --git a/jcloud/jcloud/pagetype/physical_restoration_test/physical_restoration_test.json b/jcloud/jcloud/pagetype/physical_restoration_test/physical_restoration_test.json new file mode 100644 index 0000000..7db14e0 --- /dev/null +++ b/jcloud/jcloud/pagetype/physical_restoration_test/physical_restoration_test.json @@ -0,0 +1,98 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2025-02-18 14:30:59.285081", + "pagetype": "PageType", + "engine": "InnoDB", + "field_order": [ + "backup_group", + "test_site", + "max_restorations", + "column_break_ctsy", + "destination_database", + "destination_server", + "completed", + "section_break_nipr", + "results" + ], + "fields": [ + { + "fieldname": "column_break_ctsy", + "fieldtype": "Column Break" + }, + { + "fieldname": "test_site", + "fieldtype": "Link", + "label": "Test Site", + "options": "Site", + "reqd": 1 + }, + { + "fieldname": "section_break_nipr", + "fieldtype": "Section Break" + }, + { + "fieldname": "results", + "fieldtype": "Table", + "label": "Results", + "options": "Physical Restoration Test Result" + }, + { + "fetch_from": "test_site.database_name", + "fieldname": "destination_database", + "fieldtype": "Data", + "label": " Destination Database" + }, + { + "fieldname": "destination_server", + "fieldtype": "Link", + "label": "Destination Server", + "options": "Database Server", + "read_only": 1 + }, + { + "fieldname": "max_restorations", + "fieldtype": "Int", + "label": "Max Restorations", + "reqd": 1 + }, + { + "default": "0", + "fieldname": "completed", + "fieldtype": "Check", + "label": "Completed" + }, + { + "fieldname": "backup_group", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Backup Group", + "options": "Physical Backup Group", + "reqd": 1 + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2025-02-18 16:41:44.401442", + "modified_by": "Administrator", + "module": 
"Jcloud", + "name": "Physical Restoration Test", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "creation", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/physical_restoration_test/physical_restoration_test.py b/jcloud/jcloud/pagetype/physical_restoration_test/physical_restoration_test.py new file mode 100644 index 0000000..3635c3b --- /dev/null +++ b/jcloud/jcloud/pagetype/physical_restoration_test/physical_restoration_test.py @@ -0,0 +1,134 @@ +# Copyright (c) 2025, JINGROW +# For license information, please see license.txt + +from __future__ import annotations + +from typing import TYPE_CHECKING + +import jingrow +from jingrow.model.document import Document + +if TYPE_CHECKING: + from jcloud.jcloud.pagetype.physical_backup_group.physical_backup_group import PhysicalBackupGroup + from jcloud.jcloud.pagetype.site_update.site_update import PhysicalBackupRestoration + + +class PhysicalRestorationTest(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + from jcloud.jcloud.pagetype.physical_restoration_test_result.physical_restoration_test_result import ( + PhysicalRestorationTestResult, + ) + + backup_group: DF.Link + completed: DF.Check + destination_database: DF.Data | None + destination_server: DF.Link | None + max_restorations: DF.Int + results: DF.Table[PhysicalRestorationTestResult] + test_site: DF.Link + # end: auto-generated types + + def validate(self): + if not self.destination_server: + self.destination_server = jingrow.get_value( + "Server", jingrow.get_value("Site", self.test_site, "server"), "database_server" + ) + if self.is_new(): + pass + + def after_insert(self): + backup_group: PhysicalBackupGroup = jingrow.get_pg("Physical Backup Group", self.backup_group) + # set max restorations + if not self.max_restorations or self.max_restorations > len(backup_group.site_backups): + self.max_restorations = len(backup_group.site_backups) + + # populate results table + records = backup_group.site_backups[: self.max_restorations] + for record in records: + self.append( + "results", + { + "site": record.site, + "db_size_mb": record.db_size, + "restore_record": self._create_restoration_record(record.backup).name, + "status": "Pending", + }, + ) + + self.save() + + def _create_restoration_record(self, site_backup: str) -> PhysicalBackupRestoration: + return jingrow.get_pg( + { + "pagetype": "Physical Backup Restoration", + "site": self.test_site, + "status": "Pending", + "site_backup": site_backup, + "source_database": jingrow.db.get_value("Site Backup", site_backup, "database_name"), + "destination_database": self.destination_database, + "destination_server": self.destination_server, + "restore_specific_tables": False, + "tables_to_restore": "[]", + "physical_restoration_test": self.name, + } + ).insert(ignore_permissions=True) + + @jingrow.whitelist() + def start(self): + self.sync() + record = None + # check if there is any running restoration + for result in self.results: + if result.status == "Running": + return + + for result in self.results: + if result.status == "Pending": + record = result + break + if record: + restore_record: PhysicalBackupRestoration = jingrow.get_pg( + "Physical Backup 
Restoration", record.restore_record + ) + restore_record.execute() + record.status = "Running" + record.save() + else: + self.completed = True + self.save() + jingrow.throw("No pending restoration found") + + @jingrow.whitelist() + def sync(self): + for result in self.results: + result.save() + + @jingrow.whitelist() + def reset_failed_restorations(self): + for result in self.results: + if result.status == "Failure": + result.status = "Pending" + # find the backup from the previous restoration + site_backup = jingrow.db.get_value( + "Physical Backup Restoration", result.restore_record, "site_backup" + ) + result.restore_record = self._create_restoration_record(site_backup).name + result.duration = None + result.save() + + +def trigger_next_restoration(record_id: str): + if not jingrow.db.exists("Physical Restoration Test", record_id): + return + record: PhysicalRestorationTest = jingrow.get_pg("Physical Restoration Test", record_id) + try: + record.start() + except Exception: + jingrow.log_error("Physical Restoration Test Exception") diff --git a/jcloud/jcloud/pagetype/physical_restoration_test/test_physical_restoration_test.py b/jcloud/jcloud/pagetype/physical_restoration_test/test_physical_restoration_test.py new file mode 100644 index 0000000..31f4a30 --- /dev/null +++ b/jcloud/jcloud/pagetype/physical_restoration_test/test_physical_restoration_test.py @@ -0,0 +1,29 @@ +# Copyright (c) 2025, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests import IntegrationTestCase, UnitTestCase + +# On IntegrationTestCase, the pagetype test records and all +# link-field test record depdendencies are recursively loaded +# Use these module variables to add/remove to/from that list +EXTRA_TEST_RECORD_DEPENDENCIES = [] # eg. ["User"] +IGNORE_TEST_RECORD_DEPENDENCIES = [] # eg. ["User"] + + +class UnitTestPhysicalRestorationTest(UnitTestCase): + """ + Unit tests for PhysicalRestorationTest. + Use this class for testing individual functions and methods. + """ + + pass + + +class IntegrationTestPhysicalRestorationTest(IntegrationTestCase): + """ + Integration tests for PhysicalRestorationTest. + Use this class for testing interactions between multiple components. 
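+ (For example, a test here could create a Physical Restoration Test against a small
+ Physical Backup Group with the restorations mocked out, then assert that the results
+ table is populated and that trigger_next_restoration advances to the next pending record.)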
+ """ + + pass diff --git a/jcloud/jcloud/pagetype/physical_restoration_test_result/__init__.py b/jcloud/jcloud/pagetype/physical_restoration_test_result/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/physical_restoration_test_result/physical_restoration_test_result.json b/jcloud/jcloud/pagetype/physical_restoration_test_result/physical_restoration_test_result.json new file mode 100644 index 0000000..f0d6f25 --- /dev/null +++ b/jcloud/jcloud/pagetype/physical_restoration_test_result/physical_restoration_test_result.json @@ -0,0 +1,69 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2025-02-18 15:18:37.332620", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "status", + "duration", + "db_size_mb", + "site", + "restore_record" + ], + "fields": [ + { + "fieldname": "site", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Site", + "options": "Site", + "reqd": 1 + }, + { + "fieldname": "db_size_mb", + "fieldtype": "Int", + "in_list_view": 1, + "label": "DB Size (MB)", + "reqd": 1 + }, + { + "fieldname": "restore_record", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Restore Record", + "options": "Physical Backup Restoration", + "reqd": 1 + }, + { + "fetch_from": "restore_record.duration", + "fieldname": "duration", + "fieldtype": "Duration", + "in_list_view": 1, + "label": "Duration", + "read_only": 1 + }, + { + "fetch_from": "restore_record.status", + "fieldname": "status", + "fieldtype": "Select", + "in_list_view": 1, + "label": "Status", + "options": "Pending\nRunning\nSuccess\nFailure", + "read_only": 1 + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2025-02-18 16:59:59.685437", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Physical Restoration Test Result", + "owner": "Administrator", + "permissions": [], + "sort_field": "creation", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/physical_restoration_test_result/physical_restoration_test_result.py b/jcloud/jcloud/pagetype/physical_restoration_test_result/physical_restoration_test_result.py new file mode 100644 index 0000000..600a053 --- /dev/null +++ b/jcloud/jcloud/pagetype/physical_restoration_test_result/physical_restoration_test_result.py @@ -0,0 +1,29 @@ +# Copyright (c) 2025, JINGROW +# For license information, please see license.txt + +from __future__ import annotations + +# import jingrow +from jingrow.model.document import Document + + +class PhysicalRestorationTestResult(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + db_size_mb: DF.Int + duration: DF.Duration | None + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + restore_record: DF.Link + site: DF.Link + status: DF.Literal["Pending", "Running", "Success", "Failure"] + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/plan_change/__init__.py b/jcloud/jcloud/pagetype/plan_change/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/plan_change/plan_change.js b/jcloud/jcloud/pagetype/plan_change/plan_change.js new file mode 100644 index 0000000..383de75 --- /dev/null +++ b/jcloud/jcloud/pagetype/plan_change/plan_change.js @@ -0,0 +1,7 @@ +// Copyright (c) 2022, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Plan Change', { + // refresh: function(frm) { + // } +}); diff --git a/jcloud/jcloud/pagetype/plan_change/plan_change.json b/jcloud/jcloud/pagetype/plan_change/plan_change.json new file mode 100644 index 0000000..ec2d320 --- /dev/null +++ b/jcloud/jcloud/pagetype/plan_change/plan_change.json @@ -0,0 +1,110 @@ +{ + "actions": [], + "creation": "2022-09-22 05:40:33.194538", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "document_type", + "document_name", + "team", + "from_plan", + "to_plan", + "column_break_6", + "type", + "timestamp" + ], + "fields": [ + { + "fieldname": "document_type", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Document Type", + "options": "PageType", + "reqd": 1 + }, + { + "fieldname": "document_name", + "fieldtype": "Dynamic Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Document Name", + "options": "document_type", + "reqd": 1 + }, + { + "fetch_from": "site.team", + "fieldname": "team", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Team", + "options": "Team" + }, + { + "fieldname": "from_plan", + "fieldtype": "Link", + "label": "From Plan", + "options": "Server Plan" + }, + { + "fieldname": "to_plan", + "fieldtype": "Link", + "in_list_view": 1, + "label": "To Plan", + "options": "Server Plan", + "reqd": 1 + }, + { + "fieldname": "column_break_6", + "fieldtype": "Column Break" + }, + { + "fieldname": "type", + "fieldtype": "Select", + "in_list_view": 1, + "label": "Type", + "options": "\nInitial Plan\nUpgrade\nDowngrade" + }, + { + "default": "Now", + "fieldname": "timestamp", + "fieldtype": "Datetime", + "label": "Timestamp" + } + ], + "links": [], + "modified": "2024-02-19 14:03:40.310425", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Plan Change", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + }, + { + "create": 1, + "role": "Jcloud Admin" + }, + { + "create": 1, + "role": "Jcloud Member" + } + ], + "quick_entry": 1, + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "title_field": "document_name", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/plan_change/plan_change.py b/jcloud/jcloud/pagetype/plan_change/plan_change.py new file mode 100644 index 0000000..f7f70a3 --- /dev/null +++ b/jcloud/jcloud/pagetype/plan_change/plan_change.py @@ -0,0 +1,74 @@ +# Copyright (c) 2022, JINGROW +# For license information, please see license.txt + +import jingrow +from jingrow import _ 
+from jingrow.model.document import Document + + +class PlanChange(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + document_name: DF.DynamicLink + document_type: DF.Link + from_plan: DF.Link | None + team: DF.Link | None + timestamp: DF.Datetime | None + to_plan: DF.Link + type: DF.Literal["", "Initial Plan", "Upgrade", "Downgrade"] + # end: auto-generated types + + def validate(self): + self.team = jingrow.db.get_value(self.document_type, self.document_name, "team") + if self.from_plan and not self.type: + from_plan_value = jingrow.db.get_value("Server Plan", self.from_plan, "price_usd") + to_plan_value = jingrow.db.get_value("Server Plan", self.to_plan, "price_usd") + self.type = "Downgrade" if from_plan_value > to_plan_value else "Upgrade" + + if self.type == "Initial Plan": + self.from_plan = "" + + def after_insert(self): + if self.type == "Initial Plan": + self.create_subscription() + return + + self.change_subscription_plan() + + def create_subscription(self): + jingrow.get_pg( + pagetype="Subscription", + team=self.team, + plan_type="Server Plan", + plan=self.to_plan, + document_type=self.document_type, + document_name=self.document_name, + ).insert() + + def change_subscription_plan(self): + document = jingrow.get_pg(self.document_type, self.document_name) + subscription = document.subscription + if not subscription: + jingrow.throw(f"No subscription for {self.document_type} {self.document_name}") + + if self.from_plan and self.from_plan != subscription.plan: + jingrow.throw( + _("{0} {1} is currently on {2} plan and not {3}").format( + self.document_type, self.document_name, subscription.plan, self.from_plan + ) + ) + + subscription.plan = self.to_plan + subscription.flags.updater_reference = { + "pagetype": self.pagetype, + "docname": self.name, + "label": _("via Plan Change"), + } + subscription.enabled = 1 + subscription.save() diff --git a/jcloud/jcloud/pagetype/plan_change/test_plan_change.py b/jcloud/jcloud/pagetype/plan_change/test_plan_change.py new file mode 100644 index 0000000..1546aa8 --- /dev/null +++ b/jcloud/jcloud/pagetype/plan_change/test_plan_change.py @@ -0,0 +1,9 @@ +# Copyright (c) 2022, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests.utils import JingrowTestCase + + +class TestPlanChange(JingrowTestCase): + pass diff --git a/jcloud/jcloud/pagetype/plan_feature/__init__.py b/jcloud/jcloud/pagetype/plan_feature/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/plan_feature/plan_feature.json b/jcloud/jcloud/pagetype/plan_feature/plan_feature.json new file mode 100644 index 0000000..2ef15ae --- /dev/null +++ b/jcloud/jcloud/pagetype/plan_feature/plan_feature.json @@ -0,0 +1,31 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2021-12-22 18:49:12.677271", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "description" + ], + "fields": [ + { + "fieldname": "description", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Description", + "reqd": 1 + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2021-12-22 18:49:19.991998", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Plan Feature", + "owner": "Administrator", + "permissions": [], + "sort_field": "modified", + "sort_order": "DESC" +} \ No newline at end of file diff --git 
a/jcloud/jcloud/pagetype/plan_feature/plan_feature.py b/jcloud/jcloud/pagetype/plan_feature/plan_feature.py new file mode 100644 index 0000000..7a8b242 --- /dev/null +++ b/jcloud/jcloud/pagetype/plan_feature/plan_feature.py @@ -0,0 +1,23 @@ +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class PlanFeature(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + description: DF.Data + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/prometheus_alert_rule/__init__.py b/jcloud/jcloud/pagetype/prometheus_alert_rule/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/prometheus_alert_rule/prometheus_alert_rule.js b/jcloud/jcloud/pagetype/prometheus_alert_rule/prometheus_alert_rule.js new file mode 100644 index 0000000..b6abe4c --- /dev/null +++ b/jcloud/jcloud/pagetype/prometheus_alert_rule/prometheus_alert_rule.js @@ -0,0 +1,18 @@ +// Copyright (c) 2021, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Prometheus Alert Rule', { + onload_post_render: function (frm) { + frm.trigger('resize_code_fields'); + }, + resize_code_fields: function (frm) { + setTimeout(() => { + frm.fields.map((field) => { + if (field.ace_editor_target) { + field.ace_editor_target.css('height', 100); + field.editor.resize(); + } + }); + }, 1000); + }, +}); diff --git a/jcloud/jcloud/pagetype/prometheus_alert_rule/prometheus_alert_rule.json b/jcloud/jcloud/pagetype/prometheus_alert_rule/prometheus_alert_rule.json new file mode 100644 index 0000000..00d1a7e --- /dev/null +++ b/jcloud/jcloud/pagetype/prometheus_alert_rule/prometheus_alert_rule.json @@ -0,0 +1,223 @@ +{ + "actions": [], + "autoname": "Prompt", + "creation": "2021-05-26 19:54:21.999890", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "severity", + "for", + "enabled", + "column_break_2", + "description", + "section_break_4", + "expression", + "column_break_6", + "labels", + "annotations", + "routing_section", + "group_wait", + "group_interval", + "repeat_interval", + "column_break_19", + "group_by", + "preview_section", + "alert_preview", + "column_break_21", + "route_preview", + "reaction_tab", + "jcloud_job_type", + "ignore_on_clusters", + "column_break_oetk", + "only_on_shared", + "silent" + ], + "fields": [ + { + "fieldname": "for", + "fieldtype": "Data", + "in_list_view": 1, + "label": "For", + "reqd": 1 + }, + { + "fieldname": "expression", + "fieldtype": "Code", + "in_list_view": 1, + "label": "Expression", + "reqd": 1 + }, + { + "default": "{}", + "fieldname": "annotations", + "fieldtype": "Code", + "label": "Annotations", + "reqd": 1 + }, + { + "default": "Critical", + "fieldname": "severity", + "fieldtype": "Select", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Severity", + "options": "Critical\nWarning\nInformation", + "reqd": 1 + }, + { + "fieldname": "description", + "fieldtype": "Data", + "label": "Description", + "reqd": 1 + }, + { + "fieldname": "section_break_4", + "fieldtype": "Section Break" + }, + { + "fieldname": "column_break_6", + "fieldtype": "Column Break" + }, + { + "default": "{}", + "fieldname": "labels", + "fieldtype": "Code", + "label": "Labels", + "reqd": 1 + }, + 
{ + "fieldname": "column_break_2", + "fieldtype": "Column Break" + }, + { + "fieldname": "routing_section", + "fieldtype": "Section Break", + "label": "Routing" + }, + { + "default": "1m", + "fieldname": "group_wait", + "fieldtype": "Data", + "label": "Group Wait", + "reqd": 1 + }, + { + "default": "5m", + "fieldname": "group_interval", + "fieldtype": "Data", + "label": "Group Interval", + "reqd": 1 + }, + { + "default": "1h", + "fieldname": "repeat_interval", + "fieldtype": "Data", + "label": "Repeat Interval", + "reqd": 1 + }, + { + "fieldname": "column_break_19", + "fieldtype": "Column Break" + }, + { + "default": "[\"alertname\"]", + "fieldname": "group_by", + "fieldtype": "Code", + "label": "Group By", + "reqd": 1 + }, + { + "fieldname": "preview_section", + "fieldtype": "Section Break", + "label": "Preview" + }, + { + "fieldname": "alert_preview", + "fieldtype": "Code", + "label": "Alert Preview", + "read_only": 1 + }, + { + "fieldname": "column_break_21", + "fieldtype": "Column Break" + }, + { + "fieldname": "route_preview", + "fieldtype": "Code", + "label": "Route Preview", + "read_only": 1 + }, + { + "default": "1", + "fieldname": "enabled", + "fieldtype": "Check", + "in_list_view": 1, + "label": "Enabled" + }, + { + "fieldname": "reaction_tab", + "fieldtype": "Tab Break", + "label": "Reaction" + }, + { + "fieldname": "jcloud_job_type", + "fieldtype": "Link", + "label": "Jcloud Job Type", + "options": "Jcloud Job Type" + }, + { + "fieldname": "column_break_oetk", + "fieldtype": "Column Break" + }, + { + "default": "0", + "fieldname": "only_on_shared", + "fieldtype": "Check", + "label": "Only on Shared" + }, + { + "fieldname": "ignore_on_clusters", + "fieldtype": "Table MultiSelect", + "label": "Ignore on Clusters", + "options": "Prometheus Alert Rule Cluster" + }, + { + "default": "0", + "description": "Don't forward to telegram (Only for reactions)", + "fieldname": "silent", + "fieldtype": "Check", + "label": "Silent" + } + ], + "index_web_pages_for_search": 1, + "links": [ + { + "link_pagetype": "Alertmanager Webhook Log", + "link_fieldname": "alert" + } + ], + "modified": "2024-05-27 12:08:23.598999", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Prometheus Alert Rule", + "naming_rule": "Set by user", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/prometheus_alert_rule/prometheus_alert_rule.py b/jcloud/jcloud/pagetype/prometheus_alert_rule/prometheus_alert_rule.py new file mode 100644 index 0000000..519e258 --- /dev/null +++ b/jcloud/jcloud/pagetype/prometheus_alert_rule/prometheus_alert_rule.py @@ -0,0 +1,157 @@ +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + +from __future__ import annotations + +import json +from typing import TYPE_CHECKING + +import jingrow +import yaml +from jingrow.core.utils import find +from jingrow.model.document import Document + +from jcloud.agent import Agent + +if TYPE_CHECKING: + from jcloud.jcloud.pagetype.server.server import Server + + +class PrometheusAlertRule(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + from jcloud.jcloud.pagetype.prometheus_alert_rule_cluster.prometheus_alert_rule_cluster import ( + PrometheusAlertRuleCluster, + ) + + alert_preview: DF.Code | None + annotations: DF.Code + description: DF.Data + enabled: DF.Check + expression: DF.Code + group_by: DF.Code + group_interval: DF.Data + group_wait: DF.Data + ignore_on_clusters: DF.TableMultiSelect[PrometheusAlertRuleCluster] + labels: DF.Code + only_on_shared: DF.Check + jcloud_job_type: DF.Link | None + repeat_interval: DF.Data + route_preview: DF.Code | None + severity: DF.Literal["Critical", "Warning", "Information"] + silent: DF.Check + # end: auto-generated types + + def validate(self): + self.alert_preview = yaml.dump(self.get_rule()) + self.route_preview = yaml.dump(self.get_route()) + + def get_rule(self): + labels = json.loads(self.labels) + labels.update({"severity": self.severity.lower()}) + + annotations = json.loads(self.annotations) + annotations.update({"description": self.description}) + + return { + "alert": self.name, + "expr": self.expression, + "for": self.get("for"), + "labels": labels, + "annotations": annotations, + } + + def get_route(self): + return { + "group_by": json.loads(self.group_by), + "group_wait": self.group_wait, + "group_interval": self.group_interval, + "repeat_interval": self.repeat_interval, + "matchers": [f'alertname="{self.name}"'], + } + + def on_update(self): + rules = yaml.dump(self.get_rules()) + routes = yaml.dump(self.get_routes()) + + monitoring_server = jingrow.db.get_single_value("Jcloud Settings", "monitor_server") + agent = Agent(monitoring_server, "Monitor Server") + agent.update_monitor_rules(rules, routes) + + def get_rules(self): + rules_dict = {"groups": [{"name": "All", "rules": []}]} + + rules = jingrow.get_all(self.pagetype, {"enabled": True}) + for rule in rules: + rule_pg = jingrow.get_pg(self.pagetype, rule.name) + rules_dict["groups"][0]["rules"].append(rule_pg.get_rule()) + + return rules_dict + + def get_routes(self): + routes_dict = { + "route": {"receiver": "web.hook", "routes": []}, + "receivers": [ + { + "name": "web.hook", + "webhook_configs": [ + {"url": jingrow.utils.get_url("api/method/jcloud.api.monitoring.alert")} + ], + } + ], + } + + rules = jingrow.get_all(self.pagetype, {"enabled": True}) + for rule in rules: + rule_pg = jingrow.get_pg(self.pagetype, rule.name) + routes_dict["route"]["routes"].append(rule_pg.get_route()) + + return routes_dict + + def react(self, instance_type: str, instance: str, labels: dict | None = None): + return self.run_jcloud_job(self.jcloud_job_type, instance_type, instance, labels) + + def run_jcloud_job( + self, job_name: str, server_type: str, server_name: str, labels: dict | None = None, arguments=None + ): + server: "Server" = jingrow.get_pg(server_type, server_name) + if self.only_on_shared and not server.public: + return None + if find(self.ignore_on_clusters, lambda x: x.cluster == server.cluster): + return None + + if arguments is None: + arguments = {} + + if not labels: + labels = {} + + arguments.update({"labels": labels}) + + if existing_jobs := jingrow.get_all( + "Jcloud Job", + { + "status": ("in", ["Pending", "Running"]), + "server_type": server_type, + "server": server_name, + }, + pluck="name", + ): + return jingrow.get_pg("Jcloud Job", existing_jobs[0]) + + return jingrow.get_pg( + { + "pagetype": "Jcloud Job", + "job_type": job_name, + "server_type": server_type, + "server": server_name, + "virtual_machine": 
server.virtual_machine, + "arguments": json.dumps(arguments, indent=2, sort_keys=True), + } + ).insert() diff --git a/jcloud/jcloud/pagetype/prometheus_alert_rule/test_prometheus_alert_rule.py b/jcloud/jcloud/pagetype/prometheus_alert_rule/test_prometheus_alert_rule.py new file mode 100644 index 0000000..7df6f91 --- /dev/null +++ b/jcloud/jcloud/pagetype/prometheus_alert_rule/test_prometheus_alert_rule.py @@ -0,0 +1,32 @@ +# Copyright (c) 2021, JINGROW +# See license.txt + +import unittest +from unittest.mock import Mock, patch + +import jingrow + +from jcloud.agent import Agent + + +@patch.object(Agent, "update_monitor_rules", new=Mock()) +def create_test_prometheus_alert_rule(name="Sites Down"): + return jingrow.get_pg( + { + "pagetype": "Prometheus Alert Rule", + "name": name, + "description": "Sites didn't respond with http 200", + "severity": "Critical", + "group_wait": "1m", + "group_interval": "1m", + "repeat_interval": "1h", + "group_by": '["alertname", "cluster", "server", "instance"]', + "expression": 'probe_success{job="site"} == 0 and probe_http_status_code != 429', + "for": "4m", + "enable_reactions": True, + }, + ).insert(ignore_if_duplicate=True) + + +class TestPrometheusAlertRule(unittest.TestCase): + pass diff --git a/jcloud/jcloud/pagetype/prometheus_alert_rule_cluster/__init__.py b/jcloud/jcloud/pagetype/prometheus_alert_rule_cluster/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/prometheus_alert_rule_cluster/prometheus_alert_rule_cluster.json b/jcloud/jcloud/pagetype/prometheus_alert_rule_cluster/prometheus_alert_rule_cluster.json new file mode 100644 index 0000000..4033826 --- /dev/null +++ b/jcloud/jcloud/pagetype/prometheus_alert_rule_cluster/prometheus_alert_rule_cluster.json @@ -0,0 +1,33 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2024-05-24 16:13:21.415426", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "cluster" + ], + "fields": [ + { + "fieldname": "cluster", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Cluster", + "options": "Cluster", + "reqd": 1 + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2024-05-24 16:31:37.894227", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Prometheus Alert Rule Cluster", + "owner": "Administrator", + "permissions": [], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/prometheus_alert_rule_cluster/prometheus_alert_rule_cluster.py b/jcloud/jcloud/pagetype/prometheus_alert_rule_cluster/prometheus_alert_rule_cluster.py new file mode 100644 index 0000000..3fcc35a --- /dev/null +++ b/jcloud/jcloud/pagetype/prometheus_alert_rule_cluster/prometheus_alert_rule_cluster.py @@ -0,0 +1,23 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class PrometheusAlertRuleCluster(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + cluster: DF.Link + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/proxy_server/__init__.py b/jcloud/jcloud/pagetype/proxy_server/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/proxy_server/patches/generate_proxysql_monitor_password.py b/jcloud/jcloud/pagetype/proxy_server/patches/generate_proxysql_monitor_password.py new file mode 100644 index 0000000..cb65abf --- /dev/null +++ b/jcloud/jcloud/pagetype/proxy_server/patches/generate_proxysql_monitor_password.py @@ -0,0 +1,12 @@ +import jingrow + + +def execute(): + jingrow.reload_pg("jcloud", "pagetype", "proxy_server") + + for server in jingrow.get_all( + "Proxy Server", filters={"status": "Active"}, pluck="name" + ): + server = jingrow.get_pg("Proxy Server", server) + server.proxysql_monitor_password = jingrow.generate_hash() + server.save() diff --git a/jcloud/jcloud/pagetype/proxy_server/proxy_server.js b/jcloud/jcloud/pagetype/proxy_server/proxy_server.js new file mode 100644 index 0000000..456e883 --- /dev/null +++ b/jcloud/jcloud/pagetype/proxy_server/proxy_server.js @@ -0,0 +1,120 @@ +// Copyright (c) 2020, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Proxy Server', { + refresh: function (frm) { + [ + [__('Ping Agent'), 'ping_agent', false, frm.pg.is_server_setup], + [__('Ping Ansible'), 'ping_ansible', true], + [__('Ping Ansible Unprepared'), 'ping_ansible_unprepared', true], + [__('Update Agent'), 'update_agent', true, frm.pg.is_server_setup], + [ + __('Update Agent Ansible'), + 'update_agent_ansible', + true, + frm.pg.is_server_setup, + ], + [__('Prepare Server'), 'prepare_server', true, !frm.pg.is_server_setup], + [__('Setup Server'), 'setup_server', true, !frm.pg.is_server_setup], + [ + __('Setup SSH Proxy'), + 'setup_ssh_proxy', + true, + frm.pg.ssh_certificate_authority && !frm.pg.is_ssh_proxy_setup, + ], + [ + __('Setup ProxySQL'), + 'setup_proxysql', + true, + !frm.pg.is_proxysql_setup, + ], + [ + __('Setup ProxySQL Monitor'), + 'setup_proxysql_monitor', + true, + frm.pg.is_proxysql_setup, + ], + [ + __('Setup Wildcard Hosts'), + 'setup_wildcard_hosts', + true, + frm.pg.is_server_setup, + ], + [ + __('Show Agent Password'), + 'show_agent_password', + false, + frm.pg.is_server_setup, + ], + [ + __('Fetch Keys'), + 'fetch_keys', + false, + frm.pg.is_server_setup && + (!frm.pg.jingrow_public_key || !frm.pg.root_public_key), + ], + [__('Update TLS Certificate'), 'update_tls_certificate', true], + [__('Create Image'), 'create_image', true, frm.pg.status == 'Active'], + [ + __('Setup Replication'), + 'setup_replication', + true, + frm.pg.is_server_setup && + !frm.pg.is_primary && + !frm.pg.is_replication_setup, + ], + [ + __('Trigger Failover'), + 'trigger_failover', + true, + frm.pg.is_server_setup && + !frm.pg.is_primary && + frm.pg.is_replication_setup, + ], + [__('Archive'), 'archive', true, frm.pg.status !== 'Archived'], + [__('Setup Fail2ban'), 'setup_fail2ban', true, frm.pg.is_server_setup], + [__('Setup Wireguard'), 'setup_wireguard', true], + [__('Reload Wireguard'), 'reload_wireguard', true], + [ + __('Reboot with serial console'), + 'reboot_with_serial_console', + true, + frm.pg.virtual_machine, + ], + ].forEach(([label, method, confirm, condition]) => { + if (typeof condition === 'undefined' || condition) { + frm.add_custom_button( + label, + () => { + if 
(confirm) { + jingrow.confirm( + `Are you sure you want to ${label.toLowerCase()}?`, + () => + frm.call(method).then((r) => { + if (r.message) { + jingrow.msgprint(r.message); + } else { + frm.refresh(); + } + }), + ); + } else { + frm.call(method).then((r) => { + if (r.message) { + jingrow.msgprint(r.message); + } else { + frm.refresh(); + } + }); + } + }, + __('Actions'), + ); + } + }); + }, + + hostname: function (frm) { + jcloud.set_hostname_abbreviation(frm); + }, +}); diff --git a/jcloud/jcloud/pagetype/proxy_server/proxy_server.json b/jcloud/jcloud/pagetype/proxy_server/proxy_server.json new file mode 100644 index 0000000..2d8fa3d --- /dev/null +++ b/jcloud/jcloud/pagetype/proxy_server/proxy_server.json @@ -0,0 +1,450 @@ +{ + "actions": [], + "creation": "2022-01-28 20:07:40.294840", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "status", + "hostname", + "hostname_abbreviation", + "domain", + "self_hosted_server_domain", + "column_break_3", + "cluster", + "provider", + "virtual_machine", + "is_server_setup", + "is_self_hosted", + "team", + "public", + "storage_section", + "auto_add_storage_min", + "auto_add_storage_max", + "section_break_8", + "ip", + "enabled_default_routing", + "column_break_10", + "private_ip", + "private_mac_address", + "private_vlan_id", + "agent_section", + "agent_password", + "column_break_mznm", + "disable_agent_job_auto_retry", + "replica_section", + "is_primary", + "primary", + "is_replication_setup", + "ssh_section", + "ssh_user", + "ssh_port", + "jingrow_user_password", + "jingrow_public_key", + "column_break_18", + "root_public_key", + "section_break_21", + "domains", + "ssh_proxy_section", + "ssh_certificate_authority", + "column_break_26", + "is_ssh_proxy_setup", + "proxysql_section", + "proxysql_admin_password", + "proxysql_monitor_password", + "is_proxysql_setup", + "vpn_tab", + "wireguard_network", + "wireguard_network_ip", + "wireguard_port", + "is_wireguard_setup", + "column_break_dapz", + "wireguard_private_key", + "wireguard_public_key", + "private_ip_interface_id", + "wireguard_interface_id" + ], + "fields": [ + { + "fetch_from": "virtual_machine.public_ip_address", + "fieldname": "ip", + "fieldtype": "Data", + "in_list_view": 1, + "label": "IP", + "set_only_once": 1 + }, + { + "fetch_from": "virtual_machine.private_ip_address", + "fieldname": "private_ip", + "fieldtype": "Data", + "label": "Private IP", + "set_only_once": 1 + }, + { + "fieldname": "agent_password", + "fieldtype": "Password", + "label": "Agent Password", + "set_only_once": 1 + }, + { + "default": "Pending", + "fieldname": "status", + "fieldtype": "Select", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Status", + "options": "Pending\nInstalling\nActive\nBroken\nArchived", + "read_only": 1, + "reqd": 1 + }, + { + "default": "0", + "fieldname": "is_server_setup", + "fieldtype": "Check", + "label": "Server Setup", + "read_only": 1 + }, + { + "fieldname": "column_break_3", + "fieldtype": "Column Break" + }, + { + "fieldname": "agent_section", + "fieldtype": "Section Break", + "label": "Agent" + }, + { + "fieldname": "ssh_section", + "fieldtype": "Section Break", + "label": "SSH" + }, + { + "fieldname": "root_public_key", + "fieldtype": "Code", + "label": "Root Public Key", + "read_only": 1 + }, + { + "fieldname": "jingrow_public_key", + "fieldtype": "Code", + "label": "Jingrow Public Key", + "read_only": 1 + }, + { + "fieldname": "column_break_10", + "fieldtype": "Column Break" + }, + { + "fieldname": "cluster", + 
"fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Cluster", + "options": "Cluster", + "set_only_once": 1 + }, + { + "fieldname": "hostname", + "fieldtype": "Data", + "label": "Hostname", + "reqd": 1, + "set_only_once": 1 + }, + { + "fieldname": "domain", + "fieldtype": "Link", + "label": "Domain", + "options": "Root Domain", + "read_only": 1, + "set_only_once": 1 + }, + { + "default": "Generic", + "fieldname": "provider", + "fieldtype": "Select", + "label": "Provider", + "options": "Generic\nScaleway\nAWS EC2\nOCI", + "set_only_once": 1 + }, + { + "fieldname": "section_break_8", + "fieldtype": "Section Break", + "label": "Networking" + }, + { + "depends_on": "eval: pg.provider === \"Scaleway\"", + "fieldname": "private_mac_address", + "fieldtype": "Data", + "label": "Private Mac Address", + "mandatory_depends_on": "eval: pg.provider === \"Scaleway\"", + "set_only_once": 1 + }, + { + "depends_on": "eval: pg.provider === \"Scaleway\"", + "fieldname": "private_vlan_id", + "fieldtype": "Data", + "label": "Private VLAN ID", + "mandatory_depends_on": "eval: pg.provider === \"Scaleway\"", + "set_only_once": 1 + }, + { + "fieldname": "column_break_18", + "fieldtype": "Column Break" + }, + { + "fieldname": "jingrow_user_password", + "fieldtype": "Password", + "label": "Jingrow User Password", + "set_only_once": 1 + }, + { + "fieldname": "section_break_21", + "fieldtype": "Section Break" + }, + { + "fieldname": "domains", + "fieldtype": "Table", + "label": "Domains", + "options": "Proxy Server Domain" + }, + { + "depends_on": "eval: pg.provider === \"AWS EC2\"", + "fieldname": "virtual_machine", + "fieldtype": "Link", + "label": "Virtual Machine", + "mandatory_depends_on": "eval: pg.provider === \"AWS EC2\"", + "options": "Virtual Machine" + }, + { + "fieldname": "ssh_proxy_section", + "fieldtype": "Section Break", + "label": "SSH Proxy" + }, + { + "fieldname": "ssh_certificate_authority", + "fieldtype": "Link", + "label": "SSH Certificate Authority", + "options": "SSH Certificate Authority" + }, + { + "fieldname": "column_break_26", + "fieldtype": "Column Break" + }, + { + "default": "0", + "fieldname": "is_ssh_proxy_setup", + "fieldtype": "Check", + "label": "Is SSH Proxy Setup", + "read_only": 1 + }, + { + "fieldname": "proxysql_section", + "fieldtype": "Section Break", + "label": "ProxySQL" + }, + { + "fieldname": "proxysql_admin_password", + "fieldtype": "Password", + "label": "ProxySQL Admin Password", + "read_only": 1 + }, + { + "default": "0", + "fieldname": "is_proxysql_setup", + "fieldtype": "Check", + "label": "Is ProxySQL Setup", + "read_only": 1 + }, + { + "fieldname": "proxysql_monitor_password", + "fieldtype": "Password", + "label": "ProxySQL Monitor Password", + "read_only": 1 + }, + { + "fieldname": "team", + "fieldtype": "Link", + "label": "Team", + "options": "Team" + }, + { + "fieldname": "replica_section", + "fieldtype": "Section Break", + "label": "Replica" + }, + { + "default": "0", + "fieldname": "is_primary", + "fieldtype": "Check", + "label": "Is Primary" + }, + { + "depends_on": "eval: pg.is_primary != 1", + "fieldname": "primary", + "fieldtype": "Link", + "label": "Primary", + "mandatory_depends_on": "eval: pg.is_primary != 1", + "options": "Proxy Server" + }, + { + "default": "0", + "depends_on": "eval: pg.is_primary != 1", + "fieldname": "is_replication_setup", + "fieldtype": "Check", + "label": "Is Replication Setup" + }, + { + "default": "0", + "fieldname": "is_self_hosted", + "fieldtype": "Check", + "label": "Is Self Hosted" + }, + { + 
"default": "root", + "depends_on": "eval:pg.is_self_hosted", + "fetch_if_empty": 1, + "fieldname": "ssh_user", + "fieldtype": "Data", + "label": "SSH User" + }, + { + "default": "22", + "fieldname": "ssh_port", + "fieldtype": "Int", + "label": "SSH Port" + }, + { + "default": "self.jingrow.dev", + "depends_on": "eval:pg.is_self_hosted", + "fieldname": "self_hosted_server_domain", + "fieldtype": "Data", + "label": "Self Hosted Server Domain" + }, + { + "fieldname": "vpn_tab", + "fieldtype": "Tab Break", + "label": "VPN" + }, + { + "default": "51820", + "fieldname": "wireguard_port", + "fieldtype": "Int", + "label": "Wireguard Port" + }, + { + "fieldname": "wireguard_private_key", + "fieldtype": "Password", + "label": "Wireguard Private Key" + }, + { + "fieldname": "wireguard_public_key", + "fieldtype": "Password", + "label": "Wireguard Public Key" + }, + { + "default": "wg0", + "fieldname": "wireguard_interface_id", + "fieldtype": "Data", + "label": "Wireguard Interface ID" + }, + { + "default": "0", + "fieldname": "is_wireguard_setup", + "fieldtype": "Check", + "label": "Is Wireguard Setup" + }, + { + "fieldname": "private_ip_interface_id", + "fieldtype": "Data", + "label": "Private IP Interface ID" + }, + { + "fieldname": "wireguard_network", + "fieldtype": "Data", + "label": "Wireguard Network" + }, + { + "fieldname": "wireguard_network_ip", + "fieldtype": "Data", + "label": "Wireguard Network IP" + }, + { + "fieldname": "column_break_dapz", + "fieldtype": "Column Break" + }, + { + "fieldname": "hostname_abbreviation", + "fieldtype": "Data", + "label": "Hostname Abbreviation" + }, + { + "default": "0", + "description": "Wildcard routing eg. *.root-domain", + "fieldname": "enabled_default_routing", + "fieldtype": "Check", + "label": "Enabled Default Routing", + "read_only": 1 + }, + { + "fieldname": "column_break_mznm", + "fieldtype": "Column Break" + }, + { + "default": "1", + "fieldname": "disable_agent_job_auto_retry", + "fieldtype": "Check", + "label": "Disable Agent Job Auto Retry" + }, + { + "default": "0", + "fieldname": "public", + "fieldtype": "Check", + "label": "Public" + }, + { + "fieldname": "storage_section", + "fieldtype": "Section Break", + "label": "Storage" + }, + { + "default": "10", + "description": "Minimum storage to add automatically each time", + "fieldname": "auto_add_storage_min", + "fieldtype": "Int", + "label": "Auto Add Storage Min", + "non_negative": 1 + }, + { + "default": "50", + "description": "Maximum storage to add automatically each time", + "fieldname": "auto_add_storage_max", + "fieldtype": "Int", + "label": "Auto Add Storage Max", + "non_negative": 1 + } + ], + "links": [], + "modified": "2024-12-26 13:20:05.120010", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Proxy Server", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/proxy_server/proxy_server.py b/jcloud/jcloud/pagetype/proxy_server/proxy_server.py new file mode 100644 index 0000000..c60dc03 --- /dev/null +++ b/jcloud/jcloud/pagetype/proxy_server/proxy_server.py @@ -0,0 +1,551 @@ +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt +from __future__ import annotations + +from typing import TYPE_CHECKING + +import jingrow +from 
jingrow.utils import unique + +from jcloud.agent import Agent +from jcloud.jcloud.pagetype.server.server import BaseServer +from jcloud.runner import Ansible +from jcloud.utils import log_error + +if TYPE_CHECKING: + from jcloud.jcloud.pagetype.bench.bench import Bench + from jcloud.jcloud.pagetype.root_domain.root_domain import RootDomain + + +class ProxyServer(BaseServer): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + from jcloud.jcloud.pagetype.proxy_server_domain.proxy_server_domain import ProxyServerDomain + + agent_password: DF.Password | None + auto_add_storage_max: DF.Int + auto_add_storage_min: DF.Int + cluster: DF.Link | None + disable_agent_job_auto_retry: DF.Check + domain: DF.Link | None + domains: DF.Table[ProxyServerDomain] + enabled_default_routing: DF.Check + jingrow_public_key: DF.Code | None + jingrow_user_password: DF.Password | None + hostname: DF.Data + hostname_abbreviation: DF.Data | None + ip: DF.Data | None + is_primary: DF.Check + is_proxysql_setup: DF.Check + is_replication_setup: DF.Check + is_self_hosted: DF.Check + is_server_setup: DF.Check + is_ssh_proxy_setup: DF.Check + is_wireguard_setup: DF.Check + primary: DF.Link | None + private_ip: DF.Data | None + private_ip_interface_id: DF.Data | None + private_mac_address: DF.Data | None + private_vlan_id: DF.Data | None + provider: DF.Literal["Generic", "Scaleway", "AWS EC2", "OCI"] + proxysql_admin_password: DF.Password | None + proxysql_monitor_password: DF.Password | None + public: DF.Check + root_public_key: DF.Code | None + self_hosted_server_domain: DF.Data | None + ssh_certificate_authority: DF.Link | None + ssh_port: DF.Int + ssh_user: DF.Data | None + status: DF.Literal["Pending", "Installing", "Active", "Broken", "Archived"] + team: DF.Link | None + virtual_machine: DF.Link | None + wireguard_interface_id: DF.Data | None + wireguard_network: DF.Data | None + wireguard_network_ip: DF.Data | None + wireguard_port: DF.Int + wireguard_private_key: DF.Password | None + wireguard_public_key: DF.Password | None + # end: auto-generated types + + def validate(self): + super().validate() + self.validate_domains() + self.validate_proxysql_admin_password() + + def validate_domains(self): + domains = [row.domain for row in self.domains] + code_servers = [row.code_server for row in self.domains] + # Always include self.domain in the domains child table + # Remove duplicates + domains = unique([self.domain, *domains]) + self.domains = [] + for i, domain in enumerate(domains): + if not jingrow.db.exists( + "TLS Certificate", {"wildcard": True, "status": "Active", "domain": domain} + ): + jingrow.throw(f"Valid wildcard TLS Certificate not found for {domain}") + if code_servers: + self.append("domains", {"domain": domain, "code_server": code_servers[i]}) + + def validate_proxysql_admin_password(self): + if not self.proxysql_admin_password: + self.proxysql_admin_password = jingrow.generate_hash(length=32) + + def get_wildcard_domains(self): + wildcard_domains = [] + for domain in self.domains: + if domain.domain == self.domain: + # self.domain certs are symlinks + continue + certificate_name = jingrow.db.get_value( + "TLS Certificate", {"wildcard": True, "domain": domain.domain}, "name" + ) + certificate = jingrow.get_pg("TLS Certificate", certificate_name) + wildcard_domains.append( + { + "domain": domain.domain, + "certificate": { + "privkey.pem": 
certificate.private_key, + "fullchain.pem": certificate.full_chain, + "chain.pem": certificate.intermediate_chain, + }, + "code_server": domain.code_server, + } + ) + return wildcard_domains + + @jingrow.whitelist() + def setup_wildcard_hosts(self): + agent = Agent(self.name, server_type="Proxy Server") + wildcards = self.get_wildcard_domains() + agent.setup_wildcard_hosts(wildcards) + + def _setup_server(self): + agent_password = self.get_password("agent_password") + agent_repository_url = self.get_agent_repository_url() + certificate_name = jingrow.db.get_value( + "TLS Certificate", {"wildcard": True, "domain": self.domain}, "name" + ) + certificate = jingrow.get_pg("TLS Certificate", certificate_name) + monitoring_password = jingrow.get_pg("Cluster", self.cluster).get_password("monitoring_password") + + log_server = jingrow.db.get_single_value("Jcloud Settings", "log_server") + if log_server: + kibana_password = jingrow.get_pg("Log Server", log_server).get_password("kibana_password") + else: + kibana_password = None + + try: + ansible = Ansible( + playbook="self_hosted_proxy.yml" if getattr(self, "is_self_hosted", False) else "proxy.yml", + server=self, + user=self.ssh_user or "root", + port=self.ssh_port or 22, + variables={ + "server": self.name, + "workers": 1, + "domain": self.domain, + "agent_password": agent_password, + "agent_repository_url": agent_repository_url, + "monitoring_password": monitoring_password, + "log_server": log_server, + "kibana_password": kibana_password, + "certificate_private_key": certificate.private_key, + "certificate_full_chain": certificate.full_chain, + "certificate_intermediate_chain": certificate.intermediate_chain, + "jcloud_url": jingrow.utils.get_url(), + }, + ) + play = ansible.run() + self.reload() + if play.status == "Success": + self.status = "Active" + self.is_server_setup = True + else: + self.status = "Broken" + except Exception: + self.status = "Broken" + log_error("Proxy Server Setup Exception", server=self.as_dict()) + self.save() + + def _install_exporters(self): + monitoring_password = jingrow.get_pg("Cluster", self.cluster).get_password("monitoring_password") + try: + ansible = Ansible( + playbook="proxy_exporters.yml", + server=self, + user=self.ssh_user or "root", + port=self.ssh_port or 22, + variables={ + "private_ip": self.private_ip, + "monitoring_password": monitoring_password, + }, + ) + ansible.run() + except Exception: + log_error("Exporters Install Exception", server=self.as_dict()) + + @jingrow.whitelist() + def setup_ssh_proxy(self): + jingrow.enqueue_pg(self.pagetype, self.name, "_setup_ssh_proxy", queue="long", timeout=1200) + + def _setup_ssh_proxy(self): + settings = jingrow.db.get_value( + "Jcloud Settings", + None, + ["docker_registry_url", "docker_registry_username", "docker_registry_password"], + as_dict=True, + ) + ca = jingrow.get_pg("SSH Certificate Authority", self.ssh_certificate_authority) + try: + ansible = Ansible( + playbook="ssh_proxy.yml", + server=self, + user=self.ssh_user or "root", + port=self.ssh_port or 22, + variables={ + "registry_url": settings.docker_registry_url, + "registry_username": settings.docker_registry_username, + "registry_password": settings.docker_registry_password, + "docker_image": ca.docker_image, + }, + ) + play = ansible.run() + if play.status == "Success": + self.reload() + self.is_ssh_proxy_setup = True + self.save() + except Exception: + log_error("SSH Proxy Setup Exception", server=self.as_dict()) + + @jingrow.whitelist() + def setup_fail2ban(self): + self.status = 
"Installing" + self.save() + jingrow.enqueue_pg(self.pagetype, self.name, "_setup_fail2ban", queue="long", timeout=1200) + + def _setup_fail2ban(self): + try: + ansible = Ansible( + playbook="fail2ban.yml", + server=self, + ) + play = ansible.run() + self.reload() + if play.status == "Success": + self.status = "Active" + else: + self.status = "Broken" + except Exception: + self.status = "Broken" + log_error("Fail2ban Setup Exception", server=self.as_dict()) + self.save() + + @jingrow.whitelist() + def setup_proxysql(self): + jingrow.enqueue_pg(self.pagetype, self.name, "_setup_proxysql", queue="long", timeout=1200) + + def _setup_proxysql(self): + try: + default_hostgroup = jingrow.get_all( + "Database Server", + "MIN(server_id)", + {"status": "Active", "cluster": self.cluster}, + as_list=True, + )[0][0] + ansible = Ansible( + playbook="proxysql.yml", + server=self, + variables={ + "server": self.name, + "proxysql_admin_password": self.get_password("proxysql_admin_password"), + "default_hostgroup": default_hostgroup, + }, + ) + play = ansible.run() + if play.status == "Success": + self.reload() + self.is_proxysql_setup = True + self.save() + except Exception: + log_error("ProxySQL Setup Exception", server=self.as_dict()) + + @jingrow.whitelist() + def setup_replication(self): + self.status = "Installing" + self.save() + jingrow.enqueue_pg(self.pagetype, self.name, "_setup_replication", queue="long", timeout=1200) + + def _setup_replication(self): + self._setup_secondary() + if self.status == "Active": + primary = jingrow.get_pg("Proxy Server", self.primary) + primary._setup_primary(self.name) + if primary.status == "Active": + self.is_replication_setup = True + self.save() + + def _setup_primary(self, secondary): + secondary_private_ip = jingrow.db.get_value("Proxy Server", secondary, "private_ip") + try: + ansible = Ansible( + playbook="primary_proxy.yml", + server=self, + variables={"secondary_private_ip": secondary_private_ip}, + ) + play = ansible.run() + self.reload() + if play.status == "Success": + self.status = "Active" + else: + self.status = "Broken" + except Exception: + self.status = "Broken" + log_error("Primary Proxy Server Setup Exception", server=self.as_dict()) + self.save() + + def _setup_secondary(self): + try: + ansible = Ansible( + playbook="secondary_proxy.yml", + server=self, + variables={"primary_public_key": self.get_primary_jingrow_public_key()}, + ) + play = ansible.run() + self.reload() + + if play.status == "Success": + self.status = "Active" + else: + self.status = "Broken" + except Exception: + self.status = "Broken" + log_error("Secondary Proxy Server Setup Exception", server=self.as_dict()) + self.save() + + @jingrow.whitelist() + def trigger_failover(self): + if self.is_primary: + return + self.status = "Installing" + self.save() + jingrow.enqueue_pg(self.pagetype, self.name, "_trigger_failover", queue="long", timeout=3600) + + def stop_primary(self): + primary = jingrow.get_pg("Proxy Server", self.primary) + try: + ansible = Ansible( + playbook="failover_prepare_primary_proxy.yml", + server=primary, + ) + ansible.run() + except Exception: + pass # may be unreachable + + def forward_jobs_to_secondary(self): + jingrow.db.set_value( + "Agent Job", + {"server": self.primary, "status": "Undelivered"}, + "server", + self.name, + ) + + def move_wildcard_domains_from_primary(self): + jingrow.db.set_value( + "Proxy Server Domain", + {"parent": self.primary}, + "parent", + self.name, + ) + + def remove_primarys_access(self): + ansible = Ansible( + 
playbook="failover_remove_primary_access.yml", + server=self, + variables={ + "primary_public_key": jingrow.db.get_value("Proxy Server", self.primary, "jingrow_public_key") + }, + ) + ansible.run() + + def up_secondary(self): + ansible = Ansible(playbook="failover_up_secondary_proxy.yml", server=self) + ansible.run() + + def update_dns_records_for_all_sites(self): + from itertools import groupby + + servers = jingrow.get_all("Server", {"proxy_server": self.primary}, pluck="name") + sites_domains = jingrow.get_all( + "Site", + {"status": ("!=", "Archived"), "server": ("in", servers)}, + ["name", "domain"], + order_by="domain", + ) + for domain_name, sites in groupby(sites_domains, lambda x: x["domain"]): + domain: RootDomain = jingrow.get_pg("Root Domain", domain_name) + domain.update_dns_records_for_sites([site.name for site in sites], self.name) + + def _trigger_failover(self): + try: + self.update_dns_records_for_all_sites() + self.stop_primary() + self.remove_primarys_access() + self.forward_jobs_to_secondary() + self.up_secondary() + self.update_app_servers() + self.move_wildcard_domains_from_primary() + self.switch_primary() + self.add_ssh_users_for_existing_benches() + except Exception: + self.status = "Broken" + log_error("Proxy Server Failover Exception", pg=self) + self.save() + + def add_ssh_users_for_existing_benches(self): + benches = jingrow.qb.PageType("Bench") + servers = jingrow.qb.PageType("Server") + active_benches = ( + jingrow.qb.from_(benches) + .join(servers) + .on(servers.name == benches.server) + .select(benches.name) + .where(servers.proxy_server == self.primary) + .where(benches.status == "Active") + .run(as_dict=True) + ) + for bench_name in active_benches: + bench: "Bench" = jingrow.get_pg("Bench", bench_name) + bench.add_ssh_user() + + def update_app_servers(self): + jingrow.db.set_value("Server", {"proxy_server": self.primary}, "proxy_server", self.name) + + def switch_primary(self): + jingrow.db.set_value("Proxy Server", self.primary, "is_primary", False) + self.is_primary = True + self.is_replication_setup = False + self.primary = None + self.status = "Active" + + @jingrow.whitelist() + def setup_proxysql_monitor(self): + jingrow.enqueue_pg(self.pagetype, self.name, "_setup_proxysql_monitor", queue="long", timeout=1200) + + def _setup_proxysql_monitor(self): + try: + default_hostgroup = jingrow.get_all( + "Database Server", + "MIN(server_id)", + {"status": "Active", "cluster": self.cluster}, + as_list=True, + )[0][0] + ansible = Ansible( + playbook="proxysql_monitor.yml", + server=self, + variables={ + "server": self.name, + "proxysql_admin_password": self.get_password("proxysql_admin_password"), + "default_hostgroup": default_hostgroup, + }, + ) + ansible.run() + except Exception: + log_error("ProxySQL Monitor Setup Exception", server=self.as_dict()) + + @jingrow.whitelist() + def setup_wireguard(self): + if not self.private_ip_interface_id: + play = jingrow.get_last_pg("Ansible Play", {"play": "Ping Server", "server": self.name}) + task = jingrow.get_pg("Ansible Task", {"play": play.name, "task": "Gather Facts"}) + import json + + task_res = json.loads(task.result)["ansible_facts"] + for i in task_res["interfaces"]: + if task_res[i]["ipv4"]["address"] == self.private_ip: + self.private_ip_interface_id = task_res[i]["device"] + self.save() + jingrow.enqueue_pg(self.pagetype, self.name, "_setup_wireguard", queue="long", timeout=1200) + + def _setup_wireguard(self): + try: + ansible = Ansible( + playbook="wireguard.yml", + server=self, + variables={ + 
"server": self.name, + "wireguard_port": self.wireguard_port, + "wireguard_network": self.wireguard_network_ip + + "/" + + self.wireguard_network.split("/")[1], + "interface_id": self.private_ip_interface_id, + "wireguard_private_key": False, + "wireguard_public_key": False, + "peers": "", + "reload_wireguard": bool(self.is_wireguard_setup), + }, + ) + play = ansible.run() + if play.status == "Success": + self.reload() + self.is_wireguard_setup = True + if not self.wireguard_private_key and not self.wireguard_public_key: + self.wireguard_private_key = jingrow.get_pg( + "Ansible Task", {"play": play.name, "task": "Generate Wireguard Private Key"} + ).output + self.wireguard_public_key = jingrow.get_pg( + "Ansible Task", {"play": play.name, "task": "Generate Wireguard Public Key"} + ).output + self.save() + except Exception: + log_error("Wireguard Setup Exception", server=self.as_dict()) + + @jingrow.whitelist() + def reload_wireguard(self): + jingrow.enqueue_pg("Proxy Server", self.name, "_reload_wireguard", queue="default", timeout=1200) + + def _reload_wireguard(self): + import json + + peers = jingrow.get_list( + "Wireguard Peer", + filters={"upstream_proxy": self.name, "status": "Active"}, + fields=["peer_name as name", "public_key", "ip as peer_ip", "allowed_ips"], + order_by="creation asc", + ) + try: + ansible = Ansible( + playbook="reload_wireguard.yml", + server=self, + variables={ + "server": self.name, + "wireguard_port": self.wireguard_port, + "wireguard_network": self.wireguard_network_ip + + "/" + + self.wireguard_network.split("/")[1], + "interface_id": self.private_ip_interface_id, + "wireguard_private_key": self.get_password("wireguard_private_key"), + "wireguard_public_key": self.get_password("wireguard_public_key"), + "peers": json.dumps(peers), + }, + ) + ansible.run() + except Exception: + log_error("Wireguard Setup Exception", server=self.as_dict()) + + +def process_update_nginx_job_update(job): + proxy_server = jingrow.get_pg("Proxy Server", job.server) + if job.status == "Success": + proxy_server.status = "Active" + elif job.status in ["Failure", "Undelivered", "Delivery Failure"]: + proxy_server.status = "Broken" + elif job.status in ["Pending", "Running"]: + proxy_server.status = "Installing" + proxy_server.save() diff --git a/jcloud/jcloud/pagetype/proxy_server/proxy_server_dashboard.py b/jcloud/jcloud/pagetype/proxy_server/proxy_server_dashboard.py new file mode 100644 index 0000000..908ec2c --- /dev/null +++ b/jcloud/jcloud/pagetype/proxy_server/proxy_server_dashboard.py @@ -0,0 +1,16 @@ +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +from jingrow import _ + + +def get_data(): + return { + "fieldname": "server", + "non_standard_fieldnames": {"Server": "proxy_server"}, + "transactions": [ + {"label": _("Related Documents"), "items": ["Server"]}, + {"label": _("Logs"), "items": ["Agent Job", "Ansible Play"]}, + ], + } diff --git a/jcloud/jcloud/pagetype/proxy_server/test_proxy_server.py b/jcloud/jcloud/pagetype/proxy_server/test_proxy_server.py new file mode 100644 index 0000000..8520d19 --- /dev/null +++ b/jcloud/jcloud/pagetype/proxy_server/test_proxy_server.py @@ -0,0 +1,95 @@ +# Copyright (c) 2020, JINGROW +# See license.txt +from __future__ import annotations + +from unittest.mock import Mock, patch + +import jingrow +from jingrow.model.naming import make_autoname +from jingrow.tests.utils import JingrowTestCase +from moto import mock_aws + +from jcloud.jcloud.pagetype.agent_job.test_agent_job import fake_agent_job +from 
jcloud.jcloud.pagetype.jcloud_settings.test_jcloud_settings import ( + create_test_jcloud_settings, +) +from jcloud.jcloud.pagetype.proxy_server.proxy_server import ProxyServer +from jcloud.jcloud.pagetype.root_domain.root_domain import RootDomain +from jcloud.jcloud.pagetype.server.server import BaseServer +from jcloud.jcloud.pagetype.virtual_machine.test_virtual_machine import create_test_virtual_machine +from jcloud.utils.test import foreground_enqueue_pg + + +@patch.object(BaseServer, "after_insert", new=Mock()) +@patch.object(ProxyServer, "validate_domains", new=Mock()) +def create_test_proxy_server( + hostname: str = "n", + domain: str = "fc.dev", + domains: list[dict[str, str]] | None = None, + cluster: str = "Default", + is_primary: bool = True, +) -> ProxyServer: + """Create test Proxy Server pg""" + if domains is None: + domains = [{"domain": "fc.dev"}] + create_test_jcloud_settings() + server = jingrow.get_pg( + { + "pagetype": "Proxy Server", + "status": "Active", + "ip": jingrow.mock("ipv4"), + "private_ip": jingrow.mock("ipv4_private"), + "hostname": make_autoname(hostname + ".######"), + "cluster": cluster, + "domain": domain, + "domains": domains, + "is_primary": is_primary, + "virtual_machine": create_test_virtual_machine().name, + } + ).insert(ignore_if_duplicate=True) + server.reload() + return server + + +@patch( + "jcloud.jcloud.pagetype.proxy_server.proxy_server.jingrow.enqueue_pg", + foreground_enqueue_pg, +) +@patch("jcloud.jcloud.pagetype.proxy_server.proxy_server.Ansible", new=Mock()) +class TestProxyServer(JingrowTestCase): + @fake_agent_job("Reload NGINX Job") + @mock_aws + @patch.object( + RootDomain, + "update_dns_records_for_sites", + wraps=RootDomain.update_dns_records_for_sites, + autospec=True, + ) + def test_sites_dns_updated_on_failover(self, update_dns_records_for_sites): + from jcloud.jcloud.pagetype.server.test_server import create_test_server + from jcloud.jcloud.pagetype.site.test_site import create_test_site + + proxy1 = create_test_proxy_server() + proxy2 = create_test_proxy_server(is_primary=False) + + root_domain: RootDomain = jingrow.get_pg("Root Domain", proxy1.domain) + root_domain.boto3_client.create_hosted_zone( + Name=proxy1.domain, + CallerReference="1", + HostedZoneConfig={"Comment": "Test", "PrivateZone": False}, + ) + + server = create_test_server(proxy1.name) + site1 = create_test_site(server=server.name) + create_test_site() # another proxy; unrelated + + proxy2.db_set("primary", proxy1.name) + proxy2.db_set("is_replication_setup", 1) + proxy2.trigger_failover() + update_dns_records_for_sites.assert_called_once_with(root_domain, [site1.name], proxy2.name) + proxy2.reload() + proxy1.reload() + self.assertTrue(proxy2.is_primary) + self.assertFalse(proxy1.is_primary) + self.assertEqual(proxy2.status, "Active") + self.assertEqual(proxy1.status, "Active") diff --git a/jcloud/jcloud/pagetype/proxy_server_domain/__init__.py b/jcloud/jcloud/pagetype/proxy_server_domain/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/proxy_server_domain/proxy_server_domain.json b/jcloud/jcloud/pagetype/proxy_server_domain/proxy_server_domain.json new file mode 100644 index 0000000..719fe39 --- /dev/null +++ b/jcloud/jcloud/pagetype/proxy_server_domain/proxy_server_domain.json @@ -0,0 +1,41 @@ +{ + "actions": [], + "creation": "2021-03-24 10:21:00.192032", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "domain", + "code_server" + ], + "fields": [ + { + "fieldname": 
"domain", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Domain", + "options": "Root Domain", + "reqd": 1 + }, + { + "default": "0", + "fieldname": "code_server", + "fieldtype": "Check", + "label": "Code Server" + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2023-07-24 21:41:33.648896", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Proxy Server Domain", + "owner": "Administrator", + "permissions": [], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/proxy_server_domain/proxy_server_domain.py b/jcloud/jcloud/pagetype/proxy_server_domain/proxy_server_domain.py new file mode 100644 index 0000000..53cbea6 --- /dev/null +++ b/jcloud/jcloud/pagetype/proxy_server_domain/proxy_server_domain.py @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + + +# import jingrow +from jingrow.model.document import Document + + +class ProxyServerDomain(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + code_server: DF.Check + domain: DF.Link + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/razorpay_payment_record/__init__.py b/jcloud/jcloud/pagetype/razorpay_payment_record/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/razorpay_payment_record/razorpay_payment_record.js b/jcloud/jcloud/pagetype/razorpay_payment_record/razorpay_payment_record.js new file mode 100644 index 0000000..0c0cb00 --- /dev/null +++ b/jcloud/jcloud/pagetype/razorpay_payment_record/razorpay_payment_record.js @@ -0,0 +1,14 @@ +// Copyright (c) 2022, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Razorpay Payment Record', { + refresh: function (frm) { + if (frm.pg.status === 'Pending') { + frm.add_custom_button(__('Sync'), function () { + frm.call('sync').then(() => { + frm.refresh(); + }); + }); + } + }, +}); diff --git a/jcloud/jcloud/pagetype/razorpay_payment_record/razorpay_payment_record.json b/jcloud/jcloud/pagetype/razorpay_payment_record/razorpay_payment_record.json new file mode 100644 index 0000000..d097387 --- /dev/null +++ b/jcloud/jcloud/pagetype/razorpay_payment_record/razorpay_payment_record.json @@ -0,0 +1,95 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2022-01-19 15:12:41.914117", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "team", + "payment_id", + "order_id", + "signature", + "type", + "status", + "failure_reason" + ], + "fields": [ + { + "fieldname": "team", + "fieldtype": "Link", + "in_standard_filter": 1, + "label": "Team", + "options": "Team" + }, + { + "fieldname": "payment_id", + "fieldtype": "Data", + "in_standard_filter": 1, + "label": "Payment ID", + "read_only": 1, + "unique": 1 + }, + { + "fieldname": "order_id", + "fieldtype": "Data", + "label": "Order ID", + "read_only": 1, + "unique": 1 + }, + { + "fieldname": "signature", + "fieldtype": "Data", + "label": "Signature", + "read_only": 1 + }, + { + "default": "Pending", + "fieldname": "status", + "fieldtype": "Select", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Status", + "options": 
"Captured\nFailed\nPending", + "read_only": 1 + }, + { + "fieldname": "failure_reason", + "fieldtype": "Small Text", + "label": "Failure Reason", + "read_only": 1 + }, + { + "default": "Prepaid Credits", + "fieldname": "type", + "fieldtype": "Select", + "label": "Type", + "options": "Prepaid Credits\nPartnership Fee" + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2025-01-05 22:06:31.980472", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Razorpay Payment Record", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "title_field": "order_id" +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/razorpay_payment_record/razorpay_payment_record.py b/jcloud/jcloud/pagetype/razorpay_payment_record/razorpay_payment_record.py new file mode 100644 index 0000000..46bcb50 --- /dev/null +++ b/jcloud/jcloud/pagetype/razorpay_payment_record/razorpay_payment_record.py @@ -0,0 +1,232 @@ +# Copyright (c) 2022, JINGROW +# For license information, please see license.txt +from __future__ import annotations + +from datetime import datetime, timedelta + +import jingrow +from jingrow.model.document import Document + +from jcloud.jcloud.pagetype.team.team import _enqueue_finalize_unpaid_invoices_for_team +from jcloud.utils import log_error +from jcloud.utils.billing import get_razorpay_client + + +class RazorpayPaymentRecord(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + failure_reason: DF.SmallText | None + order_id: DF.Data | None + payment_id: DF.Data | None + signature: DF.Data | None + status: DF.Literal["Captured", "Failed", "Pending"] + team: DF.Link | None + type: DF.Literal["Prepaid Credits", "Partnership Fee"] + # end: auto-generated types + + def on_update(self): + if self.has_value_changed("status") and self.status == "Captured": + if self.type == "Prepaid Credits": + self.process_prepaid_credits() + elif self.type == "Partnership Fee": + self.process_partnership_fee() + + def process_prepaid_credits(self): + team = jingrow.get_pg("Team", self.team) + + client = get_razorpay_client() + payment = client.payment.fetch(self.payment_id) + amount_with_tax = payment["amount"] / 100 + gst = float(payment["notes"].get("gst", 0)) + amount = amount_with_tax - gst + balance_transaction = team.allocate_credit_amount( + amount, + source="Prepaid Credits", + remark=f"Razorpay: {self.payment_id}", + ) + team.reload() + + # Add a field to track razorpay event + invoice = jingrow.get_pg( + pagetype="Invoice", + team=team.name, + type="Prepaid Credits", + status="Paid", + due_date=datetime.fromtimestamp(payment["created_at"]), + total=amount, + amount_due=amount, + gst=gst or 0, + amount_due_with_tax=amount_with_tax, + amount_paid=amount_with_tax, + razorpay_order_id=self.order_id, + razorpay_payment_record=self.name, + razorpay_payment_method=payment["method"], + ) + invoice.append( + "items", + { + "description": "Prepaid Credits", + "document_type": "Balance Transaction", + "document_name": balance_transaction.name, + "quantity": 1, + "rate": amount, + }, + ) + invoice.insert() + invoice.reload() + + invoice.update_razorpay_transaction_details(payment) + invoice.submit() 
+ + _enqueue_finalize_unpaid_invoices_for_team(team.name) + + def process_partnership_fee(self): + team = jingrow.get_pg("Team", self.team) + + client = get_razorpay_client() + payment = client.payment.fetch(self.payment_id) + amount_with_tax = payment["amount"] / 100 + gst = float(payment["notes"].get("gst", 0)) + amount = amount_with_tax - gst + balance_transaction = team.allocate_credit_amount( + amount, + source="Prepaid Credits", + remark=f"Razorpay: {self.payment_id}", + type="Partnership Fee", + ) + team.reload() + + # Add a field to track razorpay event + invoice = jingrow.get_pg( + pagetype="Invoice", + team=team.name, + type="Partnership Fees", + status="Paid", + due_date=datetime.fromtimestamp(payment["created_at"]), + total=amount, + amount_due=amount, + gst=gst or 0, + amount_due_with_tax=amount_with_tax, + amount_paid=amount_with_tax, + razorpay_order_id=self.order_id, + razorpay_payment_record=self.name, + razorpay_payment_method=payment["method"], + ) + invoice.append( + "items", + { + "description": "Partnership Fee", + "document_type": "Balance Transaction", + "document_name": balance_transaction.name, + "quantity": 1, + "rate": amount, + }, + ) + invoice.insert() + invoice.reload() + + invoice.update_razorpay_transaction_details(payment) + invoice.submit() + + @jingrow.whitelist() + def sync(self): + try: + client = get_razorpay_client() + response = client.order.payments(self.order_id) + + for item in response.get("items"): + if item["status"] == "captured": + jingrow.get_pg( + { + "pagetype": "Razorpay Webhook Log", + "payload": jingrow.as_json(item), + "event": "order.paid", + "payment_id": item["id"], + "name": item["order_id"], + } + ).insert(ignore_if_duplicate=True) + except Exception: + log_error(title="Failed to sync Razorpay Payment Record", order_id=self.order_id) + + +def fetch_pending_payment_orders(hours=12): + past_12hrs_ago = datetime.now() - timedelta(hours=hours) + pending_orders = jingrow.get_all( + "Razorpay Payment Record", + dict(status="Pending", creation=(">=", past_12hrs_ago)), + pluck="order_id", + ) + + client = get_razorpay_client() + if not pending_orders: + return + + for order_id in pending_orders: + try: + response = client.order.payments(order_id) + for item in response.get("items"): + if item["status"] == "captured": + jingrow.get_pg( + { + "pagetype": "Razorpay Webhook Log", + "payload": jingrow.as_json(item), + "event": "order.paid", + "payment_id": item["id"], + "name": item["order_id"], + } + ).insert(ignore_if_duplicate=True) + except Exception: + log_error(title="Failed to capture pending order", order_id=order_id) + + """ + Sample Response + ref: https://razorpay.com/docs/api/orders/#fetch-payments-for-an-order + + { + "entity": "collection", + "count": 1, + "items": [ + { + "id": "pay_JhOBNkFZFi0EOX", + "entity": "payment", + "amount": 100, + "currency": "CNY", + "status": "captured", + "order_id": "order_DaaS6LOUAASb7Y", + "invoice_id": null, + "international": false, + "method": "card", + "amount_refunded": 0, + "refund_status": null, + "captured": true, + "description": "", + "card_id": "card_Be7AhhLtm1gxzc", + "bank": null, + "wallet": null, + "vpa": null, + "email": "gaurav.kumar@example.com", + "contact": "+919900000000", + "customer_id": "cust_Be6N4O63pXzmqK", + "token_id": "token_BhNxzjrZvkqLWr", + "notes": [], + "fee": 0, + "tax": 0, + "error_code": null, + "error_description": null, + "error_source": null, + "error_step": null, + "error_reason": null, + "acquirer_data": { + "auth_code": null + }, + "created_at": 
1655212834 + } + ] + } + """ diff --git a/jcloud/jcloud/pagetype/razorpay_payment_record/test_razorpay_payment_record.py b/jcloud/jcloud/pagetype/razorpay_payment_record/test_razorpay_payment_record.py new file mode 100644 index 0000000..90c058c --- /dev/null +++ b/jcloud/jcloud/pagetype/razorpay_payment_record/test_razorpay_payment_record.py @@ -0,0 +1,9 @@ +# Copyright (c) 2022, JINGROW +# See license.txt + +# import jingrow +import unittest + + +class TestRazorpayPaymentRecord(unittest.TestCase): + pass diff --git a/jcloud/jcloud/pagetype/razorpay_webhook_log/__init__.py b/jcloud/jcloud/pagetype/razorpay_webhook_log/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/razorpay_webhook_log/razorpay_webhook_log.js b/jcloud/jcloud/pagetype/razorpay_webhook_log/razorpay_webhook_log.js new file mode 100644 index 0000000..c84249f --- /dev/null +++ b/jcloud/jcloud/pagetype/razorpay_webhook_log/razorpay_webhook_log.js @@ -0,0 +1,7 @@ +// Copyright (c) 2022, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Razorpay Webhook Log', { + // refresh: function(frm) { + // } +}); diff --git a/jcloud/jcloud/pagetype/razorpay_webhook_log/razorpay_webhook_log.json b/jcloud/jcloud/pagetype/razorpay_webhook_log/razorpay_webhook_log.json new file mode 100644 index 0000000..9eaba3f --- /dev/null +++ b/jcloud/jcloud/pagetype/razorpay_webhook_log/razorpay_webhook_log.json @@ -0,0 +1,63 @@ +{ + "actions": [], + "allow_rename": 1, + "autoname": "Prompt", + "creation": "2022-01-19 15:16:25.377525", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "event", + "payment_id", + "payload" + ], + "fields": [ + { + "fieldname": "event", + "fieldtype": "Data", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Event" + }, + { + "fieldname": "payment_id", + "fieldtype": "Data", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Payment ID" + }, + { + "fieldname": "payload", + "fieldtype": "Code", + "label": "Payload", + "options": "JSON", + "read_only": 1 + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2022-01-19 22:51:03.279573", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Razorpay Webhook Log", + "naming_rule": "Set by user", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "title_field": "event" +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/razorpay_webhook_log/razorpay_webhook_log.py b/jcloud/jcloud/pagetype/razorpay_webhook_log/razorpay_webhook_log.py new file mode 100644 index 0000000..11200b9 --- /dev/null +++ b/jcloud/jcloud/pagetype/razorpay_webhook_log/razorpay_webhook_log.py @@ -0,0 +1,153 @@ +# Copyright (c) 2022, JINGROW +# For license information, please see license.txt +from __future__ import annotations + +import jingrow +from jingrow.model.document import Document + +from jcloud.utils import log_error +from jcloud.utils.billing import get_razorpay_client + + +class RazorpayWebhookLog(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + event: DF.Data | None + payload: DF.Code | None + payment_id: DF.Data | None + # end: auto-generated types + + def after_insert(self): + payment_record = jingrow.get_pg("Razorpay Payment Record", {"order_id": self.name}) + + if self.event in ("order.paid", "payment.captured") and payment_record.status != "Captured": + payment_record.update({"payment_id": self.payment_id, "status": "Captured"}) + payment_record.save(ignore_permissions=True) + + +@jingrow.whitelist(allow_guest=True) +def razorpay_authorized_payment_handler(): + client = get_razorpay_client() + form_dict = jingrow.local.form_dict + + payment_id = None + try: + payload = jingrow.request.get_data() + signature = jingrow.get_request_header("X-Razorpay-Signature") + webhook_secret = jingrow.db.get_single_value("Jcloud Settings", "razorpay_webhook_secret") + entity_data = form_dict["payload"]["payment"]["entity"] + + client.utility.verify_webhook_signature(payload.decode(), signature, webhook_secret) + if entity_data["status"] != "authorized": + raise Exception("invalid payment status received") + payment_id = entity_data.get("id") + order_id = entity_data.get("order_id", "") + amount = entity_data.get("amount") + notes = entity_data.get("notes") + + if not order_id: + return + + razorpay_payment_record = jingrow.db.exists("Razorpay Payment Record", {"order_id": order_id}) + if not razorpay_payment_record: + # Don't log error if its not JingrowCloud order + # Example of valid notes + # "notes": { + # "Description": "Order for Jingrow Prepaid Credits", + # "Team (Jingrow ID)": "test@example.com" + # "gst": 245 + # }, + + if notes and notes.get("description"): + log_error( + "Razorpay payment record for given order does not exist", + order_id=order_id, + ) + return + + # Only capture payment, if the status of order id is pending + if jingrow.db.get_value("Razorpay Payment Record", razorpay_payment_record, "status") != "Pending": + return + + # Capture the authorized payment + client.payment.capture(payment_id, amount) + except Exception as e: + error_message = str(e) + if ( + "payment has already been captured" in error_message + or "the order is already paid" in error_message + or "id provided does not exist" in error_message + ): + return + log_error( + title="Razorpay Authorized Payment Webhook Handler", + payment_id=payment_id, + ) + raise Exception from e + + +@jingrow.whitelist(allow_guest=True) +def razorpay_webhook_handler(): + client = get_razorpay_client() + current_user = jingrow.session.user + form_dict = jingrow.local.form_dict + + try: + payload = jingrow.request.get_data() + signature = jingrow.get_request_header("X-Razorpay-Signature") + webhook_secret = jingrow.db.get_single_value("Jcloud Settings", "razorpay_webhook_secret") + + client.utility.verify_webhook_signature(payload.decode(), signature, webhook_secret) + + # set user to Administrator, to not have to do ignore_permissions everywhere + jingrow.set_user("Administrator") + + entity_data = form_dict["payload"]["payment"]["entity"] + razorpay_order_id = entity_data.get("order_id") + + if not razorpay_order_id: + return + + razorpay_payment_record = jingrow.db.exists("Razorpay Payment Record", {"order_id": razorpay_order_id}) + + notes = form_dict["payload"]["payment"]["entity"]["notes"] + if not razorpay_payment_record: + # Don't log error if its not JingrowCloud order + # Example of valid notes + # "notes": { + # "Description": "Order for Jingrow Prepaid Credits", + # 
"Team (Jingrow ID)": "test@example.com", + # "gst": 245 + # }, + + if notes and notes.get("description"): + log_error( + "Razorpay payment record for given order does not exist", + order_id=razorpay_order_id, + ) + return + + jingrow.get_pg( + { + "pagetype": "Razorpay Webhook Log", + "payload": jingrow.as_json(form_dict), + "event": form_dict.get("event"), + "payment_id": form_dict["payload"]["payment"]["entity"]["id"], + "name": razorpay_order_id, + } + ).insert(ignore_if_duplicate=True) + + except Exception as e: + jingrow.db.rollback() + log_error( + title="Razorpay Webhook Handler", + payment_id=form_dict["payload"]["payment"]["entity"]["id"], + ) + jingrow.set_user(current_user) + raise Exception from e diff --git a/jcloud/jcloud/pagetype/razorpay_webhook_log/test_razorpay_webhook_log.py b/jcloud/jcloud/pagetype/razorpay_webhook_log/test_razorpay_webhook_log.py new file mode 100644 index 0000000..4d90cc0 --- /dev/null +++ b/jcloud/jcloud/pagetype/razorpay_webhook_log/test_razorpay_webhook_log.py @@ -0,0 +1,9 @@ +# Copyright (c) 2022, JINGROW +# See license.txt + +# import jingrow +import unittest + + +class TestRazorpayWebhookLog(unittest.TestCase): + pass diff --git a/jcloud/jcloud/pagetype/region/__init__.py b/jcloud/jcloud/pagetype/region/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/region/region.js b/jcloud/jcloud/pagetype/region/region.js new file mode 100644 index 0000000..ed4ddc6 --- /dev/null +++ b/jcloud/jcloud/pagetype/region/region.js @@ -0,0 +1,6 @@ +// Copyright (c) 2018, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Region', { + refresh: function (frm) {}, +}); diff --git a/jcloud/jcloud/pagetype/region/region.json b/jcloud/jcloud/pagetype/region/region.json new file mode 100644 index 0000000..2e51d60 --- /dev/null +++ b/jcloud/jcloud/pagetype/region/region.json @@ -0,0 +1,54 @@ +{ + "actions": [], + "allow_import": 1, + "autoname": "field:region_name", + "creation": "2018-04-26 17:21:09.034956", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "region_name", + "last_allocated_to" + ], + "fields": [ + { + "fieldname": "region_name", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Region Name", + "reqd": 1, + "unique": 1 + }, + { + "fieldname": "last_allocated_to", + "fieldtype": "Link", + "label": "Last Allocated To", + "options": "JERP Consultant", + "read_only": 1 + } + ], + "links": [], + "modified": "2021-04-30 13:05:32.560491", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Region", + "name_case": "Title Case", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/region/region.py b/jcloud/jcloud/pagetype/region/region.py new file mode 100644 index 0000000..c291a1e --- /dev/null +++ b/jcloud/jcloud/pagetype/region/region.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2018, JINGROW +# For license information, please see license.txt + + +from jingrow.model.document import Document + + +class Region(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + last_allocated_to: DF.Link | None + region_name: DF.Data + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/region/test_region.py b/jcloud/jcloud/pagetype/region/test_region.py new file mode 100644 index 0000000..4bb09a5 --- /dev/null +++ b/jcloud/jcloud/pagetype/region/test_region.py @@ -0,0 +1,10 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2018, JINGROW +# See license.txt + + +import unittest + + +class TestRegion(unittest.TestCase): + pass diff --git a/jcloud/jcloud/pagetype/registry_server/__init__.py b/jcloud/jcloud/pagetype/registry_server/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/registry_server/registry_server.js b/jcloud/jcloud/pagetype/registry_server/registry_server.js new file mode 100644 index 0000000..868577b --- /dev/null +++ b/jcloud/jcloud/pagetype/registry_server/registry_server.js @@ -0,0 +1,56 @@ +// Copyright (c) 2021, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Registry Server', { + refresh: function (frm) { + [ + [__('Ping Ansible'), 'ping_ansible', true], + [__('Ping Ansible Unprepared'), 'ping_ansible_unprepared', true], + [__('Prepare Server'), 'prepare_server', true, !frm.pg.is_server_setup], + [__('Setup Server'), 'setup_server', true, !frm.pg.is_server_setup], + [ + __('Update TLS Certificate'), + 'update_tls_certificate', + true, + frm.pg.is_server_setup, + ], + [ + __('Fetch Keys'), + 'fetch_keys', + false, + frm.pg.is_server_setup && + (!frm.pg.jingrow_public_key || !frm.pg.root_public_key), + ], + ].forEach(([label, method, confirm, condition]) => { + if (typeof condition === 'undefined' || condition) { + frm.add_custom_button( + label, + () => { + if (confirm) { + jingrow.confirm( + `Are you sure you want to ${label.toLowerCase()}?`, + () => + frm.call(method).then((r) => { + if (r.message) { + jingrow.msgprint(r.message); + } else { + frm.refresh(); + } + }), + ); + } else { + frm.call(method).then((r) => { + if (r.message) { + jingrow.msgprint(r.message); + } else { + frm.refresh(); + } + }); + } + }, + __('Actions'), + ); + } + }); + }, +}); diff --git a/jcloud/jcloud/pagetype/registry_server/registry_server.json b/jcloud/jcloud/pagetype/registry_server/registry_server.json new file mode 100644 index 0000000..8bbfe1f --- /dev/null +++ b/jcloud/jcloud/pagetype/registry_server/registry_server.json @@ -0,0 +1,230 @@ +{ + "actions": [], + "creation": "2021-01-04 16:30:38.925921", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "status", + "hostname", + "domain", + "column_break_4", + "provider", + "virtual_machine", + "is_server_setup", + "networking_section", + "ip", + "column_break_9", + "private_ip", + "private_mac_address", + "private_vlan_id", + "agent_section", + "agent_password", + "registry_section", + "registry_username", + "column_break_10", + "registry_password", + "ssh_section", + "jingrow_user_password", + "jingrow_public_key", + "column_break_20", + "root_public_key", + "monitoring_section", + "monitoring_password" + ], + "fields": [ + { + "default": "Pending", + "fieldname": "status", + "fieldtype": "Select", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Status", + "options": "Pending\nInstalling\nActive\nBroken\nArchived", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "hostname", + "fieldtype": "Data", + "label": "Hostname", + "reqd": 1, + "set_only_once": 1 + }, + { + 
"fieldname": "domain", + "fieldtype": "Link", + "label": "Domain", + "options": "Root Domain", + "read_only": 1, + "set_only_once": 1 + }, + { + "default": "0", + "fieldname": "is_server_setup", + "fieldtype": "Check", + "label": "Server Setup", + "read_only": 1 + }, + { + "fetch_from": "virtual_machine.public_ip_address", + "fieldname": "ip", + "fieldtype": "Data", + "in_list_view": 1, + "label": "IP", + "reqd": 1, + "set_only_once": 1 + }, + { + "fetch_from": "virtual_machine.private_ip_address", + "fieldname": "private_ip", + "fieldtype": "Data", + "label": "Private IP", + "reqd": 1, + "set_only_once": 1 + }, + { + "fieldname": "registry_section", + "fieldtype": "Section Break", + "label": "Registry" + }, + { + "fieldname": "registry_username", + "fieldtype": "Data", + "label": "Registry Username", + "set_only_once": 1 + }, + { + "fieldname": "registry_password", + "fieldtype": "Password", + "label": "Registry Password", + "set_only_once": 1 + }, + { + "fieldname": "ssh_section", + "fieldtype": "Section Break", + "label": "SSH" + }, + { + "fieldname": "root_public_key", + "fieldtype": "Code", + "label": "Root Public Key", + "read_only": 1 + }, + { + "fieldname": "column_break_20", + "fieldtype": "Column Break" + }, + { + "fieldname": "jingrow_public_key", + "fieldtype": "Code", + "label": "Jingrow Public Key", + "read_only": 1 + }, + { + "fieldname": "column_break_10", + "fieldtype": "Column Break" + }, + { + "fieldname": "column_break_4", + "fieldtype": "Column Break" + }, + { + "default": "Generic", + "fieldname": "provider", + "fieldtype": "Select", + "label": "Provider", + "options": "Generic\nScaleway\nAWS EC2\nOCI", + "set_only_once": 1 + }, + { + "fieldname": "jingrow_user_password", + "fieldtype": "Password", + "label": "Jingrow User Password", + "set_only_once": 1 + }, + { + "fieldname": "networking_section", + "fieldtype": "Section Break", + "label": "Networking" + }, + { + "fieldname": "column_break_9", + "fieldtype": "Column Break" + }, + { + "depends_on": "eval: pg.provider === \"Scaleway\"", + "fieldname": "private_mac_address", + "fieldtype": "Data", + "label": "Private Mac Address", + "mandatory_depends_on": "eval: pg.provider === \"Scaleway\"", + "set_only_once": 1 + }, + { + "depends_on": "eval: pg.provider === \"Scaleway\"", + "fieldname": "private_vlan_id", + "fieldtype": "Data", + "label": "Private VLAN ID", + "mandatory_depends_on": "eval: pg.provider === \"Scaleway\"", + "set_only_once": 1 + }, + { + "fieldname": "agent_section", + "fieldtype": "Section Break", + "label": "Agent" + }, + { + "fieldname": "agent_password", + "fieldtype": "Password", + "label": "Agent Password" + }, + { + "fieldname": "monitoring_section", + "fieldtype": "Section Break", + "label": "Monitoring" + }, + { + "fieldname": "monitoring_password", + "fieldtype": "Password", + "label": "Monitoring Password", + "set_only_once": 1 + }, + { + "depends_on": "eval:pg.provider === \"AWS EC2\"", + "fieldname": "virtual_machine", + "fieldtype": "Link", + "label": "Virtual Machine", + "mandatory_depends_on": "eval:pg.provider === \"AWS EC2\"", + "options": "Virtual Machine" + } + ], + "links": [ + { + "link_pagetype": "Ansible Play", + "link_fieldname": "server" + } + ], + "modified": "2023-12-13 15:09:46.909110", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Registry Server", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 
+ } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/registry_server/registry_server.py b/jcloud/jcloud/pagetype/registry_server/registry_server.py new file mode 100644 index 0000000..16fa7ee --- /dev/null +++ b/jcloud/jcloud/pagetype/registry_server/registry_server.py @@ -0,0 +1,102 @@ +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + +from __future__ import annotations + +import jingrow + +from jcloud.jcloud.pagetype.deploy_candidate.deploy_candidate import toggle_builds +from jcloud.jcloud.pagetype.server.server import BaseServer +from jcloud.runner import Ansible +from jcloud.utils import log_error + + +class RegistryServer(BaseServer): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + agent_password: DF.Password | None + domain: DF.Link | None + jingrow_public_key: DF.Code | None + jingrow_user_password: DF.Password | None + hostname: DF.Data + ip: DF.Data + is_server_setup: DF.Check + monitoring_password: DF.Password | None + private_ip: DF.Data + private_mac_address: DF.Data | None + private_vlan_id: DF.Data | None + provider: DF.Literal["Generic", "Scaleway", "AWS EC2", "OCI"] + registry_password: DF.Password | None + registry_username: DF.Data | None + root_public_key: DF.Code | None + status: DF.Literal["Pending", "Installing", "Active", "Broken", "Archived"] + virtual_machine: DF.Link | None + # end: auto-generated types + + def validate(self): + self.validate_agent_password() + self.validate_registry_username() + self.validate_registry_password() + self.validate_monitoring_password() + + def validate_registry_password(self): + if not self.registry_password: + self.registry_password = jingrow.generate_hash(length=32) + + def validate_registry_username(self): + if not self.registry_username: + self.registry_username = "jingrow" + + def validate_monitoring_password(self): + if not self.monitoring_password: + self.monitoring_password = jingrow.generate_hash() + + def _setup_server(self): + agent_password = self.get_password("agent_password") + agent_repository_url = self.get_agent_repository_url() + monitoring_password = self.get_password("monitoring_password") + certificate_name = jingrow.db.get_value( + "TLS Certificate", {"wildcard": True, "domain": self.domain}, "name" + ) + certificate = jingrow.get_pg("TLS Certificate", certificate_name) + try: + ansible = Ansible( + playbook="registry.yml", + server=self, + variables={ + "server": self.name, + "workers": 1, + "domain": self.domain, + "agent_password": agent_password, + "agent_repository_url": agent_repository_url, + "monitoring_password": monitoring_password, + "private_ip": self.private_ip, + "registry_username": self.registry_username, + "registry_password": self.get_password("registry_password"), + "certificate_private_key": certificate.private_key, + "certificate_full_chain": certificate.full_chain, + "certificate_intermediate_chain": certificate.intermediate_chain, + }, + ) + play = ansible.run() + self.reload() + if play.status == "Success": + self.status = "Active" + self.is_server_setup = True + else: + self.status = "Broken" + except Exception: + self.status = "Broken" + log_error("Registry Server Setup Exception", server=self.as_dict()) + self.save() + + def _prune_docker_system(self): + toggle_builds(True) + 
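# Hedged note: toggle_builds(True) appears to pause Deploy Candidate builds while docker prunes the registry server, so deploys don't push to or pull from the registry mid-prune; builds are re-enabled right after. +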
super()._prune_docker_system() + toggle_builds(False) diff --git a/jcloud/jcloud/pagetype/registry_server/test_registry_server.py b/jcloud/jcloud/pagetype/registry_server/test_registry_server.py new file mode 100644 index 0000000..7eba49c --- /dev/null +++ b/jcloud/jcloud/pagetype/registry_server/test_registry_server.py @@ -0,0 +1,11 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2021, JINGROW +# See license.txt + + +# import jingrow +import unittest + + +class TestRegistryServer(unittest.TestCase): + pass diff --git a/jcloud/jcloud/pagetype/release_group/__init__.py b/jcloud/jcloud/pagetype/release_group/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/release_group/patches/set_bench_dependency_in_release_group.py b/jcloud/jcloud/pagetype/release_group/patches/set_bench_dependency_in_release_group.py new file mode 100644 index 0000000..043fbe4 --- /dev/null +++ b/jcloud/jcloud/pagetype/release_group/patches/set_bench_dependency_in_release_group.py @@ -0,0 +1,17 @@ +# Copyright (c) 2022, JINGROW +# For license information, please see license.txt + + +import jingrow + + +def execute(): + for name in jingrow.db.get_all("Release Group", pluck="name"): + release_group = jingrow.get_pg("Release Group", name) + release_group.extend( + "dependencies", + [ + {"dependency": "BENCH_VERSION", "version": "5.2.1"}, + ], + ) + release_group.db_update_all() diff --git a/jcloud/jcloud/pagetype/release_group/patches/sync_common_site_config.py b/jcloud/jcloud/pagetype/release_group/patches/sync_common_site_config.py new file mode 100644 index 0000000..c95b788 --- /dev/null +++ b/jcloud/jcloud/pagetype/release_group/patches/sync_common_site_config.py @@ -0,0 +1,41 @@ +# Copyright (c) 2023, JINGROW +# For license information, please see license.txt + + +import jingrow + + +def execute(): + for name in jingrow.db.get_all("Release Group", pluck="name"): + release_group = jingrow.get_pg("Release Group", name) + if ( + release_group.common_site_config + and release_group.common_site_config != "{}" + and release_group.common_site_config_table == [] + ): + common_site_config = jingrow.parse_json(release_group.common_site_config) + for key, value in common_site_config.items(): + config_type = get_type(value) + if config_type == "JSON": + value = jingrow.as_json(value) + release_group.append( + "common_site_config_table", + { + "key": key, + "value": value, + "type": config_type, + "internal": jingrow.db.get_value("Site Config Key", key, "internal"), + }, + ) + release_group.save() + + +def get_type(value): + if isinstance(value, bool): + return "Boolean" + elif isinstance(value, str): + return "String" + elif isinstance(value, int): + return "Number" + elif isinstance(value, dict): + return "JSON" diff --git a/jcloud/jcloud/pagetype/release_group/release_group.js b/jcloud/jcloud/pagetype/release_group/release_group.js new file mode 100644 index 0000000..86cde89 --- /dev/null +++ b/jcloud/jcloud/pagetype/release_group/release_group.js @@ -0,0 +1,95 @@ +// Copyright (c) 2020, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Release Group', { + refresh: function (frm) { + frm.add_web_link( + `/dashboard/groups/${frm.pg.name}`, + __('Visit Dashboard'), + ); + [ + [__('Create Deploy Candidate'), 'create_deploy_candidate'], + [ + __('Create Duplicate Deploy Candidate'), + 'create_duplicate_deploy_candidate', + ], + [__('Update Benches Configuration'), 'update_benches_config'], + ].forEach(([label, method]) => { + frm.add_custom_button( + label, + 
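// Note: all three actions above share this click handler, so the "New Deploy Candidate Created" dialog below is shown even for Update Benches Configuration. +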
() => { + frm.call(method).then(({ message }) => { + jingrow.msgprint({ + title: __('New Deploy Candidate Created'), + indicator: 'green', + message: __(`New {0} for this bench was created successfully.`, [ + `Deploy Candidate`, + ]), + }); + frm.refresh(); + }); + }, + __('Actions'), + ); + }); + + frm.add_custom_button( + 'Change Server', + () => { + let d = new jingrow.ui.Dialog({ + title: 'Change Server', + fields: [ + { + fieldtype: 'Link', + fieldname: 'server', + label: 'Server', + options: 'Server', + reqd: 1, + }, + ], + primary_action({ server }) { + frm.call('change_server', { server }).then((r) => { + if (!r.exc) { + jingrow.show_alert(`Server changed to ${server}`); + } + d.hide(); + }); + }, + }); + d.show(); + }, + __('Actions'), + ); + frm.add_custom_button( + 'Add Server', + () => { + let d = new jingrow.ui.Dialog({ + title: 'Add Server', + fields: [ + { + fieldtype: 'Link', + fieldname: 'server', + label: 'Server', + options: 'Server', + reqd: 1, + }, + ], + primary_action({ server }) { + frm.call('add_server', { server, deploy: true }).then((r) => { + if (!r.exc) { + jingrow.show_alert( + `Added ${server} and deployed last successful candidate`, + ); + } + d.hide(); + }); + }, + }); + d.show(); + }, + __('Actions'), + ); + + frm.set_df_property('dependencies', 'cannot_add_rows', 1); + }, +}); diff --git a/jcloud/jcloud/pagetype/release_group/release_group.json b/jcloud/jcloud/pagetype/release_group/release_group.json new file mode 100644 index 0000000..99fa54c --- /dev/null +++ b/jcloud/jcloud/pagetype/release_group/release_group.json @@ -0,0 +1,417 @@ +{ + "actions": [], + "allow_rename": 1, + "autoname": "bench-.####", + "creation": "2022-01-28 20:07:29.874387", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "title", + "version", + "team", + "column_break_4", + "enabled", + "default", + "public", + "central_bench", + "section_break_7", + "servers", + "section_break_keov", + "build_server", + "apps_tab", + "apps", + "config_tab", + "bench_configuration_section", + "common_site_configuration_column", + "common_site_config_table", + "common_site_config", + "column_break_14", + "bench_config", + "gunicorn_threads_per_worker", + "redis_cache_size", + "automatic_worker_allocation_section", + "min_gunicorn_workers", + "min_background_workers", + "column_break_njfg", + "max_gunicorn_workers", + "max_background_workers", + "dependencies_tab", + "dependencies", + "last_dependency_update", + "packages", + "environment_variables", + "mounts", + "feature_flags_tab", + "is_redisearch_enabled", + "is_push_to_deploy_enabled", + "use_app_cache", + "compress_app_cache", + "use_delta_builds", + "column_break_9efq", + "merge_all_rq_queues", + "merge_default_and_short_rq_queues", + "use_rq_workerpool", + "saas_tab", + "saas_bench", + "column_break_26", + "saas_app", + "miscellaneous_tab", + "tags", + "is_code_server_enabled" + ], + "fields": [ + { + "default": "0", + "fieldname": "default", + "fieldtype": "Check", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Default" + }, + { + "default": "0", + "fieldname": "public", + "fieldtype": "Check", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Public" + }, + { + "default": "1", + "fieldname": "enabled", + "fieldtype": "Check", + "label": "Enabled" + }, + { + "fieldname": "team", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Team", + "options": "Team", + "reqd": 1, + "search_index": 1 + }, + { + "fieldname": "version", + "fieldtype": 
"Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Version", + "options": "Jingrow Version", + "reqd": 1 + }, + { + "fieldname": "section_break_7", + "fieldtype": "Section Break" + }, + { + "fieldname": "title", + "fieldtype": "Data", + "label": "Title", + "reqd": 1 + }, + { + "fieldname": "servers", + "fieldtype": "Table", + "label": "Servers", + "options": "Release Group Server" + }, + { + "fieldname": "apps", + "fieldtype": "Table", + "label": "Apps", + "options": "Release Group App", + "reqd": 1 + }, + { + "fieldname": "bench_configuration_section", + "fieldtype": "Section Break", + "label": "Bench Configuration" + }, + { + "default": "{}", + "fieldname": "common_site_config", + "fieldtype": "Code", + "label": "Configuration Preview", + "options": "JSON", + "read_only": 1 + }, + { + "default": "{}", + "fieldname": "bench_config", + "fieldtype": "Code", + "label": "Bench Config" + }, + { + "fieldname": "column_break_14", + "fieldtype": "Column Break", + "label": "Bench Config" + }, + { + "fieldname": "dependencies", + "fieldtype": "Table", + "label": "Dependencies", + "options": "Release Group Dependency" + }, + { + "fieldname": "column_break_4", + "fieldtype": "Column Break" + }, + { + "default": "0", + "fieldname": "is_redisearch_enabled", + "fieldtype": "Check", + "label": "Is RediSearch Enabled" + }, + { + "default": "0", + "fieldname": "saas_bench", + "fieldtype": "Check", + "label": "SaaS Bench" + }, + { + "description": "This bench is for the following SaaS App", + "fieldname": "saas_app", + "fieldtype": "Link", + "label": "SaaS App", + "options": "Saas App" + }, + { + "default": "0", + "fieldname": "central_bench", + "fieldtype": "Check", + "label": "Central Bench" + }, + { + "fieldname": "feature_flags_tab", + "fieldtype": "Tab Break", + "label": "Feature Flags" + }, + { + "default": "0", + "fieldname": "is_push_to_deploy_enabled", + "fieldtype": "Check", + "label": "Push To Deploy" + }, + { + "fieldname": "config_tab", + "fieldtype": "Tab Break", + "label": "Config" + }, + { + "fieldname": "dependencies_tab", + "fieldtype": "Tab Break", + "label": "Dependencies" + }, + { + "fieldname": "saas_tab", + "fieldtype": "Tab Break", + "label": "SaaS" + }, + { + "fieldname": "column_break_26", + "fieldtype": "Column Break" + }, + { + "fieldname": "apps_tab", + "fieldtype": "Tab Break", + "label": "Apps" + }, + { + "fieldname": "packages", + "fieldtype": "Table", + "label": "Packages", + "options": "Release Group Package" + }, + { + "fieldname": "environment_variables", + "fieldtype": "Table", + "label": "Environment Variables", + "options": "Release Group Variable" + }, + { + "fieldname": "column_break_9efq", + "fieldtype": "Column Break", + "label": "RQ Worker Config" + }, + { + "default": "0", + "fieldname": "merge_all_rq_queues", + "fieldtype": "Check", + "label": "Merge All RQ Queues" + }, + { + "default": "0", + "fieldname": "merge_default_and_short_rq_queues", + "fieldtype": "Check", + "label": "Merge Default and Short RQ Queues" + }, + { + "fieldname": "miscellaneous_tab", + "fieldtype": "Tab Break", + "label": "Miscellaneous" + }, + { + "fieldname": "tags", + "fieldtype": "Table", + "label": "Tags", + "options": "Resource Tag" + }, + { + "fieldname": "common_site_configuration_column", + "fieldtype": "Column Break", + "label": "Common Site Config" + }, + { + "fieldname": "common_site_config_table", + "fieldtype": "Table", + "label": "Configuration", + "options": "Common Site Config" + }, + { + "fieldname": "last_dependency_update", + "fieldtype": "Datetime", + 
"label": "Last Dependency Update", + "read_only": 1 + }, + { + "fieldname": "max_gunicorn_workers", + "fieldtype": "Int", + "label": "Max Gunicorn Workers", + "non_negative": 1 + }, + { + "fieldname": "max_background_workers", + "fieldtype": "Int", + "label": "Max Background Workers", + "non_negative": 1 + }, + { + "fieldname": "automatic_worker_allocation_section", + "fieldtype": "Section Break", + "label": "Automatic Worker Allocation" + }, + { + "fieldname": "min_gunicorn_workers", + "fieldtype": "Int", + "label": "Min Gunicorn Workers", + "non_negative": 1 + }, + { + "fieldname": "min_background_workers", + "fieldtype": "Int", + "label": "Min Background Workers", + "non_negative": 1 + }, + { + "fieldname": "column_break_njfg", + "fieldtype": "Column Break" + }, + { + "default": "0", + "description": "Setting this to non-zero value will set Gunicorn worker class to gthread.", + "fieldname": "gunicorn_threads_per_worker", + "fieldtype": "Int", + "label": "Gunicorn Threads Per Worker", + "non_negative": 1 + }, + { + "default": "0", + "fetch_from": "team.is_code_server_user", + "fieldname": "is_code_server_enabled", + "fieldtype": "Check", + "label": "Is Code Server Enabled", + "read_only": 1 + }, + { + "fieldname": "mounts", + "fieldtype": "Table", + "label": "Mounts", + "options": "Release Group Mount" + }, + { + "default": "0", + "fieldname": "use_rq_workerpool", + "fieldtype": "Check", + "label": "Use RQ WorkerPool" + }, + { + "default": "0", + "description": "Uses Bench get-app cache for faster image builds. Can be used only if Bench version is 5.22.1 or later.", + "fieldname": "use_app_cache", + "fieldtype": "Check", + "label": "Use App Cache" + }, + { + "default": "0", + "depends_on": "eval: pg.use_app_cache", + "description": "Use Gzip to compress bench get-app artifacts before caching.", + "fieldname": "compress_app_cache", + "fieldtype": "Check", + "label": "Compress App Cache" + }, + { + "fieldname": "section_break_keov", + "fieldtype": "Section Break" + }, + { + "default": "0", + "description": "Quickens builds by fetching app changes without rebuilding app if app rebuild is not required.", + "fieldname": "use_delta_builds", + "fieldtype": "Check", + "label": "Use Delta Builds" + }, + { + "fieldname": "build_server", + "fieldtype": "Link", + "label": "Build Server", + "options": "Server" + }, + { + "default": "512", + "fieldname": "redis_cache_size", + "fieldtype": "Int", + "label": "Redis Cache Size (MB)" + } + ], + "links": [], + "modified": "2024-12-27 11:58:29.907938", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Release Group", + "naming_rule": "Expression (old style)", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + }, + { + "create": 1, + "read": 1, + "role": "Jcloud Admin", + "write": 1 + }, + { + "create": 1, + "read": 1, + "role": "Jcloud Member", + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "title_field": "title", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/release_group/release_group.py b/jcloud/jcloud/pagetype/release_group/release_group.py new file mode 100644 index 0000000..8db2b3d --- /dev/null +++ b/jcloud/jcloud/pagetype/release_group/release_group.py @@ -0,0 +1,1550 @@ +# For license information, please see license.txt +from __future__ import annotations + +import json +from contextlib 
import suppress +from functools import cached_property +from itertools import chain +from typing import TYPE_CHECKING, TypedDict + +import jingrow +import jingrow.query_builder +import semantic_version as sv +from jingrow import _ +from jingrow.core.pagetype.version.version import get_diff +from jingrow.core.utils import find, find_all +from jingrow.model.document import Document +from jingrow.model.naming import append_number_if_name_exists +from jingrow.query_builder.functions import Count +from jingrow.utils import cstr, flt, get_url, sbool +from jingrow.utils.caching import redis_cache + +from jcloud.api.client import dashboard_whitelist +from jcloud.overrides import get_permission_query_conditions_for_pagetype +from jcloud.jcloud.pagetype.app.app import new_app +from jcloud.jcloud.pagetype.app_source.app_source import AppSource, create_app_source +from jcloud.jcloud.pagetype.deploy_candidate.utils import is_suspended +from jcloud.jcloud.pagetype.resource_tag.tag_helpers import TagHelpers +from jcloud.jcloud.pagetype.server.server import Server +from jcloud.utils import ( + get_app_tag, + get_client_blacklisted_keys, + get_current_team, + get_last_pg, + log_error, +) + +if TYPE_CHECKING: + from datetime import datetime + from typing import Any + +DEFAULT_DEPENDENCIES = [ + {"dependency": "NVM_VERSION", "version": "0.36.0"}, + {"dependency": "NODE_VERSION", "version": "14.19.0"}, + {"dependency": "PYTHON_VERSION", "version": "3.7"}, + {"dependency": "WKHTMLTOPDF_VERSION", "version": "0.12.5"}, + {"dependency": "BENCH_VERSION", "version": "5.15.2"}, +] + + +class LastDeployInfo(TypedDict): + name: str + status: str + creation: datetime + + +if TYPE_CHECKING: + from jcloud.jcloud.pagetype.app.app import App + from jcloud.jcloud.pagetype.deploy_candidate.deploy_candidate import DeployCandidate + + +class ReleaseGroup(Document, TagHelpers): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + from jcloud.jcloud.pagetype.common_site_config.common_site_config import CommonSiteConfig + from jcloud.jcloud.pagetype.release_group_app.release_group_app import ReleaseGroupApp + from jcloud.jcloud.pagetype.release_group_dependency.release_group_dependency import ( + ReleaseGroupDependency, + ) + from jcloud.jcloud.pagetype.release_group_mount.release_group_mount import ( + ReleaseGroupMount, + ) + from jcloud.jcloud.pagetype.release_group_package.release_group_package import ( + ReleaseGroupPackage, + ) + from jcloud.jcloud.pagetype.release_group_server.release_group_server import ( + ReleaseGroupServer, + ) + from jcloud.jcloud.pagetype.release_group_variable.release_group_variable import ( + ReleaseGroupVariable, + ) + from jcloud.jcloud.pagetype.resource_tag.resource_tag import ( + ResourceTag, + ) + + apps: DF.Table[ReleaseGroupApp] + bench_config: DF.Code | None + build_server: DF.Link | None + central_bench: DF.Check + common_site_config: DF.Code | None + common_site_config_table: DF.Table[CommonSiteConfig] + compress_app_cache: DF.Check + default: DF.Check + dependencies: DF.Table[ReleaseGroupDependency] + enabled: DF.Check + environment_variables: DF.Table[ReleaseGroupVariable] + gunicorn_threads_per_worker: DF.Int + is_code_server_enabled: DF.Check + is_push_to_deploy_enabled: DF.Check + is_redisearch_enabled: DF.Check + last_dependency_update: DF.Datetime | None + max_background_workers: DF.Int + max_gunicorn_workers: DF.Int + merge_all_rq_queues: DF.Check + merge_default_and_short_rq_queues: DF.Check + min_background_workers: DF.Int + min_gunicorn_workers: DF.Int + mounts: DF.Table[ReleaseGroupMount] + packages: DF.Table[ReleaseGroupPackage] + public: DF.Check + redis_cache_size: DF.Int + saas_app: DF.Link | None + saas_bench: DF.Check + servers: DF.Table[ReleaseGroupServer] + tags: DF.Table[ResourceTag] + team: DF.Link + title: DF.Data + use_app_cache: DF.Check + use_delta_builds: DF.Check + use_rq_workerpool: DF.Check + version: DF.Link + # end: auto-generated types + + dashboard_fields = ("title", "version", "apps", "team", "public", "tags") + + @staticmethod + def get_list_query(query, filters, **list_args): + ReleaseGroupServer = jingrow.qb.PageType("Release Group Server") + ReleaseGroup = jingrow.qb.PageType("Release Group") + Bench = jingrow.qb.PageType("Bench") + Site = jingrow.qb.PageType("Site") + + site_count = ( + jingrow.qb.from_(Site) + .select(jingrow.query_builder.functions.Count("*")) + .where(Site.group == ReleaseGroup.name) + .where(Site.status != "Archived") + ) + + active_benches = ( + jingrow.qb.from_(Bench) + .select(jingrow.query_builder.functions.Count("*")) + .where(Bench.group == ReleaseGroup.name) + .where(Bench.status == "Active") + ) + + query = ( + query.where(ReleaseGroup.team == jingrow.local.team().name) + .where(ReleaseGroup.enabled == 1) + .where(ReleaseGroup.public == 0) + .select(site_count.as_("site_count"), active_benches.as_("active_benches")) + ) + + if server := filters.get("server"): + query = ( + query.inner_join(ReleaseGroupServer) + .on(ReleaseGroupServer.parent == ReleaseGroup.name) + .where(ReleaseGroupServer.server == server) + ) + + return query + + def get_pg(self, pg): + pg.deploy_information = self.deploy_information() + pg.status = self.status + pg.actions = self.get_actions() + pg.are_builds_suspended = are_builds_suspended() + pg.eol_versions = jingrow.db.get_all( + "Jingrow Version", + filters={"status": "End of Life"}, + 
fields=["name"], + order_by="name desc", + pluck="name", + ) + + if len(self.servers) == 1: + server = jingrow.db.get_value("Server", self.servers[0].server, ["team", "title"], as_dict=True) + pg.server = self.servers[0].server + pg.server_title = server.title + pg.server_team = server.team + + pg.enable_inplace_updates = jingrow.get_value( + "Team", + self.team, + "enable_inplace_updates", + ) + if pg.enable_inplace_updates: + pg.inplace_update_failed_benches = self.get_inplace_update_failed_benches() + + def get_inplace_update_failed_benches(self): + return jingrow.db.get_all( + "Bench", + {"group": self.name, "status": "Active", "last_inplace_update_failed": True}, + pluck="name", + ) + + def get_actions(self): + return [ + { + "action": "Rename Bench Group", + "description": "Rename the bench group", + "button_label": "Rename", + "pg_method": "rename", + }, + { + "action": "Transfer Bench Group", + "description": "Transfer ownership of this bench group to another team", + "button_label": "Transfer", + "pg_method": "send_change_team_request", + }, + { + "action": "Drop Bench Group", + "description": "Drop the bench group", + "button_label": "Drop", + "pg_method": "drop", + "group": "Dangerous Actions", + }, + ] + + def validate(self): + self.validate_title() + self.validate_jingrow_app() + self.validate_duplicate_app() + self.validate_app_versions() + self.validate_servers() + self.validate_rq_queues() + self.validate_max_min_workers() + self.validate_feature_flags() + + def before_insert(self): + # to avoid adding deps while cloning a release group + if len(self.dependencies) == 0: + self.fetch_dependencies() + self.set_default_app_cache_flags() + self.set_default_delta_builds_flags() + self.setup_default_feature_flags() + + def after_insert(self): + from jcloud.jcloud.pagetype.jcloud_role.jcloud_role import ( + add_permission_for_newly_created_pg, + ) + + add_permission_for_newly_created_pg(self) + + def on_update(self): + old_pg = self.get_pg_before_save() + if self.flags.in_insert or self.is_new() or not old_pg: + return + diff = get_diff(old_pg, self) or {} + for row in chain(diff.get("row_changed", []), diff.get("added", [])): + if row[0] == "dependencies": + self.db_set("last_dependency_update", jingrow.utils.now_datetime()) + break + if self.has_value_changed("team"): + jingrow.db.delete("Jcloud Role Permission", {"release_group": self.name}) + + def on_trash(self): + candidates = jingrow.get_all("Deploy Candidate", {"group": self.name}) + for candidate in candidates: + jingrow.delete_pg("Deploy Candidate", candidate.name) + + def before_save(self): + self.update_common_site_config_preview() + + def update_common_site_config_preview(self): + """Regenerates rg.common_site_config on each rg.before_save + from the rg.common_site_config child table data""" + new_config = {} + + for row in self.common_site_config_table: + # update internal flag from master + row.internal = jingrow.db.get_value("Site Config Key", row.key, "internal") + key_type = row.type or row.get_type() + row.type = key_type + + if key_type == "Number": + key_value = int(row.value) if isinstance(row.value, (float, int)) else json.loads(row.value) + elif key_type == "Boolean": + key_value = row.value if isinstance(row.value, bool) else bool(json.loads(cstr(row.value))) + elif key_type == "JSON": + key_value = json.loads(cstr(row.value)) + else: + key_value = row.value + + new_config[row.key] = key_value + + self.common_site_config = json.dumps(new_config, indent=4) + + @dashboard_whitelist() + def 
update_dependency(self, dependency_name, version, is_custom): + """Updates a dependency version in the Release Group Dependency table""" + for dependency in self.dependencies: + if dependency.name == dependency_name: + dependency.version = version + dependency.is_custom = is_custom + self.save() + return + + @dashboard_whitelist() + def delete_config(self, key): + """Deletes a key from the common_site_config_table""" + + if key in get_client_blacklisted_keys(): + return + + updated_common_site_config = [] + for row in self.common_site_config_table: + if row.key != key and not row.internal: + updated_common_site_config.append({"key": row.key, "value": row.value, "type": row.type}) + + # using a tuple to avoid updating bench_config + # TODO: remove tuple when bench_config is removed and field for http_timeout is added + self.update_config_in_release_group(updated_common_site_config, ()) + + @dashboard_whitelist() + def update_config(self, config): + sanitized_common_site_config = [ + {"key": c.key, "type": c.type, "value": c.value} for c in self.common_site_config_table + ] + sanitized_bench_config = [] + bench_config_keys = ["http_timeout"] + + config = jingrow.parse_json(config) + + for key, value in config.items(): + if key in get_client_blacklisted_keys(): + jingrow.throw(_(f"The key {key} is blacklisted or is internal and cannot be updated")) + + config_type = get_config_type(value) + + if jingrow.db.exists("Site Config Key", key): + config_type = jingrow.db.get_value("Site Config Key", key, "type") + + value = get_formatted_config_value( + config_type, + value, + key, + self.name, + ) + + if key in bench_config_keys: + sanitized_bench_config.append({"key": key, "value": value, "type": config_type}) + + # update existing key + for row in sanitized_common_site_config: + if row["key"] == key: + row["value"] = value + row["type"] = config_type + break + else: + sanitized_common_site_config.append({"key": key, "value": value, "type": config_type}) + + self.update_config_in_release_group(sanitized_common_site_config, sanitized_bench_config) + self.update_benches_config() + + def update_config_in_release_group(self, common_site_config, bench_config): + """Updates bench_config and common_site_config in the Release Group + + Args: + config (list): List of dicts with key, value, and type + """ + blacklisted_config = [ + x for x in self.common_site_config_table if x.key in get_client_blacklisted_keys() + ] + self.common_site_config_table = [] + + # Maintain keys that aren't accessible to Dashboard user + for i, _config in enumerate(blacklisted_config): + _config.idx = i + 1 + self.common_site_config_table.append(_config) + + for d in common_site_config: + d = jingrow._dict(d) + if isinstance(d.value, (dict, list)): + value = json.dumps(d.value) + else: + value = d.value + self.append("common_site_config_table", {"key": d.key, "value": value, "type": d.type}) + # redis_cache_size is a field on release group but we want to treat it as config key + # TODO: add another interface for updating similar values + if d["key"] == "redis_cache_size": + self.redis_cache_size = int(d.value) + + for d in bench_config: + if d["key"] == "http_timeout": + # http_timeout should be the only thing configurable in bench_config + self.bench_config = json.dumps({"http_timeout": int(d["value"])}, indent=4) + + if bench_config == []: + self.bench_config = json.dumps({}) + + self.save() + + @dashboard_whitelist() + def update_environment_variable(self, environment_variables: dict): + for key, value in 
environment_variables.items(): + is_updated = False + for env_var in self.environment_variables: + if env_var.key == key: + if env_var.internal: + jingrow.throw(f"Environment variable {env_var.key} is internal and cannot be updated") + else: + env_var.value = value + is_updated = True + if not is_updated: + self.append("environment_variables", {"key": key, "value": value, "internal": False}) + self.save() + + @dashboard_whitelist() + def delete_environment_variable(self, key): + updated_env_variables = [] + for env_var in self.environment_variables: + if env_var.key != key or env_var.internal: + updated_env_variables.append(env_var) + self.environment_variables = updated_env_variables + self.save() + + def validate_title(self): + if jingrow.get_all( + "Release Group", + { + "title": self.title, + "team": self.team or "", + "name": ("!=", self.name), + "enabled": True, + }, + limit=1, + ): + jingrow.throw( + f"Bench Group of name {self.title} already exists. Please try another name.", + jingrow.ValidationError, + ) + + def validate_jingrow_app(self): + if self.apps[0].app != "jingrow": + jingrow.throw("First app must be Jingrow", jingrow.ValidationError) + + def validate_duplicate_app(self): + apps = set() + for app in self.apps: + app_name = app.app + if app_name in apps: + jingrow.throw(f"App {app.app} can be added only once", jingrow.ValidationError) + apps.add(app_name) + + def validate_app_versions(self): + # App Source should be compatible with Release Group's version + with suppress(AttributeError, RuntimeError): + if ( + not jingrow.flags.in_test + and jingrow.request.path == "/api/method/jcloud.api.bench.change_branch" + ): + return # Separate validation exists in set_app_source + for app in self.apps: + self.validate_app_version(app) + + def validate_app_version(self, app: "ReleaseGroupApp"): + source = jingrow.get_pg("App Source", app.source) + if all(row.version != self.version for row in source.versions): + branch, repo = jingrow.db.get_values("App Source", app.source, ("branch", "repository"))[0] + msg = f"{repo.rsplit('/')[-1] or repo.rsplit('/')[-2]}:{branch} branch is no longer compatible with {self.version} version of Jingrow" + jingrow.throw(msg, jingrow.ValidationError) + + def validate_servers(self): + if self.servers: + servers = set(server.server for server in self.servers) + if len(servers) != len(self.servers): + jingrow.throw("Servers can be added only once", jingrow.ValidationError) + elif self.is_new(): + server_for_new_bench = Server.get_prod_for_new_bench() + if server_for_new_bench: + self.append("servers", {"server": server_for_new_bench}) + + def fetch_dependencies(self): + jingrow_version = jingrow.get_pg("Jingrow Version", self.version) + + for d in jingrow_version.dependencies: + self.append("dependencies", {"dependency": d.dependency, "version": d.version}) + + def validate_rq_queues(self): + if self.merge_all_rq_queues and self.merge_default_and_short_rq_queues: + jingrow.throw( + "Can't set Merge All RQ Queues and Merge Short and Default RQ Queues at once", + jingrow.ValidationError, + ) + + def validate_max_min_workers(self): + if ( + self.max_gunicorn_workers + and self.min_gunicorn_workers + and self.max_gunicorn_workers < self.min_gunicorn_workers + ): + jingrow.throw( + "Max Gunicorn Workers can't be less than Min Gunicorn Workers", + jingrow.ValidationError, + ) + if ( + self.max_background_workers + and self.min_background_workers + and self.max_background_workers < self.min_background_workers + ): + jingrow.throw( + "Max Background Workers 
can't be less than Min Background Workers", + jingrow.ValidationError, + ) + + def validate_feature_flags(self) -> None: + if self.use_app_cache and not self.can_use_get_app_cache(): + jingrow.throw(_("Use App Cache cannot be set, BENCH_VERSION must be 5.22.1 or later")) + + def can_use_get_app_cache(self) -> bool: + version = find( + self.dependencies, + lambda x: x.dependency == "BENCH_VERSION", + ).version + + try: + return sv.Version(version) in sv.SimpleSpec(">=5.22.1") + except ValueError: + return False + + @jingrow.whitelist() + def create_duplicate_deploy_candidate(self): + return self.create_deploy_candidate([]) + + @dashboard_whitelist() + def redeploy(self): + dc = self.create_duplicate_deploy_candidate() + dc.schedule_build_and_deploy() + + @dashboard_whitelist() + def initial_deploy(self): + dc = self.create_deploy_candidate() + dc.schedule_build_and_deploy() + + @jingrow.whitelist() + def create_deploy_candidate( + self, + apps_to_update=None, + run_will_fail_check=False, + ) -> "DeployCandidate | None": + if not self.enabled: + return None + + apps = self.get_apps_to_update(apps_to_update) + if apps_to_update is None: + self.validate_dc_apps_against_rg(apps) + + dependencies = [{"dependency": d.dependency, "version": d.version} for d in self.dependencies] + + packages = [ + { + "package_manager": p.package_manager, + "package": p.package, + "package_prerequisites": p.package_prerequisites, + "after_install": p.after_install, + } + for p in self.packages + ] + + environment_variables = [{"key": v.key, "value": v.value} for v in self.environment_variables] + + # Create and deploy the DC + new_dc: "DeployCandidate" = jingrow.get_pg( + { + "pagetype": "Deploy Candidate", + "group": self.name, + "apps": apps, + "dependencies": dependencies, + "packages": packages, + "environment_variables": environment_variables, + } + ) + + if run_will_fail_check: + from jcloud.jcloud.pagetype.deploy_candidate.validations import ( + check_if_update_will_fail, + ) + + check_if_update_will_fail(self, new_dc) + + new_dc.insert() + return new_dc + + def validate_dc_apps_against_rg(self, dc_apps) -> None: + app_map = {app["app"]: app for app in dc_apps} + not_found = [] + for app in self.apps: + if app.app in app_map: + continue + not_found.append(app.app) + + if not not_found: + return + + msg = _("Following apps {0} not found. 
Potentially due to not approved App Releases.").format( + not_found + ) + jingrow.throw(msg) + + def get_apps_to_update(self, apps_to_update): + # If apps_to_update is None, try to update all apps + if apps_to_update is None: + apps_to_update = self.apps + + apps = [] + last_deployed_bench = get_last_pg("Bench", {"group": self.name, "status": "Active"}) + + for app in self.deploy_information().apps: + app_to_update = find(apps_to_update, lambda x: x.get("app") == app.app) + # If we want to update the app and there's an update available + if app_to_update and app["update_available"]: + # Use a specific release if mentioned, otherwise pick the most recent one + target_release = app_to_update.get("release", app.next_release) + apps.append( + { + "app": app["app"], + "source": app["source"], + "release": target_release, + "hash": jingrow.db.get_value("App Release", target_release, "hash"), + } + ) + else: + # Either we don't want to update the app or there's no update available + if last_deployed_bench: + # Find the last deployed release and use it + app_to_keep = find(last_deployed_bench.apps, lambda x: x.app == app.app) + if app_to_keep: + apps.append( + { + "app": app_to_keep.app, + "source": app_to_keep.source, + "release": app_to_keep.release, + "hash": app_to_keep.hash, + } + ) + + return self.get_sorted_based_on_rg_apps(apps) + + def get_sorted_based_on_rg_apps(self, apps): + # Rearrange Apps to match release group ordering + sorted_apps = [] + + for app in self.apps: + dc_app = find(apps, lambda x: x["app"] == app.app) + if dc_app: + sorted_apps.append(dc_app) + + for app in apps: + if not find(sorted_apps, lambda x: x["app"] == app["app"]): + sorted_apps.append(app) + + return sorted_apps + + @jingrow.whitelist() + def deploy_information(self): + out = jingrow._dict(update_available=False) + + last_deployed_bench = get_last_pg("Bench", {"group": self.name, "status": "Active"}) + out.apps = self.get_app_updates(last_deployed_bench.apps if last_deployed_bench else []) + out.last_deploy = self.last_dc_info + out.deploy_in_progress = self.deploy_in_progress + + out.removed_apps = self.get_removed_apps() + out.update_available = ( + any([app["update_available"] for app in out.apps]) + or (len(out.removed_apps) > 0) + or self.dependency_update_pending + ) + out.number_of_apps = len(self.apps) + + out.sites = [ + site.update({"skip_failing_patches": False, "skip_backups": False}) + for site in jingrow.get_all( + "Site", + {"group": self.name, "status": ("in", ["Active", "Broken"])}, + ["name", "server", "bench"], + ) + ] + + return out + + @dashboard_whitelist() + def deployed_versions(self): + Bench = jingrow.qb.PageType("Bench") + Server = jingrow.qb.PageType("Server") + deployed_versions = ( + jingrow.qb.from_(Bench) + .left_join(Server) + .on(Server.name == Bench.server) + .where((Bench.group == self.name) & (Bench.status != "Archived")) + .groupby(Bench.name) + .select(Bench.name, Bench.status, Bench.is_ssh_proxy_setup, Server.proxy_server) + .orderby(Bench.creation, order=jingrow.qb.desc) + .run(as_dict=True) + ) + + rg_version = self.version + + sites_in_group_details = jingrow.db.get_all( + "Site", + filters={ + "group": self.name, + "status": ("not in", ("Archived", "Suspended")), + "is_standby": 0, + }, + fields=["name", "status", "cluster", "plan", "creation", "bench"], + ) + + if sites_in_group_details: + Cluster = jingrow.qb.PageType("Cluster") + cluster_data = ( + jingrow.qb.from_(Cluster) + .select(Cluster.name, Cluster.title, Cluster.image) + 
.where(Cluster.name.isin([site.cluster for site in sites_in_group_details])) + .run(as_dict=True) + ) + + Plan = jingrow.qb.PageType("Site Plan") + plan_data = ( + jingrow.qb.from_(Plan) + .select(Plan.name, Plan.plan_title, Plan.price_cny, Plan.price_usd) + .where(Plan.name.isin([site.plan for site in sites_in_group_details])) + .run(as_dict=True) + ) + + ResourceTag = jingrow.qb.PageType("Resource Tag") + tag_data = ( + jingrow.qb.from_(ResourceTag) + .select(ResourceTag.tag_name, ResourceTag.parent) + .where(ResourceTag.parent.isin([site.name for site in sites_in_group_details])) + .run(as_dict=True) + ) + + cur_user_ssh_key = jingrow.get_all( + "User SSH Key", {"user": jingrow.session.user, "is_default": 1}, limit=1 + ) + + benches = [dn.name for dn in deployed_versions] + benches_with_patches = jingrow.get_all( + "App Patch", + fields=["bench"], + filters={"bench": ["in", benches], "status": "Applied"}, + pluck="bench", + ) + + for version in deployed_versions: + version.has_app_patch_applied = version.name in benches_with_patches + version.has_ssh_access = version.is_ssh_proxy_setup and len(cur_user_ssh_key) > 0 + version.sites = find_all(sites_in_group_details, lambda x: x.bench == version.name) + for site in version.sites: + site.version = rg_version + site.server_region_info = find(cluster_data, lambda x: x.name == site.cluster) + site.plan = find(plan_data, lambda x: x.name == site.plan) + tags = find_all(tag_data, lambda x: x.parent == site.name) + site.tags = [tag.tag_name for tag in tags] + + version.deployed_on = jingrow.db.get_value( + "Agent Job", + {"bench": version.name, "job_type": "New Bench", "status": "Success"}, + "end", + ) + + return deployed_versions + + @dashboard_whitelist() + def get_app_versions(self, bench): + apps = jingrow.db.get_all( + "Bench App", + {"parent": bench}, + ["name", "app", "hash", "source"], + order_by="idx", + ) + for app in apps: + app.update( + jingrow.db.get_value( + "App Source", + app.source, + ("branch", "repository", "repository_owner", "repository_url"), + as_dict=1, + cache=True, + ) + ) + app.tag = get_app_tag(app.repository, app.repository_owner, app.hash) + return apps + + @dashboard_whitelist() + def send_change_team_request(self, team_mail_id: str, reason: str): + """Send email to team to accept bench transfer request""" + + if self.team != get_current_team(): + jingrow.throw( + "You should belong to the team owning the bench to initiate a bench ownership transfer." 
+ ) + + if not jingrow.db.exists("Team", {"user": team_mail_id, "enabled": 1}): + jingrow.throw("No Active Team record found.") + + old_team = jingrow.db.get_value("Team", self.team, "user") + + if old_team == team_mail_id: + jingrow.throw(f"Bench group is already owned by the team {team_mail_id}") + + key = jingrow.generate_hash("Release Group Transfer Link", 20) + jingrow.get_pg( + { + "pagetype": "Team Change", + "document_type": "Release Group", + "document_name": self.name, + "to_team": jingrow.db.get_value("Team", {"user": team_mail_id, "enabled": 1}), + "from_team": self.team, + "reason": reason or "", + "key": key, + } + ).insert() + + link = get_url(f"/api/method/jcloud.api.bench.confirm_bench_transfer?key={key}") + + if jingrow.conf.developer_mode: + print(f"Bench transfer link for {team_mail_id}\n{link}\n") + + jingrow.sendmail( + recipients=team_mail_id, + subject="Transfer Bench Ownership Confirmation", + template="transfer_team_confirmation", + args={ + "name": self.title or self.name, + "type": "bench", + "old_team": old_team, + "new_team": team_mail_id, + "transfer_url": link, + }, + ) + + @dashboard_whitelist() + def generate_certificate(self): + user_ssh_key = jingrow.get_all( + "User SSH Key", + {"user": jingrow.session.user, "is_default": True}, + pluck="name", + limit=1, + ) + + if not user_ssh_key: + jingrow.throw(_("Please set a SSH key to generate certificate")) + + return jingrow.get_pg( + { + "pagetype": "SSH Certificate", + "certificate_type": "User", + "group": self.name, + "user": jingrow.session.user, + "user_ssh_key": user_ssh_key[0], + "validity": "6h", + } + ).insert() + + @dashboard_whitelist() + def get_certificate(self): + user_ssh_key = jingrow.db.get_all( + "User SSH Key", {"user": jingrow.session.user, "is_default": True}, pluck="name" + ) + if not len(user_ssh_key): + return False + certificates = jingrow.db.get_all( + "SSH Certificate", + { + "user": jingrow.session.user, + "valid_until": [">", jingrow.utils.now()], + "group": self.name, + "user_ssh_key": user_ssh_key[0], + }, + pluck="name", + limit=1, + ) + if certificates: + return jingrow.get_pg("SSH Certificate", certificates[0]) + return False + + @property + def dependency_update_pending(self): + if not self.last_dependency_update or not self.last_dc_info: + return False + return jingrow.utils.get_datetime(self.last_dependency_update) > self.last_dc_info.creation + + @property + def deploy_in_progress(self): + from jcloud.jcloud.pagetype.bench.bench import TRANSITORY_STATES as BENCH_TRANSITORY + from jcloud.jcloud.pagetype.deploy_candidate.deploy_candidate import ( + TRANSITORY_STATES as DC_TRANSITORY, + ) + + if self.last_dc_info and self.last_dc_info.status in DC_TRANSITORY: + return True + + if any(i["status"] in BENCH_TRANSITORY for i in self.last_benches_info): + return True + + update_jobs = get_job_names(self.name, "Update Bench In Place", ["Pending", "Running"]) + if len(update_jobs): + return True + + return False + + @property + def status(self): + active_benches = jingrow.db.get_all( + "Bench", {"group": self.name, "status": "Active"}, limit=1, order_by="creation desc" + ) + return "Active" if active_benches else "Awaiting Deploy" + + @cached_property + def last_dc_info(self) -> "LastDeployInfo | None": + dc = jingrow.qb.PageType("Deploy Candidate") + + query = ( + jingrow.qb.from_(dc) + .where(dc.group == self.name) + .select(dc.name, dc.status, dc.creation) + .orderby(dc.creation, order=jingrow.qb.desc) + .limit(1) + ) + + results = query.run(as_dict=True) + + if len(results) > 
0: + return results[0] + return None + + @cached_property + def last_benches_info(self) -> "list[LastDeployInfo]": + if not (name := (self.last_dc_info or {}).get("name")): + return [] + + b = jingrow.qb.PageType("Bench") + query = ( + jingrow.qb.from_(b) + .where(b.candidate == name) + .select(b.name, b.status, b.creation) + .orderby(b.creation, order=jingrow.qb.desc) + .limit(1) + ) + return query.run(as_dict=True) + + def get_app_updates(self, current_apps): + next_apps = self.get_next_apps(current_apps) + + apps = [] + for app in next_apps: + bench_app = find(current_apps, lambda x: x.app == app.app) + current_hash = bench_app.hash if bench_app else None + source = jingrow.get_pg("App Source", app.source) + + will_branch_change = False + current_branch = source.branch + if bench_app: + current_source_branch = jingrow.db.get_value("App Source", bench_app.source, "branch") + will_branch_change = current_source_branch != source.branch + current_branch = current_source_branch + + current_tag = ( + get_app_tag(source.repository, source.repository_owner, current_hash) + if current_hash + else None + ) + + for release in app.releases: + release.tag = get_app_tag(source.repository, source.repository_owner, release.hash) + + next_hash = app.hash + + update_available = not current_hash or current_hash != next_hash or will_branch_change + if not app.releases: + update_available = False + + apps.append( + jingrow._dict( + { + "title": app.title, + "app": app.app, + "name": app.app, + "source": source.name, + "repository": source.repository, + "repository_owner": source.repository_owner, + "repository_url": source.repository_url, + "branch": source.branch, + "current_hash": current_hash, + "current_tag": current_tag, + "current_release": bench_app.release if bench_app else None, + "releases": app.releases, + "next_release": app.release, + "will_branch_change": will_branch_change, + "current_branch": current_branch, + "update_available": update_available, + } + ) + ) + return apps + + def get_next_apps(self, current_apps): + marketplace_app_sources = self.get_marketplace_app_sources() + current_team = get_current_team(True) + app_publishers_team = [current_team.name] + + if current_team.parent_team: + app_publishers_team.append(current_team.parent_team) + + only_approved_for_sources = [self.apps[0].source] # add jingrow app source + if marketplace_app_sources: + AppSource = jingrow.qb.PageType("App Source") + only_approved_for_sources.append( + jingrow.qb.from_(AppSource) + .where(AppSource.name.isin(marketplace_app_sources)) + .where(AppSource.team.notin(app_publishers_team)) + .select(AppSource.name) + .run(as_dict=True, pluck="name") + ) + + next_apps = [] + + app_sources = [app.source for app in self.apps] + AppRelease = jingrow.qb.PageType("App Release") + latest_releases = ( + jingrow.qb.from_(AppRelease) + .where(AppRelease.source.isin(app_sources)) + .select( + AppRelease.name, + AppRelease.source, + AppRelease.public, + AppRelease.status, + AppRelease.hash, + AppRelease.message, + AppRelease.creation, + ) + .orderby(AppRelease.creation, order=jingrow.qb.desc) + .run(as_dict=True) + ) + + for app in self.apps: + latest_app_release = None + latest_app_releases = find_all(latest_releases, lambda x: x.source == app.source) + + if app.source in only_approved_for_sources: + latest_app_release = find(latest_app_releases, can_use_release) + latest_app_releases = find_all(latest_app_releases, can_use_release) + else: + latest_app_release = find(latest_app_releases, lambda x: x.source == app.source) 
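+ # Hedged reading of the gating above: for the jingrow source and for marketplace sources owned by other teams, the intent is to surface only releases passing can_use_release (public releases must be "Approved"); the team's own sources simply take their latest release.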
+ + # No release exists for this source + if not latest_app_release: + continue + + bench_app = find(current_apps, lambda x: x.app == app.app) + + upcoming_release = latest_app_release.name if latest_app_release else bench_app.release + upcoming_hash = latest_app_release.hash if latest_app_release else bench_app.hash + + upcoming_releases = latest_app_releases + if bench_app: + new_branch = jingrow.db.get_value("App Source", app.source, "branch") + old_branch = jingrow.db.get_value("App Source", bench_app.source, "branch") + + if new_branch == old_branch: + current_release_creation = jingrow.db.get_value( + "App Release", bench_app.release, "creation" + ) + upcoming_releases = [ + release + for release in latest_app_releases + if release.creation > current_release_creation + ] + + next_apps.append( + jingrow._dict( + { + "app": app.app, + "source": app.source, + "release": upcoming_release, + "hash": upcoming_hash, + "title": app.title, + "releases": upcoming_releases[:16], + } + ) + ) + + return next_apps + + def get_removed_apps(self): + # Apps that were removed from the release group + # but were in the last deployed bench + removed_apps = [] + + latest_bench = get_last_pg("Bench", {"group": self.name, "status": "Active"}) + + if latest_bench: + bench_apps = latest_bench.apps + + for bench_app in bench_apps: + if not find(self.apps, lambda rg_app: rg_app.app == bench_app.app): + app_title = jingrow.db.get_value("App", bench_app.app, "title") + removed_apps.append({"name": bench_app.app, "title": app_title}) + + return removed_apps + + def update_source(self, source: "AppSource", is_update: bool = False): + self.remove_app_if_invalid(source) + if is_update: + update_rg_app_source(self, source) + else: + self.append("apps", {"source": source.name, "app": source.app}) + self.save() + + def remove_app_if_invalid(self, source: "AppSource"): + """ + Remove app if previously added app has an invalid + repository URL and GitHub responds with a 404 when + fetching the app information. 
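+ The app is only removed when the stored source's repository URL differs from the incoming source and the last GitHub poll failed with a "Not Found" response.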
+ """ + matching_apps = [a for a in self.apps if a.app == source.app] + if not matching_apps: + return + + rg_app = matching_apps[0] + value = jingrow.get_value( + "App Source", + rg_app.source, + ["last_github_poll_failed", "last_github_response", "repository_url"], + as_dict=True, + ) + + if value.repository_url == source.repository_url: + return + + if not value.last_github_poll_failed or not value.last_github_response: + return + + if '"Not Found"' not in value.last_github_response: + return + + self.remove_app(source.app) + + @dashboard_whitelist() + def change_app_branch(self, app: str, to_branch: str) -> None: + current_app_source = self.get_app_source(app) + + # Already on that branch + if current_app_source.branch == to_branch: + jingrow.throw(f"App already on branch {to_branch}!") + + required_app_source = jingrow.get_all( + "App Source", + filters={"repository_url": current_app_source.repository_url, "branch": to_branch}, + or_filters={"team": current_app_source.team, "public": 1}, + limit=1, + ) + + if required_app_source: + required_app_source = required_app_source[0] + else: + versions = jingrow.get_all( + "App Source Version", filters={"parent": current_app_source.name}, pluck="version" + ) + + required_app_source = create_app_source( + app, current_app_source.repository_url, to_branch, versions + ) + required_app_source.reload() + required_app_source.github_installation_id = current_app_source.github_installation_id + required_app_source.save() + + self.set_app_source(app, required_app_source.name) + + def get_app_source(self, app: str) -> AppSource: + source = jingrow.get_all( + "Release Group App", filters={"parent": self.name, "app": app}, pluck="source" + ) + + if source: + source = source[0] + else: + jingrow.throw("Release group app does not exist!") + + return jingrow.get_pg("App Source", source) + + def set_app_source(self, target_app: str, source: str) -> None: + """Set `target_app`'s source in release group to `source`""" + for app in self.apps: + if app.app == target_app: + app.source = source + app.save() + break + self.validate_app_version(app) + self.save() + + def get_marketplace_app_sources(self) -> list[str]: + all_marketplace_sources = jingrow.get_all("Marketplace App Version", pluck="source") + return [app.source for app in self.apps if app.source in all_marketplace_sources] + + def get_clusters(self): + """Get unique clusters corresponding to self.servers""" + servers = jingrow.db.get_all("Release Group Server", {"parent": self.name}, pluck="server") + return jingrow.get_all("Server", {"name": ("in", servers)}, pluck="cluster", distinct=True) + + @dashboard_whitelist() + def add_region(self, region): + """ + Add new region to release group (limits to 2). Meant for dashboard use only. + """ + + if len(self.get_clusters()) >= 2: + jingrow.throw("More than 2 regions for bench not allowed") + self.add_cluster(region) + + def add_cluster(self, cluster: str): + """ + Add new server belonging to cluster. 
+ + Deploys bench if no update available + """ + server = Server.get_prod_for_new_bench({"cluster": cluster}) + if not server: + log_error("No suitable server for new bench") + jingrow.throw(f"No suitable server for new bench in {cluster}") + app_update_available = self.deploy_information().update_available + self.add_server(server, deploy=not app_update_available) + + def get_last_successful_candidate(self) -> "DeployCandidate": + return jingrow.get_last_pg("Deploy Candidate", {"status": "Success", "group": self.name}) + + def get_last_deploy_candidate(self): + try: + dc: "DeployCandidate" = jingrow.get_last_pg( + "Deploy Candidate", + { + "status": ["!=", "Draft"], + "group": self.name, + }, + ) + return dc + except jingrow.DoesNotExistError: + return None + + @jingrow.whitelist() + def add_server(self, server: str, deploy=False): + self.append("servers", {"server": server, "default": False}) + self.save() + if deploy: + return self.get_last_successful_candidate()._create_deploy([server]) + return None + + @jingrow.whitelist() + def change_server(self, server: str): + """ + Create latest candidate in given server and tries to move sites there. + + If only 1 server in server list, removes it, else schedules site + migrations from first server in list to given. + """ + if len(self.servers) == 1: + self.remove(self.servers[0]) + self.add_server(server, deploy=True) + + @jingrow.whitelist() + def update_benches_config(self): + """Update benches config for all benches in the release group""" + benches = jingrow.get_all("Bench", "name", {"group": self.name, "status": "Active"}) + for bench in benches: + jingrow.get_pg("Bench", bench.name).update_bench_config(force=True) + + @dashboard_whitelist() + def add_app(self, app, is_update: bool = False): + if isinstance(app, str): + app = json.loads(app) + + if not (name := app.get("name")): + return + + if jingrow.db.exists("App", name): + app_pg: "App" = jingrow.get_pg("App", name) + else: + app_pg = new_app(name, app["title"]) + + source = app_pg.add_source( + self.version, + app["repository_url"], + app["branch"], + self.team, + app.get("github_installation_id", None), + ) + self.update_source(source, is_update) + + @dashboard_whitelist() + def remove_app(self, app: str): + """Remove app from release group""" + + app_pg_to_remove = find(self.apps, lambda x: x.app == app) + if app_pg_to_remove: + self.remove(app_pg_to_remove) + + self.save() + return app + + @dashboard_whitelist() + def fetch_latest_app_update(self, app: str): + app_source = self.get_app_source(app) + app_source.create_release(force=True) + + @jingrow.whitelist() + def archive(self): + benches = jingrow.get_all("Bench", filters={"group": self.name, "status": "Active"}, pluck="name") + for bench in benches: + jingrow.get_pg("Bench", bench).archive() + + new_name = f"{self.title}.archived" + self.title = append_number_if_name_exists("Release Group", new_name, "title", separator=".") + self.enabled = 0 + self.save() + + jingrow.db.delete("Jcloud Role Permission", {"release_group": self.name}) + + @dashboard_whitelist() + def delete(self) -> None: + # Note: using delete instead of archive to avoid client api fetching the pg again + + self.archive() + + def set_default_app_cache_flags(self): + if self.use_app_cache: + return + + if not jingrow.db.get_single_value("Jcloud Settings", "use_app_cache"): + return + + if not self.can_use_get_app_cache(): + return + + self.use_app_cache = 1 + self.compress_app_cache = jingrow.db.get_single_value( + "Jcloud Settings", + "compress_app_cache", + 
) + + def set_default_delta_builds_flags(self): + if not jingrow.db.get_single_value("Jcloud Settings", "use_delta_builds"): + return + + self.use_delta_builds = 0 + + def is_version_14_or_higher(self): + return jingrow.get_cached_value("Jingrow Version", self.version, "number") >= 14 + + def setup_default_feature_flags(self): + DEFAULT_FEATURE_FLAGS = { + "Version 14": {"merge_default_and_short_rq_queues": True}, + "Version 15": { + "gunicorn_threads_per_worker": "4", + "merge_default_and_short_rq_queues": True, + "use_rq_workerpool": True, + }, + "Nightly": { + "gunicorn_threads_per_worker": "4", + "merge_default_and_short_rq_queues": True, + "use_rq_workerpool": True, + }, + } + flags = DEFAULT_FEATURE_FLAGS.get(self.version, {}) + for key, value in flags.items(): + setattr(self, key, value) + + +@redis_cache(ttl=60) +def are_builds_suspended() -> bool: + return is_suspended() + + +def new_release_group(title, version, apps, team=None, cluster=None, saas_app="", server=None): + if cluster: + if not server: + restricted_release_group_names = jingrow.db.get_all( + "Site Plan Release Group", + pluck="release_group", + filters={"parenttype": "Site Plan", "parentfield": "release_groups"}, + distinct=True, + ) + restricted_server_names = jingrow.db.get_all( + "Release Group Server", + pluck="server", + filters={ + "parenttype": "Release Group", + "parentfield": "servers", + "parent": ("in", restricted_release_group_names), + }, + distinct=True, + ) + server = jingrow.get_all( + "Server", + { + "status": "Active", + "cluster": cluster, + "use_for_new_benches": True, + "name": ("not in", restricted_server_names), + }, + pluck="name", + limit=1, + )[0] + servers = [{"server": server}] + elif server: + servers = [{"server": server}] + else: + servers = [] + return jingrow.get_pg( + { + "pagetype": "Release Group", + "title": title, + "version": version, + "apps": apps, + "servers": servers, + "team": team, + "saas_app": saas_app, + } + ).insert() + + +def get_status(name): + return ( + "Active" + if jingrow.get_all("Bench", {"group": name, "status": "Active"}, limit=1, order_by="creation desc") + else "Awaiting Deploy" + ) + + +def prune_servers_without_sites(): + rg_servers = jingrow.qb.PageType("Release Group Server") + rg = jingrow.qb.PageType("Release Group") + groups_with_multiple_servers = ( + jingrow.qb.from_(rg_servers) + .inner_join(rg) + .on(rg.name == rg_servers.parent) + .where(rg.enabled == 1) + .where(rg.public == 0) + .where(rg.central_bench == 0) + .where(rg.team != "team@jingrow.com") + .where( + rg.modified < jingrow.utils.add_to_date(None, days=-7) + ) # use this timestamp to assume server added time + .groupby(rg_servers.parent) + .having(Count("*") > 1) + .select(rg_servers.parent) + .run(as_dict=False) + ) + groups_with_multiple_servers = [x[0] for x in groups_with_multiple_servers] + groups_with_multiple_servers = jingrow.get_all( + "Release Group Server", + filters={"parent": ("in", groups_with_multiple_servers)}, + fields=["parent", "server"], + order_by="parent", + as_list=True, + ) + + from jcloud.jcloud.pagetype.bench.bench import ( + get_scheduled_version_upgrades, + get_unfinished_site_migrations, + ) + + for group, server in groups_with_multiple_servers: + sites = jingrow.get_all( + "Site", + {"status": ("!=", "Archived"), "group": group, "server": server}, + ["name"], + ) + if not sites: + benches = jingrow.get_all( + "Bench", + {"group": group, "server": server, "status": "Active"}, + ["name", "server", "group"], + ) + for bench in benches: + if 
get_unfinished_site_migrations(bench.name) or get_scheduled_version_upgrades(bench): + continue + jingrow.db.delete("Release Group Server", {"parent": group, "server": server}) + jingrow.db.commit() + + +get_permission_query_conditions = get_permission_query_conditions_for_pagetype("Release Group") + + +def can_use_release(app_src): + if not app_src.public: + return True + + return app_src.status == "Approved" + + +def update_rg_app_source(rg: "ReleaseGroup", source: "AppSource"): + for app in rg.apps: + if app.app == source.app: + app.source = source.name + break + + +def get_job_names(rg: str, job_type: str, job_status: list[str]): + b = jingrow.qb.PageType("Bench") + aj = jingrow.qb.PageType("Agent Job") + + jobs = ( + jingrow.qb.from_(b) + .inner_join(aj) + .on(b.name == aj.bench) + .where(b.group == rg) + .where(aj.job_type == job_type) + .where(aj.status.isin(job_status)) + .select(aj.name) + .orderby(aj.modified, order=jingrow.query_builder.Order.desc) + ).run() + + return [j[0] for j in jobs] + + +def get_config_type(value: Any): + if isinstance(value, (dict, list)): + return "JSON" + + if isinstance(value, bool): + return "Boolean" + + if isinstance(value, (int, float)): + return "Number" + + return "String" + + +def get_formatted_config_value(config_type: str, value: Any, key: str, name: str): + if config_type == "Number": + return flt(value) + + if config_type == "Boolean": + return bool(sbool(value)) + + if config_type == "JSON": + return jingrow.parse_json(value) + + if config_type == "Password" and value == "*******": + return jingrow.get_value("Site Config", {"key": key, "parent": name}, "value") + + return value diff --git a/jcloud/jcloud/pagetype/release_group/release_group_dashboard.py b/jcloud/jcloud/pagetype/release_group/release_group_dashboard.py new file mode 100644 index 0000000..d80117c --- /dev/null +++ b/jcloud/jcloud/pagetype/release_group/release_group_dashboard.py @@ -0,0 +1,11 @@ +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +def get_data(): + return { + "fieldname": "group", + "transactions": [ + {"items": ["Bench", "Deploy", "Deploy Candidate", "Deploy Candidate Difference"]}, + ], + } diff --git a/jcloud/jcloud/pagetype/release_group/test_release_group.py b/jcloud/jcloud/pagetype/release_group/test_release_group.py new file mode 100644 index 0000000..4cbbdee --- /dev/null +++ b/jcloud/jcloud/pagetype/release_group/test_release_group.py @@ -0,0 +1,396 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# See license.txt + +import unittest +from unittest.mock import Mock, patch + +import jingrow +from jingrow.core.utils import find + +from jcloud.api.bench import deploy_information +from jcloud.api.client import get_list +from jcloud.jcloud.pagetype.agent_job.agent_job import AgentJob +from jcloud.jcloud.pagetype.app.app import App +from jcloud.jcloud.pagetype.app.test_app import create_test_app +from jcloud.jcloud.pagetype.app_release.test_app_release import create_test_app_release +from jcloud.jcloud.pagetype.app_source.app_source import AppSource +from jcloud.jcloud.pagetype.app_source.test_app_source import create_test_app_source +from jcloud.jcloud.pagetype.release_group.release_group import ( + ReleaseGroup, + new_release_group, +) +from jcloud.jcloud.pagetype.team.test_team import create_test_team + + +def create_test_release_group( + apps: list[App], + user: str = None, + public=False, + jingrow_version="Version 14", + servers: list[str] = None, +) -> ReleaseGroup: + """ + Create Release Group pg. 
+ + Also creates app source + """ + user = user or jingrow.session.user + release_group = jingrow.get_pg( + { + "pagetype": "Release Group", + "version": jingrow_version, + "enabled": True, + "title": f"Test ReleaseGroup {jingrow.mock('name')}", + "team": jingrow.get_value("Team", {"user": user}, "name"), + "public": public, + } + ) + for app in apps: + app_source = create_test_app_source(release_group.version, app) + release_group.append("apps", {"app": app.name, "source": app_source.name}) + + if servers: + for server in servers: + release_group.append("servers", {"server": server}) + + release_group.insert(ignore_if_duplicate=True) + release_group.reload() + return release_group + + +@patch.object(AppSource, "create_release", create_test_app_release) +class TestReleaseGroup(unittest.TestCase): + def setUp(self): + self.team = create_test_team().name + + def tearDown(self): + jingrow.db.rollback() + + def test_create_release_group(self): + app = create_test_app("jingrow", "Jingrow Framework") + source = app.add_source( + "Version 12", "http://git.jingrow.com:3000/jingrow/jingrow", "version-12", team=self.team + ) + group = new_release_group( + "Test Group", + "Version 12", + [{"app": source.app, "source": source.name}], + team=self.team, + ) + self.assertEqual(group.title, "Test Group") + + def test_create_release_group_set_app_from_source(self): + app1 = create_test_app("jingrow", "Jingrow Framework") + source1 = app1.add_source( + "Version 12", "http://git.jingrow.com:3000/jingrow/jingrow", "version-12", team=self.team + ) + app2 = create_test_app("jerp", "JERP") + source2 = app2.add_source( + "Version 12", "http://git.jingrow.com:3000/jingrow/jerp", "version-12", team=self.team + ) + group = new_release_group( + "Test Group", + "Version 12", + [{"app": source2.app, "source": source1.name}], + team=self.team, + ) + self.assertEqual(group.apps[0].app, source1.app) + + def test_create_release_group_fail_when_first_app_is_not_jingrow(self): + app = create_test_app("jerp", "JERP") + source = app.add_source( + "Version 12", "http://git.jingrow.com:3000/jingrow/jerp", "version-12", team=self.team + ) + self.assertRaises( + jingrow.ValidationError, + new_release_group, + "Test Group", + "Version 12", + [{"app": source.app, "source": source.name}], + team=self.team, + ) + + def test_create_release_group_fail_when_duplicate_apps(self): + app = create_test_app("jingrow", "Jingrow Framework") + source = app.add_source( + "Version 12", "http://git.jingrow.com:3000/jingrow/jingrow", "version-12", team=self.team + ) + self.assertRaises( + jingrow.ValidationError, + new_release_group, + "Test Group", + "Version 12", + [ + {"app": source.app, "source": source.name}, + {"app": source.app, "source": source.name}, + ], + team=self.team, + ) + + def test_create_release_group_fail_when_version_mismatch(self): + app = create_test_app("jingrow", "Jingrow Framework") + source = app.add_source( + "Version 12", "http://git.jingrow.com:3000/jingrow/jingrow", "version-12", team=self.team + ) + self.assertRaises( + jingrow.ValidationError, + new_release_group, + "Test Group", + "Version 13", + [{"app": source.app, "source": source.name}], + team=self.team, + ) + + def test_create_release_group_fail_with_duplicate_titles(self): + app = create_test_app("jingrow", "Jingrow Framework") + source = app.add_source( + "Version 12", "http://git.jingrow.com:3000/jingrow/jingrow", "version-12", team=self.team + ) + new_release_group( + "Test Group", + "Version 12", + [{"app": source.app, "source": source.name}], + 
team=self.team, + ) + self.assertRaises( + jingrow.ValidationError, + new_release_group, + "Test Group", + "Version 12", + [{"app": source.app, "source": source.name}], + team=self.team, + ) + + def test_branch_change_already_on_branch(self): + app = create_test_app() + rg = create_test_release_group([app]) + with self.assertRaises(jingrow.ValidationError): + rg.change_app_branch("jingrow", "master") + + def test_branch_change_app_source_exists(self): + app = create_test_app() + rg = create_test_release_group([app]) + + current_app_source = jingrow.get_pg("App Source", rg.apps[0].source) + app_source = create_test_app_source( + current_app_source.versions[0].version, + app, + current_app_source.repository_url, + "develop", + ) + + rg.change_app_branch(app.name, "develop") + rg.reload() + + # Source must be set to the available `app_source` for `app` + self.assertEqual(rg.apps[0].source, app_source.name) + + def test_branch_change_app_source_does_not_exist(self): + app = create_test_app() + rg = create_test_release_group([app]) + previous_app_source = jingrow.get_pg("App Source", rg.apps[0].source) + + rg.change_app_branch(app.name, "develop") + rg.reload() + + new_app_source = jingrow.get_pg("App Source", rg.apps[0].source) + self.assertEqual(new_app_source.branch, "develop") + self.assertEqual( + new_app_source.versions[0].version, previous_app_source.versions[0].version + ) + self.assertEqual(new_app_source.repository_url, previous_app_source.repository_url) + self.assertEqual(new_app_source.app, app.name) + + def test_new_release_group_loaded_with_correct_dependencies(self): + app = create_test_app("jingrow", "Jingrow Framework") + jingrow_version = jingrow.get_pg("Jingrow Version", "Version 14") + group = jingrow.get_pg( + { + "pagetype": "Release Group", + "title": "Test Group", + "version": "Version 14", + "apps": [ + {"app": app.name, "source": create_test_app_source("Version 14", app).name} + ], + "team": self.team, + } + ).insert() + + self.assertEqual( + find(group.dependencies, lambda d: d.dependency == "PYTHON_VERSION").version, + find( + jingrow_version.dependencies, lambda x: x.dependency == "PYTHON_VERSION" + ).version, + ) + + def test_cant_set_min_greater_than_max_workers(self): + rg = create_test_release_group([create_test_app()]) + rg.max_gunicorn_workers = 1 + rg.min_gunicorn_workers = 2 + self.assertRaises(jingrow.ValidationError, rg.save) + rg.max_background_workers = 1 + rg.min_background_workers = 2 + self.assertRaises(jingrow.ValidationError, rg.save) + rg.reload() + try: + rg.max_gunicorn_workers = 2 + rg.min_gunicorn_workers = 1 + rg.max_background_workers = 2 + rg.min_background_workers = 1 + rg.save() + rg.max_gunicorn_workers = 0 # default + rg.min_gunicorn_workers = 2 + rg.max_background_workers = 0 # default + rg.min_background_workers = 2 + rg.save() + except jingrow.ValidationError: + self.fail("Should not raise validation error") + + def test_update_available_shows_for_first_deploy(self): + rg = create_test_release_group([create_test_app()]) + self.assertEqual(deploy_information(rg.name).get("update_available"), True) + + def test_fetch_environment_variables(self): + rg = create_test_release_group([create_test_app()]) + environment_variables = [ + {"key": "test_key", "value": "test_value", "internal": False}, + {"key": "test_key_2", "value": "test_value", "internal": False}, + {"key": "secret_key", "value": "test_value", "internal": True}, + ] + for env in environment_variables: + rg.append("environment_variables", env) + rg.save() + rg.reload() + 
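# The list fetched below should contain only the two non-internal variables: get_list for
# "Release Group Variable" goes through ReleaseGroupVariable.get_list_query (defined later in
# this diff), which selects only rows with internal == 0. Roughly equivalent filtering, as a
# sketch over plain dicts (illustrative only):
#   visible = [v for v in environment_variables if not v["internal"]]  # -> 2 entries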
fetched_environment_variable_list = get_list( + "Release Group Variable", + fields=["name", "key", "value"], + filters={"parenttype": "Release Group", "parent": rg.name}, + ) + self.assertEqual(len(fetched_environment_variable_list), 2) + internal_environment_variables_keys = [ + env["key"] for env in environment_variables if env["internal"] + ] + non_internal_environment_variables_keys = [ + env["key"] for env in environment_variables if not env["internal"] + ] + for env in fetched_environment_variable_list: + self.assertNotIn(env.key, internal_environment_variables_keys) + self.assertIn(env.key, non_internal_environment_variables_keys) + + def test_add_environment_variable(self): + rg = create_test_release_group([create_test_app()]) + rg.update_environment_variable({"test_key": "test_value"}) + rg.reload() + self.assertEqual(len(rg.environment_variables), 1) + self.assertEqual(rg.environment_variables[0].key, "test_key") + self.assertEqual(rg.environment_variables[0].value, "test_value") + + def test_update_environment_variable(self): + rg = create_test_release_group([create_test_app()]) + rg.append( + "environment_variables", {"key": "test_key", "value": "test_value", "internal": 0} + ) + rg.save() + rg.reload() + self.assertEqual(len(rg.environment_variables), 1) + rg.update_environment_variable({"test_key": "new_test_value"}) + rg.reload() + self.assertEqual(len(rg.environment_variables), 1) + self.assertEqual(rg.environment_variables[0].value, "new_test_value") + + def test_update_internal_environment_variable(self): + rg = create_test_release_group([create_test_app()]) + rg.append( + "environment_variables", {"key": "test_key", "value": "test_value", "internal": 1} + ) + rg.save() + rg.reload() + self.assertEqual(len(rg.environment_variables), 1) + + def update_internal_environment_variable(): + rg.update_environment_variable({"test_key": "new_test_value"}) + + self.assertRaisesRegex( + jingrow.ValidationError, + "Environment variable test_key is internal and cannot be updated", + update_internal_environment_variable, + ) + + def test_delete_internal_environment_variable(self): + rg = create_test_release_group([create_test_app()]) + rg.append( + "environment_variables", {"key": "test_key", "value": "test_value", "internal": 1} + ) + rg.save() + rg.reload() + self.assertEqual(len(rg.environment_variables), 1) + rg.delete_environment_variable("test_key") + rg.reload() + self.assertEqual(len(rg.environment_variables), 1) + + def test_delete_environment_variable(self): + rg = create_test_release_group([create_test_app()]) + rg.append( + "environment_variables", {"key": "test_key", "value": "test_value", "internal": 0} + ) + rg.save() + rg.reload() + self.assertEqual(len(rg.environment_variables), 1) + rg.delete_environment_variable("test_key") + rg.reload() + self.assertEqual(len(rg.environment_variables), 0) + + @patch.object(AgentJob, "enqueue_http_request", new=Mock()) + def test_creating_private_bench_should_not_pick_servers_used_in_restricted_site_plans( + self, + ): + from jcloud.api.bench import new + from jcloud.jcloud.pagetype.cluster.test_cluster import create_test_cluster + from jcloud.jcloud.pagetype.proxy_server.test_proxy_server import ( + create_test_proxy_server, + ) + from jcloud.jcloud.pagetype.root_domain.test_root_domain import create_test_root_domain + from jcloud.jcloud.pagetype.server.test_server import create_test_server + from jcloud.jcloud.pagetype.site.test_site import create_test_bench + from jcloud.jcloud.pagetype.site_plan.test_site_plan import 
create_test_plan + + cluster = create_test_cluster("Default", public=True) + root_domain = create_test_root_domain("local.fc.jingrow.dev") + jingrow.db.set_single_value("Jcloud Settings", "domain", root_domain.name) + + jingrow_app = create_test_app(name="jingrow") + new_jingrow_app_source = create_test_app_source(version="Version 15", app=jingrow_app) + + n1_server = create_test_proxy_server(cluster=cluster.name, domain=root_domain.name) + f1_server = create_test_server(cluster=cluster.name, proxy_server=n1_server.name) + f1_server.use_for_new_benches = True + f1_server.save() + f1_server.reload() + + n2_server = create_test_proxy_server(cluster=cluster.name, domain=root_domain.name) + f2_server = create_test_server(cluster=cluster.name, proxy_server=n2_server.name) + f2_server.use_for_new_benches = True + f2_server.save() + f2_server.reload() + + rg = create_test_release_group([jingrow_app], servers=[f2_server.name]) + create_test_bench(group=rg) + + create_test_plan("Site", allowed_apps=[], release_groups=[rg.name]) + + """ + Try to create new bench, it should always pick the server which haven't used in any restricted release group + """ + group_name = new( + { + "title": "Test Bench 55", + "apps": [{"name": jingrow_app.name, "source": new_jingrow_app_source.name}], + "version": "Version 15", + "cluster": "Default", + "saas_app": None, + "server": None, + } + ) + new_group = jingrow.get_pg("Release Group", group_name) + self.assertEqual(new_group.servers[0].server, f1_server.name) diff --git a/jcloud/jcloud/pagetype/release_group_app/__init__.py b/jcloud/jcloud/pagetype/release_group_app/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/release_group_app/release_group_app.json b/jcloud/jcloud/pagetype/release_group_app/release_group_app.json new file mode 100644 index 0000000..75a797d --- /dev/null +++ b/jcloud/jcloud/pagetype/release_group_app/release_group_app.json @@ -0,0 +1,64 @@ +{ + "actions": [], + "creation": "2020-01-13 16:04:41.984386", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "app", + "title", + "column_break_2", + "source", + "enable_auto_deploy" + ], + "fields": [ + { + "fieldname": "source", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Source", + "options": "App Source", + "reqd": 1, + "search_index": 1 + }, + { + "fieldname": "column_break_2", + "fieldtype": "Column Break" + }, + { + "fetch_from": "source.app", + "fieldname": "app", + "fieldtype": "Link", + "in_list_view": 1, + "label": "App", + "options": "App", + "reqd": 1 + }, + { + "fetch_from": "app.title", + "fieldname": "title", + "fieldtype": "Data", + "label": "Title", + "reqd": 1 + }, + { + "default": "0", + "fieldname": "enable_auto_deploy", + "fieldtype": "Check", + "label": "Enable Auto Deploy" + } + ], + "istable": 1, + "links": [], + "modified": "2022-09-30 13:22:28.902064", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Release Group App", + "owner": "Administrator", + "permissions": [], + "quick_entry": 1, + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/release_group_app/release_group_app.py b/jcloud/jcloud/pagetype/release_group_app/release_group_app.py new file mode 100644 index 0000000..3a882e4 --- /dev/null +++ b/jcloud/jcloud/pagetype/release_group_app/release_group_app.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# For license information, 
please see license.txt + + +# import jingrow +from jingrow.model.document import Document +from jingrow.utils import cstr + +from jcloud.api.bench import apps + + +class ReleaseGroupApp(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + app: DF.Link + enable_auto_deploy: DF.Check + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + source: DF.Link + title: DF.Data + # end: auto-generated types + + dashboard_fields = ["app"] + + @staticmethod + def get_list_query(query, filters=None, **list_args): + group = cstr(filters.get("parent", "")) if filters else None + return apps(group) diff --git a/jcloud/jcloud/pagetype/release_group_dependency/__init__.py b/jcloud/jcloud/pagetype/release_group_dependency/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/release_group_dependency/release_group_dependency.json b/jcloud/jcloud/pagetype/release_group_dependency/release_group_dependency.json new file mode 100644 index 0000000..663d992 --- /dev/null +++ b/jcloud/jcloud/pagetype/release_group_dependency/release_group_dependency.json @@ -0,0 +1,50 @@ +{ + "actions": [], + "creation": "2021-05-18 17:34:17.579862", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "dependency", + "version", + "is_custom" + ], + "fields": [ + { + "fieldname": "dependency", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Dependency", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "version", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Version", + "reqd": 1 + }, + { + "default": "0", + "depends_on": "eval: pg.is_custom", + "description": "Version has been set by the user from the dashboard and could be invalid.", + "fieldname": "is_custom", + "fieldtype": "Check", + "label": "Is Custom" + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2024-07-19 13:13:25.158330", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Release Group Dependency", + "owner": "Administrator", + "permissions": [], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/release_group_dependency/release_group_dependency.py b/jcloud/jcloud/pagetype/release_group_dependency/release_group_dependency.py new file mode 100644 index 0000000..f276881 --- /dev/null +++ b/jcloud/jcloud/pagetype/release_group_dependency/release_group_dependency.py @@ -0,0 +1,47 @@ +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + +import jingrow +from jingrow.model.document import Document +from jcloud.api.client import is_owned_by_team + + +class ReleaseGroupDependency(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + dependency: DF.Data + is_custom: DF.Check + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + version: DF.Data + # end: auto-generated types + + @staticmethod + def get_list_query(query, filters=None, **list_args): + if not filters or not (group := filters.get("parent")): + return + is_owned_by_team("Release Group", group, raise_exception=True) + + RGDependency = jingrow.qb.PageType("Release Group Dependency") + BenchDependency = jingrow.qb.PageType("Bench Dependency") + + query = ( + query.join(BenchDependency) + .on(BenchDependency.name == RGDependency.dependency) + .where(BenchDependency.internal == 0) + .select( + RGDependency.dependency, + RGDependency.version, + BenchDependency.title, + RGDependency.is_custom, + ) + ) + dependencies = query.run(as_dict=True) + return dependencies diff --git a/jcloud/jcloud/pagetype/release_group_mount/__init__.py b/jcloud/jcloud/pagetype/release_group_mount/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/release_group_mount/release_group_mount.json b/jcloud/jcloud/pagetype/release_group_mount/release_group_mount.json new file mode 100644 index 0000000..d48ea24 --- /dev/null +++ b/jcloud/jcloud/pagetype/release_group_mount/release_group_mount.json @@ -0,0 +1,56 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2023-12-13 15:02:17.666052", + "pagetype": "PageType", + "engine": "InnoDB", + "field_order": [ + "is_absolute_path", + "section_break_nvom", + "source", + "column_break_zdvz", + "destination" + ], + "fields": [ + { + "fieldname": "source", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Source (Host)", + "reqd": 1 + }, + { + "fieldname": "column_break_zdvz", + "fieldtype": "Column Break" + }, + { + "fieldname": "destination", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Destination (Container)", + "reqd": 1 + }, + { + "default": "0", + "fieldname": "is_absolute_path", + "fieldtype": "Check", + "label": "Is Absolute Path" + }, + { + "fieldname": "section_break_nvom", + "fieldtype": "Section Break" + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2023-12-14 11:01:27.384102", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Release Group Mount", + "owner": "Administrator", + "permissions": [], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/release_group_mount/release_group_mount.py b/jcloud/jcloud/pagetype/release_group_mount/release_group_mount.py new file mode 100644 index 0000000..8932344 --- /dev/null +++ b/jcloud/jcloud/pagetype/release_group_mount/release_group_mount.py @@ -0,0 +1,25 @@ +# Copyright (c) 2023, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class ReleaseGroupMount(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + destination: DF.Data + is_absolute_path: DF.Check + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + source: DF.Data + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/release_group_package/__init__.py b/jcloud/jcloud/pagetype/release_group_package/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/release_group_package/release_group_package.json b/jcloud/jcloud/pagetype/release_group_package/release_group_package.json new file mode 100644 index 0000000..69e8019 --- /dev/null +++ b/jcloud/jcloud/pagetype/release_group_package/release_group_package.json @@ -0,0 +1,61 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2023-05-12 14:37:58.299311", + "default_view": "List", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "package_manager", + "package_prerequisites", + "column_break_r6rj", + "package", + "after_install" + ], + "fields": [ + { + "default": "apt", + "fieldname": "package_manager", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Package Manager", + "reqd": 1 + }, + { + "fieldname": "package", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Package", + "reqd": 1 + }, + { + "fieldname": "column_break_r6rj", + "fieldtype": "Column Break" + }, + { + "description": "Use && for multiline prerequisites\n

 \nUse jinja syntax to define dependency variables
\neg: python{{ PYTHON_VERSION }} ", + "fieldname": "package_prerequisites", + "fieldtype": "Text", + "label": "Package Prerequisites" + }, + { + "description": "Use && for multiline prerequisites\n", + "fieldname": "after_install", + "fieldtype": "Text", + "label": "After Install" + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2023-12-26 18:17:13.374441", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Release Group Package", + "owner": "Administrator", + "permissions": [], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/release_group_package/release_group_package.py b/jcloud/jcloud/pagetype/release_group_package/release_group_package.py new file mode 100644 index 0000000..566f7c7 --- /dev/null +++ b/jcloud/jcloud/pagetype/release_group_package/release_group_package.py @@ -0,0 +1,26 @@ +# Copyright (c) 2023, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class ReleaseGroupPackage(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + after_install: DF.Text | None + package: DF.Data + package_manager: DF.Data + package_prerequisites: DF.Text | None + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/release_group_server/__init__.py b/jcloud/jcloud/pagetype/release_group_server/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/release_group_server/release_group_server.json b/jcloud/jcloud/pagetype/release_group_server/release_group_server.json new file mode 100644 index 0000000..0ce31c9 --- /dev/null +++ b/jcloud/jcloud/pagetype/release_group_server/release_group_server.json @@ -0,0 +1,48 @@ +{ + "actions": [], + "creation": "2020-12-09 10:06:46.812420", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "server", + "column_break_2", + "default" + ], + "fields": [ + { + "fieldname": "server", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Server", + "options": "Server", + "reqd": 1 + }, + { + "default": "0", + "fieldname": "default", + "fieldtype": "Check", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Default" + }, + { + "fieldname": "column_break_2", + "fieldtype": "Column Break" + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2020-12-09 10:09:06.272798", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Release Group Server", + "owner": "Administrator", + "permissions": [], + "quick_entry": 1, + "sort_field": "modified", + "sort_order": "DESC", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/release_group_server/release_group_server.py b/jcloud/jcloud/pagetype/release_group_server/release_group_server.py new file mode 100644 index 0000000..709a3fb --- /dev/null +++ b/jcloud/jcloud/pagetype/release_group_server/release_group_server.py @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +# import jingrow +from jingrow.model.document import Document + + +class ReleaseGroupServer(Document): + # begin: auto-generated types + # This 
code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + default: DF.Check + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + server: DF.Link + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/release_group_variable/__init__.py b/jcloud/jcloud/pagetype/release_group_variable/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/release_group_variable/release_group_variable.json b/jcloud/jcloud/pagetype/release_group_variable/release_group_variable.json new file mode 100644 index 0000000..0f3a4b4 --- /dev/null +++ b/jcloud/jcloud/pagetype/release_group_variable/release_group_variable.json @@ -0,0 +1,49 @@ +{ + "actions": [], + "creation": "2023-06-13 16:15:53.783330", + "default_view": "List", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "key", + "value", + "internal" + ], + "fields": [ + { + "fieldname": "key", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Key", + "reqd": 1 + }, + { + "fieldname": "value", + "fieldtype": "Text", + "in_list_view": 1, + "label": "Value", + "reqd": 1 + }, + { + "default": "0", + "fieldname": "internal", + "fieldtype": "Check", + "in_list_view": 1, + "label": "Internal Usage" + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2025-01-20 15:12:45.664299", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Release Group Variable", + "owner": "Administrator", + "permissions": [], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/release_group_variable/release_group_variable.py b/jcloud/jcloud/pagetype/release_group_variable/release_group_variable.py new file mode 100644 index 0000000..06de3c9 --- /dev/null +++ b/jcloud/jcloud/pagetype/release_group_variable/release_group_variable.py @@ -0,0 +1,33 @@ +# Copyright (c) 2023, JINGROW +# For license information, please see license.txt + +import jingrow +from jingrow.model.document import Document + + +class ReleaseGroupVariable(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING, ClassVar + + if TYPE_CHECKING: + from jingrow.types import DF + + internal: DF.Check + key: DF.Data + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + value: DF.Text + # end: auto-generated types + + dashboard_fields: ClassVar = ["key", "value"] + + @staticmethod + def get_list_query(query, filters=None, **list_args): + environmentVariable = jingrow.qb.PageType("Release Group Variable") + query = query.where(environmentVariable.internal == 0).orderby( + environmentVariable.key, order=jingrow.qb.asc + ) + return query.run(as_dict=True) diff --git a/jcloud/jcloud/pagetype/remote_file/__init__.py b/jcloud/jcloud/pagetype/remote_file/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/remote_file/remote_file.js b/jcloud/jcloud/pagetype/remote_file/remote_file.js new file mode 100644 index 0000000..d57477a --- /dev/null +++ b/jcloud/jcloud/pagetype/remote_file/remote_file.js @@ -0,0 +1,45 @@ +// Copyright (c) 2020, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Remote File', { + refresh: function (frm) { + frm.add_custom_button( + __('Download'), + () => { + frm.call('get_download_link').then((r) => { + if (!r.exc) { + window.open(r.message); + } + }); + }, + __('Actions'), + ); + frm.add_custom_button( + __('Delete File'), + () => { + jingrow.confirm( + `Doing this won't allow you to restore the Site again using this backup. Are you sure you want to delete this file from S3?`, + () => frm.call('delete_remote_object').then((r) => frm.refresh()), + ); + }, + __('Actions'), + ); + frm.add_custom_button( + __('Ping'), + () => { + frm.call('exists').then((r) => { + if (!r.exc) { + if (r.message) { + console.log(r.message); + jingrow.msgprint('Pong'); + } else { + frm.refresh(); + frm.refresh_header(); + } + } + }); + }, + __('Actions'), + ); + }, +}); diff --git a/jcloud/jcloud/pagetype/remote_file/remote_file.json b/jcloud/jcloud/pagetype/remote_file/remote_file.json new file mode 100644 index 0000000..eb61c08 --- /dev/null +++ b/jcloud/jcloud/pagetype/remote_file/remote_file.json @@ -0,0 +1,137 @@ +{ + "actions": [], + "allow_import": 1, + "creation": "2020-07-02 14:40:15.222405", + "description": "Tracks S3 uploads made via Jcloud", + "pagetype": "PageType", + "engine": "InnoDB", + "field_order": [ + "file_name", + "status", + "column_break_uzdq", + "site", + "section_break_5", + "file_size", + "file_path", + "column_break_scaf", + "file_type", + "bucket", + "url" + ], + "fields": [ + { + "fieldname": "file_name", + "fieldtype": "Data", + "in_global_search": 1, + "label": "File Name", + "oldfieldname": "file_name", + "oldfieldtype": "Data", + "read_only": 1 + }, + { + "fieldname": "section_break_5", + "fieldtype": "Section Break" + }, + { + "fieldname": "file_size", + "fieldtype": "Data", + "in_list_view": 1, + "label": "File Size", + "length": 20, + "read_only": 1 + }, + { + "fieldname": "file_path", + "fieldtype": "Text", + "label": "File Path", + "read_only": 1 + }, + { + "fieldname": "file_type", + "fieldtype": "Data", + "label": "File Type", + "read_only": 1 + }, + { + "fieldname": "status", + "fieldtype": "Select", + "in_list_view": 1, + "in_preview": 1, + "in_standard_filter": 1, + "label": "Status", + "options": "Available\nUnavailable", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "bucket", + "fieldtype": "Data", + "label": "Location", + "read_only": 1 + }, + { + "fieldname": "url", + "fieldtype": "Code", + "label": "URL", + "read_only": 1 + }, + { + 
"fieldname": "site", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Site", + "options": "Site", + "read_only": 1 + }, + { + "fieldname": "column_break_uzdq", + "fieldtype": "Column Break" + }, + { + "fieldname": "column_break_scaf", + "fieldtype": "Column Break" + } + ], + "icon": "fa fa-file", + "links": [], + "modified": "2024-05-10 15:07:11.179229", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Remote File", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "import": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + }, + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "if_owner": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "All", + "share": 1, + "write": 1 + } + ], + "search_fields": "file_name", + "sort_field": "modified", + "sort_order": "ASC", + "states": [], + "title_field": "file_name", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/remote_file/remote_file.py b/jcloud/jcloud/pagetype/remote_file/remote_file.py new file mode 100644 index 0000000..efff066 --- /dev/null +++ b/jcloud/jcloud/pagetype/remote_file/remote_file.py @@ -0,0 +1,290 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +import json +import pprint + +import jingrow +import requests +from boto3 import client, resource +from jingrow.model.document import Document +from jingrow.utils.password import get_decrypted_password + + +def get_remote_key(file): + from hashlib import sha1 + from os.path import join + from time import time + + from jcloud.utils import get_current_team + + team = sha1(get_current_team().encode()).hexdigest() + time = str(time()).replace(".", "_") + + return join(team, time, file) + + +def poll_file_statuses(): + aws_access_key = jingrow.db.get_single_value( + "Jcloud Settings", "offsite_backups_access_key_id" + ) + aws_secret_key = get_decrypted_password( + "Jcloud Settings", "Jcloud Settings", "offsite_backups_secret_access_key" + ) + default_region = jingrow.db.get_single_value("Jcloud Settings", "backup_region") + buckets = [ + { + "name": jingrow.db.get_single_value("Jcloud Settings", "aws_s3_bucket"), + "region": default_region, + "access_key_id": aws_access_key, + "secret_access_key": aws_secret_key, + }, + { + "name": jingrow.db.get_single_value("Jcloud Settings", "remote_uploads_bucket"), + "region": default_region, + "access_key_id": jingrow.db.get_single_value( + "Jcloud Settings", "remote_access_key_id" + ), + "secret_access_key": get_decrypted_password( + "Jcloud Settings", "Jcloud Settings", "remote_secret_access_key" + ), + }, + ] + + for b in jingrow.get_all("Backup Bucket", ["bucket_name", "cluster", "region"]): + + buckets.append( + { + "name": b["bucket_name"], + "region": b["region"], + "access_key_id": aws_access_key, + "secret_access_key": aws_secret_key, + } + ) + + for bucket in buckets: + jingrow.enqueue( + "jcloud.jcloud.pagetype.remote_file.remote_file.poll_file_statuses_from_bucket", + bucket=bucket, + job_id=f"poll_file_statuses:{bucket['name']}", + queue="long", + deduplicate=True, + enqueue_after_commit=True, + ) + + +def poll_file_statuses_from_bucket(bucket): + from jcloud.utils import chunk + + s3 = resource( + "s3", + aws_access_key_id=bucket["access_key_id"], + aws_secret_access_key=bucket["secret_access_key"], + region_name=bucket["region"], + ) + + available_files 
= set() + for s3_object in s3.Bucket(bucket["name"]).objects.all(): + available_files.add(s3_object.key) + + pagetype = "Remote File" + remote_files = jingrow.get_all( + pagetype, + fields=["name", "file_path", "status"], + filters={"bucket": bucket["name"]}, + ) + + set_to_available = [] + set_to_unavailable = [] + for remote_file in remote_files: + name, file_path, status = ( + remote_file["name"], + remote_file["file_path"], + remote_file["status"], + ) + if file_path not in available_files: + if status == "Available": + set_to_unavailable.append(name) + else: + if status == "Unavailable": + set_to_available.append(name) + + for files in chunk(set_to_unavailable, 1000): + jingrow.db.set_value(pagetype, {"name": ("in", files)}, "status", "Unavailable") + + for files in chunk(set_to_available, 1000): + jingrow.db.set_value(pagetype, {"name": ("in", files)}, "status", "Available") + + # Delete s3 files that are not tracked with Remote Files + remote_file_paths = set(file["file_path"] for file in remote_files) + file_only_on_s3 = available_files - remote_file_paths + delete_s3_files({bucket["name"]: list(file_only_on_s3)}) + jingrow.db.commit() + + +def delete_remote_backup_objects(remote_files): + """Delete specified objects identified by keys in the backups bucket.""" + remote_files = list(set([x for x in remote_files if x])) + if not remote_files: + return + + buckets = {bucket: [] for bucket in jingrow.get_all("Backup Bucket", pluck="name")} + buckets.update({jingrow.db.get_single_value("Jcloud Settings", "aws_s3_bucket"): []}) + + [ + buckets[bucket].append(file) + for file, bucket in jingrow.db.get_values( + "Remote File", + {"name": ("in", remote_files), "status": "Available"}, + ["file_path", "bucket"], + ) + ] + + delete_s3_files(buckets) + jingrow.db.set_value( + "Remote File", {"name": ("in", remote_files)}, "status", "Unavailable" + ) + + return remote_files + + +class RemoteFile(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + bucket: DF.Data | None + file_name: DF.Data | None + file_path: DF.Text | None + file_size: DF.Data | None + file_type: DF.Data | None + site: DF.Link | None + status: DF.Literal["Available", "Unavailable"] + url: DF.Code | None + # end: auto-generated types + + @property + def s3_client(self): + if not self.bucket: + return None + + elif self.bucket == jingrow.db.get_single_value( + "Jcloud Settings", "remote_uploads_bucket" + ): + access_key_id = jingrow.db.get_single_value("Jcloud Settings", "remote_access_key_id") + secret_access_key = get_decrypted_password( + "Jcloud Settings", "Jcloud Settings", "remote_secret_access_key" + ) + + elif self.bucket: + access_key_id = jingrow.db.get_single_value( + "Jcloud Settings", "offsite_backups_access_key_id" + ) + secret_access_key = get_decrypted_password( + "Jcloud Settings", "Jcloud Settings", "offsite_backups_secret_access_key" + ) + + else: + return None + + return client( + "s3", + aws_access_key_id=access_key_id, + aws_secret_access_key=secret_access_key, + region_name=jingrow.db.get_value("Backup Bucket", self.bucket, "region") + or jingrow.db.get_single_value("Jcloud Settings", "backup_region"), + ) + + @property + def download_link(self): + return self.get_download_link() + + @jingrow.whitelist() + def exists(self): + self.db_set("status", "Available") + + if self.url: + success = str(requests.head(self.url).status_code).startswith("2") + if success: + return True + self.db_set("status", "Unavailable") + return False + else: + try: + return self.s3_client.head_object(Bucket=self.bucket, Key=self.file_path) + except Exception: + self.db_set("status", "Unavailable") + return False + + @jingrow.whitelist() + def delete_remote_object(self): + self.db_set("status", "Unavailable") + return self.s3_client.delete_object( + Bucket=jingrow.db.get_single_value("Jcloud Settings", "remote_uploads_bucket"), + Key=self.file_path, + ) + + def on_trash(self): + self.delete_remote_object() + + @jingrow.whitelist() + def get_download_link(self): + return self.url or self.s3_client.generate_presigned_url( + "get_object", + Params={"Bucket": self.bucket, "Key": self.file_path}, + ExpiresIn=jingrow.db.get_single_value("Jcloud Settings", "remote_link_expiry") or 3600, + ) + + def get_content(self): + if self.url: + return json.loads(requests.get(self.url).content) + else: + obj = self.s3_client.get_object(Bucket=self.bucket, Key=self.file_path) + return json.loads(obj["Body"].read().decode("utf-8")) + + @property + def size(self) -> int: + """ + Get the size of file in bytes + + Sets the file_size field if not already set + """ + if int(self.file_size or 0): + return int(self.file_size or 0) + else: + response = requests.head(self.url) + self.file_size = int(response.headers.get("content-length", 0)) + self.save() + return int(self.file_size) + + +def delete_s3_files(buckets): + """Delete specified files from s3 buckets""" + from boto3 import resource + + from jcloud.utils import chunk + + jcloud_settings = jingrow.get_single("Jcloud Settings") + for bucket_name in buckets.keys(): + s3 = resource( + "s3", + aws_access_key_id=jcloud_settings.offsite_backups_access_key_id, + aws_secret_access_key=jcloud_settings.get_password( + "offsite_backups_secret_access_key", raise_exception=False + ), + endpoint_url=jingrow.db.get_value("Backup Bucket", bucket_name, "endpoint_url") + or "https://s3.amazonaws.com", + ) + bucket = s3.Bucket(bucket_name) + for objects in 
chunk([{"Key": x} for x in buckets[bucket_name]], 1000): + response = bucket.delete_objects(Delete={"Objects": objects}) + response = pprint.pformat(response) + jingrow.get_pg( + pagetype="Remote Operation Log", operation_type="Delete Files", response=response + ).insert() diff --git a/jcloud/jcloud/pagetype/remote_file/test_remote_file.py b/jcloud/jcloud/pagetype/remote_file/test_remote_file.py new file mode 100644 index 0000000..94a3743 --- /dev/null +++ b/jcloud/jcloud/pagetype/remote_file/test_remote_file.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# See license.txt + + +import unittest +from datetime import datetime +from typing import Optional + +import jingrow + + +def create_test_remote_file( + site: Optional[str] = None, + creation: datetime = None, + file_path: str = None, + file_size: int = 1024, +): + """Create test remote file pg for required timestamp.""" + creation = creation or jingrow.utils.now_datetime() + remote_file = jingrow.get_pg( + { + "pagetype": "Remote File", + "status": "Available", + "site": site, + "file_path": file_path, + "file_size": file_size, + } + ).insert(ignore_if_duplicate=True) + remote_file.db_set("creation", creation) + remote_file.reload() + return remote_file + + +class TestRemoteFile(unittest.TestCase): + pass diff --git a/jcloud/jcloud/pagetype/remote_operation_log/__init__.py b/jcloud/jcloud/pagetype/remote_operation_log/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/remote_operation_log/remote_operation_log.js b/jcloud/jcloud/pagetype/remote_operation_log/remote_operation_log.js new file mode 100644 index 0000000..1b3f0f1 --- /dev/null +++ b/jcloud/jcloud/pagetype/remote_operation_log/remote_operation_log.js @@ -0,0 +1,7 @@ +// Copyright (c) 2020, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Remote Operation Log', { + // refresh: function(frm) { + // } +}); diff --git a/jcloud/jcloud/pagetype/remote_operation_log/remote_operation_log.json b/jcloud/jcloud/pagetype/remote_operation_log/remote_operation_log.json new file mode 100644 index 0000000..b89447d --- /dev/null +++ b/jcloud/jcloud/pagetype/remote_operation_log/remote_operation_log.json @@ -0,0 +1,60 @@ +{ + "actions": [], + "creation": "2020-12-16 17:01:05.934510", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "operation_type", + "response" + ], + "fields": [ + { + "fieldname": "response", + "fieldtype": "Code", + "label": "Response", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "operation_type", + "fieldtype": "Select", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Operation Type", + "options": "Delete Files", + "reqd": 1 + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2021-02-16 11:10:03.427047", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Remote Operation Log", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + }, + { + "create": 1, + "role": "Jcloud Admin" + }, + { + "create": 1, + "role": "Jcloud Member" + } + ], + "sort_field": "modified", + "sort_order": "DESC" +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/remote_operation_log/remote_operation_log.py b/jcloud/jcloud/pagetype/remote_operation_log/remote_operation_log.py new file mode 100644 index 0000000..3149929 --- /dev/null 
+++ b/jcloud/jcloud/pagetype/remote_operation_log/remote_operation_log.py @@ -0,0 +1,23 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +# import jingrow +from jingrow.model.document import Document + + +class RemoteOperationLog(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + operation_type: DF.Literal["Delete Files"] + response: DF.Code + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/remote_operation_log/test_remote_operation_log.py b/jcloud/jcloud/pagetype/remote_operation_log/test_remote_operation_log.py new file mode 100644 index 0000000..dffe483 --- /dev/null +++ b/jcloud/jcloud/pagetype/remote_operation_log/test_remote_operation_log.py @@ -0,0 +1,11 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# See license.txt + + +# import jingrow +import unittest + + +class TestRemoteOperationLog(unittest.TestCase): + pass diff --git a/jcloud/jcloud/pagetype/resource_tag/__init__.py b/jcloud/jcloud/pagetype/resource_tag/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/resource_tag/resource_tag.json b/jcloud/jcloud/pagetype/resource_tag/resource_tag.json new file mode 100644 index 0000000..b965d35 --- /dev/null +++ b/jcloud/jcloud/pagetype/resource_tag/resource_tag.json @@ -0,0 +1,43 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2023-07-02 00:07:18.322521", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "tag", + "tag_name" + ], + "fields": [ + { + "fieldname": "tag", + "fieldtype": "Link", + "in_list_view": 1, + "in_preview": 1, + "label": "Tag", + "options": "Jcloud Tag" + }, + { + "fetch_from": "tag.tag", + "fieldname": "tag_name", + "fieldtype": "Data", + "in_global_search": 1, + "in_list_view": 1, + "in_preview": 1, + "label": "Tag Name" + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2023-07-02 00:37:51.667939", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Resource Tag", + "owner": "Administrator", + "permissions": [], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/resource_tag/resource_tag.py b/jcloud/jcloud/pagetype/resource_tag/resource_tag.py new file mode 100644 index 0000000..ee2c2a4 --- /dev/null +++ b/jcloud/jcloud/pagetype/resource_tag/resource_tag.py @@ -0,0 +1,24 @@ +# Copyright (c) 2023, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class ResourceTag(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + tag: DF.Link | None + tag_name: DF.Data | None + # end: auto-generated types + + dashboard_fields = ["tag", "tag_name"] diff --git a/jcloud/jcloud/pagetype/resource_tag/tag_helpers.py b/jcloud/jcloud/pagetype/resource_tag/tag_helpers.py new file mode 100644 index 0000000..69a7a61 --- /dev/null +++ b/jcloud/jcloud/pagetype/resource_tag/tag_helpers.py @@ -0,0 +1,36 @@ +# Copyright (c) 2024, JINGROW + +from __future__ import unicode_literals + +import jingrow + +from jcloud.api.client import dashboard_whitelist + + +class TagHelpers: + @dashboard_whitelist() + def add_resource_tag(self, tag): + team = jingrow.local.team().name + existing_tags = [row.tag_name for row in self.tags] + if tag in existing_tags: + return + + if not jingrow.db.exists( + "Jcloud Tag", {"tag": tag, "pagetype_name": self.pagetype, "team": team} + ): + tag_pg = jingrow.new_pg( + "Jcloud Tag", tag=tag, pagetype_name=self.pagetype, team=team + ).insert() + else: + tag_pg = jingrow.get_pg( + "Jcloud Tag", + {"tag": tag, "pagetype_name": self.pagetype, "team": team}, + ) + + self.append("tags", {"tag": tag_pg.name}) + self.save() + + @dashboard_whitelist() + def remove_resource_tag(self, tag): + self.tags = [row for row in self.tags if row.tag_name != tag] + self.save() diff --git a/jcloud/jcloud/pagetype/root_domain/__init__.py b/jcloud/jcloud/pagetype/root_domain/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/root_domain/root_domain.js b/jcloud/jcloud/pagetype/root_domain/root_domain.js new file mode 100644 index 0000000..b83c480 --- /dev/null +++ b/jcloud/jcloud/pagetype/root_domain/root_domain.js @@ -0,0 +1,7 @@ +// Copyright (c) 2021, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Root Domain', { + // refresh: function(frm) { + // } +}); diff --git a/jcloud/jcloud/pagetype/root_domain/root_domain.json b/jcloud/jcloud/pagetype/root_domain/root_domain.json new file mode 100644 index 0000000..67831d8 --- /dev/null +++ b/jcloud/jcloud/pagetype/root_domain/root_domain.json @@ -0,0 +1,122 @@ +{ + "actions": [], + "autoname": "Prompt", + "creation": "2021-03-24 09:04:21.555295", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "default_cluster", + "column_break_4", + "dns_provider", + "dnspod_app_id", + "dnspod_app_token", + "aws_access_key_id", + "aws_secret_access_key" + ], + "fields": [ + { + "default": "DNSPod", + "fieldname": "dns_provider", + "fieldtype": "Select", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "DNS Provider", + "options": "DNSPod\nAWS Route 53\nOther", + "reqd": 1 + }, + { + "fieldname": "column_break_4", + "fieldtype": "Column Break" + }, + { + "depends_on": "eval:pg.dns_provider=='AWS Route 53'", + "fieldname": "aws_access_key_id", + "fieldtype": "Data", + "label": "AWS Access Key ID" + }, + { + "depends_on": "eval:pg.dns_provider=='AWS Route 53'", + "fieldname": "aws_secret_access_key", + "fieldtype": "Password", + "label": "AWS Secret Access Key" + }, + { + "fieldname": "default_cluster", + "fieldtype": "Link", + "label": "Default Cluster", + "options": "Cluster", + "reqd": 1 + }, + { + "depends_on": "eval:pg.dns_provider=='DNSPod'", + "fieldname": "dnspod_app_id", + "fieldtype": "Data", + "label": "DNSPod APP ID ( or SecretId )" + }, + { + "depends_on": "eval:pg.dns_provider=='DNSPod'", + "fieldname": 
"dnspod_app_token", + "fieldtype": "Password", + "label": "DNSPod APP Token ( or SecretKey )" + } + ], + "index_web_pages_for_search": 1, + "links": [ + { + "group": "Linked Documents", + "link_pagetype": "TLS Certificate", + "link_fieldname": "domain" + }, + { + "group": "Linked Documents", + "link_pagetype": "Site", + "link_fieldname": "domain" + }, + { + "group": "Servers", + "link_pagetype": "Server", + "link_fieldname": "domain" + }, + { + "group": "Servers", + "link_pagetype": "Proxy Server", + "link_fieldname": "domain" + }, + { + "group": "Servers", + "link_pagetype": "Database Server", + "link_fieldname": "domain" + }, + { + "group": "Servers", + "link_pagetype": "Registry Server", + "link_fieldname": "domain" + } + ], + "modified": "2025-04-10 01:35:05.509396", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Root Domain", + "naming_rule": "Set by user", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "row_format": "Dynamic", + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/root_domain/root_domain.py b/jcloud/jcloud/pagetype/root_domain/root_domain.py new file mode 100644 index 0000000..0fae4c5 --- /dev/null +++ b/jcloud/jcloud/pagetype/root_domain/root_domain.py @@ -0,0 +1,342 @@ +# root_domain.py + +from __future__ import annotations + +import json +from datetime import datetime, timedelta +from typing import Iterable + +import boto3 +import jingrow +import requests +from jingrow.core.utils import find +from jingrow.model.document import Document + +from jcloud.utils import log_error + + +class RootDomain(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + aws_access_key_id: DF.Data | None + aws_secret_access_key: DF.Password | None + default_cluster: DF.Link + dns_provider: DF.Literal["DNSPod", "AWS Route 53", "Other"] + dnspod_app_id: DF.Data | None + dnspod_app_token: DF.Password | None + # end: auto-generated types + + def after_insert(self): + """ + After a Root Domain is created, automatically create a wildcard certificate record to obtain a *.domain.com certificate. + """ + if not jingrow.db.exists("TLS Certificate", {"wildcard": True, "domain": self.name}): + jingrow.enqueue_pg( + self.pagetype, + self.name, + "obtain_root_domain_tls_certificate", + enqueue_after_commit=True, + ) + + def obtain_root_domain_tls_certificate(self): + """ + Runs from the queue: creates the TLS Certificate document so the system issues the wildcard certificate automatically. + """ + try: + rsa_key_size = jingrow.db.get_value("Jcloud Settings", "Jcloud Settings", "rsa_key_size") + jingrow.get_pg( + { + "pagetype": "TLS Certificate", + "wildcard": True, + "domain": self.name, + "rsa_key_size": rsa_key_size, + } + ).insert() + except Exception: + log_error("Root Domain TLS Certificate Exception") + + # ---------------------------------------------------------------- + # Original Route53 logic + # ---------------------------------------------------------------- + @property + def boto3_client(self): + """ + Initialize the boto3 route53 client only when dns_provider == 'AWS Route 53'. + """ + if self.dns_provider != "AWS Route 53": + return None + if not hasattr(self, "_boto3_client"): + self._boto3_client = boto3.client( + "route53", + aws_access_key_id=self.aws_access_key_id, + aws_secret_access_key=self.get_password("aws_secret_access_key"), + ) + return self._boto3_client + + @property + def hosted_zone(self): + """ + Used only when dns_provider == 'AWS Route 53'; fetches the Route53 HostedZone Id. + """ + if self.dns_provider != "AWS Route 53": + return None + zones = self.boto3_client.list_hosted_zones_by_name()["HostedZones"] + return find(reversed(zones), lambda x: self.name.endswith(x["Name"][:-1]))["Id"] + + # ---------------------------------------------------------------- + # Needed for DNSPod: combine dnspod_app_id + dnspod_app_token => login_token + # ---------------------------------------------------------------- + @property + def dnspod_login_token(self) -> str: + """ + The DNSPod API expects the AppID and Token joined in the "AppID,Token" format. + """ + app_id = self.dnspod_app_id or "" + app_token = self.get_password("dnspod_app_token") or "" + return f"{app_id},{app_token}" + + # ---------------------------------------------------------------- + # Generic DNS query / delete / update, dispatching between Route53 and DNSPod + # ---------------------------------------------------------------- + def get_dns_record_pages(self) -> Iterable: + """ + Fetch DNS records depending on dns_provider. + - AWS Route53: paginate with boto3 + - DNSPod: fetch all records at once, then yield a page-shaped structure + """ + try: + if self.dns_provider == "AWS Route 53": + paginator = self.boto3_client.get_paginator("list_resource_record_sets") + return paginator.paginate( + PaginationConfig={"MaxItems": 10000, "PageSize": 300, "StartingToken": "0"}, + HostedZoneId=self.hosted_zone.split("/")[-1], + ) + elif self.dns_provider == "DNSPod": + record_list = self._dnspod_list_records() + yield {"ResourceRecordSets": record_list} + else: + # Other providers can be added here + return [] + except Exception: + log_error(f"{self.dns_provider} Pagination Error", domain=self.name) + return [] + + def delete_dns_records(self, records: list[str]): + """ + Single deletion entry point: behaves differently depending on whether dns_provider is 'AWS Route 53' or 'DNSPod'. + """ + try: + if self.dns_provider == "AWS Route 53": + changes = [] + for record in records:
changes.append({"Action": "DELETE", "ResourceRecordSet": record}) + self.boto3_client.change_resource_record_sets( + ChangeBatch={"Changes": changes}, HostedZoneId=self.hosted_zone + ) + elif self.dns_provider == "DNSPod": + for record in records: + self._dnspod_delete_record(record) + except Exception: + log_error(f"{self.dns_provider} Record Deletion Error", domain=self.name) + + def remove_unused_cname_records(self): + """ + 原先 Route53 清理逻辑保留,DNSPod 同理处理。 + """ + proxies = jingrow.get_all("Proxy Server", {"status": "Active"}, pluck="name") + for page in self.get_dns_record_pages() or []: + to_delete = [] + active = self.get_active_domains() + jingrow.db.commit() + + for record in page["ResourceRecordSets"]: + # 结构: + # { + # "Name": "xxx", + # "Type": "CNAME", + # "ResourceRecords": [{"Value": "proxy"}], + # ... + # } + if record["Type"] == "CNAME" and record["ResourceRecords"][0]["Value"] in proxies: + domain = record["Name"].strip(".") + if domain not in active: + record["Name"] = domain + to_delete.append(record) + if to_delete: + self.delete_dns_records(to_delete) + + def update_dns_records_for_sites(self, sites: list[str], proxy_server: str): + """ + 批量为多个域名 CNAME 指向 proxy_server。 + 对 AWS Route53 => UPSERT + 对 DNSPod => Record.Modify / Create + """ + batch_size = 500 + for i in range(0, len(sites), batch_size): + chunk = sites[i : i + batch_size] + if self.dns_provider == "AWS Route 53": + changes = [] + for site in chunk: + changes.append({ + "Action": "UPSERT", + "ResourceRecordSet": { + "Name": site, + "Type": "CNAME", + "TTL": 600, + "ResourceRecords": [{"Value": proxy_server}], + }, + }) + self.boto3_client.change_resource_record_sets( + ChangeBatch={"Changes": changes}, HostedZoneId=self.hosted_zone + ) + elif self.dns_provider == "DNSPod": + for site in chunk: + self._dnspod_upsert_record( + record_name=site, + record_type="CNAME", + record_value=proxy_server, + ttl=600, + ) + + # ---------------------------------------------------------------- + # 查找活跃/被改名站点,以判断是否删除解析 + # ---------------------------------------------------------------- + def get_sites_being_renamed(self): + last_hour = datetime.now() - timedelta(hours=1) + renaming_sites = jingrow.get_all( + "Agent Job", + {"job_type": "Rename Site", "creation": (">=", last_hour)}, + pluck="request_data", + ) + return [json.loads(d_str)["new_name"] for d_str in renaming_sites] + + def get_active_site_domains(self): + return jingrow.get_all( + "Site Domain", {"domain": ("like", f"%{self.name}"), "status": "Active"}, pluck="name" + ) + + def get_active_domains(self): + active_sites = jingrow.get_all( + "Site", {"status": ("!=", "Archived"), "domain": self.name}, pluck="name" + ) + active_sites.extend(self.get_sites_being_renamed()) + active_sites.extend(self.get_active_site_domains()) + return active_sites + + # ---------------------------------------------------------------- + # DNSPod 相关调用 + # ---------------------------------------------------------------- + def _dnspod_list_records(self): + """ + DNSPod: Record.List + https://www.dnspod.cn/docs/records/record-list + """ + token = self.dnspod_login_token # "APPID,APPTOKEN" + domain_name = self.name + url = "https://dnsapi.cn/Record.List" + data = { + "login_token": token, + "format": "json", + "domain": domain_name, + } + resp = requests.post(url, data=data).json() + if resp["status"]["code"] != "1": + raise Exception(f"DNSPod List Records Failed: {resp['status']['message']}") + + record_list = [] + for r in resp["records"]: + record_list.append({ + "Name": r["name"] + "." 
+ domain_name, + "Type": r["type"], + "ResourceRecords": [{"Value": r["value"]}], + "TTL": r["ttl"], + "RecordId": r["id"], + }) + return record_list + + def _dnspod_delete_record(self, record): + """ + DNSPod: Record.Remove + https://www.dnspod.cn/docs/records/record-delete + """ + token = self.dnspod_login_token + domain_name = self.name + record_id = record.get("RecordId") + if not record_id: + return + + url = "https://dnsapi.cn/Record.Remove" + data = { + "login_token": token, + "format": "json", + "domain": domain_name, + "record_id": record_id, + } + resp = requests.post(url, data=data).json() + if resp["status"]["code"] != "1": + raise Exception(f"DNSPod Delete Record Failed: {resp['status']['message']}") + + def _dnspod_upsert_record(self, record_name: str, record_type: str, record_value: str, ttl=600): + """ + DNSPod: list first, then decide whether to Modify or Create. + https://www.dnspod.cn/docs/records/record-modify + https://www.dnspod.cn/docs/records/record-create + """ + token = self.dnspod_login_token + domain_name = self.name + sub_name = record_name.replace("." + domain_name, "") + + all_records = self._dnspod_list_records() + existing = None + for rec in all_records: + if rec["Name"].strip(".") == record_name.strip(".") and rec["Type"] == record_type: + existing = rec + break + + if existing: + # Record.Modify + url = "https://dnsapi.cn/Record.Modify" + data = { + "login_token": token, + "format": "json", + "domain": domain_name, + "record_id": existing["RecordId"], + "sub_domain": sub_name, + "record_type": record_type, + "record_line": "默认", + "value": record_value, + "ttl": ttl, + } + else: + # Record.Create + url = "https://dnsapi.cn/Record.Create" + data = { + "login_token": token, + "format": "json", + "domain": domain_name, + "sub_domain": sub_name, + "record_type": record_type, + "record_line": "默认", + "value": record_value, + "ttl": ttl, + } + + resp = requests.post(url, data=data).json() + if resp["status"]["code"] != "1": + raise Exception(f"DNSPod Upsert Record Failed: {resp['status']['message']}") + + +def cleanup_cname_records(): + """ + Used together with TLS Certificate and related documents to periodically clean up CNAME records that are no longer in use. + """ + domains = jingrow.get_all("Root Domain", pluck="name") + for domain_name in domains: + domain = RootDomain("Root Domain", domain_name) + domain.remove_unused_cname_records() diff --git a/jcloud/jcloud/pagetype/root_domain/test_root_domain.py b/jcloud/jcloud/pagetype/root_domain/test_root_domain.py new file mode 100644 index 0000000..efecb2a --- /dev/null +++ b/jcloud/jcloud/pagetype/root_domain/test_root_domain.py @@ -0,0 +1,74 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2021, JINGROW +# See license.txt + +import json +import unittest +from datetime import datetime, timedelta +from unittest.mock import Mock, patch + +import jingrow + +from jcloud.jcloud.pagetype.agent_job.agent_job import AgentJob +from jcloud.jcloud.pagetype.root_domain.root_domain import RootDomain + + +@patch.object(RootDomain, "after_insert", new=Mock()) +def create_test_root_domain( + name: str, + default_cluster: str = "Default", +): + root_domain = jingrow.get_pg( + { + "pagetype": "Root Domain", + "name": name, + "default_cluster": default_cluster, + "aws_access_key_id": "a", + "aws_secret_access_key": "b", + } + ).insert(ignore_if_duplicate=True) + root_domain.reload() + return root_domain + + +@patch.object(AgentJob, "after_insert", new=Mock()) +class TestRootDomain(unittest.TestCase): + def tearDown(self): + jingrow.db.rollback() + + def _create_fake_rename_job(self, site_name: str, creation=None): + from
jcloud.jcloud.pagetype.database_server.test_database_server import ( + create_test_database_server, + ) + from jcloud.jcloud.pagetype.proxy_server.test_proxy_server import ( + create_test_proxy_server, + ) + from jcloud.jcloud.pagetype.server.test_server import create_test_server + + creation = creation or jingrow.utils.now_datetime() + server = create_test_server( + create_test_proxy_server().name, create_test_database_server().name + ) + + job = jingrow.get_pg( + { + "pagetype": "Agent Job", + "job_type": "Rename Site", + "request_data": json.dumps({"new_name": site_name}), + "server_type": "Server", + "server": server.name, + "request_path": "fake/rename/path", + } + ).insert(ignore_if_duplicate=True) + job.db_set("creation", creation) + + def test_sites_being_renamed_are_considered_active(self): + new_site_name = "new_site.jingrow.cloud" + old_site_name = "old_site.jingrow.cloud" + root_domain = create_test_root_domain("jingrow.dev") + + self._create_fake_rename_job(new_site_name) + self._create_fake_rename_job(old_site_name, datetime.now() - timedelta(hours=2)) + + self.assertIn(new_site_name, root_domain.get_active_domains()) + self.assertNotIn(old_site_name, root_domain.get_active_domains()) diff --git a/jcloud/jcloud/pagetype/scheduled_auto_update_log/__init__.py b/jcloud/jcloud/pagetype/scheduled_auto_update_log/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/scheduled_auto_update_log/scheduled_auto_update_log.js b/jcloud/jcloud/pagetype/scheduled_auto_update_log/scheduled_auto_update_log.js new file mode 100644 index 0000000..edf3835 --- /dev/null +++ b/jcloud/jcloud/pagetype/scheduled_auto_update_log/scheduled_auto_update_log.js @@ -0,0 +1,7 @@ +// Copyright (c) 2021, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Scheduled Auto Update Log', { + // refresh: function(frm) { + // } +}); diff --git a/jcloud/jcloud/pagetype/scheduled_auto_update_log/scheduled_auto_update_log.json b/jcloud/jcloud/pagetype/scheduled_auto_update_log/scheduled_auto_update_log.json new file mode 100644 index 0000000..ddfef06 --- /dev/null +++ b/jcloud/jcloud/pagetype/scheduled_auto_update_log/scheduled_auto_update_log.json @@ -0,0 +1,116 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2021-11-06 22:52:46.091867", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "MyISAM", + "field_order": [ + "status", + "error", + "column_break_2", + "document_type", + "document_name", + "was_scheduled_for_time", + "was_scheduled_for_day", + "was_scheduled_for_frequency", + "was_scheduled_for_month_day", + "was_scheduled_for_month_end" + ], + "fields": [ + { + "default": "Success", + "fieldname": "status", + "fieldtype": "Select", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Status", + "options": "Failed\nSuccess", + "read_only": 1 + }, + { + "fieldname": "document_type", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Document Type", + "options": "PageType", + "read_only": 1 + }, + { + "fieldname": "document_name", + "fieldtype": "Dynamic Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Document Name", + "options": "document_type", + "read_only": 1 + }, + { + "fieldname": "error", + "fieldtype": "Text", + "label": "Error", + "read_only": 1 + }, + { + "fieldname": "column_break_2", + "fieldtype": "Column Break" + }, + { + "fieldname": "was_scheduled_for_time", + "fieldtype": "Time", + "label": "Was Scheduled For Time", + "read_only": 1 + }, + { + "fieldname": 
"was_scheduled_for_day", + "fieldtype": "Data", + "label": "Was Scheduled For Day", + "read_only": 1 + }, + { + "fieldname": "was_scheduled_for_frequency", + "fieldtype": "Data", + "label": "Was Scheduled For Frequency", + "read_only": 1 + }, + { + "fieldname": "was_scheduled_for_month_day", + "fieldtype": "Data", + "label": "Was Scheduled For Month Day", + "read_only": 1 + }, + { + "default": "0", + "fieldname": "was_scheduled_for_month_end", + "fieldtype": "Check", + "label": "Was Scheduled For Month End", + "read_only": 1 + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2021-11-08 23:18:25.490907", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Scheduled Auto Update Log", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "quick_entry": 1, + "sort_field": "modified", + "sort_order": "DESC", + "title_field": "document_name", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/scheduled_auto_update_log/scheduled_auto_update_log.py b/jcloud/jcloud/pagetype/scheduled_auto_update_log/scheduled_auto_update_log.py new file mode 100644 index 0000000..e7af438 --- /dev/null +++ b/jcloud/jcloud/pagetype/scheduled_auto_update_log/scheduled_auto_update_log.py @@ -0,0 +1,28 @@ +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class ScheduledAutoUpdateLog(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + document_name: DF.DynamicLink | None + document_type: DF.Link | None + error: DF.Text | None + status: DF.Literal["Failed", "Success"] + was_scheduled_for_day: DF.Data | None + was_scheduled_for_frequency: DF.Data | None + was_scheduled_for_month_day: DF.Data | None + was_scheduled_for_month_end: DF.Check + was_scheduled_for_time: DF.Time | None + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/scheduled_auto_update_log/test_scheduled_auto_update_log.py b/jcloud/jcloud/pagetype/scheduled_auto_update_log/test_scheduled_auto_update_log.py new file mode 100644 index 0000000..f99f1be --- /dev/null +++ b/jcloud/jcloud/pagetype/scheduled_auto_update_log/test_scheduled_auto_update_log.py @@ -0,0 +1,9 @@ +# Copyright (c) 2021, JINGROW +# See license.txt + +# import jingrow +import unittest + + +class TestScheduledAutoUpdateLog(unittest.TestCase): + pass diff --git a/jcloud/jcloud/pagetype/security_update/__init__.py b/jcloud/jcloud/pagetype/security_update/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/security_update/security_update.js b/jcloud/jcloud/pagetype/security_update/security_update.js new file mode 100644 index 0000000..e242a3a --- /dev/null +++ b/jcloud/jcloud/pagetype/security_update/security_update.js @@ -0,0 +1,8 @@ +// Copyright (c) 2023, JINGROW +// For license information, please see license.txt + +// jingrow.ui.form.on("Security Update", { +// refresh(frm) { + +// }, +// }); diff --git a/jcloud/jcloud/pagetype/security_update/security_update.json b/jcloud/jcloud/pagetype/security_update/security_update.json new file mode 100644 index 0000000..5336605 --- /dev/null +++ 
b/jcloud/jcloud/pagetype/security_update/security_update.json @@ -0,0 +1,142 @@ +{ + "actions": [], + "creation": "2023-08-03 15:58:45.992061", + "default_view": "List", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "server_type", + "server", + "column_break_4pwt", + "datetime", + "job_status", + "section_break_gadl", + "package", + "version", + "column_break_8rko", + "priority", + "priority_level", + "security_update_status", + "package_meta_tab", + "package_meta", + "change_log_tab", + "change_log" + ], + "fields": [ + { + "fieldname": "server_type", + "fieldtype": "Select", + "label": "Server Type", + "options": "Server\nDatabase Server\nProxy Server" + }, + { + "fieldname": "server", + "fieldtype": "Dynamic Link", + "in_list_view": 1, + "label": "Server", + "options": "server_type" + }, + { + "fieldname": "datetime", + "fieldtype": "Datetime", + "label": "Datetime" + }, + { + "fieldname": "column_break_4pwt", + "fieldtype": "Column Break" + }, + { + "fieldname": "section_break_gadl", + "fieldtype": "Section Break" + }, + { + "fieldname": "package", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Package", + "search_index": 1 + }, + { + "fieldname": "priority", + "fieldtype": "Select", + "in_list_view": 1, + "label": "Priority", + "options": "High\nMedium\nLow" + }, + { + "fieldname": "column_break_8rko", + "fieldtype": "Column Break" + }, + { + "fieldname": "job_status", + "fieldtype": "Select", + "label": "Job Status", + "options": "Pending\nWIP\nCompleted", + "read_only": 1 + }, + { + "fieldname": "version", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Version", + "search_index": 1 + }, + { + "fieldname": "security_update_status", + "fieldtype": "Select", + "label": "Security Update Status", + "options": "Pending\nImplemented" + }, + { + "fieldname": "change_log", + "fieldtype": "Code", + "label": "Change Log" + }, + { + "fieldname": "change_log_tab", + "fieldtype": "Tab Break", + "label": "Change Log" + }, + { + "fieldname": "package_meta_tab", + "fieldtype": "Tab Break", + "label": "Package Meta" + }, + { + "fieldname": "package_meta", + "fieldtype": "Code", + "label": "Package Meta" + }, + { + "fieldname": "priority_level", + "fieldtype": "Select", + "label": "Priority Level", + "options": "1\n2\n3" + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2023-08-09 14:06:04.009765", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Security Update", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/security_update/security_update.py b/jcloud/jcloud/pagetype/security_update/security_update.py new file mode 100644 index 0000000..8a87611 --- /dev/null +++ b/jcloud/jcloud/pagetype/security_update/security_update.py @@ -0,0 +1,164 @@ +# Copyright (c) 2023, JINGROW +# For license information, please see license.txt + +import re + +import jingrow +from jingrow.model.document import Document +from jingrow.utils import now_datetime + +from jcloud.runner import Ansible +from jcloud.utils import log_error + + +class SecurityUpdate(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
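The static helpers further down in this controller parse two playbook outputs: an `apt list`-style listing whose first line is `Listing...`, and a package metadata blob containing `Version:` and `Priority:` lines, which gets mapped to a High/Medium/Low priority and a numeric level. A standalone sketch of the same parsing over made-up sample output (the sample strings are illustrative only):

import re

# Made-up sample output shaped like what the helpers below consume.
package_listing = "Listing...\nopenssl/jammy-security 3.0.2-0ubuntu1.12 amd64\nlibssl3/jammy-security 3.0.2-0ubuntu1.12 amd64"
package_meta = "Package: openssl\nVersion: 3.0.2-0ubuntu1.12\nPriority: standard"

# Package names are the text before the first "/", skipping the "Listing..." header.
packages = [
    line.split("/")[0]
    for line in package_listing.split("\n")
    if line and line != "Listing..."
]

version_match = re.search("Version:(.*)", package_meta)
version = version_match.group(1).strip() if version_match else None

priority_map = {"required": "High", "standard": "Medium", "optional": "Low"}
level_map = {"High": 1, "Medium": 2, "Low": 3}
priority_match = re.search("Priority:(.*)", package_meta)
priority = priority_map.get(priority_match.group(1).strip(), "Low") if priority_match else "Low"

print(packages, version, priority, level_map[priority])
# ['openssl', 'libssl3'] 3.0.2-0ubuntu1.12 Medium 2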
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + change_log: DF.Code | None + datetime: DF.Datetime | None + job_status: DF.Literal["Pending", "WIP", "Completed"] + package: DF.Data | None + package_meta: DF.Code | None + priority: DF.Literal["High", "Medium", "Low"] + priority_level: DF.Literal["1", "2", "3"] + security_update_status: DF.Literal["Pending", "Implemented"] + server: DF.DynamicLink | None + server_type: DF.Literal["Server", "Database Server", "Proxy Server"] + version: DF.Data | None + # end: auto-generated types + + @staticmethod + def fetch_security_updates(server_obj): + """Fetch security updates""" + try: + ansible = Ansible( + playbook="security_update.yml", + server=server_obj, + ) + play = ansible.run() + if play.status == "Success": + package_list = SecurityUpdate._prepare_package_list(ansible.play) + SecurityUpdate._fetch_package_meta(package_list, server_obj) + + except Exception: + log_error("Fetch security updates exception", server=server_obj.as_dict()) + + @staticmethod + def _prepare_package_list(play): + packages = [] + filters = {"task": "Fetch packages due for security updates", "play": play} + packages_str = jingrow.db.get_value("Ansible Task", filters, "output") + + if packages_str: + for package_string in packages_str.split("\n"): + package_name = package_string.split("/")[0] + + if package_name == "Listing...": + continue + + packages.append(package_name) + + return packages + + @staticmethod + def _fetch_package_meta(package_list, server_obj): + package_list = package_list[:6] + + for package in package_list: + try: + ansible = Ansible( + playbook="security_update.yml", + server=server_obj, + variables={"fetch_package_meta": True, "package": package}, + ) + play = ansible.run() + if play.status == "Success": + SecurityUpdate._create_security_update(package, ansible.play, server_obj) + except Exception: + log_error("Fetch package meta exception", server=server_obj.as_dict()) + + @staticmethod + def _create_security_update(package, play, server_obj): + package_meta = SecurityUpdate.get_package_meta_from_log(play) + package_change_log = SecurityUpdate.get_package_change_log(play) + version = SecurityUpdate.get_package_version(package_meta) + priority, level = SecurityUpdate.get_package_priority_and_level(package_meta) + + if jingrow.db.exists( + "Security Update", + {"package": package, "server": server_obj.name, "version": version}, + ): + return + + try: + security_update = jingrow.new_pg("Security Update") + security_update.update( + { + "package": package, + "server_type": server_obj.pagetype, + "server": server_obj.name, + "package_meta": package_meta, + "change_log": package_change_log, + "version": version, + "priority": priority, + "datetime": now_datetime(), + "priority_level": level, + } + ) + security_update.insert(ignore_permissions=True) + jingrow.db.commit() + except Exception: + log_error("Create security update exception", server=server_obj.as_dict()) + + @staticmethod + def get_package_meta_from_log(play): + filters = {"task": "Fetch package meta", "play": play} + package_meta_str = jingrow.db.get_value("Ansible Task", filters, "output") + + if package_meta_str: + return package_meta_str + + return None + + @staticmethod + def get_package_change_log(play): + filters = {"task": "Fetch package change log", "play": play} + package_change_log = jingrow.db.get_value("Ansible Task", filters, "output") + + if package_change_log: + return package_change_log + + return None + + @staticmethod + def 
get_package_version(package_meta): + version = re.search("Version:(.*)", package_meta) + + try: + return version.group(1) + except Exception: + pass + + return None + + @staticmethod + def get_package_priority_and_level(package_meta): + priority_mapper = {"required": "High", "standard": "Medium", "optional": "Low"} + priority_level_mapper = {"High": 1, "Medium": 2, "Low": 3} + priority = re.search("Priority:(.*)", package_meta) + + try: + priority = priority_mapper.get(priority.group(1).strip(), "Low") + priority_level = priority_level_mapper.get(priority, 3) + + return priority, priority_level + except Exception: + pass + + return "Low", 3 diff --git a/jcloud/jcloud/pagetype/security_update/test_security_update.py b/jcloud/jcloud/pagetype/security_update/test_security_update.py new file mode 100644 index 0000000..3b08256 --- /dev/null +++ b/jcloud/jcloud/pagetype/security_update/test_security_update.py @@ -0,0 +1,9 @@ +# Copyright (c) 2023, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests.utils import JingrowTestCase + + +class TestSecurityUpdate(JingrowTestCase): + pass diff --git a/jcloud/jcloud/pagetype/security_update_check/__init__.py b/jcloud/jcloud/pagetype/security_update_check/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/security_update_check/security_update_check.js b/jcloud/jcloud/pagetype/security_update_check/security_update_check.js new file mode 100644 index 0000000..834a417 --- /dev/null +++ b/jcloud/jcloud/pagetype/security_update_check/security_update_check.js @@ -0,0 +1,7 @@ +// Copyright (c) 2022, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Security Update Check', { + // refresh: function(frm) { + // } +}); diff --git a/jcloud/jcloud/pagetype/security_update_check/security_update_check.json b/jcloud/jcloud/pagetype/security_update_check/security_update_check.json new file mode 100644 index 0000000..20a4fb8 --- /dev/null +++ b/jcloud/jcloud/pagetype/security_update_check/security_update_check.json @@ -0,0 +1,72 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2022-05-13 09:24:36.476122", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "server_type", + "server", + "play", + "status" + ], + "fields": [ + { + "fieldname": "server_type", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Server Type", + "options": "PageType", + "reqd": 1 + }, + { + "fieldname": "server", + "fieldtype": "Dynamic Link", + "in_list_view": 1, + "label": "server", + "options": "server_type", + "reqd": 1 + }, + { + "fieldname": "play", + "fieldtype": "Link", + "label": "play", + "options": "Ansible Play", + "read_only": 1 + }, + { + "fieldname": "status", + "fieldtype": "Select", + "in_list_view": 1, + "label": "status", + "options": "Pending\nRunning\nSuccess\nFailure", + "read_only": 1, + "reqd": 1 + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2022-09-29 20:57:47.002882", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Security Update Check", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/security_update_check/security_update_check.py 
b/jcloud/jcloud/pagetype/security_update_check/security_update_check.py new file mode 100644 index 0000000..f4836a6 --- /dev/null +++ b/jcloud/jcloud/pagetype/security_update_check/security_update_check.py @@ -0,0 +1,73 @@ +# Copyright (c) 2022, JINGROW +# For license information, please see license.txt + +import jingrow +from jingrow.model.document import Document + +from jcloud.jcloud.pagetype.telegram_message.telegram_message import TelegramMessage +from jcloud.runner import Ansible +from jcloud.utils import log_error + + +class SecurityUpdateCheck(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + play: DF.Link | None + server: DF.DynamicLink + server_type: DF.Link + status: DF.Literal["Pending", "Running", "Success", "Failure"] + # end: auto-generated types + + def after_insert(self): + self.start() + + @jingrow.whitelist() + def start(self): + self.status = "Pending" + self.save() + jingrow.db.commit() + jingrow.enqueue_pg(self.pagetype, self.name, "_start", queue="short") + + def _start(self): + try: + _server = jingrow.get_pg(self.server_type, self.server) + ansible = Ansible( + playbook="security_update.yml", + server=_server, + user=_server.ssh_user or "root", + port=_server.ssh_port or 22, + variables={"validate_pending_security_updates": True}, + ) + self.reload() + self.play = ansible.play + self.status = "Running" + self.save() + jingrow.db.commit() + play = ansible.run() + if play.status == "Success": + self.succeed() + else: + self.fail() + except Exception: + log_error("Security Update Check Exception", scan=self.as_dict()) + self.fail() + self.save() + + def succeed(self): + self.status = "Success" + + def fail(self): + self.status = "Failure" + domain = jingrow.get_value("Jcloud Settings", "Jcloud Settings", "domain") + message = f""" +Security Update Check for *{self.server}* failed. 
+ +[Security Update Check]({domain}{self.get_url()}) +""" + TelegramMessage.enqueue(message=message) diff --git a/jcloud/jcloud/pagetype/security_update_check/test_security_update_check.py b/jcloud/jcloud/pagetype/security_update_check/test_security_update_check.py new file mode 100644 index 0000000..f218aa4 --- /dev/null +++ b/jcloud/jcloud/pagetype/security_update_check/test_security_update_check.py @@ -0,0 +1,9 @@ +# Copyright (c) 2022, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests.utils import JingrowTestCase + + +class TestSecurityUpdateCheck(JingrowTestCase): + pass diff --git a/jcloud/jcloud/pagetype/self_hosted_server/__init__.py b/jcloud/jcloud/pagetype/self_hosted_server/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/self_hosted_server/self_hosted_server.js b/jcloud/jcloud/pagetype/self_hosted_server/self_hosted_server.js new file mode 100644 index 0000000..3ec26e3 --- /dev/null +++ b/jcloud/jcloud/pagetype/self_hosted_server/self_hosted_server.js @@ -0,0 +1,104 @@ +// Copyright (c) 2023, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Self Hosted Server', { + refresh: function (frm) { + frm.add_web_link( + `/dashboard/servers/${frm.pg.name}`, + __('Visit Dashboard'), + ); + [ + [__('Ping Server'), 'ping_ansible', false], + [ + __('Create Proxy Server'), + 'create_proxy_server', + false, + !frm.pg.proxy_created && frm.pg.dedicated_proxy, + ], + [ + __('Create Database Server'), + 'create_database_server', + false, + frm.pg.proxy_created && + frm.pg.different_database_server && + !frm.pg.database_setup, + ], + [ + __('Create App Server'), + 'create_application_server', + false, + frm.pg.database_setup && !frm.pg.server_created, + ], + [__('Setup Nginx'), '_setup_nginx', false], + [__('Create TLS Certificate'), 'create_tls_certs', true], + [__('Update TLS'), 'update_tls', false], + [ + __('Restore Files from Existing Sites'), + 'restore_files', + true, + frm.pg.existing_bench_present, + ], + [ + __('Get Apps from Existing Bench'), + 'fetch_apps_and_sites', + false, + frm.pg.existing_bench_present, + ], + [ + __('Create a Release Group for Existing Bench'), + 'create_new_rg', + false, + frm.pg.existing_bench_present, + ], + [ + __('Create Sites from Existing Bench'), + 'create_new_sites', + true, + frm.pg.existing_bench_present && frm.pg.release_group, + ], + [__('Fetch System Details'), 'fetch_system_specifications', false], + [__('Fetch Ram'), 'fetch_system_ram', false, !frm.pg.ram], + [__('Fetch Private IP'), 'fetch_private_ip', false, !frm.pg.private_ip], + [__('Fetch System Details'), 'fetch_system_specifications', false], + ].forEach(([label, method, confirm, condition]) => { + if (typeof condition === 'undefined' || condition) { + frm.add_custom_button( + label, + () => { + if (confirm) { + jingrow.confirm( + `Are you sure you want to ${label.toLowerCase()}?`, + () => + frm.call(method).then((r) => { + if (r.message) { + jingrow.msgprint(r.message); + } else { + frm.refresh(); + } + }), + ); + } else { + frm.call(method).then((r) => { + if (r.message) { + jingrow.msgprint(r.message); + } else { + frm.refresh(); + } + }); + } + }, + __('Actions'), + ); + } + }); + }, + onload: (frm) => { + frm.set_query('server', () => { + return { + filters: { + is_self_hosted: true, + }, + }; + }); + }, +}); diff --git a/jcloud/jcloud/pagetype/self_hosted_server/self_hosted_server.json b/jcloud/jcloud/pagetype/self_hosted_server/self_hosted_server.json new file mode 100644 index 
0000000..c2c842c --- /dev/null +++ b/jcloud/jcloud/pagetype/self_hosted_server/self_hosted_server.json @@ -0,0 +1,489 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2023-03-07 11:56:33.641999", + "default_view": "List", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "title", + "new_server", + "different_database_server", + "server_url", + "hostname", + "domain", + "column_break_3eap", + "status", + "cluster", + "team", + "release_group", + "server_section", + "server_created", + "server", + "ip", + "ssh_user", + "column_break_smwr", + "existing_bench_present", + "plan", + "private_ip", + "ssh_port", + "section_break_0fky", + "dedicated_proxy", + "proxy_public_ip", + "proxy_created", + "proxy_server", + "column_break_bcsw", + "proxy_private_ip", + "agent_password", + "database_section", + "is_managed_database", + "database_setup", + "mariadb_ip", + "mariadb_root_user", + "database_server", + "database_service", + "column_break_qvmo", + "mariadb_private_ip", + "mariadb_root_password", + "database_plan", + "existing_bench_tab", + "bench_directory", + "column_break_yb8y", + "jingrow_version", + "section_break_33uh", + "apps", + "sites", + "system_information_tab", + "application_server_section", + "vendor", + "vcpus", + "ram", + "total_storage", + "swap_total", + "column_break_ep4j", + "instance_type", + "architecture", + "processor", + "distribution", + "database_server_section", + "db_vcpus", + "db_total_storage", + "column_break_krqz", + "db_ram" + ], + "fields": [ + { + "depends_on": "eval:pg.server_created", + "fieldname": "server", + "fieldtype": "Link", + "label": "Server", + "options": "Server", + "search_index": 1 + }, + { + "fieldname": "bench_directory", + "fieldtype": "Data", + "label": "Bench Directory", + "mandatory_depends_on": "eval:pg.existing_bench_present" + }, + { + "fetch_from": ".ip", + "fieldname": "ip", + "fieldtype": "Data", + "in_list_view": 1, + "label": "IP", + "reqd": 1 + }, + { + "default": "root", + "fetch_from": ".ssh_user", + "fieldname": "ssh_user", + "fieldtype": "Data", + "label": "SSH User" + }, + { + "fieldname": "column_break_3eap", + "fieldtype": "Column Break" + }, + { + "collapsible": 1, + "collapsible_depends_on": "eval:pg.different_database_server", + "fieldname": "database_section", + "fieldtype": "Section Break", + "label": "Database" + }, + { + "depends_on": "eval:!pg.is_managed_database", + "fieldname": "mariadb_root_user", + "fieldtype": "Data", + "label": "MariaDB Root User" + }, + { + "depends_on": "eval:pg.different_database_server && !pg.is_managed_database", + "fieldname": "mariadb_ip", + "fieldtype": "Data", + "label": "MariaDB Public IP" + }, + { + "fieldname": "column_break_qvmo", + "fieldtype": "Column Break" + }, + { + "depends_on": "eval:!pg.is_managed_database", + "description": "This will be the MariaDB Root password if you're setting up a new server. 
If you're server already has a MariaDB root password, please put in the same password", + "fieldname": "mariadb_root_password", + "fieldtype": "Password", + "label": "MariaDB Root Password", + "mandatory_depends_on": "eval:pg.existing_bench_present", + "reqd": 1 + }, + { + "fetch_from": ".private_ip", + "fieldname": "private_ip", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Private IP" + }, + { + "fetch_from": "server.team", + "fetch_if_empty": 1, + "fieldname": "team", + "fieldtype": "Link", + "label": "Team", + "options": "Team", + "reqd": 1 + }, + { + "fetch_from": "release_group.version", + "fieldname": "jingrow_version", + "fieldtype": "Data", + "label": "Jingrow Version", + "read_only": 1 + }, + { + "fieldname": "apps", + "fieldtype": "Table", + "label": "Apps", + "options": "Site Analytics App" + }, + { + "fieldname": "section_break_33uh", + "fieldtype": "Section Break" + }, + { + "fieldname": "sites", + "fieldtype": "Table", + "label": "Sites and Apps", + "options": "Self Hosted Site Apps" + }, + { + "depends_on": "eval: pg.existing_bench_present", + "fieldname": "existing_bench_tab", + "fieldtype": "Tab Break", + "label": "Existing Bench" + }, + { + "fieldname": "title", + "fieldtype": "Data", + "label": "Title" + }, + { + "default": "0", + "fieldname": "existing_bench_present", + "fieldtype": "Check", + "label": "Existing Bench Present" + }, + { + "fieldname": "server_section", + "fieldtype": "Section Break", + "label": "Server" + }, + { + "fieldname": "column_break_yb8y", + "fieldtype": "Column Break" + }, + { + "fieldname": "hostname", + "fieldtype": "Data", + "label": "Hostname", + "read_only": 1 + }, + { + "fieldname": "domain", + "fieldtype": "Data", + "label": "Domain", + "read_only": 1 + }, + { + "fieldname": "column_break_smwr", + "fieldtype": "Column Break" + }, + { + "default": "0", + "fieldname": "server_created", + "fieldtype": "Check", + "label": "Server Created" + }, + { + "default": "0", + "fieldname": "database_setup", + "fieldtype": "Check", + "label": "Database Setup" + }, + { + "depends_on": "eval:pg.database_setup && !pg.is_managed_database", + "fieldname": "database_server", + "fieldtype": "Link", + "label": "Database Server", + "options": "Database Server", + "search_index": 1 + }, + { + "fieldname": "release_group", + "fieldtype": "Link", + "in_standard_filter": 1, + "label": "Release Group", + "options": "Release Group" + }, + { + "default": "Unreachable", + "fieldname": "status", + "fieldtype": "Select", + "hidden": 1, + "in_list_view": 1, + "label": "Status", + "options": "Active\nPending\nBroken\nArchived\nUnreachable", + "read_only": 1 + }, + { + "default": "22", + "fieldname": "ssh_port", + "fieldtype": "Int", + "label": "SSH Port" + }, + { + "default": "0", + "fieldname": "new_server", + "fieldtype": "Check", + "label": "New Server" + }, + { + "collapsible": 1, + "collapsible_depends_on": "eval:pg.dedicated_proxy", + "fieldname": "section_break_0fky", + "fieldtype": "Section Break", + "label": "Proxy" + }, + { + "fieldname": "proxy_server", + "fieldtype": "Link", + "label": "Proxy Server", + "mandatory_depends_on": "eval:pg.proxy_created", + "options": "Proxy Server", + "search_index": 1 + }, + { + "fieldname": "column_break_bcsw", + "fieldtype": "Column Break" + }, + { + "default": "0", + "fieldname": "proxy_created", + "fieldtype": "Check", + "label": "Proxy Created" + }, + { + "fieldname": "agent_password", + "fieldtype": "Password", + "hidden": 1, + "label": "Agent Password" + }, + { + "fetch_from": "proxy_server.cluster", + 
"fetch_if_empty": 1, + "fieldname": "cluster", + "fieldtype": "Link", + "label": "Cluster", + "options": "Cluster" + }, + { + "fieldname": "server_url", + "fieldtype": "Data", + "label": "Server URL" + }, + { + "fieldname": "plan", + "fieldtype": "Link", + "label": "Plan", + "options": "Server Plan" + }, + { + "default": "0", + "fieldname": "different_database_server", + "fieldtype": "Check", + "label": "Different Database Server" + }, + { + "fieldname": "ram", + "fieldtype": "Data", + "label": "RAM" + }, + { + "fieldname": "system_information_tab", + "fieldtype": "Tab Break", + "label": "System Information" + }, + { + "fieldname": "vcpus", + "fieldtype": "Data", + "label": "vCPUs" + }, + { + "fieldname": "total_storage", + "fieldtype": "Data", + "label": "Total Storage" + }, + { + "fieldname": "swap_total", + "fieldtype": "Data", + "label": "Swap Total" + }, + { + "fieldname": "column_break_ep4j", + "fieldtype": "Column Break" + }, + { + "fieldname": "instance_type", + "fieldtype": "Data", + "label": "Instance Type" + }, + { + "fieldname": "architecture", + "fieldtype": "Data", + "label": "Architecture" + }, + { + "fieldname": "processor", + "fieldtype": "Data", + "label": "Processor" + }, + { + "fieldname": "distribution", + "fieldtype": "Data", + "label": "Distribution" + }, + { + "fieldname": "vendor", + "fieldtype": "Data", + "label": "Vendor" + }, + { + "fieldname": "proxy_public_ip", + "fieldtype": "Data", + "label": "Proxy Public IP" + }, + { + "fieldname": "proxy_private_ip", + "fieldtype": "Data", + "label": "Proxy Private IP" + }, + { + "depends_on": "eval:pg.different_database_server && !pg.is_managed_database", + "fieldname": "mariadb_private_ip", + "fieldtype": "Data", + "label": "MariaDB Private IP" + }, + { + "fieldname": "database_plan", + "fieldtype": "Link", + "label": "Database Plan", + "options": "Server Plan" + }, + { + "default": "0", + "fieldname": "dedicated_proxy", + "fieldtype": "Check", + "label": "Dedicated Proxy" + }, + { + "fieldname": "application_server_section", + "fieldtype": "Section Break", + "label": "Application Server" + }, + { + "fieldname": "database_server_section", + "fieldtype": "Section Break", + "label": "Database Server" + }, + { + "fieldname": "db_vcpus", + "fieldtype": "Data", + "label": "vCPUs" + }, + { + "fieldname": "db_total_storage", + "fieldtype": "Data", + "label": "Total Storage" + }, + { + "fieldname": "column_break_krqz", + "fieldtype": "Column Break" + }, + { + "fieldname": "db_ram", + "fieldtype": "Data", + "label": "RAM" + }, + { + "default": "0", + "fieldname": "is_managed_database", + "fieldtype": "Check", + "label": "Is Managed Database" + }, + { + "default": "AWS - RDS", + "depends_on": "eval:pg.is_managed_database", + "fieldname": "database_service", + "fieldtype": "Select", + "label": "Database Service", + "options": "AWS - RDS" + } + ], + "links": [], + "modified": "2024-05-29 11:41:41.304954", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Self Hosted Server", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + }, + { + "create": 1, + "read": 1, + "role": "Jcloud Admin", + "write": 1 + }, + { + "create": 1, + "read": 1, + "role": "Jcloud Member", + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "track_changes": 1 +} \ No newline at end of file diff --git 
a/jcloud/jcloud/pagetype/self_hosted_server/self_hosted_server.py b/jcloud/jcloud/pagetype/self_hosted_server/self_hosted_server.py new file mode 100644 index 0000000..f271246 --- /dev/null +++ b/jcloud/jcloud/pagetype/self_hosted_server/self_hosted_server.py @@ -0,0 +1,838 @@ +# Copyright (c) 2023, JINGROW +# For license information, please see license.txt + +from __future__ import annotations + +import json + +import jingrow +from jingrow.model.document import Document +from jingrow.model.naming import make_autoname + +from jcloud.runner import Ansible +from jcloud.utils import log_error + +# from tldextract import extract as sdext + + +class SelfHostedServer(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + from jcloud.jcloud.pagetype.self_hosted_site_apps.self_hosted_site_apps import ( + SelfHostedSiteApps, + ) + from jcloud.jcloud.pagetype.site_analytics_app.site_analytics_app import SiteAnalyticsApp + + agent_password: DF.Password | None + apps: DF.Table[SiteAnalyticsApp] + architecture: DF.Data | None + bench_directory: DF.Data | None + cluster: DF.Link | None + database_plan: DF.Link | None + database_server: DF.Link | None + database_service: DF.Literal["AWS - RDS"] + database_setup: DF.Check + db_ram: DF.Data | None + db_total_storage: DF.Data | None + db_vcpus: DF.Data | None + dedicated_proxy: DF.Check + different_database_server: DF.Check + distribution: DF.Data | None + domain: DF.Data | None + existing_bench_present: DF.Check + jingrow_version: DF.Data | None + hostname: DF.Data | None + instance_type: DF.Data | None + ip: DF.Data + is_managed_database: DF.Check + mariadb_ip: DF.Data | None + mariadb_private_ip: DF.Data | None + mariadb_root_password: DF.Password + mariadb_root_user: DF.Data | None + new_server: DF.Check + plan: DF.Link | None + private_ip: DF.Data | None + processor: DF.Data | None + proxy_created: DF.Check + proxy_private_ip: DF.Data | None + proxy_public_ip: DF.Data | None + proxy_server: DF.Link | None + ram: DF.Data | None + release_group: DF.Link | None + server: DF.Link | None + server_created: DF.Check + server_url: DF.Data | None + sites: DF.Table[SelfHostedSiteApps] + ssh_port: DF.Int + ssh_user: DF.Data | None + status: DF.Literal["Active", "Pending", "Broken", "Archived", "Unreachable"] + swap_total: DF.Data | None + team: DF.Link + title: DF.Data | None + total_storage: DF.Data | None + vcpus: DF.Data | None + vendor: DF.Data | None + # end: auto-generated types + + def before_insert(self): + self.validate_is_duplicate() + + def autoname(self): + series = make_autoname("SHS-.#####") + self.name = f"{series}.{self.hybrid_domain}" + + self.hostname = series + self.domain = self.hybrid_domain + + def validate(self): + self.set_proxy_details() + self.set_mariadb_config() + self.set_database_plan() + + if not self.agent_password: + self.agent_password = jingrow.generate_hash(length=32) + + def validate_is_duplicate(self): + filters = { + "ip": self.ip, + "private_ip": self.private_ip, + "mariadb_ip": self.mariadb_ip, + "mariadb_private_ip": self.mariadb_private_ip, + "status": ("not in", ["Archived", "Broken"]), + } + duplicate_server = jingrow.db.get_value("Self Hosted Server", filters, pluck="name") + + if duplicate_server: + raise jingrow.DuplicateEntryError(self.pagetype, duplicate_server) + + def set_proxy_details(self): + if self.proxy_created or self.proxy_server: + self.proxy_public_ip, 
self.proxy_private_ip = jingrow.db.get_value( + "Proxy Server", self.proxy_server, ["ip", "private_ip"] + ) + + def set_mariadb_config(self): + if not self.mariadb_ip: + self.mariadb_ip = self.ip + if not self.mariadb_private_ip: + self.mariadb_private_ip = self.private_ip + if not self.mariadb_root_user: + self.mariadb_root_user = "root" + if not self.mariadb_root_password: + self.mariadb_root_password = jingrow.generate_hash(length=32) + + @jingrow.whitelist() + def fetch_apps_and_sites(self): + jingrow.enqueue_pg(self.pagetype, self.name, "_get_apps", queue="long", timeout=1200) + jingrow.enqueue_pg(self.pagetype, self.name, "_get_sites", queue="long", timeout=1200) + + @jingrow.whitelist() + def ping_ansible(self): + try: + ansible = Ansible( + playbook="ping.yml", + server=self, + user=self.ssh_user or "root", + port=self.ssh_port or 22, + ) + play = ansible.run() + if play.status == "Success" and self.status == "Unreachable": + self.status = "Pending" + self.save() + self.reload() + except Exception: + log_error("Server Ping Exception", server=self.as_dict()) + + def _get_sites(self): + """ + Get Sites from Existing Bench in the server + """ + try: + ansible = Ansible( + playbook="get_sites.yml", + server=self, + user=self.ssh_user or "root", + port=self.ssh_port or "22", + variables={"bench_path": self.bench_directory}, + ) + play = ansible.run() + if play.status == "Success": + self.append_to_sites() + except Exception: + log_error("Self Hosted Sites Issue", server=self.as_dict()) + + def _get_apps(self): + """ + Get Apps from Existing Bench in the server + """ + try: + ansible = Ansible( + playbook="get_apps.yml", + server=self, + user=self.ssh_user or "root", + port=self.ssh_port or "22", + variables={"bench_path": self.bench_directory}, + ) + play = ansible.run() + if play.status == "Success": + self.append_to_apps() + except Exception: + log_error("Self Hosted Apps Issue", server=self.as_dict()) + + def map_branch_to_version(self, branch): + versions = { + "version-13": "Version 13", + "version-14": "Version 14", + "develop": "Nightly", + } + return versions[branch] + + def append_to_sites(self): + """ + Append Sites and the app used by each site from existing bench to Sites Child table + """ + ansible_play = jingrow.get_last_pg("Ansible Play", {"server": self.name}) + ansible_task_op = jingrow.get_value( + "Ansible Task", + {"play": ansible_play.name, "task": "Get Sites from Current Bench"}, + "output", + ) + sites = json.loads(ansible_task_op) + try: + for k, v in sites.items(): + self.append("sites", {"site_name": k, "apps": ",".join(map(str, v))}) + self.save() + self.append_site_configs(ansible_play.name) + self.status = "Active" + except Exception: + self.status = "Broken" + log_error("Append to Sites Failed", server=self.as_dict()) + self.save() + + def append_to_apps(self): + """ + Append apps from existing bench to `apps` child table + Appends app name, app version and app branch + """ + ansible_play = jingrow.get_last_pg("Ansible Play", {"server": self.name}) + ansible_task_op = jingrow.get_value( + "Ansible Task", + {"play": ansible_play.name, "task": "Get Versions from Current Bench"}, + "output", + ).replace("'", '"') + task_output = json.loads(ansible_task_op) + temp_task_result = task_output # Removing risk of mutating the same loop variable + for i, app in enumerate(temp_task_result): # Rearrange JSON if jingrow isn't first app + if app["app"] == "jingrow" and i > 0: + task_output[i], task_output[0] = task_output[0], task_output[i] + try: + for app in 
task_output: + self.append( + "apps", + {"app_name": app["app"], "version": app["version"], "branch": app["branch"]}, + ) + if app["app"] == "jingrow": + self.jingrow_version = self.map_branch_to_version(app["branch"]) + self.status = "Active" + + except Exception: + self.status = "Broken" + log_error("Appending Apps Error", server=self.as_dict()) + self.save() + + @jingrow.whitelist() + def create_new_rg(self): + """ + Create **a** Release Group for the apps in the Existing bench + """ + ansible_play = jingrow.get_last_pg( + "Ansible Play", + {"server": self.server, "play": "Get Bench data from Self Hosted Server"}, + ) + ansible_task_op = jingrow.get_value( + "Ansible Task", + {"play": ansible_play.name, "task": "Get Apps for Release Group"}, + "output", + ).replace("'", '"') + task_result = json.loads(ansible_task_op) + temp_task_result = task_result # Removing risk of mutating the same loop variable + for i, app in enumerate(temp_task_result): # Rearrange JSON if jingrow isn't first app + if app["app"] == "jingrow" and i > 0: + task_result[i], task_result[0] = task_result[0], task_result[i] + release_group = jingrow.new_pg("Release Group") + release_group.title = f"{self.server}-bench" + branches = [] + try: + for app in task_result: + branches.append(app["branch"]) + if not jingrow.db.exists("App Source", {"app": app["app"], "branch": app["branch"]}): # noqa: SIM102 + if not jingrow.db.exists("App", {"_newname": app["app"]}): + app_pg = jingrow.get_pg( + { + "pagetype": "App", + "_newname": app["app"], + "title": app["app"].title(), + "name": app["app"], + } + ) + app_pg.insert() + app_source_pg = jingrow.get_pg( + { + "pagetype": "App Source", + "app": app["app"], + "repository_url": app["remote"], + "team": "Administrator", + "branch": app["branch"], + } + ) + app_source_pg.append("versions", {"version": self.jingrow_version}) + app_source_pg.insert() + jingrow.db.commit() + release_group.append("apps", {"app": app["app"], "source": app_source_pg.name}) + release_group.append( + "apps", + { + "app": app["app"], + "source": jingrow.get_value( + "App Source", + { + "app": app["app"], + "branch": app["branch"], + }, + "name", + ), + }, + ) + release_group.append("servers", {"server": self.server}) + except Exception: + self.status = "Broken" + self.save() + log_error("Creating RG failed", server=self.as_dict()) + release_group.team = self.team + release_group.version = self.map_branch_to_version(max(branches)) + rg = release_group.insert() + self.release_group = rg.name + self.status = "Active" + self.save() + + def set_database_plan(self): + if self.database_plan: + return + + if not self.different_database_server and not jingrow.db.exists("Server Plan", "Unlimited"): + self._create_server_plan("Unlimited") + self.database_plan = "Unlimited" + + def _create_server_plan(self, plan_name): + plan = jingrow.new_pg("Server Plan") + plan.name = plan_name + plan.title = plan_name + plan.price_cny = 0 + plan.price_usd = 0 + plan.save() + + @jingrow.whitelist() + def create_database_server(self): + try: + if not self.mariadb_ip: + jingrow.throw("Public IP for MariaDB not found") + + db_server = jingrow.new_pg( + "Database Server", + **{ + "hostname": self.get_hostname("Database Server"), + "title": f"{self.title} Database", + "is_self_hosted": True, + "domain": self.hybrid_domain, + "self_hosted_server_domain": self.hybrid_domain, + "ip": self.mariadb_ip, + "private_ip": self.mariadb_private_ip, + "team": self.team, + "ssh_user": self.ssh_user, + "ssh_port": self.ssh_port, + 
"mariadb_root_password": self.get_password("mariadb_root_password"), + "cluster": self.cluster, + "agent_password": self.get_password("agent_password"), + "is_server_setup": not self.new_server, + "plan": self.database_plan, + }, + ).insert() + + db_server.create_subscription(self.database_plan) + self.database_setup = True + self.database_server = db_server.name + self.status = "Active" + self.save() + + if not jingrow.flags.in_test: + db_server.create_dns_record() + + jingrow.db.commit() + + jingrow.msgprint(f"Database server record {db_server.name} created") + except Exception: + jingrow.throw("Adding Server to Database Server Pagetype failed") + self.status = "Broken" + self.save() + log_error("Inserting a new DB server failed") + + def append_site_configs(self, play_name): + """ + Append site_config.json to `sites` Child Table + """ + try: + ansible_task_op = jingrow.get_value( + "Ansible Task", + {"play": play_name, "task": "Get Site Configs from Existing Sites"}, + "output", + ) + task_result = json.loads( + ansible_task_op.replace("'", '"').replace('"{', "{").replace('}"', "}").replace("\\n", "") + ) + self.status = "Pending" + for site in task_result: + for _site in self.sites: + if _site.site_name == site["site"]: + _site.site_config = str(site["config"]).replace( + "'", '"' + ) # JSON Breaks since dict uses only single quotes + self.save() + self.status = "Active" + except Exception as e: + self.status = "Broken" + jingrow.throw("Fetching sites configs from Existing Bench failed", exc=e) + self.save() + + @jingrow.whitelist() + def create_application_server(self): + """ + Add a new record to the Server pagetype + """ + + try: + server = jingrow.new_pg( + "Server", + **{ + "hostname": self.get_hostname("Server"), + "title": f"{self.title} Application", + "is_self_hosted": True, + "domain": self.hybrid_domain, + "self_hosted_server_domain": self.hybrid_domain, + "team": self.team, + "ip": self.ip, + "private_ip": self.private_ip, + "ssh_user": self.ssh_user, + "ssh_port": self.ssh_port, + "proxy_server": self.proxy_server, + "database_server": self.database_server, + "cluster": self.cluster, + "agent_password": self.get_password("agent_password"), + "self_hosted_mariadb_root_password": self.get_password("mariadb_root_password"), + "ram": self.ram, + "new_worker_allocation": True, + "plan": self.plan, + }, + ).insert() + + server.create_subscription(self.plan) + self.server = server.name + self.status = "Active" + self.server_created = True + + if not jingrow.flags.in_test: + server.create_dns_record() + + jingrow.db.commit() + + except Exception as e: + self.status = "Broken" + jingrow.throw("Server Creation Error", exc=e) + + self.save() + + jingrow.msgprint(f"Server record {server.name} created") + return server + + @jingrow.whitelist() + def create_new_sites(self): + """ + Create new FC sites from sites in Current Bench + """ + try: + for _site in self.sites: + if len(_site.site_name) < 5: + sdomain = _site.site_name + "-new" + else: + sdomain = _site.site_name + sdomain = sdomain.replace(".", "-") + domain = self.domain + if not jingrow.db.exists("Site", f"{sdomain}.{domain}"): + new_site = jingrow.new_pg("Site") + new_site.subdomain = sdomain + new_site.domain = domain + try: + new_site.bench = jingrow.get_last_pg( + "Bench", {"group": self.release_group, "server": self.name} + ).name + except Exception as e: + jingrow.throw("Site Creation Failed", exc=e) + new_site.team = self.team + new_site.server = self.name + for app in _site.apps.split(","): + new_site.append("apps", 
{"app": app}) + config = json.loads(_site.site_config) + for key, value in config.items(): + new_site.append("configuration", {"key": key, "value": value}) + new_site.database_name = config["db_name"] + _new_site = new_site.insert() + _site.site = _new_site.name + self.save() + self.reload() + except Exception: + log_error("New Site Creation Error", server=self.as_dict()) + + @jingrow.whitelist() + def restore_files(self): + jingrow.enqueue_pg(self.pagetype, self.name, "_restore_files", queue="long", timeout=2400) + + def _restore_files(self): + """ + Copy required folder of Existing Bench to new sites + """ + self.status = "Pending" + self.save() + ex_sites = [] + nw_sites = [] + benches = [] + for _site in self.sites: + ex_sites.append(_site.site_name) + nw_sites.append(_site.site) + bench = jingrow.db.get_value("Site", _site.site, "bench") + benches.append(bench) + try: + ansible = Ansible( + playbook="self_hosted_restore.yml", + server=self, + user=self.ssh_user or "root", + port=self.ssh_port or 22, + variables={ + "bench_path": self.bench_directory, + "ex_sites": ex_sites, + "new_benches": benches, + "new_sites": nw_sites, + }, + ) + play = ansible.run() + if play.status == "Success": + self.status = "Active" + except Exception: + self.status = "Broken" + log_error("Self Hosted Restore error", server=self.name) + self.save() + + def get_hostname(self, server_type): + symbolic_name = get_symbolic_name(server_type) + series = f"{symbolic_name}-{self.cluster}.#####" + + index = make_autoname(series)[-5:] + + return f"{symbolic_name}-{index}-{self.cluster}".lower() + + @property + def hybrid_domain(self): + return jingrow.db.get_single_value("Jcloud Settings", "hybrid_domain") + + @jingrow.whitelist() + def create_proxy_server(self): + """ + Add a new record to the Proxy Server pagetype + """ + try: + proxy_server = jingrow.new_pg( + "Proxy Server", + **{ + "hostname": self.get_hostname("Proxy Server"), + "title": self.title, + "is_self_hosted": True, + "domain": self.hybrid_domain, + "self_hosted_server_domain": self.hybrid_domain, + "team": self.team, + "ip": self.proxy_public_ip, + "private_ip": self.proxy_private_ip, + "is_primary": True, + "cluster": self.cluster, + "ssh_user": self.ssh_user, + "ssh_port": self.ssh_port, + }, + ).insert() + + self.agent_password = proxy_server.get_password("agent_password") + self.proxy_server = proxy_server.name + self.status = "Active" + self.proxy_created = True + except Exception as e: + self.status = "Broken" + jingrow.throw("Self Hosted Proxy Server Creation Error", exc=e) + self.save() + + jingrow.msgprint(f"Proxy server record {proxy_server.name} created") + + @jingrow.whitelist() + def create_tls_certs(self, domain): + try: + tls_cert = jingrow.db.get_value("TLS Certificate", {"domain": f"{domain}"}) + + if not tls_cert: + tls_cert = jingrow.new_pg( + "TLS Certificate", + **{ + "domain": domain, + "team": self.team, + "wildcard": False, + }, + ).insert() + tls_cert = tls_cert.name + + return tls_cert + except Exception: + log_error("TLS Certificate(SelfHosted) Creation Error") + + def setup_nginx(self, server): + try: + ansible = Ansible( + playbook="self_hosted_nginx.yml", + server=server, + user=server.ssh_user or "root", + port=server.ssh_port or "22", + variables={ + "domain": self.name, + "jcloud_domain": jingrow.db.get_single_value("Jcloud Settings", "domain"), # for ssl renewal + }, + ) + play = ansible.run() + if play.status == "Success": + return True + except Exception: + log_error("Nginx setup failed for self hosted server", 
server=self.as_dict()) + return False + + @jingrow.whitelist() + def update_tls(self): + from jcloud.jcloud.pagetype.tls_certificate.tls_certificate import ( + update_server_tls_certifcate, + ) + + try: + cert = jingrow.get_last_pg("TLS Certificate", {"domain": self.server, "status": "Active"}) + except jingrow.DoesNotExistError: + cert = jingrow.get_last_pg("TLS Certificate", {"domain": self.name, "status": "Active"}) + + update_server_tls_certifcate(self, cert) + + def process_tls_cert_update(self): + self.update_tls() + + def setup_server(self): + self._setup_db_server() + + if self.different_database_server: + self._setup_app_server() + + def _setup_db_server(self): + db_server = jingrow.get_pg("Database Server", self.database_server) + db_server.setup_server() + + def _setup_app_server(self): + app_server = jingrow.get_pg("Server", self.server) + app_server.setup_server() + + @property + def subscription(self): + name = jingrow.db.get_value( + "Subscription", {"document_type": self.pagetype, "document_name": self.name} + ) + return jingrow.get_pg("Subscription", name) if name else None + + def can_charge_for_subscription(self, subscription=None): + return ( + self.status not in ["Archived", "Unreachable", "Pending"] + and self.team + and self.team != "Administrator" + ) + + def _get_play_id(self): + try: + play_id = jingrow.get_last_pg("Ansible Play", {"server": self.server, "play": "Ping Server"}).name + except jingrow.DoesNotExistError: + play_id = jingrow.get_last_pg("Ansible Play", {"server": self.name, "play": "Ping Server"}).name + + return play_id + + def _get_play(self, play_id): + play = jingrow.get_pg("Ansible Task", {"status": "Success", "play": play_id, "task": "Gather Facts"}) + + return json.loads(play.result) + + @jingrow.whitelist() + def fetch_system_ram(self, play_id=None, server_type="app"): + """ + Fetch the RAM from the Ping Ansible Play + """ + if not play_id: + play_id = self._get_play_id() + + try: + result = self._get_play(play_id) + + if server_type == "app": + self.ram = result["ansible_facts"]["memtotal_mb"] + else: + self.db_ram = result["ansible_facts"]["memtotal_mb"] + + self.save() + except Exception: + log_error("Fetching RAM failed", server=self.as_dict()) + + def validate_private_ip(self, play_id=None, server_type="app"): + if not play_id: + play_id = self._get_play_id() + + all_ipv4_addresses = [] + result = self._get_play(play_id) + + try: + all_ipv4_addresses = result["ansible_facts"]["all_ipv4_addresses"] + except Exception: + log_error("Fetching Private IP failed", server=self.as_dict()) + return + + private_ip = self.private_ip + public_ip = self.ip + if server_type == "db": + private_ip = self.mariadb_private_ip + public_ip = self.mariadb_ip + + if private_ip not in all_ipv4_addresses: + jingrow.throw(f"Private IP {private_ip} is not associated with server having IP {public_ip}") + + @jingrow.whitelist() + def fetch_private_ip(self, play_id=None, server_type="app"): + """ + Fetch the Private IP from the Ping Ansible Play + """ + if not play_id: + play_id = self._get_play_id() + + try: + result = self._get_play(play_id) + + if server_type == "app": + self.private_ip = fetch_private_ip_based_on_vendor(result) + else: + self.mariadb_private_ip = fetch_private_ip_based_on_vendor(result) + + self.save() + except Exception: + log_error("Fetching Private IP failed", server=self.as_dict()) + + @jingrow.whitelist() + def fetch_system_specifications(self, play_id=None, server_type="app"): + """ + Fetch the system specifications from the Ping Ansible Play + """ 
+ if not play_id: + play_id = self._get_play_id() + + try: + result = self._get_play(play_id) + if server_type == "app": + self.vendor = result["ansible_facts"]["system_vendor"] + self.ram = result["ansible_facts"]["memtotal_mb"] + self.vcpus = result["ansible_facts"]["processor_vcpus"] + self.swap_total = result["ansible_facts"]["swaptotal_mb"] + self.architecture = result["ansible_facts"]["architecture"] + self.instance_type = result["ansible_facts"]["product_name"] + self.processor = result["ansible_facts"]["processor"][2] + self.distribution = result["ansible_facts"]["lsb"]["description"] + self.total_storage = self._get_total_storage(result) + + else: + self.db_ram = result["ansible_facts"]["memtotal_mb"] + self.db_vcpus = result["ansible_facts"]["processor_vcpus"] + self.db_total_storage = self._get_total_storage(result) + + self.save() + except Exception: + log_error("Fetching System Details Failed", server=self.as_dict()) + + def _get_total_storage(self, result): + match self.vendor: + case "DigitalOcean": + total_storage = result["ansible_facts"]["devices"]["vda"]["size"] + case "Amazon EC2": + total_storage = result["ansible_facts"]["devices"]["nvme0n1"]["size"] + case _: + total_storage = result["ansible_facts"]["devices"]["sda"]["size"] + + return total_storage + + def check_minimum_specs(self): + """ + Check if the server meets the minimum requirements + i.e. RAM >= 4GB, vCPUs >= 2, Storage >= 40GB + """ + + if round(int(self.ram), -3) < 4000: # Round to nearest thousand + jingrow.throw(f"Minimum RAM requirement not met, Minimum is 4GB and available is {self.ram} MB") + if int(self.vcpus) < 2: + jingrow.throw( + f"Minimum vCPU requirement not met, Minimum is 2 Cores and available is {self.vcpus}" + ) + + self._validate_disk() + + return True + + def _validate_disk(self): + disk_size = self.total_storage.split()[0] + disk_storage_unit = self.total_storage.split()[1] + + if disk_storage_unit.upper() == "TB": + return True + + if disk_storage_unit.upper() in ["GB", "MB"] and round(int(float(disk_size)), -1) < 40: + jingrow.throw( + f"Minimum Storage requirement not met, Minimum is 40GB and available is {self.total_storage}" + ) + return None + + +def fetch_private_ip_based_on_vendor(play_result: dict): + vendor = play_result["ansible_facts"]["system_vendor"] + match vendor: + case "DigitalOcean": + return play_result["ansible_facts"]["all_ipv4_addresses"][1] + case "Hetzner": + return play_result["ansible_facts"]["all_ipv4_addresses"][1] + case "Amazon EC2": + return play_result["ansible_facts"]["default_ipv4"]["address"] + case "Microsoft Corporation": + return play_result["ansible_facts"]["all_ipv4_addresses"][0] + case "Google": + return play_result["ansible_facts"]["default_ipv4"]["address"] + case _: + return play_result["ansible_facts"]["default_ipv4"]["address"] + + +def get_symbolic_name(server_type): + return { + "Proxy Server": "hybrid-n", + "Server": "hybrid-f", + "Database Server": "hybrid-m", + }.get(server_type, "hybrid-f") diff --git a/jcloud/jcloud/pagetype/self_hosted_server/self_hosted_server_dashboard.py b/jcloud/jcloud/pagetype/self_hosted_server/self_hosted_server_dashboard.py new file mode 100644 index 0000000..e1543f3 --- /dev/null +++ b/jcloud/jcloud/pagetype/self_hosted_server/self_hosted_server_dashboard.py @@ -0,0 +1,15 @@ +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +from jingrow import _ + + +def get_data(): + return { + "fieldname": "server", + "transactions": [ + {"label": _("Related Documents"), "items": 
["Release Group", "Site"]}, + {"label": _("Logs"), "items": ["Agent Job", "Ansible Play"]}, + ], + } diff --git a/jcloud/jcloud/pagetype/self_hosted_server/test_self_hosted_server.py b/jcloud/jcloud/pagetype/self_hosted_server/test_self_hosted_server.py new file mode 100644 index 0000000..b31e8dd --- /dev/null +++ b/jcloud/jcloud/pagetype/self_hosted_server/test_self_hosted_server.py @@ -0,0 +1,323 @@ +# Copyright (c) 2023, JINGROW +# See license.txt + + +import json +from unittest.mock import patch + +import jingrow +from jingrow.tests.utils import JingrowTestCase, change_settings + +from jcloud.api.tests.test_server import create_test_server_plan +from jcloud.jcloud.pagetype.ansible_play.test_ansible_play import create_test_ansible_play +from jcloud.jcloud.pagetype.jcloud_settings.test_jcloud_settings import ( + create_test_jcloud_settings, +) +from jcloud.jcloud.pagetype.self_hosted_server.self_hosted_server import SelfHostedServer +from jcloud.jcloud.pagetype.team.test_team import create_test_team + + +class TestSelfHostedServer(JingrowTestCase): + def setUp(self): + create_test_jcloud_settings() + + def tearDown(self): + jingrow.db.rollback() + + # def test_autoname_to_fqdn(self): + # hostnames = ["a1", "a1.b1", "waaaaaaawwwaawwa", "1234561234"] + # for host in hostnames: + # server = create_test_self_hosted_server(host) + # self.assertEqual(server.name, f"{host}.fc.dev") + + def test_successful_ping_ansible_sets_status_to_pending(self): + server = create_test_self_hosted_server("pinger") + with patch( + "jcloud.jcloud.pagetype.self_hosted_server.self_hosted_server.Ansible.run", + new=lambda x: create_test_ansible_play( + "Ping Server", + "ping.yml", + server.pagetype, + server.name, + {"server": server.name}, + ), + ): + server.ping_ansible() + self.assertEqual(server.status, "Pending") + + def test_failed_ping_ansible_sets_status_to_unreachable(self): + server = create_test_self_hosted_server("pinger") + with patch( + "jcloud.jcloud.pagetype.self_hosted_server.self_hosted_server.Ansible.run", + new=lambda x: create_test_ansible_play( + "Ping Server", + "ping.yml", + server.pagetype, + server.name, + {"server": server.name}, + "Failure", + ), + ): + server.ping_ansible() + self.assertEqual(server.status, "Unreachable") + + def test_get_apps_populates_apps_child_table(self): + server = create_test_self_hosted_server("apps") + with patch( + "jcloud.jcloud.pagetype.self_hosted_server.self_hosted_server.Ansible.run", + new=lambda x: _create_test_ansible_play_and_task( + server=server, + playbook="get_apps.yml", + _play="Get Bench data from Self Hosted Server", + task_1="Get Versions from Current Bench", + task_1_output=json.dumps( + [ + { + "commit": "3672c9f", + "app": "jingrow", + "branch": "version-14", + "version": "14.30.0", + } + ] + ), + task_1_result="", + ), + ): + server._get_apps() + server.reload() + self.assertTrue(server.apps) + self.assertEqual(len(server.apps), 1) + self.assertEqual(server.apps[0].app_name, "jingrow") + self.assertEqual(server.apps[0].branch, "version-14") + self.assertEqual(server.apps[0].version, "14.30.0") + + def test_get_sites_populates_site_table_with_config(self): + server = create_test_self_hosted_server("sites") + server.bench_path = "/home/jingrow/jingrow-bench" + with patch( + "jcloud.jcloud.pagetype.self_hosted_server.self_hosted_server.Ansible.run", + new=lambda x: _create_test_ansible_play_and_task( + server=server, + playbook="get_sites.yml", + _play="Sites from Current Bench", + task_1="Get Sites from Current Bench", + 
task_1_output=json.dumps({"site1.local": ["jingrow", "jerp"]}), + task_1_result="", + task_2="Get Site Configs from Existing Sites", + task_2_output=json.dumps( + [ + { + "site": "site1.local", + "config": { + "activations_last_sync_date": "2023-05-07 00:00:49.152290", + "always_use_account_email_id_as_sender": 1, + }, + } + ] + ), + task_2_result="", + ), + ): + server._get_sites() + server.reload() + self.assertTrue(server.sites) + self.assertTrue(server.sites[0].site_config) + self.assertEqual(len(server.sites), 1) + self.assertEqual( + server.sites[0].site_config, + json.dumps( + { + "activations_last_sync_date": "2023-05-07 00:00:49.152290", + "always_use_account_email_id_as_sender": 1, + } + ), + ) + self.assertEqual(server.sites[0].apps, "jingrow,jerp") + + def test_fetch_system_ram_from_ansible_and_update_ram_field(self): + server = create_test_self_hosted_server("ram") + _create_test_ansible_play_and_task( + server=server, + playbook="ping.yml", + _play="Ping Server", + task_1="Gather Facts", + task_1_output="", + task_1_result='{"ansible_facts": {"memtotal_mb": 16384}}', + ) + server.fetch_system_ram() + server.reload() + self.assertEqual(server.ram, "16384") + + def test_fetch_system_specifications_and_populate_fields_in_pg(self): + server = create_test_self_hosted_server("tester") + _create_test_ansible_play_and_task( + server=server, + playbook="ping.yml", + _play="Ping Server", + task_1="Gather Facts", + task_1_output="", + task_1_result="""{"ansible_facts": {"memtotal_mb": 16384,"system_vendor":"Amazon EC2","processor_vcpus":2,"swaptotal_mb":1024,"architecture":"x86_64","product_name":"c5a.6xLarge","processor":["0","GenuineIntel","Intel(R) Xeon(R) CPU @ 2.20GHz","1","GenuineIntel","Intel(R) Xeon(R) CPU @ 2.20GHz"],"lsb":{"description":"Debian GNU/Linux 11 (bullseye)"},"devices":{"nvme0n1":{"size":"25 GB"}}}}""", + ) + server.fetch_system_specifications() + server.reload() + self.assertEqual(server.vendor, "Amazon EC2") + self.assertEqual(server.ram, "16384") + self.assertEqual(server.vcpus, "2") + self.assertEqual(server.processor, "Intel(R) Xeon(R) CPU @ 2.20GHz") + self.assertEqual(server.swap_total, "1024") + self.assertEqual(server.architecture, "x86_64") + self.assertEqual(server.instance_type, "c5a.6xLarge") + self.assertEqual(server.distribution, "Debian GNU/Linux 11 (bullseye)") + self.assertEqual(server.total_storage, "25 GB") + + def test_fetch_private_ip_from_ansible_ping_and_populate_field(self): + server = create_test_self_hosted_server("tester") + _create_test_ansible_play_and_task( + server=server, + playbook="ping.yml", + _play="Ping Server", + task_1="Gather Facts", + task_1_output="", + task_1_result="""{"ansible_facts":{"default_ipv4":{"address":"192.168.1.1"},"system_vendor":"AWS EC2"}}""", + ) + server.fetch_private_ip() + server.reload() + self.assertEqual(server.private_ip, "192.168.1.1") + + @change_settings("Jcloud Settings", {"hybrid_domain": "fc.dev"}) + def test_create_server_and_check_total_records(self): + from jcloud.jcloud.pagetype.cluster.test_cluster import create_test_cluster + from jcloud.jcloud.pagetype.proxy_server.test_proxy_server import ( + create_test_proxy_server, + ) + + create_test_cluster(name="Default", hybrid=True) + create_test_proxy_server() + plan = create_test_server_plan(document_type="Self Hosted Server") + pre_server_count = jingrow.db.count("Server") + + server = create_test_self_hosted_server("tester", plan=plan.name) + server.create_application_server() + server.reload() + + post_server_count = 
jingrow.db.count("Server") + new_server = jingrow.get_last_pg("Server") + self.assertEqual(pre_server_count, post_server_count - 1) + self.assertEqual("hybrid-f-00001-default.fc.dev", new_server.name) + + @change_settings("Jcloud Settings", {"hybrid_domain": "fc.dev"}) + def test_create_db_server_and_check_total_records(self): + from jcloud.jcloud.pagetype.cluster.test_cluster import create_test_cluster + from jcloud.jcloud.pagetype.proxy_server.test_proxy_server import ( + create_test_proxy_server, + ) + + plan = create_test_server_plan(document_type="Database Server") + create_test_cluster(name="Default", hybrid=True) + create_test_proxy_server() + pre_server_count = jingrow.db.count("Database Server") + + server = create_test_self_hosted_server("tester", database_plan=plan.name) + server.create_database_server() + server.reload() + + post_server_count = jingrow.db.count("Database Server") + new_server = jingrow.get_last_pg("Database Server") + self.assertEqual(pre_server_count, post_server_count - 1) + self.assertEqual("hybrid-m-00001-default.fc.dev", new_server.name) + + def test_check_minimum_specs(self): + server = create_test_self_hosted_server("tester") + server.ram = 2500 + with self.assertRaises(jingrow.exceptions.ValidationError): + server.check_minimum_specs() + server.ram = 3853 + server.vcpus = 1 + server.total_storage = "100 GB" + with self.assertRaises(jingrow.exceptions.ValidationError): + server.check_minimum_specs() + server.vcpus = 2 + server.total_storage = "20 GB" + with self.assertRaises(jingrow.exceptions.ValidationError): + server.check_minimum_specs() + server.total_storage = "100 GB" + self.assertTrue(server.check_minimum_specs()) + + def test_create_subscription_add_plan_change_and_check_for_new_subscription(self): + app_plan = create_test_server_plan("Self Hosted Server") + database_plan = create_test_server_plan(document_type="Database Server") + + pre_plan_change_count = jingrow.db.count("Plan Change") + pre_subscription_count = jingrow.db.count("Subscription") + + server = create_test_self_hosted_server( + "tester", database_plan=database_plan.name, plan=app_plan.name + ) + server.create_application_server() + server.create_database_server() + + post_plan_change_count = jingrow.db.count("Plan Change") + post_subscription_count = jingrow.db.count("Subscription") + + self.assertEqual(pre_plan_change_count, post_plan_change_count - 2) + self.assertEqual(pre_subscription_count, post_subscription_count - 2) + + +def create_test_self_hosted_server( + host, database_plan=None, plan=None +) -> SelfHostedServer: + """ + Plan: is a string that represents the application servers subscription plan name + Database Plan: is a string that represents the database servers subscription plan name + """ + server = jingrow.get_pg( + { + "pagetype": "Self Hosted Server", + "ip": jingrow.mock("ipv4"), + "private_ip": "192.168.1.1", + "mariadb_ip": jingrow.mock("ipv4"), + "mariadb_private_ip": "192.168.1.2", + "server_url": f"https://{host}.fc.dev", + "team": create_test_team().name, + "cluster": "Default", + } + ) + + if database_plan: + server.database_plan = database_plan + if plan: + server.plan = plan + + server.insert(ignore_if_duplicate=True) + server.reload() + return server + + +def _create_test_ansible_play_and_task( + server: SelfHostedServer, playbook: str, _play: str, **kwargs +): # TODO: Move to AnsiblePlay and Make a generic one for AnsibleTask + play = create_test_ansible_play( + _play, + playbook, + server.pagetype, + server.name, + {"server": server.name}, + ) + + 
for i, _ in enumerate(kwargs): + try: + task = jingrow.get_pg( + { + "pagetype": "Ansible Task", + "status": "Success", + "play": play.name, + "role": play.playbook.split(".")[0], + "task": kwargs.get("task_" + str(i + 1)), + "output": kwargs.get("task_" + str(i + 1) + "_output"), + "result": kwargs.get("task_" + str(i + 1) + "_result"), + } + ) + task.insert() + except Exception: + pass + return play diff --git a/jcloud/jcloud/pagetype/self_hosted_site_apps/__init__.py b/jcloud/jcloud/pagetype/self_hosted_site_apps/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/self_hosted_site_apps/self_hosted_site_apps.json b/jcloud/jcloud/pagetype/self_hosted_site_apps/self_hosted_site_apps.json new file mode 100644 index 0000000..e8b62b8 --- /dev/null +++ b/jcloud/jcloud/pagetype/self_hosted_site_apps/self_hosted_site_apps.json @@ -0,0 +1,51 @@ +{ + "actions": [], + "autoname": "autoincrement", + "creation": "2023-03-16 10:53:27.900551", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "site_name", + "apps", + "site_config", + "site" + ], + "fields": [ + { + "fieldname": "site_name", + "fieldtype": "Data", + "label": "Site Name" + }, + { + "fieldname": "apps", + "fieldtype": "Data", + "label": "Apps" + }, + { + "fieldname": "site_config", + "fieldtype": "Code", + "label": "Site Config", + "options": "JSON" + }, + { + "fieldname": "site", + "fieldtype": "Link", + "label": "Site", + "options": "Site" + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2023-03-28 12:24:09.561279", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Self Hosted Site Apps", + "naming_rule": "Autoincrement", + "owner": "Administrator", + "permissions": [], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/self_hosted_site_apps/self_hosted_site_apps.py b/jcloud/jcloud/pagetype/self_hosted_site_apps/self_hosted_site_apps.py new file mode 100644 index 0000000..8490501 --- /dev/null +++ b/jcloud/jcloud/pagetype/self_hosted_site_apps/self_hosted_site_apps.py @@ -0,0 +1,27 @@ +# Copyright (c) 2023, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class SelfHostedSiteApps(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + apps: DF.Data | None + name: DF.Int | None + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + site: DF.Link | None + site_config: DF.Code | None + site_name: DF.Data | None + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/serial_console_log/__init__.py b/jcloud/jcloud/pagetype/serial_console_log/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/serial_console_log/serial_console_log.js b/jcloud/jcloud/pagetype/serial_console_log/serial_console_log.js new file mode 100644 index 0000000..c9553a0 --- /dev/null +++ b/jcloud/jcloud/pagetype/serial_console_log/serial_console_log.js @@ -0,0 +1,13 @@ +// Copyright (c) 2023, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Serial Console Log', { + refresh(frm) { + jingrow.realtime.off('serial_console_log_update'); + jingrow.realtime.on('serial_console_log_update', (message) => { + if (message.name == frm.pg.name) { + frm.set_value('output', message.output); + } + }); + }, +}); diff --git a/jcloud/jcloud/pagetype/serial_console_log/serial_console_log.json b/jcloud/jcloud/pagetype/serial_console_log/serial_console_log.json new file mode 100644 index 0000000..986e84a --- /dev/null +++ b/jcloud/jcloud/pagetype/serial_console_log/serial_console_log.json @@ -0,0 +1,116 @@ +{ + "actions": [ + { + "action": "jcloud.jcloud.pagetype.serial_console_log.serial_console_log.run_sysrq", + "action_type": "Server Action", + "group": "Actions", + "label": "Run SysRQ Command" + } + ], + "allow_rename": 1, + "creation": "2023-12-29 13:02:05.574172", + "pagetype": "PageType", + "engine": "InnoDB", + "field_order": [ + "server_type", + "server", + "virtual_machine", + "column_break_oypq", + "action", + "command", + "message", + "section_break_vvrg", + "output" + ], + "fields": [ + { + "fieldname": "server_type", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Server Type", + "link_filters": "[[{\"fieldname\":\"server_type\",\"field_option\":\"PageType\"},\"name\",\"like\",\"%Server\"]]", + "options": "PageType", + "reqd": 1, + "set_only_once": 1 + }, + { + "fieldname": "server", + "fieldtype": "Dynamic Link", + "in_list_view": 1, + "label": "Server", + "options": "server_type", + "reqd": 1, + "set_only_once": 1 + }, + { + "fetch_from": "server.virtual_machine", + "fetch_if_empty": 1, + "fieldname": "virtual_machine", + "fieldtype": "Link", + "label": "Virtual Machine", + "options": "Virtual Machine", + "reqd": 1, + "set_only_once": 1 + }, + { + "fieldname": "column_break_oypq", + "fieldtype": "Column Break" + }, + { + "fieldname": "section_break_vvrg", + "fieldtype": "Section Break" + }, + { + "fieldname": "output", + "fieldtype": "Code", + "label": "Output", + "read_only": 1 + }, + { + "fieldname": "action", + "fieldtype": "Select", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Action", + "options": "help\nreboot\ncrash\nsync\nshow-all-timers\nunmount\nshow-all-locks\nshow-backtrace-all-active-cpus\nshow-registers\nshow-task-states\nshow-blocked-tasks\ndump-ftrace-buffer\nshow-memory-usage\nterminate-all-tasks\nmemory-full-oom-kill\nthaw-filesystems\nkill-all-tasks\nnice-all-RT-tasks\nreplay-kernel-logs", + "reqd": 1, + "set_only_once": 1 + }, + { + "fieldname": "command", + "fieldtype": "Data", + "label": "Command", + "read_only": 1 + }, + { + "fieldname": "message", + "fieldtype": "Data", + "label": "Message", + "read_only": 1 + } + ], + 
"index_web_pages_for_search": 1, + "links": [], + "modified": "2024-06-12 20:07:15.077206", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Serial Console Log", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/serial_console_log/serial_console_log.py b/jcloud/jcloud/pagetype/serial_console_log/serial_console_log.py new file mode 100644 index 0000000..4e10a3a --- /dev/null +++ b/jcloud/jcloud/pagetype/serial_console_log/serial_console_log.py @@ -0,0 +1,162 @@ +# Copyright (c) 2023, JINGROW +# For license information, please see license.txt + +from __future__ import annotations + +import time +from io import StringIO + +import jingrow +import pexpect +from jingrow.model.document import Document + +from jcloud.jcloud.pagetype.deploy_candidate.deploy_candidate import ansi_escape + + +class SerialConsoleLog(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + action: DF.Literal[ + "help", + "reboot", + "crash", + "sync", + "show-all-timers", + "unmount", + "show-all-locks", + "show-backtrace-all-active-cpus", + "show-registers", + "show-task-states", + "show-blocked-tasks", + "dump-ftrace-buffer", + "show-memory-usage", + "terminate-all-tasks", + "memory-full-oom-kill", + "thaw-filesystems", + "kill-all-tasks", + "nice-all-RT-tasks", + "replay-kernel-logs", + ] + command: DF.Data | None + message: DF.Data | None + output: DF.Code | None + server: DF.DynamicLink + server_type: DF.Link + virtual_machine: DF.Link + # end: auto-generated types + + def validate(self): + self.command, self.message = SYSRQ_COMMANDS.get(self.action, ("h", "HELP")) + + @jingrow.whitelist() + def run_sysrq(self): + jingrow.enqueue_pg( + self.pagetype, + self.name, + method="_run_sysrq", + queue="long", + enqueue_after_commit=True, + at_front=True, + ) + jingrow.db.commit() + + def _run_sysrq(self): + credentials = jingrow.get_pg("Virtual Machine", self.virtual_machine).get_serial_console_credentials() + ssh = pexpect.spawn(credentials["command"], encoding="utf-8") + ssh.logfile = FakeIO(self) + + index = ssh.expect([credentials["fingerprint"], pexpect.TIMEOUT], timeout=3) + if index == 0: + ssh.expect("Are you sure you want to continue") + ssh.sendline("yes") + + # Send a newline and wait for login prompt + # We don't want to send break too soon + time.sleep(0.5) + ssh.sendline("") + ssh.expect(["login:", "Password:"]) + + # Send ~B and expect SysRq help message + time.sleep(0.5) + ssh.send("~B") + time.sleep(0.1) + ssh.send("h") + ssh.expect(["sysrq: HELP", pexpect.TIMEOUT], timeout=1) + + break_attempt = 0 + while True: + break_attempt += 1 + + # Send ~B and then b for reboot + time.sleep(0.5) + ssh.sendline("") + ssh.send("~B") + time.sleep(0.1) + ssh.send(self.command) + + # Wait for reboot + index = ssh.expect([f"sysrq: {self.message}", pexpect.TIMEOUT], timeout=1) + if index == 0 or break_attempt > 10: + break + + # Wait for login prompt + ssh.expect("login:", timeout=300) + + +class FakeIO(StringIO): + def __init__(self, serial_console_log, *args, **kwargs): + self.console = serial_console_log.name + super().__init__(*args, **kwargs) + + def 
flush(self): + super().flush() + output = ansi_escape(self.getvalue()) + jingrow.db.set_value("Serial Console Log", self.console, "output", output, update_modified=False) + + message = {"name": self.console, "output": output} + jingrow.publish_realtime( + event="serial_console_log_update", + pagetype="Serial Console Log", + docname=self.console, + user=jingrow.session.user, + message=message, + ) + + jingrow.db.commit() + + +SYSRQ_COMMANDS = { + "crash": ("c", "Trigger a crash"), + "reboot": ("b", "Resetting"), + "sync": ("s", "Emergency Sync"), + "help": ("h", "HELP"), + "show-all-timers": ("q", "Show clockevent devices & pending hrtimers (no others)"), + "unmount": ("u", "Emergency Remount R/O"), + "show-all-locks": ("d", "Show Locks Held"), + "show-backtrace-all-active-cpus": ("l", "Show backtrace of all active CPUs"), + "show-registers": ("p", "Show Regs"), + "show-task-states": ("t", "Show State"), + "show-blocked-tasks": ("w", "Show Blocked State"), + "dump-ftrace-buffer": ("z", "Dump ftrace buffer"), + "show-memory-usage": ("m", "Show Memory"), + "terminate-all-tasks": ("e", "Terminate All Tasks"), + "memory-full-oom-kill": ("f", "Manual OOM execution"), + "thaw-filesystems": ("j", "Emergency Thaw of all frozen filesystems"), + "kill-all-tasks": ("i", "Kill All Tasks"), + "nice-all-RT-tasks": ("n", "Nice All RT Tasks"), + "replay-kernel-logs": ("R", "Replay kernel logs on consoles"), +} + + +@jingrow.whitelist() +def run_sysrq(pg): + jingrow.only_for("System Manager") + parsed_pg = jingrow.parse_json(pg) + jingrow.get_pg(parsed_pg.pagetype, parsed_pg.name).run_sysrq() + return pg diff --git a/jcloud/jcloud/pagetype/serial_console_log/test_serial_console_log.py b/jcloud/jcloud/pagetype/serial_console_log/test_serial_console_log.py new file mode 100644 index 0000000..257d099 --- /dev/null +++ b/jcloud/jcloud/pagetype/serial_console_log/test_serial_console_log.py @@ -0,0 +1,9 @@ +# Copyright (c) 2023, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests.utils import JingrowTestCase + + +class TestSerialConsoleLog(JingrowTestCase): + pass diff --git a/jcloud/jcloud/pagetype/server/__init__.py b/jcloud/jcloud/pagetype/server/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/server/patches/set_bench_memory_limits.py b/jcloud/jcloud/pagetype/server/patches/set_bench_memory_limits.py new file mode 100644 index 0000000..eb4008b --- /dev/null +++ b/jcloud/jcloud/pagetype/server/patches/set_bench_memory_limits.py @@ -0,0 +1,8 @@ +# -*- coding: utf-8 -*- # noqa: UP009 +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt +import jingrow + + +def execute(): + jingrow.db.set_value("Server", {"status": "Active"}, "set_bench_memory_limits", True, debug=True) diff --git a/jcloud/jcloud/pagetype/server/patches/set_plan_and_subscription.py b/jcloud/jcloud/pagetype/server/patches/set_plan_and_subscription.py new file mode 100644 index 0000000..816da38 --- /dev/null +++ b/jcloud/jcloud/pagetype/server/patches/set_plan_and_subscription.py @@ -0,0 +1,83 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt +import jingrow + + +def execute(): + DOCTYPES = ["Server", "Database Server"] + for pagetype in DOCTYPES: + server_names = jingrow.get_all( + pagetype, + {"status": ("!=", "Archived"), "virtual_machine": ("is", "set")}, + pluck="name", + ) + for server_name in server_names: + server = jingrow.get_pg(pagetype, server_name) + + subscription = jingrow.get_all( + 
"Subscription", ["name", "plan"], {"enabled": True, "document_name": server.name} + ) + + if subscription and server.plan: + # Plan is set and an active subscription exists + # Nothing to do here + continue + if subscription and not server.plan: + # Subscription exists but plan is not set + # Set Server.plan to the plan of the subscription + print( + f"Subscription exists, but plan isn't set for {pagetype} {server.name} setting plan to {subscription[0].plan}" + ) + server.plan = subscription[0].plan + server.save() + if not subscription and server.plan: + # Plan is set but no subscription exists + # Create a subscription + print( + f"Plan is set but no subscription exists for {pagetype} {server.name} creating subscription for {server.plan}" + ) + server.create_subscription(server.plan) + if not subscription and not server.plan: + # Plan is not set and no subscription exists + # Find a plan based on the server's instance type + instance_type = jingrow.db.get_value( + "Virtual Machine", server.virtual_machine, "machine_type" + ) + plan = jingrow.get_all( + "Server Plan", + { + "enabled": True, + "server_type": pagetype, + "cluster": server.cluster, + "instance_type": instance_type, + "premium": False, + }, + ) + if plan: + print( + f"Found plan for {pagetype} {server.name} based on instance_type {instance_type} setting plan to {plan}" + ) + server.plan = plan[0].name + server.save() + server.create_subscription(server.plan) + else: + instance_type = instance_type.replace("7", "6") + instance_type = instance_type.replace("5", "6i") + plan = jingrow.get_all( + "Server Plan", + { + "enabled": True, + "server_type": pagetype, + "cluster": server.cluster, + "instance_type": instance_type, + "premium": False, + }, + ) + + print( + f"No exact match plan found for {pagetype} {server.name} based on instance_type {instance_type} found next best plan {plan[0].name}" + ) + server.plan = plan[0].name + server.save() + server.create_subscription(server.plan) diff --git a/jcloud/jcloud/pagetype/server/patches/unset_bench_memory_limits_on_dedicated_servers.py b/jcloud/jcloud/pagetype/server/patches/unset_bench_memory_limits_on_dedicated_servers.py new file mode 100644 index 0000000..b2d8da5 --- /dev/null +++ b/jcloud/jcloud/pagetype/server/patches/unset_bench_memory_limits_on_dedicated_servers.py @@ -0,0 +1,24 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt +import jingrow +from tqdm import tqdm + +LIMIT_MULTIPLIER = 10 +# Increase memory limit. 
Nothing special about 10, +# Just a number that seems reasonably high to never be reached + + +def execute(): + servers = jingrow.get_all( + "Server", filters={"status": "Active", "set_bench_memory_limits": True, "public": False}, pluck="name" + ) + for server in tqdm(servers): + jingrow.db.set_value("Server", server, "set_bench_memory_limits", False) + benches = jingrow.get_all("Bench", filters={"server": server, "status": "Active"}, pluck="name") + for bench in benches: + bench = jingrow.get_pg("Bench", bench) + bench.memory_max = LIMIT_MULTIPLIER * bench.memory_max + bench.memory_swap = LIMIT_MULTIPLIER * bench.memory_swap + bench.memory_high = LIMIT_MULTIPLIER * bench.memory_high + bench.save() + jingrow.db.commit() diff --git a/jcloud/jcloud/pagetype/server/server.js b/jcloud/jcloud/pagetype/server/server.js new file mode 100644 index 0000000..1f4a666 --- /dev/null +++ b/jcloud/jcloud/pagetype/server/server.js @@ -0,0 +1,276 @@ +// Copyright (c) 2019, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Server', { + refresh: function (frm) { + frm.add_web_link( + `/dashboard/servers/${frm.pg.name}`, + __('Visit Dashboard'), + ); + + const ping_actions = [ + [__('Ping Agent'), 'ping_agent', false, frm.pg.is_server_setup], + [ + __('Ping Agent (Job)'), + 'ping_agent_job', + false, + frm.pg.is_server_setup, + ], + [__('Ping Ansible'), 'ping_ansible', true, !frm.pg.is_server_prepared], + [ + __('Ping Ansible Unprepared'), + 'ping_ansible_unprepared', + true, + !frm.pg.is_server_prepared, + ], + ]; + + for (const [label, method, confirm, condition] of ping_actions) { + if (!condition || typeof condition === 'undefined') { + continue; + } + + async function callback() { + if (confirm && !(await jingrow_confirm(label))) { + return; + } + + const res = await frm.call(method); + if (res.message && method == 'ping_agent_job') { + jingrow.msgprint( + `Agejt Job ${res?.message} created.`, + ); + } else if (res.message) { + jingrow.msgprint(res.message); + } else { + frm.refresh(); + } + } + + frm.add_custom_button(label, callback, __('Ping')); + } + + [ + [__('Update Agent'), 'update_agent', true, frm.pg.is_server_setup], + [ + __('Update Agent Ansible'), + 'update_agent_ansible', + true, + frm.pg.is_server_setup, + ], + [ + __('Prepare Server'), + 'prepare_server', + true, + !frm.pg.is_server_prepared, + ], + [__('Setup Server'), 'setup_server', true, !frm.pg.is_server_setup], + [ + __('Add to Proxy'), + 'add_upstream_to_proxy', + true, + frm.pg.is_server_setup && !frm.pg.is_upstream_setup, + ], + [ + __('Setup Replication'), + 'setup_replication', + true, + frm.pg.is_server_setup && + !frm.pg.is_primary && + !frm.pg.is_replication_setup, + ], + [ + __('Setup Rename'), + 'rename_server', + true, + frm.pg.is_server_setup && + frm.pg.is_server_prepared && + !frm.pg.is_server_renamed, + ], + [ + __('Fetch Keys'), + 'fetch_keys', + false, + frm.pg.is_server_setup && + (!frm.pg.jingrow_public_key || !frm.pg.root_public_key), + ], + [__('Update TLS Certificate'), 'update_tls_certificate', true], + [ + __('Auto Scale Workers'), + 'auto_scale_workers', + true, + frm.pg.status == 'Active' && + frm.pg.is_primary && + frm.pg.is_server_setup, + ], + [ + __('Cleanup Unused Files'), + 'cleanup_unused_files', + true, + frm.pg.status == 'Active' && frm.pg.is_server_setup, + ], + [__('Create Image'), 'create_image', true, frm.pg.status == 'Active'], + [__('Archive'), 'archive', true, frm.pg.status !== 'Archived'], + [__('Setup Fail2ban'), 'setup_fail2ban', true, 
frm.pg.is_server_setup], + [ + __('Setup MySQLdump'), + 'setup_mysqldump', + true, + frm.pg.is_server_setup && frm.pg.status == 'Active', + ], + [ + __('Whitelist Server'), + 'whitelist_ipaddress', + false, + frm.pg.is_server_setup, + ], + [ + __('Agent Setup Proxy IP'), + 'agent_set_proxy_ip', + false, + frm.pg.is_server_setup, + ], + [ + __('Setup Agent Sentry'), + 'setup_agent_sentry', + false, + frm.pg.is_server_setup, + ], + [ + __('Show Agent Password'), + 'show_agent_password', + false, + frm.pg.is_server_setup, + ], + [ + __('Setup Standalone'), + 'setup_standalone', + false, + frm.pg.is_server_setup && + frm.pg.is_standalone && + !frm.pg.is_standalone_setup, + ], + [ + __('Fetch Security Updates'), + 'fetch_security_updates', + false, + frm.pg.is_server_setup, + ], + [ + __('Configure SSH logging'), + 'configure_ssh_logging', + false, + frm.pg.is_server_setup, + ], + [ + __('Reset Usage for all sites'), + 'reset_sites_usage', + true, + frm.pg.is_server_setup, + ], + [ + __('Reboot with serial console'), + 'reboot_with_serial_console', + true, + frm.pg.provider === 'AWS EC2', + ], + [ + __('Enable Public Bench and Site Creation'), + 'enable_server_for_new_benches_and_site', + true, + frm.pg.virtual_machine, + ], + [ + __('Disable Public Bench and Site Creation'), + 'disable_server_for_new_benches_and_site', + true, + frm.pg.virtual_machine, + ], + [ + __('Set Swappiness and SysRq'), + 'set_swappiness', + false, + frm.pg.is_server_setup, + ], + [ + __('Mount Volumes'), + 'mount_volumes', + true, + frm.pg.virtual_machine && frm.pg.mounts, + ], + ].forEach(([label, method, confirm, condition]) => { + if (typeof condition === 'undefined' || condition) { + frm.add_custom_button( + label, + () => { + if (confirm) { + jingrow.confirm( + `Are you sure you want to ${label.toLowerCase()}?`, + () => + frm.call(method).then((r) => { + if (r.message) { + jingrow.msgprint(r.message); + } else { + frm.refresh(); + } + }), + ); + } else { + frm.call(method).then((r) => { + if (r.message) { + jingrow.msgprint(r.message); + } else { + frm.refresh(); + } + }); + } + }, + __('Actions'), + ); + } + }); + + if (frm.pg.is_server_setup) { + frm.add_custom_button( + __('Increase Swap'), + () => { + const dialog = new jingrow.ui.Dialog({ + title: __('Increase Swap'), + fields: [ + { + fieldtype: 'Int', + label: __('Swap Size'), + description: __('Size in GB'), + fieldname: 'swap_size', + default: 4, + }, + ], + }); + + dialog.set_primary_action(__('Increase Swap'), (args) => { + frm.call('increase_swap', args).then(() => { + dialog.hide(); + frm.refresh(); + }); + }); + dialog.show(); + }, + __('Actions'), + ); + } + }, + + hostname: function (frm) { + jcloud.set_hostname_abbreviation(frm); + }, +}); + +async function jingrow_confirm(label) { + return new Promise((r) => { + jingrow.confirm( + `Are you sure you want to ${label.toLowerCase()}?`, + () => r(true), + () => r(false), + ); + }); +} diff --git a/jcloud/jcloud/pagetype/server/server.json b/jcloud/jcloud/pagetype/server/server.json new file mode 100644 index 0000000..ee22bb8 --- /dev/null +++ b/jcloud/jcloud/pagetype/server/server.json @@ -0,0 +1,587 @@ +{ + "actions": [], + "creation": "2019-12-09 12:34:13.844800", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "status", + "hostname", + "hostname_abbreviation", + "domain", + "self_hosted_server_domain", + "title", + "column_break_4", + "cluster", + "provider", + "virtual_machine", + "ignore_incidents_since", + "is_server_prepared", + "is_server_setup", + 
"is_self_hosted", + "is_server_renamed", + "public", + "billing_section", + "team", + "column_break_11", + "plan", + "auto_add_storage_min", + "auto_add_storage_max", + "networking_section", + "ip", + "column_break_3", + "private_ip", + "private_mac_address", + "private_vlan_id", + "agent_section", + "agent_password", + "column_break_pdbx", + "disable_agent_job_auto_retry", + "reverse_proxy_section", + "proxy_server", + "column_break_12", + "is_upstream_setup", + "database_section", + "database_server", + "self_hosted_mariadb_server", + "is_managed_database", + "column_break_jdiy", + "self_hosted_mariadb_root_password", + "managed_database_service", + "replication", + "is_primary", + "is_replication_setup", + "column_break_24", + "primary", + "ssh_section", + "ssh_user", + "ssh_port", + "jingrow_user_password", + "jingrow_public_key", + "column_break_20", + "root_public_key", + "section_break_22", + "use_for_new_benches", + "use_for_new_sites", + "staging", + "use_for_build", + "column_break_ktkv", + "new_worker_allocation", + "set_bench_memory_limits", + "ram", + "backups_section", + "skip_scheduled_backups", + "standalone_section", + "is_standalone", + "column_break_edyf", + "is_standalone_setup", + "tags_section", + "tags", + "mounts_section", + "has_data_volume", + "mounts" + ], + "fields": [ + { + "fetch_from": "virtual_machine.public_ip_address", + "fieldname": "ip", + "fieldtype": "Data", + "in_list_view": 1, + "label": "IP", + "set_only_once": 1 + }, + { + "fieldname": "proxy_server", + "fieldtype": "Link", + "label": "Proxy Server", + "options": "Proxy Server" + }, + { + "fetch_from": "virtual_machine.private_ip_address", + "fieldname": "private_ip", + "fieldtype": "Data", + "label": "Private IP" + }, + { + "fieldname": "agent_password", + "fieldtype": "Password", + "label": "Agent Password", + "set_only_once": 1 + }, + { + "collapsible": 1, + "fieldname": "agent_section", + "fieldtype": "Section Break", + "label": "Agent" + }, + { + "default": "0", + "fieldname": "is_server_setup", + "fieldtype": "Check", + "label": "Server Setup", + "read_only": 1 + }, + { + "default": "0", + "fieldname": "is_upstream_setup", + "fieldtype": "Check", + "label": "Upstream Setup", + "read_only": 1 + }, + { + "default": "Pending", + "fieldname": "status", + "fieldtype": "Select", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Status", + "options": "Pending\nInstalling\nActive\nBroken\nArchived", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "column_break_3", + "fieldtype": "Column Break" + }, + { + "fieldname": "column_break_12", + "fieldtype": "Column Break" + }, + { + "collapsible": 1, + "fieldname": "reverse_proxy_section", + "fieldtype": "Section Break", + "label": "Reverse Proxy" + }, + { + "fieldname": "database_section", + "fieldtype": "Section Break", + "label": "Database" + }, + { + "depends_on": "eval:!pg.is_managed_database", + "fieldname": "database_server", + "fieldtype": "Link", + "label": "Database Server", + "options": "Database Server" + }, + { + "collapsible": 1, + "fieldname": "ssh_section", + "fieldtype": "Section Break", + "label": "SSH" + }, + { + "fieldname": "root_public_key", + "fieldtype": "Code", + "label": "Root Public Key", + "read_only": 1 + }, + { + "fieldname": "jingrow_public_key", + "fieldtype": "Code", + "label": "Jingrow Public Key", + "read_only": 1 + }, + { + "fieldname": "column_break_20", + "fieldtype": "Column Break" + }, + { + "fieldname": "section_break_22", + "fieldtype": "Section Break" + }, + { + "default": "0", + "fieldname": 
"use_for_new_benches", + "fieldtype": "Check", + "label": "Use For New Benches", + "read_only": 1 + }, + { + "fieldname": "hostname", + "fieldtype": "Data", + "label": "Hostname", + "reqd": 1, + "set_only_once": 1 + }, + { + "fieldname": "domain", + "fieldtype": "Link", + "hidden": 1, + "label": "Domain", + "options": "Root Domain", + "set_only_once": 1 + }, + { + "default": "0", + "fieldname": "use_for_new_sites", + "fieldtype": "Check", + "label": "Use For New Sites", + "read_only": 1 + }, + { + "fieldname": "cluster", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Cluster", + "options": "Cluster", + "set_only_once": 1 + }, + { + "fieldname": "column_break_4", + "fieldtype": "Column Break" + }, + { + "collapsible": 1, + "fieldname": "networking_section", + "fieldtype": "Section Break", + "label": "Networking" + }, + { + "depends_on": "eval: pg.provider === \"Scaleway\"", + "fieldname": "private_mac_address", + "fieldtype": "Data", + "label": "Private Mac Address", + "mandatory_depends_on": "eval: pg.provider === \"Scaleway\"", + "set_only_once": 1 + }, + { + "depends_on": "eval: pg.provider === \"Scaleway\"", + "fieldname": "private_vlan_id", + "fieldtype": "Data", + "label": "Private VLAN ID", + "mandatory_depends_on": "eval: pg.provider === \"Scaleway\"", + "set_only_once": 1 + }, + { + "default": "Generic", + "fieldname": "provider", + "fieldtype": "Select", + "label": "Provider", + "options": "Generic\nScaleway\nAWS EC2\nOCI", + "set_only_once": 1 + }, + { + "fieldname": "jingrow_user_password", + "fieldtype": "Password", + "label": "Jingrow User Password", + "set_only_once": 1 + }, + { + "collapsible": 1, + "fieldname": "replication", + "fieldtype": "Section Break", + "label": "Replication" + }, + { + "default": "1", + "fieldname": "is_primary", + "fieldtype": "Check", + "label": "Is Primary" + }, + { + "fieldname": "column_break_24", + "fieldtype": "Column Break" + }, + { + "depends_on": "eval: !pg.is_primary", + "fieldname": "primary", + "fieldtype": "Link", + "label": "Primary", + "mandatory_depends_on": "eval: !pg.is_primary", + "options": "Server" + }, + { + "default": "0", + "depends_on": "eval: !pg.is_primary", + "fieldname": "is_replication_setup", + "fieldtype": "Check", + "label": "Is Replication Setup", + "read_only": 1 + }, + { + "default": "0", + "fieldname": "staging", + "fieldtype": "Check", + "label": "Staging" + }, + { + "depends_on": "eval:[\"AWS EC2\", \"OCI\"].includes(pg.provider)", + "fieldname": "virtual_machine", + "fieldtype": "Link", + "label": "Virtual Machine", + "mandatory_depends_on": "eval:[\"AWS EC2\", \"OCI\"].includes(pg.provider)", + "options": "Virtual Machine" + }, + { + "default": "1", + "fieldname": "new_worker_allocation", + "fieldtype": "Check", + "label": "New Worker Allocation" + }, + { + "fieldname": "ram", + "fieldtype": "Float", + "label": "RAM" + }, + { + "fieldname": "team", + "fieldtype": "Link", + "label": "Team", + "options": "Team" + }, + { + "fieldname": "billing_section", + "fieldtype": "Section Break", + "label": "Billing" + }, + { + "fieldname": "column_break_11", + "fieldtype": "Column Break" + }, + { + "fieldname": "plan", + "fieldtype": "Link", + "label": "Plan", + "options": "Server Plan" + }, + { + "default": "0", + "fieldname": "is_server_prepared", + "fieldtype": "Check", + "label": "Is Server Prepared", + "read_only": 1 + }, + { + "default": "0", + "fieldname": "is_server_renamed", + "fieldtype": "Check", + "label": "Is Server Renamed", + "read_only": 1 + }, + { + "fieldname": 
"title", + "fieldtype": "Data", + "label": "Title" + }, + { + "default": "0", + "fieldname": "is_self_hosted", + "fieldtype": "Check", + "label": "Is Self Hosted" + }, + { + "default": "root", + "depends_on": "eval:pg.is_self_hosted", + "fieldname": "ssh_user", + "fieldtype": "Data", + "label": "SSH User" + }, + { + "depends_on": "eval:pg.is_self_hosted==true && !pg.is_managed_database", + "fieldname": "self_hosted_mariadb_server", + "fieldtype": "Data", + "label": "Self Hosted MariaDB Server IP" + }, + { + "depends_on": "eval:pg.is_self_hosted==true", + "fieldname": "column_break_jdiy", + "fieldtype": "Column Break" + }, + { + "depends_on": "eval:pg.is_self_hosted==true && !pg.is_managed_database", + "fieldname": "self_hosted_mariadb_root_password", + "fieldtype": "Password", + "label": "Self Hosted MariaDB Root Password" + }, + { + "depends_on": "eval:pg.is_self_hosted", + "fieldname": "self_hosted_server_domain", + "fieldtype": "Data", + "label": "Self Hosted Server Domain" + }, + { + "default": "22", + "fieldname": "ssh_port", + "fieldtype": "Int", + "label": "SSH Port" + }, + { + "collapsible": 1, + "fieldname": "standalone_section", + "fieldtype": "Section Break", + "label": "Standalone" + }, + { + "default": "0", + "fieldname": "is_standalone", + "fieldtype": "Check", + "label": "Is Standalone" + }, + { + "fieldname": "column_break_edyf", + "fieldtype": "Column Break" + }, + { + "default": "0", + "fieldname": "is_standalone_setup", + "fieldtype": "Check", + "label": "Is Standalone Setup", + "read_only": 1 + }, + { + "fieldname": "tags_section", + "fieldtype": "Section Break", + "label": "Tags" + }, + { + "fieldname": "tags", + "fieldtype": "Table", + "label": "Tags", + "options": "Resource Tag" + }, + { + "fieldname": "column_break_ktkv", + "fieldtype": "Column Break" + }, + { + "default": "0", + "fieldname": "set_bench_memory_limits", + "fieldtype": "Check", + "label": "Set Bench Memory Limits" + }, + { + "fieldname": "hostname_abbreviation", + "fieldtype": "Data", + "label": "Hostname Abbreviation" + }, + { + "collapsible": 1, + "fieldname": "backups_section", + "fieldtype": "Section Break", + "label": "Backups" + }, + { + "default": "0", + "fieldname": "skip_scheduled_backups", + "fieldtype": "Check", + "label": "Skip Scheduled Backups" + }, + { + "fieldname": "column_break_pdbx", + "fieldtype": "Column Break" + }, + { + "default": "0", + "fieldname": "disable_agent_job_auto_retry", + "fieldtype": "Check", + "label": "Disable Agent Job Auto Retry" + }, + { + "default": "0", + "description": "If user opts DBaaS eg. 
RDS", + "fieldname": "is_managed_database", + "fieldtype": "Check", + "label": "Is Managed Database" + }, + { + "fieldname": "managed_database_service", + "fieldtype": "Link", + "label": "Managed Database Service", + "options": "Managed Database Service" + }, + { + "default": "0", + "fieldname": "public", + "fieldtype": "Check", + "label": "Public" + }, + { + "fieldname": "ignore_incidents_since", + "fieldtype": "Datetime", + "label": "Ignore Incidents Since" + }, + { + "default": "0", + "description": "If checked, server will be used to run Docker builds.", + "fieldname": "use_for_build", + "fieldtype": "Check", + "label": "Use For Build", + "search_index": 1 + }, + { + "default": "50", + "description": "Minimum storage to add automatically each time", + "fieldname": "auto_add_storage_min", + "fieldtype": "Int", + "label": "Auto Add Storage Min", + "non_negative": 1 + }, + { + "default": "250", + "description": "Maximum storage to add automatically each time", + "fieldname": "auto_add_storage_max", + "fieldtype": "Int", + "label": "Auto Add Storage Max", + "non_negative": 1 + }, + { + "fieldname": "mounts_section", + "fieldtype": "Section Break", + "label": "Mounts" + }, + { + "fieldname": "mounts", + "fieldtype": "Table", + "label": "Mounts", + "options": "Server Mount" + }, + { + "default": "0", + "fetch_from": "virtual_machine.has_data_volume", + "fieldname": "has_data_volume", + "fieldtype": "Check", + "label": "Has Data Volume", + "read_only": 1 + } + ], + "links": [], + "modified": "2025-01-02 16:52:55.927591", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Server", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + }, + { + "create": 1, + "read": 1, + "role": "Jcloud Admin", + "write": 1 + }, + { + "create": 1, + "read": 1, + "role": "Jcloud Member", + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "title_field": "title", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/server/server.py b/jcloud/jcloud/pagetype/server/server.py new file mode 100644 index 0000000..2ec173f --- /dev/null +++ b/jcloud/jcloud/pagetype/server/server.py @@ -0,0 +1,2011 @@ +# Copyright (c) 2019, JINGROW +# For license information, please see license.txt + +from __future__ import annotations + +import json +import shlex +import typing +from datetime import timedelta +from functools import cached_property + +import boto3 +import jingrow +from jingrow import _ +from jingrow.core.utils import find, find_all +from jingrow.installer import subprocess +from jingrow.model.document import Document +from jingrow.utils import cint +from jingrow.utils.user import is_system_user + +from jcloud.agent import Agent +from jcloud.api.client import dashboard_whitelist +from jcloud.exceptions import VolumeResizeLimitError +from jcloud.overrides import get_permission_query_conditions_for_pagetype +from jcloud.jcloud.pagetype.resource_tag.tag_helpers import TagHelpers +from jcloud.runner import Ansible +from jcloud.telegram_utils import Telegram +from jcloud.utils import fmt_timedelta, log_error + +if typing.TYPE_CHECKING: + from jcloud.jcloud.pagetype.bench.bench import Bench + from jcloud.jcloud.pagetype.virtual_machine.virtual_machine import VirtualMachine + + +class BaseServer(Document, TagHelpers): + dashboard_fields = ( + "title", + "plan", + "cluster", + "status", + 
"team", + "database_server", + "is_self_hosted", + "auto_add_storage_min", + "auto_add_storage_max", + ) + + @staticmethod + def get_list_query(query): + Server = jingrow.qb.PageType("Server") + + query = query.where(Server.status != "Archived").where(Server.team == jingrow.local.team().name) + results = query.run(as_dict=True) + + for result in results: + db_plan_name = jingrow.db.get_value("Database Server", result.database_server, "plan") + result.db_plan = ( + jingrow.db.get_value( + "Server Plan", db_plan_name, ["title", "price_cny", "price_usd"], as_dict=True + ) + if db_plan_name + else None + ) + + return results + + def get_pg(self, pg): + from jcloud.api.client import get + from jcloud.api.server import usage + + if self.plan: + pg.current_plan = get("Server Plan", self.plan) + else: + if virtual_machine := jingrow.db.get_value( + "Virtual Machine", self.virtual_machine, ["vcpu", "ram", "disk_size"], as_dict=True + ): + pg.current_plan = { + "vcpu": virtual_machine.vcpu, + "memory": virtual_machine.ram, + "disk": virtual_machine.disk_size, + } + + pg.storage_plan = jingrow.db.get_value( + "Server Storage Plan", + {"enabled": 1}, + ["price_cny", "price_usd"], + as_dict=True, + ) + pg.usage = usage(self.name) + pg.actions = self.get_actions() + pg.disk_size = jingrow.db.get_value("Virtual Machine", self.virtual_machine, "disk_size") + pg.replication_server = jingrow.db.get_value( + "Database Server", + {"primary": pg.database_server, "is_replication_setup": 1}, + "name", + ) + + return pg + + @dashboard_whitelist() + def increase_disk_size_for_server( + self, server: str, increment: int, mountpoint: str | None = None + ) -> None: + if server == self.name: + self.increase_disk_size(increment=increment, mountpoint=mountpoint) + self.create_subscription_for_storage(increment) + else: + server_pg = jingrow.get_pg("Database Server", server) + server_pg.increase_disk_size(increment=increment, mountpoint=mountpoint) + server_pg.create_subscription_for_storage(increment) + + @dashboard_whitelist() + def configure_auto_add_storage(self, server: str, min: int, max: int) -> None: + if min < 0 or max < 0: + jingrow.throw(_("Minimum and maximum storage sizes must be positive")) + if min > max: + jingrow.throw(_("Minimum storage size must be less than the maximum storage size")) + + if server == self.name: + self.auto_add_storage_min = min + self.auto_add_storage_max = max + self.save() + else: + server_pg = jingrow.get_pg("Database Server", server) + server_pg.auto_add_storage_min = min + server_pg.auto_add_storage_max = max + server_pg.save() + + @staticmethod + def on_not_found(name): + # If name is of a db server then redirect to the app server + app_server = jingrow.db.get_value("Server", {"database_server": name}, "name") + if app_server: + jingrow.response.message = { + "redirect": f"/dashboard/servers/{app_server}", + } + raise + + def get_actions(self): + server_type = "" + if self.pagetype == "Server": + server_type = "application server" + elif self.pagetype == "Database Server": + if self.is_replication_setup: + server_type = "replication server" + else: + server_type = "database server" + + actions = [ + { + "action": "Rename server", + "description": f"Rename the {server_type}", + "button_label": "Rename", + "condition": self.status == "Active", + "pg_method": "rename", + "group": f"{server_type.title()} Actions", + }, + { + "action": "Reboot server", + "description": f"Reboot the {server_type}", + "button_label": "Reboot", + "condition": self.status == "Active", + "pg_method": 
"reboot", + "group": f"{server_type.title()} Actions", + }, + { + "action": "Drop server", + "description": "Drop both the application and database servers", + "button_label": "Drop", + "condition": self.status == "Active" and self.pagetype == "Server", + "pg_method": "drop_server", + "group": "Dangerous Actions", + }, + ] + + for action in actions: + action["server_pagetype"] = self.pagetype + action["server_name"] = self.name + + return [action for action in actions if action.get("condition", True)] + + @dashboard_whitelist() + def drop_server(self): + if self.pagetype == "Database Server": + app_server_name = jingrow.db.get_value("Server", {"database_server": self.name}, "name") + app_server = jingrow.get_pg("Server", app_server_name) + db_server = self + else: + app_server = self + db_server = jingrow.get_pg("Database Server", self.database_server) + + app_server.archive() + db_server.archive() + + def autoname(self): + if not self.domain: + self.domain = jingrow.db.get_single_value("Jcloud Settings", "domain") + self.name = f"{self.hostname}.{self.domain}" + if self.pagetype in ["Database Server", "Server", "Proxy Server"] and self.is_self_hosted: + self.name = f"{self.hostname}.{self.self_hosted_server_domain}" + + def validate(self): + self.validate_cluster() + self.validate_agent_password() + if self.pagetype == "Database Server" and not self.self_hosted_mariadb_server: + self.self_hosted_mariadb_server = self.private_ip + + if not self.hostname_abbreviation: + self._set_hostname_abbreviation() + + self.validate_mounts() + + def _set_hostname_abbreviation(self): + self.hostname_abbreviation = get_hostname_abbreviation(self.hostname) + + def after_insert(self): + if self.ip and ( + self.pagetype not in ["Database Server", "Server", "Proxy Server"] or not self.is_self_hosted + ): + self.create_dns_record() + self.update_virtual_machine_name() + + def create_dns_record(self): + try: + domain = jingrow.get_pg("Root Domain", self.domain) + client = boto3.client( + "route53", + aws_access_key_id=domain.aws_access_key_id, + aws_secret_access_key=domain.get_password("aws_secret_access_key"), + ) + zones = client.list_hosted_zones_by_name()["HostedZones"] + # list_hosted_zones_by_name returns a lexicographically ordered list of zones + # i.e. 
x.example.com comes after example.com + # Name field has a trailing dot + hosted_zone = find(reversed(zones), lambda x: domain.name.endswith(x["Name"][:-1]))["Id"] + client.change_resource_record_sets( + ChangeBatch={ + "Changes": [ + { + "Action": "UPSERT", + "ResourceRecordSet": { + "Name": self.name, + "Type": "A", + "TTL": 3600 if self.pagetype == "Proxy Server" else 300, + "ResourceRecords": [{"Value": self.ip}], + }, + } + ] + }, + HostedZoneId=hosted_zone, + ) + except Exception: + log_error("Route 53 Record Creation Error", domain=domain.name, server=self.name) + + @jingrow.whitelist() + def enable_server_for_new_benches_and_site(self): + if not self.public: + jingrow.throw("Action only allowed for public servers") + + server = self.get_server_enabled_for_new_benches_and_sites() + + if server: + jingrow.msgprint(_("Server {0} is already enabled for new benches and sites").format(server)) + + else: + self.use_for_new_benches = True + self.use_for_new_sites = True + self.save() + + def get_server_enabled_for_new_benches_and_sites(self): + return jingrow.db.get_value( + "Server", + { + "name": ("!=", self.name), + "is_primary": True, + "status": "Active", + "use_for_new_benches": True, + "use_for_new_sites": True, + "public": True, + "cluster": self.cluster, + }, + pluck="name", + ) + + @jingrow.whitelist() + def disable_server_for_new_benches_and_site(self): + self.use_for_new_benches = False + self.use_for_new_sites = False + self.save() + + def validate_cluster(self): + if not self.cluster: + self.cluster = jingrow.db.get_value("Root Domain", self.domain, "default_cluster") + if not self.cluster: + jingrow.throw("Default Cluster not found", jingrow.ValidationError) + + def validate_agent_password(self): + if not self.agent_password: + self.agent_password = jingrow.generate_hash(length=32) + + def get_agent_repository_url(self): + settings = jingrow.get_single("Jcloud Settings") + repository_owner = settings.agent_repository_owner or "jingrow" + return f"http://git.jingrow.com:3000/{repository_owner}/agent" + + def get_agent_repository_branch(self): + settings = jingrow.get_single("Jcloud Settings") + return settings.branch or "master" + + @jingrow.whitelist() + def ping_agent(self): + agent = Agent(self.name, self.pagetype) + return agent.ping() + + @jingrow.whitelist() + def ping_agent_job(self): + agent = Agent(self.name, self.pagetype) + return agent.create_agent_job("Ping Job", "ping_job").name + + @jingrow.whitelist() + def update_agent(self): + agent = Agent(self.name, self.pagetype) + return agent.update() + + @jingrow.whitelist() + def prepare_server(self): + jingrow.enqueue_pg(self.pagetype, self.name, "_prepare_server", queue="long", timeout=2400) + + def _prepare_server(self): + try: + if self.provider == "Scaleway": + ansible = Ansible( + playbook="scaleway.yml", + server=self, + user="ubuntu", + variables={ + "private_ip": self.private_ip, + "private_mac_address": self.private_mac_address, + "private_vlan_id": self.private_vlan_id, + }, + ) + elif self.provider == "AWS EC2": + ansible = Ansible(playbook="aws.yml", server=self, user="ubuntu") + elif self.provider == "OCI": + ansible = Ansible(playbook="oci.yml", server=self, user="ubuntu") + + ansible.run() + self.reload() + self.is_server_prepared = True + self.save() + except Exception: + log_error("Server Preparation Exception", server=self.as_dict()) + + @jingrow.whitelist() + def setup_server(self): + self.status = "Installing" + self.save() + jingrow.enqueue_pg(self.pagetype, self.name, "_setup_server", 
queue="long", timeout=2400) + + @jingrow.whitelist() + def install_nginx(self): + self.status = "Installing" + self.save() + jingrow.enqueue_pg(self.pagetype, self.name, "_install_nginx", queue="long", timeout=1200) + + def _install_nginx(self): + try: + ansible = Ansible( + playbook="nginx.yml", + server=self, + ) + play = ansible.run() + self.reload() + if play.status == "Success": + self.status = "Active" + else: + self.status = "Broken" + except Exception: + self.status = "Broken" + log_error("NGINX Install Exception", server=self.as_dict()) + self.save() + + @jingrow.whitelist() + def install_filebeat(self): + jingrow.enqueue_pg(self.pagetype, self.name, "_install_filebeat", queue="long", timeout=1200) + + def _install_filebeat(self): + log_server = jingrow.db.get_single_value("Jcloud Settings", "log_server") + if log_server: + kibana_password = jingrow.get_pg("Log Server", log_server).get_password("kibana_password") + else: + kibana_password = None + + try: + ansible = Ansible( + playbook="filebeat.yml", + server=self, + user=self._ssh_user(), + port=self._ssh_port(), + variables={ + "server_type": self.pagetype, + "server": self.name, + "log_server": log_server, + "kibana_password": kibana_password, + }, + ) + ansible.run() + except Exception: + log_error("Filebeat Install Exception", server=self.as_dict()) + + @jingrow.whitelist() + def install_exporters(self): + jingrow.enqueue_pg(self.pagetype, self.name, "_install_exporters", queue="long", timeout=1200) + + @jingrow.whitelist() + def ping_ansible(self): + try: + ansible = Ansible( + playbook="ping.yml", + server=self, + user=self._ssh_user(), + port=self._ssh_port(), + ) + ansible.run() + except Exception: + log_error("Server Ping Exception", server=self.as_dict()) + + @jingrow.whitelist() + def update_agent_ansible(self): + jingrow.enqueue_pg(self.pagetype, self.name, "_update_agent_ansible") + + def _update_agent_ansible(self): + try: + ansible = Ansible( + playbook="update_agent.yml", + variables={ + "agent_repository_url": self.get_agent_repository_url(), + "agent_repository_branch": self.get_agent_repository_branch(), + }, + server=self, + user=self._ssh_user(), + port=self._ssh_port(), + ) + ansible.run() + except Exception: + log_error("Agent Update Exception", server=self.as_dict()) + + @jingrow.whitelist() + def fetch_keys(self): + try: + ansible = Ansible(playbook="keys.yml", server=self) + ansible.run() + except Exception: + log_error("Server Key Fetch Exception", server=self.as_dict()) + + @jingrow.whitelist() + def ping_ansible_unprepared(self): + try: + if self.provider == "Scaleway" or self.provider in ("AWS EC2", "OCI"): + ansible = Ansible( + playbook="ping.yml", + server=self, + user="ubuntu", + ) + ansible.run() + except Exception: + log_error("Unprepared Server Ping Exception", server=self.as_dict()) + + @jingrow.whitelist() + def cleanup_unused_files(self): + if self.is_build_server(): + return + + jingrow.enqueue_pg(self.pagetype, self.name, "_cleanup_unused_files", queue="long", timeout=2400) + + def is_build_server(self) -> bool: + # Not a field in all subclasses + if getattr(self, "use_for_build", False): + return True + + name = jingrow.db.get_single_value("Jcloud Settings", "build_server") + if name == self.name: + return True + + # Whether build_server explicitly set on Release Group + count = jingrow.db.count( + "Release Group", + { + "enabled": True, + "build_server": self.name, + }, + ) + if isinstance(count, (int, float)): + return count > 0 + return False + + def _cleanup_unused_files(self): + 
agent = Agent(self.name, self.pagetype) + if agent.should_skip_requests(): + return + agent.cleanup_unused_files() + + def on_trash(self): + plays = jingrow.get_all("Ansible Play", filters={"server": self.name}) + for play in plays: + jingrow.delete_pg("Ansible Play", play.name) + + def break_glass(self): + """ + Remove glass file with simple ssh command to make free space + + Space is required for playbooks to run, growpart command, etc. + """ + try: + subprocess.check_output( + shlex.split( + f"ssh -o BatchMode=yes -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null root@{self.ip} -t rm /root/glass" + ), + stderr=subprocess.STDOUT, + ) + except subprocess.CalledProcessError as e: + log_error(f"Error removing glassfile: {e.output.decode()}") + + @jingrow.whitelist() + def extend_ec2_volume(self, device=None): + if self.provider not in ("AWS EC2", "OCI"): + return + # Restart MariaDB if MariaDB disk is full + mountpoint = self.guess_data_disk_mountpoint() + restart_mariadb = self.pagetype == "Database Server" and self.is_disk_full( + mountpoint + ) # check before breaking glass to ensure state of mariadb + self.break_glass() + if not device: + # Try the best guess. Try extending the data volume + volume = self.find_mountpoint_volume(mountpoint) + device = self.get_device_from_volume_id(volume.volume_id) + try: + ansible = Ansible( + playbook="extend_ec2_volume.yml", + server=self, + variables={"restart_mariadb": restart_mariadb, "device": device}, + ) + ansible.run() + except Exception: + log_error("EC2 Volume Extend Exception", server=self.as_dict()) + + def enqueue_extend_ec2_volume(self, device): + jingrow.enqueue_pg(self.pagetype, self.name, "extend_ec2_volume", device=device) + + @cached_property + def time_to_wait_before_updating_volume(self) -> timedelta | int: + if self.provider != "AWS EC2": + return 0 + if not ( + last_updated_at := jingrow.get_value( + "Virtual Machine Volume", + {"parent": self.virtual_machine, "idx": 1}, # first volume is likely main + "last_updated_at", + ) + ): + return 0 + diff = jingrow.utils.now_datetime() - last_updated_at + return diff if diff < timedelta(hours=6) else 0 + + @jingrow.whitelist() + def increase_disk_size(self, increment=50, mountpoint=None) -> bool: + if self.provider not in ("AWS EC2", "OCI"): + return + if self.provider == "AWS EC2" and self.time_to_wait_before_updating_volume: + jingrow.throw( + f"Please wait {fmt_timedelta(self.time_to_wait_before_updating_volume)} before resizing volume", + VolumeResizeLimitError, + ) + if not mountpoint: + mountpoint = self.guess_data_disk_mountpoint() + + volume = self.find_mountpoint_volume(mountpoint) + + virtual_machine: "VirtualMachine" = jingrow.get_pg("Virtual Machine", self.virtual_machine) + virtual_machine.increase_disk_size(volume.volume_id, increment) + if self.provider == "AWS EC2": + device = self.get_device_from_volume_id(volume.volume_id) + self.enqueue_extend_ec2_volume(device) + elif self.provider == "OCI": + # TODO: Add support for volumes on OCI + # Non-boot volumes might not need resize + self.break_glass() + self.reboot() + + def guess_data_disk_mountpoint(self) -> str: + if not self.has_data_volume: + return "/" + + volumes = self.get_volume_mounts() + if volumes: + if self.pagetype == "Server": + mountpoint = "/opt/volumes/benches" + elif self.pagetype == "Database Server": + mountpoint = "/opt/volumes/mariadb" + else: + mountpoint = "/" + return mountpoint + + def find_mountpoint_volume(self, mountpoint): + machine: "VirtualMachine" = jingrow.get_pg("Virtual 
Machine", self.virtual_machine) + + if len(machine.volumes) == 1: + # If there is only one volume, + # then all mountpoints are on the same volume + return machine.volumes[0] + + volumes = self.get_volume_mounts() + volume = find(volumes, lambda x: x.mount_point == mountpoint) + if volume: + # If the volume is in `mounts`, that means it's a data volume + return volume + # Otherwise it's a root volume + return find(machine.volumes, lambda v: v.device == "/dev/sda1") + + def update_virtual_machine_name(self): + if self.provider not in ("AWS EC2", "OCI"): + return None + virtual_machine = jingrow.get_pg("Virtual Machine", self.virtual_machine) + return virtual_machine.update_name_tag(self.name) + + def create_subscription(self, plan): + self._create_initial_plan_change(plan) + + def _create_initial_plan_change(self, plan): + jingrow.get_pg( + { + "pagetype": "Plan Change", + "document_type": self.pagetype, + "document_name": self.name, + "from_plan": "", + "to_plan": plan, + "type": "Initial Plan", + "timestamp": self.creation, + } + ).insert(ignore_permissions=True) + + @property + def subscription(self): + name = jingrow.db.get_value( + "Subscription", + { + "document_type": self.pagetype, + "document_name": self.name, + "team": self.team, + "plan_type": "Server Plan", + }, + ) + return jingrow.get_pg("Subscription", name) if name else None + + @property + def add_on_storage_subscription(self): + name = jingrow.db.get_value( + "Subscription", + { + "document_type": self.pagetype, + "document_name": self.name, + "team": self.team, + "plan_type": "Server Storage Plan", + }, + ) + return jingrow.get_pg("Subscription", name) if name else None + + def create_subscription_for_storage(self, increment: int) -> None: + plan_type = "Server Storage Plan" + plan = jingrow.get_value(plan_type, {"enabled": 1}, "name") + + if existing_subscription := jingrow.db.get_value( + "Subscription", + { + "document_type": self.pagetype, + "document_name": self.name, + "team": self.team, + "plan_type": plan_type, + "plan": plan, + }, + ["name", "additional_storage"], + as_dict=True, + ): + jingrow.db.set_value( + "Subscription", + existing_subscription.name, + "additional_storage", + increment + cint(existing_subscription.additional_storage), + ) + else: + jingrow.get_pg( + { + "pagetype": "Subscription", + "document_type": self.pagetype, + "document_name": self.name, + "team": self.team, + "plan_type": plan_type, + "plan": plan, + "additional_storage": increment, + } + ).insert() + + @jingrow.whitelist() + def rename_server(self): + self.status = "Installing" + self.save() + jingrow.enqueue_pg(self.pagetype, self.name, "_rename_server", queue="long", timeout=2400) + + @jingrow.whitelist() + def archive(self): + if jingrow.get_all( + "Site", + filters={"server": self.name, "status": ("!=", "Archived")}, + ignore_ifnull=True, + ): + jingrow.throw(_("Cannot archive server with sites")) + if jingrow.get_all( + "Bench", + filters={"server": self.name, "status": ("!=", "Archived")}, + ignore_ifnull=True, + ): + jingrow.throw(_("Cannot archive server with benches")) + self.status = "Pending" + self.save() + if self.is_self_hosted: + self.status = "Archived" + self.save() + + if self.pagetype == "Server": + jingrow.db.set_value("Self Hosted Server", {"server": self.name}, "status", "Archived") + + else: + jingrow.enqueue_pg(self.pagetype, self.name, "_archive", queue="long") + self.disable_subscription() + + jingrow.db.delete("Jcloud Role Permission", {"server": self.name}) + + def _archive(self): + 
self.run_jcloud_job("Archive Server") + + def disable_subscription(self): + subscription = self.subscription + if subscription: + subscription.disable() + + # disable add-on storage subscription + add_on_storage_subscription = self.add_on_storage_subscription + if add_on_storage_subscription: + add_on_storage_subscription.disable() + + def can_change_plan(self, ignore_card_setup): + if is_system_user(jingrow.session.user): + return + + if ignore_card_setup: + return + + team = jingrow.get_pg("Team", self.team) + + if team.parent_team: + team = jingrow.get_pg("Team", team.parent_team) + + if team.payment_mode == "Paid By Partner" and team.billing_team: + team = jingrow.get_pg("Team", team.billing_team) + + if team.is_defaulter(): + jingrow.throw("Cannot change plan because you have unpaid invoices") + + if not (team.default_payment_method or team.get_balance()): + jingrow.throw("Cannot change plan because you haven't added a card and not have enough balance") + + @dashboard_whitelist() + def change_plan(self, plan, ignore_card_setup=False): + self.can_change_plan(ignore_card_setup) + plan = jingrow.get_pg("Server Plan", plan) + self._change_plan(plan) + self.run_jcloud_job("Resize Server", {"machine_type": plan.instance_type}) + + def _change_plan(self, plan): + self.ram = plan.memory + self.save() + self.reload() + jingrow.get_pg( + { + "pagetype": "Plan Change", + "document_type": self.pagetype, + "document_name": self.name, + "from_plan": self.plan, + "to_plan": plan.name, + } + ).insert() + + @jingrow.whitelist() + def create_image(self): + self.run_jcloud_job("Create Server Snapshot") + + def run_jcloud_job(self, job_name, arguments=None): + if arguments is None: + arguments = {} + return jingrow.get_pg( + { + "pagetype": "Jcloud Job", + "job_type": job_name, + "server_type": self.pagetype, + "server": self.name, + "virtual_machine": self.virtual_machine, + "arguments": json.dumps(arguments, indent=2, sort_keys=True), + } + ).insert() + + def get_certificate(self): + certificate_name = jingrow.db.get_value( + "TLS Certificate", {"wildcard": True, "domain": self.domain}, "name" + ) + + if not certificate_name and self.is_self_hosted: + certificate_name = jingrow.db.get_value("TLS Certificate", {"domain": f"{self.name}"}, "name") + + if not certificate_name: + self_hosted_server = jingrow.db.get_value( + "Self Hosted Server", {"server": self.name}, ["hostname", "domain"], as_dict=1 + ) + + certificate_name = jingrow.db.get_value( + "TLS Certificate", + {"domain": f"{self_hosted_server.hostname}.{self_hosted_server.domain}"}, + "name", + ) + + return jingrow.get_pg("TLS Certificate", certificate_name) + + def get_log_server(self): + log_server = jingrow.db.get_single_value("Jcloud Settings", "log_server") + if log_server: + kibana_password = jingrow.get_pg("Log Server", log_server).get_password("kibana_password") + else: + kibana_password = None + return log_server, kibana_password + + def get_monitoring_password(self): + return jingrow.get_pg("Cluster", self.cluster).get_password("monitoring_password") + + @jingrow.whitelist() + def increase_swap(self, swap_size=4): + jingrow.enqueue_pg( + self.pagetype, + self.name, + "_increase_swap", + queue="long", + timeout=1200, + **{"swap_size": swap_size}, + ) + + def add_glass_file(self): + jingrow.enqueue_pg(self.pagetype, self.name, "_add_glass_file") + + def _add_glass_file(self): + try: + ansible = Ansible(playbook="glass_file.yml", server=self) + ansible.run() + except Exception: + log_error("Add Glass File Exception", pg=self) + + def 
_increase_swap(self, swap_size=4): + """Increase swap by size defined in playbook""" + from jcloud.api.server import calculate_swap + + existing_swap_size = calculate_swap(self.name).get("swap", 0) + # We used to do 4 GB minimum swap files, to avoid conflict, name files accordingly + swap_file_name = "swap" + str(int((existing_swap_size // 4) + 1)) + try: + ansible = Ansible( + playbook="increase_swap.yml", + server=self, + variables={ + "swap_size": swap_size, + "swap_file": swap_file_name, + }, + ) + ansible.run() + except Exception: + log_error("Increase swap exception", pg=self) + + @jingrow.whitelist() + def setup_mysqldump(self): + jingrow.enqueue_pg(self.pagetype, self.name, "_setup_mysqldump") + + def _setup_mysqldump(self): + try: + ansible = Ansible( + playbook="mysqldump.yml", + server=self, + ) + ansible.run() + except Exception: + log_error("MySQLdump Setup Exception", pg=self) + + @jingrow.whitelist() + def set_swappiness(self): + jingrow.enqueue_pg(self.pagetype, self.name, "_set_swappiness") + + def _set_swappiness(self): + try: + ansible = Ansible( + playbook="swappiness.yml", + server=self, + ) + ansible.run() + except Exception: + log_error("Swappiness Setup Exception", pg=self) + + def update_filebeat(self): + jingrow.enqueue_pg(self.pagetype, self.name, "_update_filebeat") + + def _update_filebeat(self): + try: + ansible = Ansible( + playbook="filebeat_update.yml", + server=self, + ) + ansible.run() + except Exception: + log_error("Filebeat Update Exception", pg=self) + + @jingrow.whitelist() + def update_tls_certificate(self): + from jcloud.jcloud.pagetype.tls_certificate.tls_certificate import ( + update_server_tls_certifcate, + ) + + filters = {"wildcard": True, "status": "Active", "domain": self.domain} + + if ( + hasattr(self, "is_self_hosted") + and self.is_self_hosted + and self.domain != self.self_hosted_server_domain + ): + filters["domain"] = self.name + del filters["wildcard"] + + certificate = jingrow.get_last_pg("TLS Certificate", filters) + + update_server_tls_certifcate(self, certificate) + + @jingrow.whitelist() + def show_agent_password(self): + return self.get_password("agent_password") + + @property + def agent(self): + return Agent(self.name, server_type=self.pagetype) + + @jingrow.whitelist() + def fetch_security_updates(self): + from jcloud.jcloud.pagetype.security_update.security_update import SecurityUpdate + + jingrow.enqueue(SecurityUpdate.fetch_security_updates, server_obj=self) + + @jingrow.whitelist() + def configure_ssh_logging(self): + try: + ansible = Ansible( + playbook="configure_ssh_logging.yml", + server=self, + ) + ansible.run() + except Exception: + log_error("Set SSH Session Logging Exception", server=self.as_dict()) + + @property + def real_ram(self): + """Ram detected by OS after h/w reservation""" + return 0.972 * self.ram - 218 + + @jingrow.whitelist() + def reboot_with_serial_console(self): + if self.provider != "AWS EC2": + raise NotImplementedError + console = jingrow.new_pg("Serial Console Log") + console.server_type = self.pagetype + console.server = self.name + console.virtual_machine = self.virtual_machine + console.action = "reboot" + console.save() + console.reload() + console.run_sysrq() + + @dashboard_whitelist() + def reboot(self): + if self.provider in ("AWS EC2", "OCI"): + virtual_machine = jingrow.get_pg("Virtual Machine", self.virtual_machine) + virtual_machine.reboot() + + @dashboard_whitelist() + def rename(self, title): + self.title = title + self.save() + + def validate_mounts(self): + if not 
self.virtual_machine: + return + machine = jingrow.get_pg("Virtual Machine", self.virtual_machine) + if machine.has_data_volume and len(machine.volumes) > 1 and not self.mounts: + self.fetch_volumes_from_virtual_machine() + self.set_default_mount_points() + self.set_mount_properties() + + def fetch_volumes_from_virtual_machine(self): + machine = jingrow.get_pg("Virtual Machine", self.virtual_machine) + for volume in machine.volumes: + if volume.device == "/dev/sda1": + # Skip root volume. This is for AWS other providers may have different root volume + continue + self.append("mounts", {"volume_id": volume.volume_id}) + + def set_default_mount_points(self): + first = self.mounts[0] + if self.pagetype == "Server": + first.mount_point = "/opt/volumes/benches" + self.append( + "mounts", + { + "mount_type": "Bind", + "mount_point": "/home/jingrow/benches", + "source": "/opt/volumes/benches/home/jingrow/benches", + "mount_point_owner": "jingrow", + "mount_point_group": "jingrow", + }, + ) + elif self.pagetype == "Database Server": + first.mount_point = "/opt/volumes/mariadb" + self.append( + "mounts", + { + "mount_type": "Bind", + "mount_point": "/var/lib/mysql", + "source": "/opt/volumes/mariadb/var/lib/mysql", + }, + ) + self.append( + "mounts", + { + "mount_type": "Bind", + "mount_point": "/etc/mysql", + "source": "/opt/volumes/mariadb/etc/mysql", + }, + ) + + def set_mount_properties(self): + for mount in self.mounts: + # set_defaults doesn't seem to work on children in a controller hook + default_fields = find_all(jingrow.get_meta("Server Mount").fields, lambda x: x.default) + for field in default_fields: + fieldname = field.fieldname + if not mount.get(fieldname): + mount.set(fieldname, field.default) + + mount_options = "defaults,nofail" # Set default mount options + if mount.mount_options: + mount_options = f"{mount_options},{mount.mount_options}" + + mount.mount_options = mount_options + if mount.mount_type == "Bind": + mount.filesystem = "none" + mount.mount_options = f"{mount.mount_options},bind" + + if mount.volume_id: + # EBS volumes are named by their volume id + # There's likely a better way to do this + # https://docs.aws.amazon.com/ebs/latest/userguide/ebs-using-volumes.html + stripped_id = mount.volume_id.replace("-", "") + mount.source = self.get_device_from_volume_id(mount.volume_id) + if not mount.mount_point: + # If we don't know where to mount, mount it in /mnt/ + mount.mount_point = f"/mnt/{stripped_id}" + + def get_device_from_volume_id(self, volume_id): + stripped_id = volume_id.replace("-", "") + return f"/dev/disk/by-id/nvme-Amazon_Elastic_Block_Store_{stripped_id}" + + def get_mount_variables(self): + return { + "all_mounts_json": json.dumps([mount.as_dict() for mount in self.mounts], indent=4, default=str), + "volume_mounts_json": json.dumps( + self.get_volume_mounts(), + indent=4, + default=str, + ), + "bind_mounts_json": json.dumps( + [mount.as_dict() for mount in self.mounts if mount.mount_type == "Bind"], + indent=4, + default=str, + ), + } + + def get_volume_mounts(self): + return [mount.as_dict() for mount in self.mounts if mount.mount_type == "Volume"] + + @jingrow.whitelist() + def mount_volumes(self): + jingrow.enqueue_pg(self.pagetype, self.name, "_mount_volumes", queue="short", timeout=1200) + + def _mount_volumes(self): + try: + ansible = Ansible( + playbook="mount.yml", + server=self, + variables={**self.get_mount_variables()}, + ) + play = ansible.run() + self.reload() + if self._set_mount_status(play): + self.save() + except Exception: + 
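+ # Failures while running mount.yml are only logged here; per-mount
+ # Success/Failure status and block-device UUIDs are recorded by
+ # _set_mount_status() from the Ansible task results above.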
log_error("Server Mount Exception", server=self.as_dict()) + + def _set_mount_status(self, play): + tasks = jingrow.get_all( + "Ansible Task", + ["result", "task"], + { + "play": play.name, + "status": ("in", ("Success", "Failure")), + "task": ("in", ("Mount Volumes", "Mount Bind Mounts", "Show Block Device UUIDs")), + }, + ) + mounts_changed = False + for task in tasks: + result = json.loads(task.result) + for row in result.get("results", []): + mount = find(self.mounts, lambda x: x.name == row.get("item", {}).get("name")) + if not mount: + mount = find( + self.mounts, lambda x: x.name == row.get("item", {}).get("item", {}).get("name") + ) + if not mount: + continue + if task.task == "Show Block Device UUIDs": + mount.uuid = row.get("stdout", "").strip() + mounts_changed = True + else: + mount_status = {True: "Failure", False: "Success"}[row.get("failed", False)] + if mount.status != mount_status: + mount.status = mount_status + mounts_changed = True + return mounts_changed + + def wait_for_cloud_init(self): + jingrow.enqueue_pg( + self.pagetype, + self.name, + "_wait_for_cloud_init", + queue="short", + ) + + def _wait_for_cloud_init(self): + try: + ansible = Ansible( + playbook="wait_for_cloud_init.yml", + server=self, + ) + ansible.run() + except Exception: + log_error("Cloud Init Wait Exception", server=self.as_dict()) + + def free_space(self, mountpoint: str) -> int: + from jcloud.api.server import prometheus_query + + response = prometheus_query( + f"""node_filesystem_avail_bytes{{instance="{self.name}", job="node", mountpoint="{mountpoint}"}}""", + lambda x: x["mountpoint"], + "Asia/Kolkata", + 60, + 60, + )["datasets"] + if response: + return response[0]["values"][-1] + return 50 * 1024 * 1024 * 1024 # Assume 50GB free space + + def is_disk_full(self, mountpoint: str) -> bool: + return self.free_space(mountpoint) == 0 + + def space_available_in_6_hours(self, mountpoint: str) -> int: + from jcloud.api.server import prometheus_query + + response = prometheus_query( + f"""predict_linear( +node_filesystem_avail_bytes{{instance="{self.name}", mountpoint="{mountpoint}"}}[3h], 6*3600 + )""", + lambda x: x["mountpoint"], + "Asia/Kolkata", + 120, + 120, + )["datasets"] + if not response: + return -20 * 1024 * 1024 * 1024 + return response[0]["values"][-1] + + def disk_capacity(self, mountpoint: str) -> int: + from jcloud.api.server import prometheus_query + + response = prometheus_query( + f"""node_filesystem_size_bytes{{instance="{self.name}", job="node", mountpoint="{mountpoint}"}}""", + lambda x: x["mountpoint"], + "Asia/Kolkata", + 120, + 120, + )["datasets"] + if response: + return response[0]["values"][-1] + return jingrow.db.get_value("Virtual Machine", self.virtual_machine, "disk_size") * 1024 * 1024 * 1024 + + def size_to_increase_by_for_20_percent_available(self, mountpoint: str): # min 50 GB, max 250 GB + return int( + min( + self.auto_add_storage_max, + max( + self.auto_add_storage_min, + abs(self.disk_capacity(mountpoint) - self.space_available_in_6_hours(mountpoint) * 5) + / 4 + / 1024 + / 1024 + / 1024, + ), + ) + ) + + def calculated_increase_disk_size( + self, + additional: int = 0, + mountpoint: str | None = None, + ): + telegram = Telegram("Information") + buffer = self.size_to_increase_by_for_20_percent_available(mountpoint) + telegram.send( + f"Increasing disk (mount point {mountpoint})on [{self.name}]({jingrow.utils.get_url_to_form(self.pagetype, self.name)}) by {buffer + additional}G" + ) + self.increase_disk_size_for_server(self.name, buffer + additional, mountpoint) 
+ + def prune_docker_system(self): + jingrow.enqueue_pg( + self.pagetype, + self.name, + "_prune_docker_system", + queue="long", + timeout=8000, + ) + + def _prune_docker_system(self): + try: + ansible = Ansible( + playbook="docker_system_prune.yml", + server=self, + ) + ansible.run() + except Exception: + log_error("Prune Docker System Exception", pg=self) + + def reload_nginx(self): + agent = Agent(self.name, server_type=self.pagetype) + agent.reload_nginx() + + def _ssh_user(self): + if not hasattr(self, "ssh_user"): + return "root" + return self.ssh_user or "root" + + def _ssh_port(self): + if not hasattr(self, "ssh_port"): + return 22 + return self.ssh_port or 22 + + def get_primary_jingrow_public_key(self): + if primary_public_key := jingrow.db.get_value(self.pagetype, self.primary, "jingrow_public_key"): + return primary_public_key + + primary = jingrow.get_pg(self.pagetype, self.primary) + ansible = Ansible( + playbook="fetch_jingrow_public_key.yml", + server=primary, + ) + play = ansible.run() + if play.status == "Success": + return jingrow.db.get_value(self.pagetype, self.primary, "jingrow_public_key") + jingrow.throw(f"Failed to fetch {primary.name}'s Jingrow public key") + return None + + def copy_files(self, source, destination): + jingrow.enqueue_pg( + self.pagetype, + self.name, + "_copy_files", + source=source, + destination=destination, + queue="long", + timeout=7200, + ) + + def _copy_files(self, source, destination): + try: + ansible = Ansible( + playbook="copy.yml", + server=self, + variables={ + "source": source, + "destination": destination, + }, + ) + ansible.run() + except Exception: + log_error("Sever File Copy Exception", server=self.as_dict()) + + +class Server(BaseServer): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + from jcloud.jcloud.pagetype.resource_tag.resource_tag import ResourceTag + from jcloud.jcloud.pagetype.server_mount.server_mount import ServerMount + + agent_password: DF.Password | None + auto_add_storage_max: DF.Int + auto_add_storage_min: DF.Int + cluster: DF.Link | None + database_server: DF.Link | None + disable_agent_job_auto_retry: DF.Check + domain: DF.Link | None + jingrow_public_key: DF.Code | None + jingrow_user_password: DF.Password | None + has_data_volume: DF.Check + hostname: DF.Data + hostname_abbreviation: DF.Data | None + ignore_incidents_since: DF.Datetime | None + ip: DF.Data | None + is_managed_database: DF.Check + is_primary: DF.Check + is_replication_setup: DF.Check + is_self_hosted: DF.Check + is_server_prepared: DF.Check + is_server_renamed: DF.Check + is_server_setup: DF.Check + is_standalone: DF.Check + is_standalone_setup: DF.Check + is_upstream_setup: DF.Check + managed_database_service: DF.Link | None + mounts: DF.Table[ServerMount] + new_worker_allocation: DF.Check + plan: DF.Link | None + primary: DF.Link | None + private_ip: DF.Data | None + private_mac_address: DF.Data | None + private_vlan_id: DF.Data | None + provider: DF.Literal["Generic", "Scaleway", "AWS EC2", "OCI"] + proxy_server: DF.Link | None + public: DF.Check + ram: DF.Float + root_public_key: DF.Code | None + self_hosted_mariadb_root_password: DF.Password | None + self_hosted_mariadb_server: DF.Data | None + self_hosted_server_domain: DF.Data | None + set_bench_memory_limits: DF.Check + skip_scheduled_backups: DF.Check + ssh_port: DF.Int + ssh_user: DF.Data | None + staging: DF.Check + status: DF.Literal["Pending", "Installing", "Active", "Broken", "Archived"] + tags: DF.Table[ResourceTag] + team: DF.Link | None + title: DF.Data | None + use_for_build: DF.Check + use_for_new_benches: DF.Check + use_for_new_sites: DF.Check + virtual_machine: DF.Link | None + # end: auto-generated types + + GUNICORN_MEMORY = 150 # avg ram usage of 1 gunicorn worker + BACKGROUND_JOB_MEMORY = 3 * 80 # avg ram usage of 3 sets of bg workers + + def on_update(self): + # If Database Server is changed for the server then change it for all the benches + if not self.is_new() and ( + self.has_value_changed("database_server") or self.has_value_changed("managed_database_service") + ): + benches = jingrow.get_all("Bench", {"server": self.name, "status": ("!=", "Archived")}) + for bench in benches: + bench = jingrow.get_pg("Bench", bench) + bench.database_server = self.database_server + bench.managed_database_service = self.managed_database_service + bench.save() + + if self.database_server: + database_server_public = jingrow.db.get_value("Database Server", self.database_server, "public") + if database_server_public != self.public: + jingrow.db.set_value("Database Server", self.database_server, "public", self.public) + + if not self.is_new() and self.has_value_changed("team"): + self.update_subscription() + jingrow.db.delete("Jcloud Role Permission", {"server": self.name}) + + # Enable bench memory limits for public servers + if self.public: + self.set_bench_memory_limits = True + + def after_insert(self): + from jcloud.jcloud.pagetype.jcloud_role.jcloud_role import ( + add_permission_for_newly_created_pg, + ) + + super().after_insert() + add_permission_for_newly_created_pg(self) + + def update_subscription(self): + subscription = jingrow.db.get_value( + "Subscription", + { + "document_type": self.pagetype, + "document_name": 
self.name, + "plan_type": "Server Plan", + "plan": self.plan, + "enabled": 1, + }, + ["name", "team"], + as_dict=True, + ) + if subscription and subscription.team != self.team: + jingrow.get_pg("Subscription", subscription).disable() + + if subscription := jingrow.db.get_value( + "Subscription", + { + "document_type": self.pagetype, + "document_name": self.name, + "team": self.team, + "plan_type": "Server Plan", + "plan": self.plan, + }, + ): + jingrow.db.set_value("Subscription", subscription, "enabled", 1) + else: + try: + # create new subscription + jingrow.get_pg( + { + "pagetype": "Subscription", + "document_type": self.pagetype, + "document_name": self.name, + "team": self.team, + "plan_type": "Server Plan", + "plan": self.plan, + } + ).insert() + except Exception: + jingrow.log_error("Server Subscription Creation Error") + + add_on_storage_subscription = jingrow.db.get_value( + "Subscription", + { + "document_type": self.pagetype, + "document_name": self.name, + "plan_type": "Server Storage Plan", + "enabled": 1, + }, + ["name", "team", "additional_storage"], + as_dict=True, + ) + if add_on_storage_subscription and add_on_storage_subscription.team != self.team: + jingrow.get_pg("Subscription", add_on_storage_subscription).disable() + + if existing_add_on_storage_subscription := jingrow.db.get_value( + "Subscription", + filters={ + "document_type": self.pagetype, + "document_name": self.name, + "team": self.team, + "plan_type": "Server Storage Plan", + }, + ): + jingrow.db.set_value( + "Subscription", + existing_add_on_storage_subscription, + { + "enabled": 1, + "additional_storage": add_on_storage_subscription.additional_storage, + }, + ) + else: + try: + # create new subscription + jingrow.get_pg( + { + "pagetype": "Subscription", + "document_type": self.pagetype, + "document_name": self.name, + "team": self.team, + "plan_type": "Server Storage Plan", + "plan": add_on_storage_subscription.plan, + } + ).insert() + except Exception: + jingrow.log_error("Server Storage Subscription Creation Error") + + @jingrow.whitelist() + def add_upstream_to_proxy(self): + agent = Agent(self.proxy_server, server_type="Proxy Server") + agent.new_server(self.name) + + def _setup_server(self): + agent_password = self.get_password("agent_password") + agent_repository_url = self.get_agent_repository_url() + certificate = self.get_certificate() + log_server, kibana_password = self.get_log_server() + agent_sentry_dsn = jingrow.db.get_single_value("Jcloud Settings", "agent_sentry_dsn") + + try: + ansible = Ansible( + playbook="self_hosted.yml" if getattr(self, "is_self_hosted", False) else "server.yml", + server=self, + user=self._ssh_user(), + port=self._ssh_port(), + variables={ + "server": self.name, + "private_ip": self.private_ip, + "proxy_ip": self.get_proxy_ip(), + "workers": "2", + "agent_password": agent_password, + "agent_repository_url": agent_repository_url, + "agent_sentry_dsn": agent_sentry_dsn, + "monitoring_password": self.get_monitoring_password(), + "log_server": log_server, + "kibana_password": kibana_password, + "certificate_private_key": certificate.private_key, + "certificate_full_chain": certificate.full_chain, + "certificate_intermediate_chain": certificate.intermediate_chain, + "docker_depends_on_mounts": self.docker_depends_on_mounts, + **self.get_mount_variables(), + }, + ) + play = ansible.run() + self.reload() + self._set_mount_status(play) + if play.status == "Success": + self.status = "Active" + self.is_server_setup = True + else: + self.status = "Broken" + except Exception: 
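+ # An unexpected error (as opposed to a failed play) also leaves the server
+ # marked Broken; the exception is logged below. A successful play above sets
+ # the status to Active and flips is_server_setup.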
+ self.status = "Broken" + log_error("Server Setup Exception", server=self.as_dict()) + self.save() + + def get_proxy_ip(self): + """In case of standalone setup proxy will not required""" + + if self.is_standalone: + return self.ip + + return jingrow.db.get_value("Proxy Server", self.proxy_server, "private_ip") + + @jingrow.whitelist() + def setup_standalone(self): + jingrow.enqueue_pg(self.pagetype, self.name, "_setup_standalone", queue="short", timeout=1200) + + def _setup_standalone(self): + try: + ansible = Ansible( + playbook="standalone.yml", + server=self, + user=self._ssh_user(), + port=self._ssh_port(), + variables={ + "server": self.name, + "domain": self.domain, + }, + ) + play = ansible.run() + self.reload() + if play.status == "Success": + self.is_standalone_setup = True + except Exception: + log_error("Standalone Server Setup Exception", server=self.as_dict()) + self.save() + + @jingrow.whitelist() + def setup_agent_sentry(self): + jingrow.enqueue_pg(self.pagetype, self.name, "_setup_agent_sentry") + + def _setup_agent_sentry(self): + agent_sentry_dsn = jingrow.db.get_single_value("Jcloud Settings", "agent_sentry_dsn") + try: + ansible = Ansible( + playbook="agent_sentry.yml", + server=self, + variables={"agent_sentry_dsn": agent_sentry_dsn}, + ) + ansible.run() + except Exception: + log_error("Agent Sentry Setup Exception", server=self.as_dict()) + + @jingrow.whitelist() + def whitelist_ipaddress(self): + jingrow.enqueue_pg(self.pagetype, self.name, "_whitelist_ip", queue="short", timeout=1200) + + def _whitelist_ip(self): + proxy_server = jingrow.get_value("Server", self.name, "proxy_server") + proxy_server_ip = jingrow.get_pg("Proxy Server", proxy_server).ip + + try: + ansible = Ansible( + playbook="whitelist_ipaddress.yml", + server=self, + variables={"ip_address": proxy_server_ip}, + ) + play = ansible.run() + self.reload() + self.reload() + if play.status == "Success": + self.status = "Active" + else: + self.status = "Broken" + except Exception: + self.status = "Broken" + log_error("Proxy IP Whitelist Exception", server=self.as_dict()) + self.save() + + @jingrow.whitelist() + def agent_set_proxy_ip(self): + jingrow.enqueue_pg(self.pagetype, self.name, "_agent_set_proxy_ip", queue="short", timeout=1200) + + def _agent_set_proxy_ip(self): + agent_password = self.get_password("agent_password") + + try: + ansible = Ansible( + playbook="agent_set_proxy_ip.yml", + server=self, + user=self._ssh_user(), + port=self._ssh_port(), + variables={ + "server": self.name, + "proxy_ip": self.get_proxy_ip(), + "workers": "2", + "agent_password": agent_password, + }, + ) + ansible.run() + except Exception: + log_error("Agent Proxy IP Setup Exception", server=self.as_dict()) + self.save() + + @jingrow.whitelist() + def setup_fail2ban(self): + self.status = "Installing" + self.save() + jingrow.enqueue_pg(self.pagetype, self.name, "_setup_fail2ban", queue="long", timeout=1200) + + def _setup_fail2ban(self): + try: + ansible = Ansible( + playbook="fail2ban.yml", + server=self, + ) + play = ansible.run() + self.reload() + if play.status == "Success": + self.status = "Active" + else: + self.status = "Broken" + except Exception: + self.status = "Broken" + log_error("Fail2ban Setup Exception", server=self.as_dict()) + self.save() + + @jingrow.whitelist() + def setup_replication(self): + self.status = "Installing" + self.save() + jingrow.enqueue_pg(self.pagetype, self.name, "_setup_replication", queue="long", timeout=1200) + + def _setup_replication(self): + self._setup_secondary() + if 
self.status == "Active": + primary = jingrow.get_pg("Server", self.primary) + primary._setup_primary(self.name) + if primary.status == "Active": + self.is_replication_setup = True + self.save() + + def _setup_primary(self, secondary): + secondary_private_ip = jingrow.db.get_value("Server", secondary, "private_ip") + try: + ansible = Ansible( + playbook="primary_app.yml", + server=self, + variables={"secondary_private_ip": secondary_private_ip}, + ) + play = ansible.run() + self.reload() + if play.status == "Success": + self.status = "Active" + else: + self.status = "Broken" + except Exception: + self.status = "Broken" + log_error("Primary Server Setup Exception", server=self.as_dict()) + self.save() + + def _setup_secondary(self): + try: + ansible = Ansible( + playbook="secondary_app.yml", + server=self, + variables={"primary_public_key": self.get_primary_jingrow_public_key()}, + ) + play = ansible.run() + self.reload() + + if play.status == "Success": + self.status = "Active" + else: + self.status = "Broken" + except Exception: + self.status = "Broken" + log_error("Secondary Server Setup Exception", server=self.as_dict()) + self.save() + + def _install_exporters(self): + monitoring_password = jingrow.get_pg("Cluster", self.cluster).get_password("monitoring_password") + try: + ansible = Ansible( + playbook="server_exporters.yml", + server=self, + variables={ + "private_ip": self.private_ip, + "monitoring_password": monitoring_password, + }, + ) + ansible.run() + except Exception: + log_error("Exporters Install Exception", server=self.as_dict()) + + @classmethod + def get_all_prod(cls, **kwargs) -> list[str]: + """Active prod servers.""" + return jingrow.get_all("Server", {"status": "Active"}, pluck="name", **kwargs) + + @classmethod + def get_all_primary_prod(cls) -> list[str]: + """Active primary prod servers.""" + return jingrow.get_all("Server", {"status": "Active", "is_primary": True}, pluck="name") + + @classmethod + def get_all_staging(cls, **kwargs) -> list[str]: + """Active staging servers.""" + return jingrow.get_all("Server", {"status": "Active", "staging": True}, pluck="name", **kwargs) + + @classmethod + def get_one_staging(cls) -> str: + return cls.get_all_staging(limit=1)[0] + + @classmethod + def get_prod_for_new_bench(cls, extra_filters=None) -> str | None: + filters = {"status": "Active", "use_for_new_benches": True} + if extra_filters: + filters.update(extra_filters) + servers = jingrow.get_all("Server", {**filters}, pluck="name", limit=1) + if servers: + return servers[0] + return None + + def _rename_server(self): + agent_password = self.get_password("agent_password") + agent_repository_url = self.get_agent_repository_url() + certificate_name = jingrow.db.get_value( + "TLS Certificate", {"wildcard": True, "domain": self.domain}, "name" + ) + certificate = jingrow.get_pg("TLS Certificate", certificate_name) + monitoring_password = jingrow.get_pg("Cluster", self.cluster).get_password("monitoring_password") + log_server = jingrow.db.get_single_value("Jcloud Settings", "log_server") + if log_server: + kibana_password = jingrow.get_pg("Log Server", log_server).get_password("kibana_password") + else: + kibana_password = None + + try: + ansible = Ansible( + playbook="rename.yml", + server=self, + user=self._ssh_user(), + port=self._ssh_port(), + variables={ + "server": self.name, + "private_ip": self.private_ip, + "proxy_ip": self.get_proxy_ip(), + "workers": "2", + "agent_password": agent_password, + "agent_repository_url": agent_repository_url, + "monitoring_password": 
monitoring_password, + "log_server": log_server, + "kibana_password": kibana_password, + "certificate_private_key": certificate.private_key, + "certificate_full_chain": certificate.full_chain, + "certificate_intermediate_chain": certificate.intermediate_chain, + }, + ) + play = ansible.run() + self.reload() + if play.status == "Success": + self.status = "Active" + self.is_server_renamed = True + else: + self.status = "Broken" + except Exception: + self.status = "Broken" + log_error("Server Rename Exception", server=self.as_dict()) + self.save() + + @jingrow.whitelist() + def auto_scale_workers(self, commit=True): + if self.new_worker_allocation: + self._auto_scale_workers_new(commit) + else: + self._auto_scale_workers_old() + + @cached_property + def bench_workloads(self) -> dict["Bench", int]: + bench_workloads = {} + benches = jingrow.get_all( + "Bench", + filters={"server": self.name, "status": "Active", "auto_scale_workers": True}, + pluck="name", + ) + for bench_name in benches: + bench = jingrow.get_pg("Bench", bench_name) + bench_workloads[bench] = bench.workload + return bench_workloads + + @cached_property + def workload(self) -> int: + return sum(self.bench_workloads.values()) + + @cached_property + def usable_ram(self) -> float: + return max(self.ram - 3000, self.ram * 0.75) # in MB (leaving some for disk cache + others) + + @cached_property + def max_gunicorn_workers(self) -> int: + usable_ram_for_gunicorn = 0.6 * self.usable_ram # 60% of usable ram + return usable_ram_for_gunicorn / self.GUNICORN_MEMORY + + @cached_property + def max_bg_workers(self) -> int: + usable_ram_for_bg = 0.4 * self.usable_ram # 40% of usable ram + return usable_ram_for_bg / self.BACKGROUND_JOB_MEMORY + + def _auto_scale_workers_new(self, commit): + for bench in self.bench_workloads: + try: + bench.reload() + bench.allocate_workers( + self.workload, + self.max_gunicorn_workers, + self.max_bg_workers, + self.set_bench_memory_limits, + self.GUNICORN_MEMORY, + self.BACKGROUND_JOB_MEMORY, + ) + if commit: + jingrow.db.commit() + except jingrow.TimestampMismatchError: + if commit: + jingrow.db.rollback() + continue + except Exception: + log_error("Bench Auto Scale Worker Error", bench=bench, workload=self.bench_workloads[bench]) + if commit: + jingrow.db.rollback() + + def _auto_scale_workers_old(self): # noqa: C901 + benches = jingrow.get_all( + "Bench", + filters={"server": self.name, "status": "Active", "auto_scale_workers": True}, + pluck="name", + ) + for bench_name in benches: + bench = jingrow.get_pg("Bench", bench_name) + workload = bench.workload + + if workload <= 10: + background_workers, gunicorn_workers = 1, 2 + elif workload <= 20: + background_workers, gunicorn_workers = 2, 4 + elif workload <= 30: + background_workers, gunicorn_workers = 3, 6 + elif workload <= 50: + background_workers, gunicorn_workers = 4, 8 + elif workload <= 100: + background_workers, gunicorn_workers = 6, 12 + elif workload <= 250: + background_workers, gunicorn_workers = 8, 16 + elif workload <= 500: + background_workers, gunicorn_workers = 16, 32 + else: + background_workers, gunicorn_workers = 24, 48 + + if (bench.background_workers, bench.gunicorn_workers) != ( + background_workers, + gunicorn_workers, + ): + bench = jingrow.get_pg("Bench", bench.name) + bench.background_workers, bench.gunicorn_workers = ( + background_workers, + gunicorn_workers, + ) + bench.save() + + @jingrow.whitelist() + def reset_sites_usage(self): + sites = jingrow.get_all( + "Site", + filters={"server": self.name, "status": "Active"}, + 
pluck="name", + ) + for site_name in sites: + site = jingrow.get_pg("Site", site_name) + site.reset_site_usage() + + def install_earlyoom(self): + jingrow.enqueue_pg( + self.pagetype, + self.name, + "_install_earlyoom", + ) + + def _install_earlyoom(self): + try: + ansible = Ansible( + playbook="server_memory_limits.yml", + server=self, + ) + ansible.run() + except Exception: + log_error("Earlyoom Install Exception", server=self.as_dict()) + + @property + def docker_depends_on_mounts(self): + mount_points = set(mount.mount_point for mount in self.mounts) + bench_mount_points = set(["/home/jingrow/benches"]) + return bench_mount_points.issubset(mount_points) + + +def scale_workers(now=False): + servers = jingrow.get_all("Server", {"status": "Active", "is_primary": True}) + for server in servers: + try: + if now: + jingrow.get_pg("Server", server.name).auto_scale_workers() + else: + jingrow.enqueue_pg( + "Server", + server.name, + method="auto_scale_workers", + job_id=f"auto_scale_workers:{server.name}", + deduplicate=True, + queue="long", + enqueue_after_commit=True, + ) + jingrow.db.commit() + except Exception: + log_error("Auto Scale Worker Error", server=server) + jingrow.db.rollback() + + +def process_new_server_job_update(job): + if job.status == "Success": + jingrow.db.set_value("Server", job.upstream, "is_upstream_setup", True) + + +def cleanup_unused_files(): + servers = jingrow.get_all("Server", fields=["name"], filters={"status": "Active"}) + for server in servers: + try: + jingrow.get_pg("Server", server.name).cleanup_unused_files() + except Exception: + log_error("Server File Cleanup Error", server=server) + + +get_permission_query_conditions = get_permission_query_conditions_for_pagetype("Server") + + +def get_hostname_abbreviation(hostname): + hostname_parts = hostname.split("-") + + abbr = hostname_parts[0] + + for part in hostname_parts[1:]: + abbr += part[0] + + return abbr + + +def is_dedicated_server(server_name): + if not isinstance(server_name, str): + jingrow.throw("Invalid argument") + is_public = jingrow.db.get_value("Server", server_name, "public") + return not is_public diff --git a/jcloud/jcloud/pagetype/server/server_dashboard.py b/jcloud/jcloud/pagetype/server/server_dashboard.py new file mode 100644 index 0000000..3c98e98 --- /dev/null +++ b/jcloud/jcloud/pagetype/server/server_dashboard.py @@ -0,0 +1,15 @@ +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +from jingrow import _ + + +def get_data(): + return { + "fieldname": "server", + "transactions": [ + {"label": _("Related Documents"), "items": ["Bench", "Site"]}, + {"label": _("Logs"), "items": ["Agent Job", "Ansible Play"]}, + ], + } diff --git a/jcloud/jcloud/pagetype/server/test_server.py b/jcloud/jcloud/pagetype/server/test_server.py new file mode 100644 index 0000000..93311c5 --- /dev/null +++ b/jcloud/jcloud/pagetype/server/test_server.py @@ -0,0 +1,146 @@ +# Copyright (c) 2019, JINGROW +# See license.txt + +from __future__ import annotations + +import typing +import unittest +from unittest.mock import Mock, patch + +import jingrow +from jingrow.model.naming import make_autoname + +from jcloud.jcloud.pagetype.database_server.test_database_server import ( + create_test_database_server, +) +from jcloud.jcloud.pagetype.jcloud_settings.test_jcloud_settings import ( + create_test_jcloud_settings, +) +from jcloud.jcloud.pagetype.proxy_server.test_proxy_server import create_test_proxy_server +from jcloud.jcloud.pagetype.server.server import BaseServer +from 
jcloud.jcloud.pagetype.server_plan.test_server_plan import create_test_server_plan +from jcloud.jcloud.pagetype.team.test_team import create_test_team +from jcloud.jcloud.pagetype.virtual_machine.test_virtual_machine import create_test_virtual_machine + +if typing.TYPE_CHECKING: + from jcloud.jcloud.pagetype.server.server import Server + + +@patch.object(BaseServer, "after_insert", new=Mock()) +def create_test_server( + proxy_server: str | None = None, + database_server: str | None = None, + cluster: str = "Default", + plan: str | None = None, + team: str | None = None, + public: bool = False, +) -> "Server": + """Create test Server pg.""" + if not proxy_server: + proxy_server = create_test_proxy_server().name + if not database_server: + database_server = create_test_database_server().name + if not team: + team = create_test_team().name + server = jingrow.get_pg( + { + "pagetype": "Server", + "status": "Active", + "proxy_server": proxy_server, + "database_server": database_server, + "ip": jingrow.mock("ipv4"), + "private_ip": jingrow.mock("ipv4_private"), + "domain": "fc.dev", + "hostname": make_autoname("f-.####"), + "cluster": cluster, + "new_worker_allocation": True, + "ram": 16000, + "team": team, + "plan": plan, + "public": public, + "virtual_machine": create_test_virtual_machine(), + } + ).insert() + server.reload() + return server + + +@patch.object(BaseServer, "after_insert", new=Mock()) +class TestServer(unittest.TestCase): + def test_create_generic_server(self): + create_test_jcloud_settings() + proxy_server = create_test_proxy_server() + database_server = create_test_database_server() + + server = jingrow.get_pg( + { + "pagetype": "Server", + "hostname": jingrow.mock("domain_word"), + "domain": "fc.dev", + "ip": jingrow.mock("ipv4"), + "private_ip": jingrow.mock("ipv4_private"), + "agent_password": jingrow.mock("password"), + "proxy_server": proxy_server.name, + "database_server": database_server.name, + } + ) + server.insert() + self.assertEqual(server.cluster, "Default") + self.assertEqual(server.name, f"{server.hostname}.{server.domain}") + + def test_set_agent_password(self): + create_test_jcloud_settings() + proxy_server = create_test_proxy_server() + database_server = create_test_database_server() + + server = jingrow.get_pg( + { + "pagetype": "Server", + "hostname": jingrow.mock("domain_word"), + "domain": "fc.dev", + "ip": jingrow.mock("ipv4"), + "private_ip": jingrow.mock("ipv4_private"), + "proxy_server": proxy_server.name, + "database_server": database_server.name, + } + ) + server.insert() + self.assertEqual(len(server.get_password("agent_password")), 32) + + def test_subscription_creation_on_server_creation(self): + create_test_jcloud_settings() + server_plan = create_test_server_plan() + server = create_test_server(plan=server_plan.name) + server.create_subscription(server.plan) + subscription = jingrow.get_pg( + "Subscription", + {"document_type": "Server", "document_name": server.name, "enabled": 1}, + ) + self.assertEqual(server.team, subscription.team) + self.assertEqual(server.plan, subscription.plan) + + def test_new_subscription_on_server_team_update(self): + create_test_jcloud_settings() + server_plan = create_test_server_plan() + server = create_test_server(plan=server_plan.name) + server.create_subscription(server.plan) + subscription = jingrow.get_pg( + "Subscription", + {"document_type": "Server", "document_name": server.name, "enabled": 1}, + ) + self.assertEqual(server.team, subscription.team) + self.assertEqual(server.plan, subscription.plan) + + # 
update server team + team2 = create_test_team() + server.team = team2.name + server.save() + subscription = jingrow.get_pg( + "Subscription", + {"document_type": "Server", "document_name": server.name, "enabled": 1}, + ) + self.assertEqual(server.team, subscription.team) + self.assertEqual(server.plan, subscription.plan) + + def tearDown(self): + jingrow.db.rollback() diff --git a/jcloud/jcloud/pagetype/server_mount/__init__.py b/jcloud/jcloud/pagetype/server_mount/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/server_mount/server_mount.json b/jcloud/jcloud/pagetype/server_mount/server_mount.json new file mode 100644 index 0000000..dda6265 --- /dev/null +++ b/jcloud/jcloud/pagetype/server_mount/server_mount.json @@ -0,0 +1,145 @@ +{ + "actions": [], + "autoname": "autoincrement", + "creation": "2024-10-28 17:06:07.172615", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "mount_type", + "volume_id", + "filesystem", + "column_break_ygbk", + "status", + "source", + "column_break_uvrc", + "uuid", + "mount_point", + "mount_options", + "permissions_section", + "mount_point_owner", + "mount_point_group", + "column_break_kwsz", + "mount_point_mode" + ], + "fields": [ + { + "columns": 2, + "fieldname": "volume_id", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Volume ID", + "mandatory_depends_on": "eval: pg.mount_type === \"Volume\"", + "read_only": 1 + }, + { + "fieldname": "column_break_ygbk", + "fieldtype": "Column Break" + }, + { + "default": "ext4", + "fieldname": "filesystem", + "fieldtype": "Select", + "label": "Filesystem", + "options": "ext4\nnone", + "reqd": 1 + }, + { + "fieldname": "mount_options", + "fieldtype": "Data", + "label": "Mount Options" + }, + { + "columns": 1, + "default": "Volume", + "fieldname": "mount_type", + "fieldtype": "Select", + "in_list_view": 1, + "label": "Mount Type", + "options": "Volume\nBind", + "reqd": 1 + }, + { + "columns": 3, + "fieldname": "mount_point", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Mount Point", + "reqd": 1 + }, + { + "columns": 3, + "fieldname": "source", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Source", + "mandatory_depends_on": "eval: pg.mount_type === \"Bind\"", + "read_only_depends_on": "eval: pg.mount_type === \"Volume\"", + "reqd": 1 + }, + { + "columns": 1, + "default": "Pending", + "fieldname": "status", + "fieldtype": "Select", + "in_list_view": 1, + "label": "Status", + "options": "Pending\nSuccess\nFailure", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "uuid", + "fieldtype": "Data", + "label": "UUID", + "read_only": 1 + }, + { + "fieldname": "permissions_section", + "fieldtype": "Section Break", + "label": "Permissions" + }, + { + "default": "root", + "fieldname": "mount_point_owner", + "fieldtype": "Data", + "label": "Mount Point Owner", + "reqd": 1 + }, + { + "fieldname": "column_break_kwsz", + "fieldtype": "Column Break" + }, + { + "fieldname": "column_break_uvrc", + "fieldtype": "Column Break" + }, + { + "default": "0755", + "fieldname": "mount_point_mode", + "fieldtype": "Data", + "label": "Mount Point Mode", + "reqd": 1 + }, + { + "default": "root", + "fieldname": "mount_point_group", + "fieldtype": "Data", + "label": "Mount Point Group", + "reqd": 1 + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2024-11-15 17:24:16.761964", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Server Mount", + "naming_rule": "Autoincrement", + 
"owner": "Administrator", + "permissions": [], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/server_mount/server_mount.py b/jcloud/jcloud/pagetype/server_mount/server_mount.py new file mode 100644 index 0000000..44399e8 --- /dev/null +++ b/jcloud/jcloud/pagetype/server_mount/server_mount.py @@ -0,0 +1,36 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt + +# import jingrow +from __future__ import annotations + +from jingrow.model.document import Document + + +class ServerMount(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + filesystem: DF.Literal["ext4", "none"] + mount_options: DF.Data | None + mount_point: DF.Data + mount_point_group: DF.Data + mount_point_mode: DF.Data + mount_point_owner: DF.Data + mount_type: DF.Literal["Volume", "Bind"] + name: DF.Int | None + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + source: DF.Data + status: DF.Literal["Pending", "Success", "Failure"] + uuid: DF.Data | None + volume_id: DF.Data | None + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/server_plan/__init__.py b/jcloud/jcloud/pagetype/server_plan/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/server_plan/server_plan.js b/jcloud/jcloud/pagetype/server_plan/server_plan.js new file mode 100644 index 0000000..6058f1f --- /dev/null +++ b/jcloud/jcloud/pagetype/server_plan/server_plan.js @@ -0,0 +1,8 @@ +// Copyright (c) 2024, JINGROW +// For license information, please see license.txt + +// jingrow.ui.form.on("Server Plan", { +// refresh(frm) { + +// }, +// }); diff --git a/jcloud/jcloud/pagetype/server_plan/server_plan.json b/jcloud/jcloud/pagetype/server_plan/server_plan.json new file mode 100644 index 0000000..52ccfc1 --- /dev/null +++ b/jcloud/jcloud/pagetype/server_plan/server_plan.json @@ -0,0 +1,157 @@ +{ + "actions": [], + "allow_rename": 1, + "autoname": "prompt", + "creation": "2024-02-05 22:21:47.560972", + "pagetype": "PageType", + "engine": "InnoDB", + "field_order": [ + "enabled", + "title", + "pricing_section", + "price_cny", + "column_break_sjmg", + "price_usd", + "section_break_nifk", + "premium", + "server_type", + "cluster", + "instance_type", + "platform", + "column_break_ypkt", + "vcpu", + "memory", + "disk", + "allowed_roles_section", + "roles" + ], + "fields": [ + { + "default": "0", + "fieldname": "enabled", + "fieldtype": "Check", + "label": "Enabled" + }, + { + "fieldname": "server_type", + "fieldtype": "Select", + "label": "Server Type", + "options": "Server\nDatabase Server\nProxy Server\nSelf Hosted Server" + }, + { + "fieldname": "section_break_nifk", + "fieldtype": "Section Break", + "label": "Details" + }, + { + "fieldname": "cluster", + "fieldtype": "Link", + "label": "Cluster", + "options": "Cluster" + }, + { + "fieldname": "instance_type", + "fieldtype": "Data", + "label": "Instance Type" + }, + { + "fieldname": "column_break_ypkt", + "fieldtype": "Column Break" + }, + { + "fieldname": "vcpu", + "fieldtype": "Int", + "label": "vCPU" + }, + { + "fieldname": "memory", + "fieldtype": "Int", + "label": "Memory" + }, + { + "fieldname": "disk", + "fieldtype": "Int", + "label": "Disk" + }, + { + "fieldname": "pricing_section", + "fieldtype": "Section Break", + "label": "Pricing" + }, + { + "fieldname": 
"price_cny", + "fieldtype": "Currency", + "in_list_view": 1, + "label": "Price (CNY)", + "options": "CNY", + "reqd": 1 + }, + { + "fieldname": "column_break_sjmg", + "fieldtype": "Column Break" + }, + { + "fieldname": "price_usd", + "fieldtype": "Currency", + "in_list_view": 1, + "label": "Price (USD)", + "options": "CNY", + "reqd": 1 + }, + { + "fieldname": "title", + "fieldtype": "Data", + "label": "Title" + }, + { + "fieldname": "allowed_roles_section", + "fieldtype": "Section Break", + "label": "Allowed Roles" + }, + { + "fieldname": "roles", + "fieldtype": "Table", + "label": "Roles", + "options": "Has Role" + }, + { + "default": "0", + "fieldname": "premium", + "fieldtype": "Check", + "label": "Premium" + }, + { + "default": "x86_64", + "fieldname": "platform", + "fieldtype": "Select", + "label": "Platform", + "options": "x86_64\narm64", + "reqd": 1 + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2024-11-21 13:49:02.682602", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Server Plan", + "naming_rule": "Set by user", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/server_plan/server_plan.py b/jcloud/jcloud/pagetype/server_plan/server_plan.py new file mode 100644 index 0000000..aec2345 --- /dev/null +++ b/jcloud/jcloud/pagetype/server_plan/server_plan.py @@ -0,0 +1,47 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt +from __future__ import annotations + +from jcloud.jcloud.pagetype.site_plan.plan import Plan + + +class ServerPlan(Plan): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.core.pagetype.has_role.has_role import HasRole + from jingrow.types import DF + + cluster: DF.Link | None + disk: DF.Int + enabled: DF.Check + instance_type: DF.Data | None + memory: DF.Int + platform: DF.Literal["x86_64", "arm64"] + premium: DF.Check + price_cny: DF.Currency + price_usd: DF.Currency + roles: DF.Table[HasRole] + server_type: DF.Literal["Server", "Database Server", "Proxy Server", "Self Hosted Server"] + title: DF.Data | None + vcpu: DF.Int + # end: auto-generated types + + dashboard_fields = ( + "title", + "price_cny", + "price_usd", + "vcpu", + "memory", + "disk", + "platform", + "premium", + ) + + def get_pg(self, pg): + pg["price_per_day_cny"] = self.get_price_per_day("CNY") + pg["price_per_day_usd"] = self.get_price_per_day("USD") + return pg diff --git a/jcloud/jcloud/pagetype/server_plan/test_server_plan.py b/jcloud/jcloud/pagetype/server_plan/test_server_plan.py new file mode 100644 index 0000000..268fbc4 --- /dev/null +++ b/jcloud/jcloud/pagetype/server_plan/test_server_plan.py @@ -0,0 +1,32 @@ +# Copyright (c) 2024, JINGROW +# See license.txt + +import typing + +import jingrow +from jingrow.model.naming import make_autoname +from jingrow.tests.utils import JingrowTestCase + +if typing.TYPE_CHECKING: + from jcloud.jcloud.pagetype.server_plan.server_plan import ServerPlan + + +def create_test_server_plan(server_type: str = "Server") -> "ServerPlan": + """Create test Server Plan pg.""" + server_plan = jingrow.get_pg( + { + "pagetype": "Server Plan", + "name": make_autoname("SP-.####"), + "server_type": server_type, + "title": jingrow.mock("name"), + "price_cny": 1000, + "price_usd": 200, + "enabled": 1, + } + ).insert() + server_plan.reload() + return server_plan + + +class TestServerPlan(JingrowTestCase): + pass diff --git a/jcloud/jcloud/pagetype/server_storage_plan/__init__.py b/jcloud/jcloud/pagetype/server_storage_plan/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/server_storage_plan/patches/add_subscription_for_servers_with_additional_disk.py b/jcloud/jcloud/pagetype/server_storage_plan/patches/add_subscription_for_servers_with_additional_disk.py new file mode 100644 index 0000000..e5cc5fb --- /dev/null +++ b/jcloud/jcloud/pagetype/server_storage_plan/patches/add_subscription_for_servers_with_additional_disk.py @@ -0,0 +1,47 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt + + +import jingrow +from tqdm import tqdm + + +def execute(): + Server = jingrow.qb.PageType("Server") + DatabaseServer = jingrow.qb.PageType("Database Server") + VirtualMachine = jingrow.qb.PageType("Virtual Machine") + ServerPlan = jingrow.qb.PageType("Server Plan") + + servers = ( + jingrow.qb.from_(Server) + .select(Server.name, Server.team, ServerPlan.disk, VirtualMachine.disk_size) + .join(VirtualMachine) + .on(Server.virtual_machine == VirtualMachine.name) + .join(ServerPlan) + .on(Server.plan == ServerPlan.name) + .where(ServerPlan.disk < VirtualMachine.disk_size) + .where(Server.public == 0) + .run(as_dict=True) + ) + + database_servers = ( + jingrow.qb.from_(DatabaseServer) + .select( + DatabaseServer.name, DatabaseServer.team, ServerPlan.disk, VirtualMachine.disk_size + ) + .join(VirtualMachine) + .on(DatabaseServer.virtual_machine == VirtualMachine.name) + .join(ServerPlan) + .on(DatabaseServer.plan == ServerPlan.name) + .where(ServerPlan.disk < VirtualMachine.disk_size) + .where(DatabaseServer.public == 0) + 
.run(as_dict=True) + ) + + for server in tqdm(servers): + jingrow.get_pg("Server", server.name).create_subscription_for_storage() + + for database_server in tqdm(database_servers): + jingrow.get_pg( + "Database Server", database_server.name + ).create_subscription_for_storage() diff --git a/jcloud/jcloud/pagetype/server_storage_plan/server_storage_plan.js b/jcloud/jcloud/pagetype/server_storage_plan/server_storage_plan.js new file mode 100644 index 0000000..2dfd0a2 --- /dev/null +++ b/jcloud/jcloud/pagetype/server_storage_plan/server_storage_plan.js @@ -0,0 +1,8 @@ +// Copyright (c) 2024, JINGROW +// For license information, please see license.txt + +// jingrow.ui.form.on("Server Storage Plan", { +// refresh(frm) { + +// }, +// }); diff --git a/jcloud/jcloud/pagetype/server_storage_plan/server_storage_plan.json b/jcloud/jcloud/pagetype/server_storage_plan/server_storage_plan.json new file mode 100644 index 0000000..14e24c6 --- /dev/null +++ b/jcloud/jcloud/pagetype/server_storage_plan/server_storage_plan.json @@ -0,0 +1,75 @@ +{ + "actions": [], + "allow_rename": 1, + "autoname": "prompt", + "creation": "2024-06-26 14:03:45.071081", + "pagetype": "PageType", + "engine": "InnoDB", + "field_order": [ + "enabled", + "title", + "pricing_section", + "price_cny", + "column_break_xauk", + "price_usd" + ], + "fields": [ + { + "default": "0", + "fieldname": "enabled", + "fieldtype": "Check", + "label": "Enabled" + }, + { + "fieldname": "title", + "fieldtype": "Data", + "label": "Title" + }, + { + "fieldname": "pricing_section", + "fieldtype": "Section Break", + "label": "Pricing" + }, + { + "fieldname": "price_cny", + "fieldtype": "Currency", + "label": "Price per GB (CNY)" + }, + { + "fieldname": "column_break_xauk", + "fieldtype": "Column Break" + }, + { + "fieldname": "price_usd", + "fieldtype": "Currency", + "label": "Price per GB (CNY)" + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2024-06-26 17:50:43.521110", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Server Storage Plan", + "naming_rule": "Set by user", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "show_title_field_in_link": 1, + "sort_field": "creation", + "sort_order": "DESC", + "states": [], + "title_field": "title" +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/server_storage_plan/server_storage_plan.py b/jcloud/jcloud/pagetype/server_storage_plan/server_storage_plan.py new file mode 100644 index 0000000..4bbb7c3 --- /dev/null +++ b/jcloud/jcloud/pagetype/server_storage_plan/server_storage_plan.py @@ -0,0 +1,28 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt + +import jingrow + +from jcloud.jcloud.pagetype.site_plan.plan import Plan + + +class ServerStoragePlan(Plan): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + enabled: DF.Check + price_cny: DF.Currency + price_usd: DF.Currency + title: DF.Data | None + # end: auto-generated types + + def validate(self): + if self.enabled and jingrow.db.exists( + "Server Storage Plan", {"enabled": 1, "name": ("!=", self.name)} + ): + jingrow.throw("Only one storage add-on plan can be enabled at a time") diff --git a/jcloud/jcloud/pagetype/server_storage_plan/test_server_storage_plan.py b/jcloud/jcloud/pagetype/server_storage_plan/test_server_storage_plan.py new file mode 100644 index 0000000..8280579 --- /dev/null +++ b/jcloud/jcloud/pagetype/server_storage_plan/test_server_storage_plan.py @@ -0,0 +1,9 @@ +# Copyright (c) 2024, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests.utils import JingrowTestCase + + +class TestServerStoragePlan(JingrowTestCase): + pass diff --git a/jcloud/jcloud/pagetype/silenced_alert/__init__.py b/jcloud/jcloud/pagetype/silenced_alert/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/silenced_alert/silenced_alert.js b/jcloud/jcloud/pagetype/silenced_alert/silenced_alert.js new file mode 100644 index 0000000..5dc270b --- /dev/null +++ b/jcloud/jcloud/pagetype/silenced_alert/silenced_alert.js @@ -0,0 +1,29 @@ +// Copyright (c) 2023, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Silenced Alert', { + refresh(frm) { + if (!frm.pg.__unsaved) { + frm.add_custom_button('Preview Alerts filered by Instance', () => { + frm.call('preview_alerts').then((r) => { + if (r.message) { + jingrow.msgprint(r.message); + } else { + frm.refresh(); + } + }); + }); + } + if (!frm.pg.__unsaved && !frm.pg.silence_id) { + frm.add_custom_button('Create Silence', () => { + frm.call('create_new_silence').then((r) => { + if (r.message) { + jingrow.msgprint(r.message); + } else { + frm.refresh(); + } + }); + }); + } + }, +}); diff --git a/jcloud/jcloud/pagetype/silenced_alert/silenced_alert.json b/jcloud/jcloud/pagetype/silenced_alert/silenced_alert.json new file mode 100644 index 0000000..67a8cdd --- /dev/null +++ b/jcloud/jcloud/pagetype/silenced_alert/silenced_alert.json @@ -0,0 +1,133 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2023-10-18 13:46:51.308480", + "default_view": "List", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "status", + "instance_type", + "instance", + "alert_comment", + "duration_column", + "from_time", + "to_time", + "duration", + "silence_id", + "section_break_hvst", + "alert_previews_column", + "total_alerts", + "alert_previews" + ], + "fields": [ + { + "fieldname": "instance_type", + "fieldtype": "Link", + "label": "Instance Type", + "options": "PageType" + }, + { + "fieldname": "instance", + "fieldtype": "Dynamic Link", + "in_list_view": 1, + "label": "Instance", + "options": "instance_type", + "reqd": 1 + }, + { + "fieldname": "duration_column", + "fieldtype": "Column Break", + "label": "Duration" + }, + { + "default": "now", + "fieldname": "from_time", + "fieldtype": "Datetime", + "in_list_view": 1, + "label": "From Time", + "reqd": 1 + }, + { + "fieldname": "to_time", + "fieldtype": "Datetime", + "in_list_view": 1, + "label": "To Time", + "reqd": 1 + }, + { + "fieldname": "duration", + "fieldtype": "Data", + "label": "Duration", + "read_only": 1 + }, + { + "fieldname": "section_break_hvst", + "fieldtype": "Section Break" + }, + { + "fieldname": "alert_comment", + "fieldtype": "Small Text", 
+ "in_list_view": 1, + "label": "Alert Comment", + "reqd": 1 + }, + { + "default": "Preview", + "fieldname": "status", + "fieldtype": "Select", + "in_list_view": 1, + "label": "Status", + "options": "\nPreview\nActive\nExpired", + "read_only": 1 + }, + { + "fieldname": "alert_previews", + "fieldtype": "Code", + "label": "Alert Previews", + "options": "JSON" + }, + { + "fieldname": "silence_id", + "fieldtype": "Data", + "label": "Silence ID", + "read_only": 1 + }, + { + "fieldname": "alert_previews_column", + "fieldtype": "Column Break", + "label": "Alert Previews" + }, + { + "fieldname": "total_alerts", + "fieldtype": "Data", + "label": "Total Alerts", + "read_only": 1 + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2023-10-19 10:56:12.281776", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Silenced Alert", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/silenced_alert/silenced_alert.py b/jcloud/jcloud/pagetype/silenced_alert/silenced_alert.py new file mode 100644 index 0000000..f51b5af --- /dev/null +++ b/jcloud/jcloud/pagetype/silenced_alert/silenced_alert.py @@ -0,0 +1,149 @@ +# Copyright (c) 2023, JINGROW +# For license information, please see license.txt + +import base64 +import json +from datetime import timezone + +import jingrow +import requests +from jingrow.model.document import Document +from jingrow.utils.data import format_duration, get_datetime + +from jcloud.utils import log_error + + +class SilencedAlert(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + alert_comment: DF.SmallText + alert_previews: DF.Code | None + duration: DF.Data | None + from_time: DF.Datetime + instance: DF.DynamicLink + instance_type: DF.Link | None + silence_id: DF.Data | None + status: DF.Literal["", "Preview", "Active", "Expired"] + to_time: DF.Datetime + total_alerts: DF.Data | None + # end: auto-generated types + + def validate(self): + self.get_duration() + + def get_duration(self): + diff = get_datetime(self.to_time) - get_datetime(self.from_time) + self.duration = format_duration(diff.total_seconds()) + + def get_keyword_based_on_instance_type(self): + match self.instance_type: + case "Site": + return "instance" + case "Server": + return "instance" + case "Cluster": + return "cluster" + case "Release Group": + return "group" + case "Bench": + return "bench" + case "Prometheus Alert Rule": + return "alertname" + case _: + return "" + + @jingrow.whitelist() + def preview_alerts(self): + monitor_server = jingrow.get_pg( + "Monitor Server", "monitor.athul.fc.jingrow.dev" + ) # jingrow.db.get_single_value("Jcloud Settings","monitor_server")) + auth_token = base64.b64encode( + f"jingrow:{monitor_server.get_password('grafana_password')}".encode("utf-8") + ).decode("utf-8") + # keyword = f'{self.get_keyword_based_on_instance_type()}%3D%22{self.instance.replace(" ","%20")}%22' + keyword = f'{self.get_keyword_based_on_instance_type()}="{"erpdb.innoterra.co.in" or self.instance}"' + print(keyword) + res = requests.get( + f"https://monitor.jingrow.cloud/alertmanager/api/v2/alerts/groups?filter={keyword}&silenced=false&active=true", + headers={"Authorization": f"Basic {auth_token}"}, + ) + if res.status_code == 200: + alerts = res.json() + self.total_alerts = len(alerts) + self.alert_previews = json.dumps(alerts, indent=2) + self.save() + else: + jingrow.throw("Unable to fetch alerts from Alertmanager") + + @jingrow.whitelist() + def create_new_silence(self): + monitor_server = jingrow.get_pg( + "Monitor Server", "monitor.athul.fc.jingrow.dev" + ) # jingrow.db.get_single_value("Jcloud Settings","monitor_server")) + auth_token = base64.b64encode( + f"jingrow:{monitor_server.get_password('grafana_password')}".encode("utf-8") + ).decode("utf-8") + data = { + "matchers": [ + { + "name": self.get_keyword_based_on_instance_type(), + "value": "erpdb.innoterra.co.in", + "isRegex": False, + "isEqual": True, + } + ], + "startsAt": get_datetime(self.from_time).astimezone(timezone.utc).isoformat(), + "endsAt": get_datetime(self.to_time).astimezone(timezone.utc).isoformat(), + "createdBy": self.owner, + "comment": self.alert_comment, + "id": None, + } + res = requests.post( + "https://monitor.jingrow.cloud/alertmanager/api/v2/silences", + headers={"Authorization": f"Basic {auth_token}"}, + json=data, + ) + if res.status_code == 200: + alerts = res.json() + self.status = "Active" + self.silence_id = alerts["silenceID"] + self.save() + else: + jingrow.throw("Unable to fetch alerts from Alertmanager") + + +def check_silenced_alerts(): + """ + Checks for silenced alerts in Alertmanager and updates the status of the silenced alert in Jcloud + Runs every hour + """ + silences = jingrow.get_all( + "Silenced Alert", fields=["silence_id"], filters={"status": "Active"} + ) + monitor_server = jingrow.get_pg( + "Monitor Server", "monitor.athul.fc.jingrow.dev" + ) # jingrow.db.get_single_value("Jcloud Settings","monitor_server")) + auth_token = base64.b64encode( + 
f"jingrow:{monitor_server.get_password('grafana_password')}".encode("utf-8") + ).decode("utf-8") + req = requests.get( + "https://monitor.jingrow.cloud/alertmanager/api/v2/silences?silenced=false&inhibited=false&active=true", + headers={"Authorization": f"Basic {auth_token}"}, + ) + if req.status_code == 200: + silences_from_alertmanager = req.json() + s_ids = [x["silence_id"] for x in silences] + for silence in silences_from_alertmanager: + if not silence["status"]["state"] == "active" and silence["id"] in s_ids: + jingrow.db.set_value( + "Silenced Alert", {"silence_id": silence["id"]}, "status", "Expired" + ) + jingrow.db.commit() + else: + log_error("Failed to fetch silences from Alertmanager") diff --git a/jcloud/jcloud/pagetype/silenced_alert/test_silenced_alert.py b/jcloud/jcloud/pagetype/silenced_alert/test_silenced_alert.py new file mode 100644 index 0000000..19e8dac --- /dev/null +++ b/jcloud/jcloud/pagetype/silenced_alert/test_silenced_alert.py @@ -0,0 +1,9 @@ +# Copyright (c) 2023, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests.utils import JingrowTestCase + + +class TestSilencedAlert(JingrowTestCase): + pass diff --git a/jcloud/jcloud/pagetype/site/__init__.py b/jcloud/jcloud/pagetype/site/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/site/archive.py b/jcloud/jcloud/pagetype/site/archive.py new file mode 100644 index 0000000..c06cbf9 --- /dev/null +++ b/jcloud/jcloud/pagetype/site/archive.py @@ -0,0 +1,82 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt + +import jingrow + +from jcloud.utils import log_error + + +def archive_suspended_trial_sites(): + ARCHIVE_AFTER_DAYS = 21 + ARCHIVE_AT_ONCE = 10 + + filters = [ + ["status", "=", "Suspended"], + ["trial_end_date", "is", "set"], + [ + "trial_end_date", + "<", + jingrow.utils.add_to_date(None, days=-(ARCHIVE_AFTER_DAYS + 1)), + ], # Don't look at sites that are unlikely to be archived + ] + + sites = jingrow.get_all( + "Site", + filters=filters, + fields=["name", "team", "trial_end_date"], + order_by="creation asc", + ) + + archived_now = 0 + for site in sites: + if archived_now > ARCHIVE_AT_ONCE: + break + try: + suspension_date = jingrow.get_all( + "Site Activity", + filters={"site": site.name, "action": "Suspend Site"}, + pluck="creation", + order_by="creation desc", + limit=1, + )[0] + suspended_days = jingrow.utils.date_diff(jingrow.utils.today(), suspension_date) + + if suspended_days > ARCHIVE_AFTER_DAYS: + site = jingrow.get_pg("Site", site.name, for_update=True) + site.archive(reason="Archive suspended trial site", skip_reload=True) + archived_now = archived_now + 1 + except Exception: + log_error("Suspended Site Archive Error") + + +def delete_offsite_backups_for_archived_sites(): + archived_sites = jingrow.db.sql( + """ + SELECT + backup.site, + COUNT(*) as offsite_backups + FROM + `tabSite Backup` backup + LEFT JOIN + `tabSite` site + ON + backup.site = site.name + WHERE + site.status = "Archived" AND + backup.files_availability = "Available" AND + backup.offsite = True + GROUP BY + backup.site + HAVING + offsite_backups > 1 + ORDER BY + offsite_backups DESC + """, + as_dict=True, + ) + for site in archived_sites: + try: + jingrow.get_pg("Site", site.site).delete_offsite_backups() + jingrow.db.commit() + except Exception: + jingrow.db.rollback() diff --git a/jcloud/jcloud/pagetype/site/backups.py b/jcloud/jcloud/pagetype/site/backups.py new file mode 100644 index 0000000..d4c4a77 --- /dev/null +++ 
b/jcloud/jcloud/pagetype/site/backups.py @@ -0,0 +1,325 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +import functools +from collections import deque +from datetime import datetime, timedelta +from functools import wraps +from itertools import groupby +from time import time +from typing import Dict, List + +import jingrow +import pytz + +from jcloud.jcloud.pagetype.jcloud_settings.jcloud_settings import JcloudSettings +from jcloud.jcloud.pagetype.remote_file.remote_file import delete_remote_backup_objects +from jcloud.jcloud.pagetype.site.site import Site +from jcloud.jcloud.pagetype.site_backup.site_backup import SiteBackup +from jcloud.jcloud.pagetype.subscription.subscription import Subscription +from jcloud.utils import log_error + + +def timing(f): + @wraps(f) + def wrap(*args, **kw): + ts = time() + result = f(*args, **kw) + te = time() + print(f"Took {te-ts}s") + return result + + return wrap + + +class BackupRotationScheme: + """ + Represents backup rotation scheme for maintaining offsite backups. + + Rotation is maintained by controlled deletion of daily backups. + """ + + def _expire_and_get_remote_files( + self, offsite_backups: List[Dict[str, str]] + ) -> List[str]: + """Mark backup as unavailable and return remote files to delete.""" + remote_files_to_delete = [] + for backup in offsite_backups: + remote_files = jingrow.db.get_value( + "Site Backup", + backup, + ["remote_database_file", "remote_private_file", "remote_public_file"], + ) + remote_files_to_delete.extend(remote_files) + jingrow.db.set_value("Site Backup", backup, "files_availability", "Unavailable") + return remote_files_to_delete + + def expire_local_backups(self): + """Mark local backups deleted by FF as unavailable.""" + sites_with_config = jingrow.db.sql( + """ + SELECT tabSite.name, tabBench.config + FROM tabSite + JOIN tabBench ON tabSite.bench=tabBench.name + WHERE tabSite.status != "Archived" + ORDER BY tabBench.config + """, + as_dict=True, + ) + for d in sites_with_config: + d.config = self._get_expiry(d.config) + + for config, site_confs in groupby(sites_with_config, lambda d: d.config): + sites = [] + for site_conf in list(site_confs): + sites.append(site_conf.name) + self._expire_backups_of_site_in_bench(sites, config) + + @functools.lru_cache(maxsize=128) + def _get_expiry(self, config: str): + return jingrow.parse_json(config or "{}").keep_backups_for_hours or 24 + + def _expire_backups_of_site_in_bench(self, sites: List[str], expiry: int): + if sites: + jingrow.db.set_value( + "Site Backup", + { + "site": ("in", sites), + "status": "Success", + "files_availability": "Available", + "offsite": False, + "creation": ("<", jingrow.utils.add_to_date(None, hours=-expiry)), + }, + "files_availability", + "Unavailable", + ) + + def expire_offsite_backups(self) -> List[str]: + """Expire and return list of offsite backups to delete.""" + raise NotImplementedError + + def cleanup_offsite(self): + """Expire backups according to the rotation scheme.""" + expired_remote_files = self.expire_offsite_backups() + delete_remote_backup_objects(expired_remote_files) + + +class FIFO(BackupRotationScheme): + """Represents First-in-First-out backup rotation scheme.""" + + def __init__(self): + self.offsite_backups_count = ( + jingrow.db.get_single_value("Jcloud Settings", "offsite_backups_count") or 30 + ) + + def expire_offsite_backups(self) -> List[str]: + offsite_expiry = self.offsite_backups_count + to_be_expired_backups = [] + sites = 
jingrow.get_all("Site", {"status": ("!=", "Archived")}, pluck="name") + for site in sites: + to_be_expired_backups += jingrow.get_all( + "Site Backup", + filters={ + "site": site, + "status": "Success", + "files_availability": "Available", + "offsite": True, + }, + order_by="creation desc", + )[offsite_expiry:] + return self._expire_and_get_remote_files(to_be_expired_backups) + + +class GFS(BackupRotationScheme): + """ + Represents Grandfather-father-son backup rotation scheme. + + Daily backups are kept for specified number of days. + Weekly backups are kept for 4 weeks. + Monthly backups are kept for a year. + Yearly backups are kept for a decade. + """ + + daily = 7 # no. of daily backups to keep + weekly_backup_day = 1 # days of the week (1-7) (SUN-SAT) + monthly_backup_day = 1 # days of the month (1-31) + yearly_backup_day = 1 # days of the year (1-366) + + def expire_offsite_backups(self) -> List[str]: + today = jingrow.utils.getdate() + oldest_daily = today - timedelta(days=self.daily) + oldest_weekly = today - timedelta(weeks=4) + oldest_monthly = today - timedelta(days=366) + oldest_yearly = today - timedelta(days=3653) + to_be_expired_backups = jingrow.db.sql( + f""" + SELECT name from `tabSite Backup` + WHERE + site in (select name from tabSite where status != "Archived") and + status="Success" and + files_availability="Available" and + offsite=True and + creation < "{oldest_daily}" and + (DAYOFWEEK(creation) != {self.weekly_backup_day} or creation < "{oldest_weekly}") and + (DAYOFMONTH(creation) != {self.monthly_backup_day} or creation < "{oldest_monthly}") and + (DAYOFYEAR(creation) != {self.yearly_backup_day} or creation < "{oldest_yearly}") + """, + as_dict=True, + ) + # XXX: DAYOFWEEK in sql gives 1-7 for SUN-SAT in sql + # datetime.weekday() in python gives 0-6 for MON-SUN + # datetime.isoweekday() in python gives 1-7 for MON-SUN + + return self._expire_and_get_remote_files(to_be_expired_backups) + + +class ScheduledBackupJob: + """Represents Scheduled Backup Job that takes backup for all active sites.""" + + def is_backup_hour(self, hour: int) -> bool: + """ + hour: 0-23 + + Return true if backup is supposed to be taken at this hour + """ + # return (hour + self.offset) % self.interval == 0 + return True + + def __init__(self): + self.interval: int = ( + jingrow.get_cached_value("Jcloud Settings", "Jcloud Settings", "backup_interval") or 6 + ) + self.offset: int = ( + jingrow.get_cached_value("Jcloud Settings", "Jcloud Settings", "backup_offset") or 0 + ) + self.limit = ( + jingrow.get_cached_value("Jcloud Settings", "Jcloud Settings", "backup_limit") or 100 + ) + + self.offsite_setup = JcloudSettings.is_offsite_setup() + self.server_time = datetime.now() + self.sites = Site.get_sites_for_backup(self.interval) + self.sites_without_offsite = Subscription.get_sites_without_offsite_backups() + + def take_offsite(self, site: jingrow._dict, day: datetime.date) -> bool: + return ( + self.offsite_setup + and site.name not in self.sites_without_offsite + and not SiteBackup.offsite_backup_exists(site.name, day) + ) + + def get_site_time(self, site: Dict[str, str]) -> datetime: + timezone = site.timezone or "Asia/Kolkata" + site_timezone = pytz.timezone(timezone) + return self.server_time.astimezone(site_timezone) + + class ModifiableCycle: + def __init__(self, items=()): + self.deque = deque(items) + + def __iter__(self): + return self + + def __next__(self): + if not self.deque: + raise StopIteration + item = self.deque.popleft() + self.deque.append(item) + return item + + def 
delete_next(self): + self.deque.popleft() + + def delete_prev(self): + self.deque.pop() + + def start(self): + """Schedule backups for all Active sites based on their local timezones. Also trigger offsite backups once a day.""" + sites_by_server = [] + for server, sites in groupby(self.sites, lambda d: d.server): + sites_by_server.append((server, iter(list(sites)))) + + sites_by_server_cycle = self.ModifiableCycle(sites_by_server) + self._take_backups_in_round_robin(sites_by_server_cycle) + + def _take_backups_in_round_robin(self, sites_by_server_cycle: ModifiableCycle): + limit = min(len(self.sites), self.limit) + for server, sites in sites_by_server_cycle: + try: + site = next(sites) + while not self.backup(site): + site = next(sites) + except StopIteration: + sites_by_server_cycle.delete_prev() # no more sites in this server + continue + limit -= 1 + if limit <= 0: + break + + def backup(self, site) -> bool: + """Return true if backup was taken.""" + try: + site_time = self.get_site_time(site) + if self.is_backup_hour(site_time.hour): + today = jingrow.utils.getdate() + + offsite = self.take_offsite(site, today) + with_files = offsite or not SiteBackup.file_backup_exists(site.name, today) + + jingrow.get_pg("Site", site.name).backup(with_files=with_files, offsite=offsite) + jingrow.db.commit() + return True + return False + + except Exception: + log_error("Site Backup Exception", site=site) + jingrow.db.rollback() + + +def schedule_for_sites_with_backup_time(): + """ + Schedule backups for sites with backup time. + + Run this hourly only + """ + sites = Site.get_sites_with_backup_time() + now = jingrow.utils.now_datetime() + for site in sites: + if now.hour != site.backup_time.total_seconds() // 3600: + continue + site_pg = jingrow.get_pg("Site", site.name) + site_pg.backup(with_files=True, offsite=True) + jingrow.db.commit() + + +def schedule(): + scheduled_backup_job = ScheduledBackupJob() + scheduled_backup_job.start() + + +def cleanup_offsite(): + """Delete expired (based on policy) offsite backups and mark em as Unavailable.""" + jingrow.enqueue( + "jcloud.jcloud.pagetype.site.backups._cleanup_offsite", queue="long", timeout=3600 + ) + + +def _cleanup_offsite(): + scheme = ( + jingrow.db.get_single_value("Jcloud Settings", "backup_rotation_scheme") or "FIFO" + ) + if scheme == "FIFO": + rotation = FIFO() + elif scheme == "Grandfather-father-son": + rotation = GFS() + rotation.cleanup_offsite() + jingrow.db.commit() + + +def cleanup_local(): + """Mark expired onsite backups as Unavailable.""" + brs = BackupRotationScheme() + brs.expire_local_backups() + jingrow.db.commit() diff --git a/jcloud/jcloud/pagetype/site/jerp_site.py b/jcloud/jcloud/pagetype/site/jerp_site.py new file mode 100644 index 0000000..fc02982 --- /dev/null +++ b/jcloud/jcloud/pagetype/site/jerp_site.py @@ -0,0 +1,108 @@ +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + + +import jingrow + +from jcloud.jcloud.pagetype.account_request.account_request import AccountRequest +from jcloud.jcloud.pagetype.jerp_consultant.jerp_consultant import JERPConsultant +from jcloud.jcloud.pagetype.site.site import Site + + +class JERPSite(Site): + def __init__(self, site=None, account_request: AccountRequest = None): + if site: + super().__init__("Site", site) + elif account_request: + super().__init__( + { + "pagetype": "Site", + "subdomain": account_request.subdomain, + "domain": get_jerp_domain(), + "bench": get_jerp_bench(), + "apps": [{"app": app} for app in get_jerp_apps()], + "team": 
"Administrator", + "account_request": account_request.name, + "subscription_plan": get_jerp_plan(), + "jerp_consultant": JERPConsultant.get_one_for_country( + account_request.country + ), + "trial_end_date": jingrow.utils.add_days(None, 14), + } + ) + + def rename_pooled_site(self, account_request): + self.subdomain = account_request.subdomain + self.is_standby = False + self.account_request = account_request.name + self.trial_end_date = jingrow.utils.add_days(None, 14) + plan = get_jerp_plan() + self._update_configuration(self.get_plan_config(plan), save=False) + self.jerp_consultant = JERPConsultant.get_one_for_country( + account_request.country + ) + self.save(ignore_permissions=True) + self.create_subscription(plan) + + def can_change_plan(self): + return True + + def can_create_site(self): + return True + + +def get_jerp_bench(): + domain = get_jerp_domain() + cluster = get_jerp_cluster() + + proxy_servers = jingrow.get_all( + "Proxy Server", + [ + ["status", "=", "Active"], + ["cluster", "=", cluster], + ["Proxy Server Domain", "domain", "=", domain], + ], + pluck="name", + ) + release_group = get_jerp_group() + query = """ + SELECT + bench.name + FROM + tabBench bench + LEFT JOIN + tabServer server + ON + bench.server = server.name + WHERE + server.proxy_server in %s AND bench.status = "Active" AND bench.group = %s + ORDER BY + server.use_for_new_sites DESC, bench.creation DESC + LIMIT 1 + """ + return jingrow.db.sql(query, [proxy_servers, release_group], as_dict=True)[0].name + + +def get_jerp_domain(): + return jingrow.db.get_single_value("Jcloud Settings", "jerp_domain") + + +def get_jerp_plan(): + return jingrow.db.get_single_value("Jcloud Settings", "jerp_plan") + + +def get_jerp_group(): + return jingrow.db.get_single_value("Jcloud Settings", "jerp_group") + + +def get_jerp_cluster(): + return jingrow.db.get_single_value("Jcloud Settings", "jerp_cluster") + + +def get_jerp_apps(): + return [app.app for app in jingrow.get_single("Jcloud Settings").jerp_apps] + + +def process_setup_jerp_site_job_update(job): + if job.status == "Success": + jingrow.db.set_value("Site", job.site, "is_jerp_setup", True) diff --git a/jcloud/jcloud/pagetype/site/patches/set_database_access_credentials.py b/jcloud/jcloud/pagetype/site/patches/set_database_access_credentials.py new file mode 100644 index 0000000..b53c108 --- /dev/null +++ b/jcloud/jcloud/pagetype/site/patches/set_database_access_credentials.py @@ -0,0 +1,29 @@ +# Copyright (c) 2023, JINGROW +# For license information, please see license.txt +import jingrow +from tqdm import tqdm + + +def execute(): + jingrow.reload_pg("jcloud", "pagetype", "site") + sites = jingrow.get_all( + "Site", + { + "status": ("!=", "Archived"), + "is_database_access_enabled": True, + "database_access_user": ("is", "not set"), + }, + ignore_ifnull=True, + ) + for site in tqdm(sites): + try: + site = jingrow.get_pg("Site", site.name) + config = site.fetch_info()["config"] + site.database_access_user = config["db_name"] + site.database_access_password = config["db_password"] + site.database_access_mode = "read_write" + site.save() + jingrow.db.commit() + except Exception as e: + jingrow.db.rollback() + print(f"Couldn't set DB credentials for site {site.name}: {e}") diff --git a/jcloud/jcloud/pagetype/site/patches/set_plan_in_site.py b/jcloud/jcloud/pagetype/site/patches/set_plan_in_site.py new file mode 100644 index 0000000..ae70f79 --- /dev/null +++ b/jcloud/jcloud/pagetype/site/patches/set_plan_in_site.py @@ -0,0 +1,35 @@ +# Copyright (c) 2021, JINGROW +# For 
license information, please see license.txt +import jingrow + + +def execute(): + # set plan in all non-archived sites that have active subscription + jingrow.reload_pagetype("Site") + + jingrow.db.sql( + """ + UPDATE + tabSite s + LEFT JOIN tabSubscription p ON s.name = p.document_name + AND p.document_type = 'Site' + SET + s.plan = p.plan + WHERE + s.status != 'Archived' + and p.enabled = 1 + """ + ) + # set plan to '' in all sites that have disabled subscription + jingrow.db.sql( + """ + UPDATE + tabSite s + LEFT JOIN tabSubscription p ON s.name = p.document_name + AND p.document_type = 'Site' + SET + s.plan = '' + WHERE + p.enabled = 0 + """ + ) diff --git a/jcloud/jcloud/pagetype/site/patches/set_plan_limit_in_site_config.py b/jcloud/jcloud/pagetype/site/patches/set_plan_limit_in_site_config.py new file mode 100644 index 0000000..6e8509d --- /dev/null +++ b/jcloud/jcloud/pagetype/site/patches/set_plan_limit_in_site_config.py @@ -0,0 +1,30 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt +import jingrow +from tqdm import tqdm + + +def execute(): + key_name = "plan_limit" + if not jingrow.db.exists("Site Config Key", {"key": key_name}): + jingrow.get_pg( + { + "pagetype": "Site Config Key", + "key": key_name, + "type": "JSON", + "internal": True, + } + ).insert(ignore_permissions=True) + + non_archived_sites = jingrow.get_all( + "Site", filters={"status": ("!=", "Archived")}, pluck="name" + ) + + for site_name in tqdm(non_archived_sites): + try: + site = jingrow.get_pg("Site", site_name, for_update=True) + site.update_site_config(site.get_plan_config()) + jingrow.db.commit() + except Exception as e: + print(f"Couldn't set plan limit for site {site_name}: {e}") + jingrow.db.rollback() diff --git a/jcloud/jcloud/pagetype/site/patches/set_status_wizard_check_next_retry_datetime_in_site.py b/jcloud/jcloud/pagetype/site/patches/set_status_wizard_check_next_retry_datetime_in_site.py new file mode 100644 index 0000000..7aaebc7 --- /dev/null +++ b/jcloud/jcloud/pagetype/site/patches/set_status_wizard_check_next_retry_datetime_in_site.py @@ -0,0 +1,23 @@ +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt +import jingrow + + +def execute(): + jingrow.reload_pagetype("Site") + # set setup_wizard_status_check_next_retry_on to current datetime + # in saas sites that has setup_wizard_complete = false + # and setup_wizard_status_check_next_retry_on is null + + jingrow.db.sql( + """ + UPDATE + tabSite s + SET + s.setup_wizard_status_check_next_retry_on = NOW() + WHERE + s.setup_wizard_complete = 0 + and s.setup_wizard_status_check_next_retry_on is null + and s.status != 'Archived' + """ + ) diff --git a/jcloud/jcloud/pagetype/site/pool.py b/jcloud/jcloud/pagetype/site/pool.py new file mode 100644 index 0000000..7669508 --- /dev/null +++ b/jcloud/jcloud/pagetype/site/pool.py @@ -0,0 +1,74 @@ +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + + +import jingrow +from jingrow.model.naming import make_autoname + +from jcloud.jcloud.pagetype.site.jerp_site import ( + get_jerp_apps, + get_jerp_bench, + get_jerp_domain, +) +from jcloud.utils import log_error + + +class SitePool: + def __init__(self): + self.site_count = jingrow.db.count("Site", filters={"is_standby": True, "status": "Active"}) + self.pool_size = jingrow.db.get_single_value("Jcloud Settings", "standby_pool_size") + self.queue_size = jingrow.db.get_single_value("Jcloud Settings", "standby_queue_size") + + def create(self): + pooling_enabled = 
jingrow.db.get_single_value("Jcloud Settings", "enable_site_pooling") + if pooling_enabled and self.site_count < self.pool_size: + sites_created = 0 + while sites_created < self.queue_size: + self.create_one() + sites_created += 1 + + def create_one(self): + try: + domain = get_jerp_domain() + bench = get_jerp_bench() + subdomain = self.get_subdomain() + apps = get_jerp_apps() + jingrow.get_pg( + { + "pagetype": "Site", + "subdomain": subdomain, + "domain": domain, + "is_standby": True, + "team": "Administrator", + "bench": bench, + "apps": [{"app": app} for app in apps], + } + ).insert() + except Exception: + log_error( + "Pool Site Creation Error", + domain=domain, + subdomain=subdomain, + bench=bench, + apps=apps, + ) + raise + + def get_subdomain(self): + return make_autoname("standby-.########") + + def get(self): + return jingrow.db.get_value( + "Site", + {"is_standby": True, "status": "Active", "standby_for": ("=", "")}, + "name", + order_by="creation", + ) + + +def create(): + SitePool().create() + + +def get(): + return SitePool().get() diff --git a/jcloud/jcloud/pagetype/site/saas_pool.py b/jcloud/jcloud/pagetype/site/saas_pool.py new file mode 100644 index 0000000..8f99079 --- /dev/null +++ b/jcloud/jcloud/pagetype/site/saas_pool.py @@ -0,0 +1,131 @@ +import jingrow +from jingrow.model.naming import make_autoname + +from jcloud.jcloud.pagetype.site.saas_site import ( + create_app_subscriptions, + get_pool_apps, + get_saas_apps, + get_saas_bench, + get_saas_domain, + set_site_in_subscription_docs, +) +from jcloud.utils import log_error + + +class SaasSitePool: + def __init__(self, app): + self.app = app + self.site_count = jingrow.db.count( + "Site", + filters={ + "is_standby": True, + "status": ["in", ["Active", "Pending", "Installing", "Updating"]], + "standby_for": self.app, + "hybrid_saas_pool": "", + }, + ) + self.saas_settings = jingrow.get_pg("Saas Settings", app) + + def create(self): + if self.saas_settings.enable_pooling: + if self.site_count < self.saas_settings.standby_pool_size: + sites_created = 0 + while sites_created < self.saas_settings.standby_queue_size: + self.create_one() + jingrow.db.commit() + sites_created += 1 + + if jingrow.db.get_value("Saas Settings", self.app, "enable_hybrid_pools"): + self.create_hybrid_pool_sites() + + def create_one(self, pool_name: str = ""): + bench, apps, subdomain, domain = None, None, None, None + try: + domain = get_saas_domain(self.app) + bench = get_saas_bench(self.app) + subdomain = self.get_subdomain() + apps = get_saas_apps(self.app) + if pool_name: + apps.extend(get_pool_apps(pool_name)) + site = jingrow.get_pg( + { + "pagetype": "Site", + "subdomain": subdomain, + "domain": domain, + "is_standby": True, + "standby_for": self.app, + "hybrid_saas_pool": pool_name, + "team": jingrow.get_value("Team", {"user": "Administrator"}, "name"), + "bench": bench, + "apps": [{"app": app} for app in apps], + } + ) + subscription_docs = create_app_subscriptions(site, self.app) + site.insert() + set_site_in_subscription_docs(subscription_docs, site.name) + except Exception: + log_error( + "Pool Site Creation Error", + domain=domain, + subdomain=subdomain, + bench=bench, + apps=apps, + ) + raise + + def create_hybrid_pool_sites(self): + # create a Site according to Site Rules child table in each Hybrid Saas Pool + for pool_name in jingrow.get_all("Hybrid Saas Pool", {"app": self.app}, pluck="name"): + # only has app rules for now, will add site config and other rules later + hybrid_standby_count = jingrow.db.count( + "Site", + { + 
"is_standby": 1, + "standby_for": self.app, + "hybrid_saas_pool": pool_name, + "status": ("in", ["Active", "Pending", "Installing", "Updating"]), + }, + ) + + if hybrid_standby_count > self.saas_settings.standby_pool_size: + continue + + sites_created = 0 + while sites_created < self.saas_settings.standby_queue_size: + self.create_one(pool_name) + jingrow.db.commit() + sites_created += 1 + + def get_subdomain(self): + return make_autoname("standby-.########") + + def get(self, hybrid_saas_pool): + filters = { + "is_standby": True, + "standby_for": self.app, + "status": "Active", + } + + if hybrid_saas_pool: + filters.update({"hybrid_saas_pool": hybrid_saas_pool}) + else: + filters.update({"hybrid_saas_pool": ("is", "not set")}) + + sites = jingrow.get_all("Site", filters, pluck="name", order_by="creation", limit=1) + + return sites[0] if sites else sites + + +def create(): + saas_apps = jingrow.get_all("Saas Settings", {"enable_pooling": 1}, pluck="name") + for app in saas_apps: + try: + SaasSitePool(app).create() + jingrow.db.commit() + except Exception: + log_error("Pool Error", app=app) + jingrow.db.rollback() + + +def get(app, hybrid_saas_pool=""): + return SaasSitePool(app).get(hybrid_saas_pool) diff --git a/jcloud/jcloud/pagetype/site/saas_site.py b/jcloud/jcloud/pagetype/site/saas_site.py new file mode 100644 index 0000000..4afc8ae --- /dev/null +++ b/jcloud/jcloud/pagetype/site/saas_site.py @@ -0,0 +1,274 @@ +import contextlib +import json + +import jingrow + +from jcloud.jcloud.pagetype.account_request.account_request import AccountRequest +from jcloud.jcloud.pagetype.site.site import Site + + +class SaasSite(Site): + def __init__( + self, + site=None, + app=None, + account_request: AccountRequest = None, + hybrid_saas_pool=None, + subdomain=None, + ): + self.app = app + if site: + super().__init__("Site", site) + else: + ar_name = account_request.name if account_request else "" + subdomain = account_request.subdomain if account_request else subdomain + apps = get_saas_apps(self.app) + if hybrid_saas_pool: + # set pool apps + pool_apps = get_pool_apps(hybrid_saas_pool) + apps.extend(pool_apps) + + super().__init__( + { + "pagetype": "Site", + "subdomain": subdomain, + "domain": get_saas_domain(self.app), + "bench": get_saas_bench(self.app), + "apps": [{"app": app} for app in apps], + "team": jingrow.get_value("Team", {"user": "Administrator"}, "name"), + "standby_for": self.app, + "hybrid_saas_pool": hybrid_saas_pool, + "account_request": ar_name, + "subscription_plan": get_saas_site_plan(self.app), + "trial_end_date": jingrow.utils.add_days(None, 14), + } + ) + + self.subscription_docs = create_app_subscriptions(site=self, app=self.app) + + def rename_pooled_site(self, account_request=None, subdomain=None): + self.subdomain = account_request.subdomain if account_request else subdomain + self.is_standby = False + self.account_request = account_request.name if account_request else "" + self.trial_end_date = jingrow.utils.add_days(None, 14) + plan = get_saas_site_plan(self.app) + self._update_configuration(self.get_plan_config(plan), save=False) + subscription_config = {} + for row in self.configuration: + if row.key == "subscription": + with contextlib.suppress(json.JSONDecodeError): + subscription_config = json.loads(row.value) + subscription_config.update( + { + "trial_end_date": self.trial_end_date.strftime("%Y-%m-%d"), + } + ) + self._update_configuration({"subscription": subscription_config}, save=False) + self.save(ignore_permissions=True) + self.create_subscription(plan) + 
self.reload() + + return self + + def can_change_plan(self): + return True + + def can_create_site(self): + return True + + +def get_saas_bench(app): + """ + Select server with least cpu consumption + """ + domain = get_saas_domain(app) + + proxy_servers = jingrow.get_all( + "Proxy Server", + [ + ["status", "=", "Active"], + ["Proxy Server Domain", "domain", "=", domain], + ], + pluck="name", + ) + release_group = get_saas_group(app) + cluster = get_saas_cluster(app) + bench_servers = jingrow.db.sql( + """ + SELECT + bench.name, bench.server + FROM + tabBench bench + LEFT JOIN + tabServer server + ON + bench.server = server.name + WHERE + server.proxy_server in %s AND server.cluster = %s AND bench.status = "Active" AND bench.group = %s + ORDER BY + server.use_for_new_sites DESC, bench.creation DESC + """, + [proxy_servers, cluster, release_group], + as_dict=True, + ) + + signup_servers = tuple([bs["server"] for bs in bench_servers]) + signup_server_sub_str = ( + tuple(signup_servers) if len(signup_servers) > 1 else f"('{signup_servers[0]}')" + ) + lowest_cpu_server = jingrow.db.sql( + f""" + SELECT + site.server, + SUM( + CASE WHEN (site.status != "Archived" and site.status != "Suspended") or NOT NULL + THEN plan.cpu_time_per_day ELSE 0 END + ) as cpu_time_per_month + FROM + tabSite site + LEFT JOIN + `tabSite Plan` plan + ON + site.plan = plan.name + WHERE + site.server in {signup_server_sub_str} + GROUP by + site.server + ORDER by + cpu_time_per_month + LIMIT 1""", + as_dict=True, + ) + lowest_cpu_server = ( + lowest_cpu_server[0].server if lowest_cpu_server else signup_servers[0] + ) + + for bs in bench_servers: + if bs["server"] == lowest_cpu_server: + return bs["name"] + + +def get_saas_plan(app): + return jingrow.db.get_value("Saas Settings", app, "plan") + + +def get_saas_site_plan(app): + return jingrow.db.get_value("Saas Settings", app, "site_plan") + + +def get_saas_domain(app): + return jingrow.db.get_value("Saas Settings", app, "domain") + + +def get_saas_cluster(app): + return jingrow.db.get_value("Saas Settings", app, "cluster") + + +def get_saas_apps(app): + return [_app["app"] for _app in jingrow.get_pg("Saas Settings", app).as_dict()["apps"]] + + +def get_saas_group(app): + return jingrow.db.get_value("Saas Settings", app, "group") + + +def get_pool_apps(pool_name): + pool_apps = [] + for rule in jingrow.get_pg("Hybrid Saas Pool", pool_name).as_dict()["site_rules"]: + if rule.rule_type == "App": + pool_apps.append(rule.app) + + return pool_apps + + +def get_default_team_for_app(app): + return jingrow.db.get_value("Saas Settings", app, "default_team") + + +# Saas Update site config utils + + +def create_app_subscriptions(site, app): + marketplace_apps = ( + get_saas_apps(app) + if jingrow.db.get_value("Saas Settings", app, "multi_subscription") + else [app] + ) + + # create subscriptions + subscription_docs, custom_saas_config = get_app_subscriptions(marketplace_apps, app) + if site.trial_end_date: + # set trial end date in site config + subscription_saas_config: dict = custom_saas_config.get("subscription", {}) + subscription_saas_config.update( + { + "trial_end_date": site.trial_end_date.strftime("%Y-%m-%d"), + } + ) + custom_saas_config["subscription"] = subscription_saas_config + + # set site config + site_config = {f"sk_{s.document_name}": s.secret_key for s in subscription_docs} + site_config.update(custom_saas_config) + site._update_configuration(site_config, save=False) + + return subscription_docs + + +def get_app_subscriptions(apps=None, standby_for=None): + """ + 
Create Marketplace App Subscription docs for all the apps that are installed + and set subscription keys in site config + """ + subscriptions = [] + custom_saas_config = {} + secret_key = "" + + for app in apps: + free_plan = jingrow.get_all( + "Marketplace App Plan", + {"enabled": 1, "price_usd": ("<=", 0), "app": app}, + pluck="name", + ) + if free_plan: + new_subscription = jingrow.get_pg( + { + "pagetype": "Subscription", + "document_type": "Marketplace App", + "document_name": app, + "plan_type": "Marketplace App Plan", + "plan": get_saas_plan(app) + if jingrow.db.exists("Saas Settings", app) + else free_plan[0], + "enabled": 0, + "team": jingrow.get_value("Team", {"user": "Administrator"}, "name"), + } + ).insert(ignore_permissions=True) + + subscriptions.append(new_subscription) + config = jingrow.db.get_value("Marketplace App", app, "site_config") + config = json.loads(config) if config else {} + custom_saas_config.update(config) + + if app == standby_for: + secret_key = new_subscription.secret_key + + if standby_for in jingrow.get_all( + "Saas Settings", {"billing_type": "prepaid"}, pluck="name" + ): + custom_saas_config.update( + { + "subscription": {"secret_key": secret_key}, + "app_include_js": [ + jingrow.db.get_single_value("Jcloud Settings", "app_include_script") + ], + } + ) + + return subscriptions, custom_saas_config + + +def set_site_in_subscription_docs(subscription_docs, site): + for pg in subscription_docs: + pg.site = site + pg.save(ignore_permissions=True) diff --git a/jcloud/jcloud/pagetype/site/site.js b/jcloud/jcloud/pagetype/site/site.js new file mode 100644 index 0000000..a7fa7e9 --- /dev/null +++ b/jcloud/jcloud/pagetype/site/site.js @@ -0,0 +1,438 @@ +// Copyright (c) 2019, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Site', { + onload: function (frm) { + frm.set_query('bench', function () { + return { + filters: { + server: frm.pg.server, + status: 'Active', + }, + }; + }); + frm.set_query('host_name', () => { + return { + filters: { + site: frm.pg.name, + status: 'Active', + }, + }; + }); + }, + refresh: async function (frm) { + frm.dashboard.set_headline_alert( + `
+ <div class="row">
+ <div class="col">CPU Usage: ${frm.pg.current_cpu_usage}%</div>
+ <div class="col">Database Usage: ${frm.pg.current_database_usage}%</div>
+ <div class="col">Disk Usage: ${frm.pg.current_disk_usage}%</div>
+ </div>
`, + ); + frm.add_web_link(`https://${frm.pg.name}`, __('Visit Site')); + frm.add_web_link(`/dashboard/sites/${frm.pg.name}`, __('Visit Dashboard')); + + let site = frm.get_pg(); + let account = await jingrow + .call({ + method: 'jcloud.api.account.get', + }) + .then((resp) => resp.message); + + if (site.status == 'Active') { + frm.add_custom_button( + __('Login as Adminstrator'), + () => { + if (account) { + if (site.team === account.team.name) { + login_as_admin(site.name); + } else { + new jingrow.ui.Dialog({ + title: 'Login as Adminstrator', + fields: [ + { + label: 'Please enter reason for this login.', + fieldname: 'reason', + fieldtype: 'Small Text', + }, + ], + primary_action_label: 'Login', + primary_action(values) { + if (values) { + let reason = values.reason; + console.log(reason); + login_as_admin(site.name, reason); + } else { + jingrow.throw(__('Reason field should not be empty')); + } + this.hide(); + }, + }).show(); + } + } else { + jingrow.throw( + __( + "Couldn't retrieve account. Check Error Log for more information", + ), + ); + } + }, + __('Actions'), + ); + } + + [ + [__('Backup'), 'backup'], + [__('Physical Backup'), 'physical_backup'], + [__('Sync Info'), 'sync_info'], + ].forEach(([label, method]) => { + frm.add_custom_button( + label, + () => { + frm.call(method).then((r) => frm.refresh()); + }, + __('Actions'), + ); + }); + [ + [__('Archive'), 'archive', frm.pg.status !== 'Archived'], + [__('Cleanup after Archive'), 'cleanup_after_archive'], + [__('Sync Apps'), 'sync_apps'], + [__('Migrate'), 'migrate'], + [__('Reinstall'), 'reinstall'], + [__('Restore'), 'restore_site'], + [__('Restore Tables'), 'restore_tables'], + [__('Update'), 'schedule_update'], + [__('Deactivate'), 'deactivate'], + [__('Activate'), 'activate', frm.pg.status !== 'Archived'], + [__('Reset Site Usage'), 'reset_site_usage'], + [__('Clear Cache'), 'clear_site_cache'], + [__('Optimize Tables'), 'optimize_tables'], + [__('Update Site Config'), 'update_site_config'], + [__('Create DNS Record'), 'create_dns_record'], + [__('Run After Migrate Steps'), 'run_after_migrate_steps'], + [__('Retry Rename'), 'retry_rename'], + [ + __('Retry Archive'), + 'retry_archive', + frm.pg.name.includes('.archived'), + ], + [__('Update without Backup'), 'update_without_backup'], + [ + __('Fetch bench from Agent'), + 'fetch_bench_from_agent', + frm.pg.status !== 'Archived', + ], + [ + __('Set status based on Ping'), + 'set_status_based_on_ping', + !['Active', 'Archived', 'Inactive', 'Suspended'].includes( + frm.pg.status, + ), + ], + [__('Show Admin Password'), 'show_admin_password'], + ].forEach(([label, method, condition]) => { + if (typeof condition === 'undefined' || condition) { + frm.add_custom_button( + label, + () => { + jingrow.confirm( + `Are you sure you want to ${label.toLowerCase()} this site?`, + () => frm.call(method).then((r) => frm.refresh()), + ); + }, + __('Actions'), + ); + } + }); + + frm.add_custom_button( + __('Force Archive'), + () => { + jingrow.confirm(`Are you sure you want to force drop this site?`, () => + frm.call('archive', { force: true }).then((r) => frm.refresh()), + ); + }, + __('Actions'), + ); + + [ + [__('Suspend'), 'suspend'], + [__('Unsuspend'), 'unsuspend'], + ].forEach(([label, method]) => { + frm.add_custom_button( + label, + () => { + jingrow.prompt( + { + fieldtype: 'Data', + label: 'Reason', + fieldname: 'reason', + reqd: 1, + }, + ({ reason }) => { + frm + .call(method, { + reason, + }) + .then((r) => frm.refresh()); + }, + __('Provide Reason'), + ); + }, + 
__('Actions'), + ); + }); + frm.toggle_enable(['host_name'], frm.pg.status === 'Active'); + + frm.add_custom_button( + __('Replicate Site'), + () => { + const dialog = new jingrow.ui.Dialog({ + title: __('New Subdomain for Test Site'), + fields: [ + { + fieldtype: 'Data', + fieldname: 'subdomain', + label: 'New Subdomain', + reqd: 1, + }, + ], + primary_action({ subdomain }) { + jingrow.set_route('List', 'Site Replication', { + site: frm.pg.name, + }); + jingrow.new_pg('Site Replication', { + site: frm.pg.name, + subdomain: subdomain, + }); + }, + }); + dialog.show(); + }, + __('Actions'), + ); + + frm.add_custom_button( + __('Update DNS Record'), + () => { + const dialog = new jingrow.ui.Dialog({ + title: __('Update DNS Record'), + fields: [ + { + fieldtype: 'Data', + fieldname: 'value', + label: 'Value', + description: "Site's CNAME record will point to this value", + reqd: 1, + }, + ], + primary_action(args) { + frm + .call('update_dns_record', { + value: args.value, + }) + .then(() => { + dialog.hide(); + }); + }, + }); + dialog.show(); + }, + __('Dangerous Actions'), + ); + + frm.add_custom_button( + __('Move to Group'), + () => { + const dialog = new jingrow.ui.Dialog({ + title: __('Move to Group'), + fields: [ + { + fieldtype: 'Link', + options: 'Release Group', + label: __('Destination Group'), + fieldname: 'group', + get_query: () => { + return { + filters: [ + ['server', '=', frm.pg.server], + ['name', '!=', frm.pg.group], + ], + }; + }, + }, + { + fieldtype: 'Check', + label: __('Skip Failing Patches'), + fieldname: 'skip_failing_patches', + }, + ], + }); + + dialog.set_primary_action(__('Move Site'), (args) => { + frm + .call('move_to_group', { + group: args.group, + skip_failing_patches: args.skip_failing_patches, + }) + .then((r) => { + dialog.hide(); + frm.refresh(); + }); + }); + + dialog.show(); + }, + __('Actions'), + ); + + frm.add_custom_button( + __('Forcefully Remove Site'), + () => { + const dialog = new jingrow.ui.Dialog({ + title: __('Forcefully Remove Site'), + fields: [ + { + fieldtype: 'Link', + options: 'Bench', + label: __('Bench'), + fieldname: 'bench', + reqd: 1, + get_query: () => { + return { + filters: [ + ['name', '!=', frm.pg.bench], + ['status', '!=', 'Archived'], + ], + }; + }, + }, + { + fieldtype: 'Check', + label: __("I know what I'm doing"), + fieldname: 'confirmation', + reqd: 1, + }, + ], + }); + + dialog.set_primary_action(__('Forcefully Remove Site'), (args) => { + if (!args.confirmation) { + jingrow.throw(__("Please confirm that you know what you're doing")); + } + frm + .call('forcefully_remove_site', { + bench: args.bench, + }) + .then((r) => { + dialog.hide(); + frm.refresh(); + if (r.message.job) { + message = ` +Removing site from **${r.message.bench}**. + +Track progress [here](https://${r.message.server}/agent/jobs/${r.message.job}).`; + jingrow.msgprint(jingrow.markdown(message), 'Removing Site'); + } else { + message = ` +Couldn't remove site from **${r.message.bench}**. 
+\`\`\` +${r.message.error} +\`\`\``; + jingrow.msgprint( + jingrow.markdown(message), + 'Failed to Remove Site', + ); + } + }); + }); + + dialog.show(); + }, + __('Dangerous Actions'), + ); + + frm.add_custom_button( + __('Forcefully Move Site'), + () => { + const dialog = new jingrow.ui.Dialog({ + title: __('Forcefully Move Site'), + fields: [ + { + fieldtype: 'Link', + options: 'Bench', + label: __('Bench'), + fieldname: 'bench', + reqd: 1, + get_query: () => { + return { + filters: [ + ['name', '!=', frm.pg.bench], + ['status', '!=', 'Archived'], + ], + }; + }, + }, + { + fieldtype: 'Check', + label: __("I know what I'm doing"), + fieldname: 'confirmation', + reqd: 1, + }, + { + fieldtype: 'Check', + label: __('Deactivate'), + fieldname: 'deactivate', + }, + ], + }); + + dialog.set_primary_action(__('Forcefully Move Site'), (args) => { + if (!args.confirmation) { + jingrow.throw(__("Please confirm that you know what you're doing")); + } + frm + .call('move_to_bench', { + bench: args.bench, + deactivate: args.deactivate, + }) + .then(() => { + dialog.hide(); + frm.refresh(); + }); + }); + + dialog.show(); + }, + __('Dangerous Actions'), + ); + }, +}); + +function login_as_admin(site_name, reason = null) { + jingrow + .call({ + method: 'jcloud.api.site.login', + args: { + name: site_name, + reason: reason, + }, + }) + .then( + (res) => { + console.log(site_name, res.message.sid); + if (res) { + window.open( + `https://${site_name}/desk?sid=${res.message.sid}`, + '_blank', + ); + } + }, + (error) => { + console.log(error); + jingrow.throw(__(`An error occurred!!`)); + }, + ); +} diff --git a/jcloud/jcloud/pagetype/site/site.json b/jcloud/jcloud/pagetype/site/site.json new file mode 100644 index 0000000..b69efec --- /dev/null +++ b/jcloud/jcloud/pagetype/site/site.json @@ -0,0 +1,742 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2022-03-06 20:52:45.544397", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "subdomain", + "domain", + "status", + "status_before_update", + "server", + "archive_failed", + "column_break_3", + "bench", + "group", + "cluster", + "admin_password", + "additional_system_user_created", + "config_tab", + "hide_config", + "host_name", + "configuration", + "column_break_12", + "database_name", + "config", + "billing_tab", + "team", + "plan", + "free", + "staging", + "column_break_15", + "account_request", + "site_end_date", + "trial_end_date", + "is_jerp_setup", + "jerp_consultant", + "apps_tab", + "apps", + "_keys_removed_in_last_update", + "_site_usages", + "current_cpu_usage", + "current_database_usage", + "current_disk_usage", + "deploy_section", + "timezone", + "column_break_29", + "remote_files_tab", + "skip_failing_patches", + "remote_config_file", + "remote_database_file", + "column_break_34", + "remote_private_file", + "remote_public_file", + "tab_break_46", + "notifications_section", + "notify_email", + "auto_updates_section", + "skip_auto_updates", + "only_update_at_specified_time", + "auto_update_last_triggered_on", + "column_break_53", + "update_trigger_frequency", + "update_trigger_time", + "column_break_57", + "update_on_weekday", + "update_end_of_month", + "update_on_day_of_month", + "setup_wizard_status_section", + "setup_wizard_complete", + "column_break_vbgj", + "setup_wizard_status_check_retries", + "setup_wizard_status_check_next_retry_on", + "database_section", + "database_access_connection_limit", + "saas_section", + "is_standby", + "standby_for", + "standby_for_product", + "signup_time", + 
"column_break_63", + "hybrid_saas_pool", + "saas_communication_secret", + "label", + "backups_section", + "backup_time", + "column_break_zgig", + "skip_scheduled_backups", + "tags_section", + "tags" + ], + "fields": [ + { + "fetch_from": "bench.server", + "fetch_if_empty": 1, + "fieldname": "server", + "fieldtype": "Link", + "hide_days": 1, + "hide_seconds": 1, + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Server", + "options": "Server", + "reqd": 1, + "set_only_once": 1 + }, + { + "fieldname": "bench", + "fieldtype": "Link", + "hide_days": 1, + "hide_seconds": 1, + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Bench", + "options": "Bench", + "reqd": 1, + "set_only_once": 1 + }, + { + "default": "Pending", + "fieldname": "status", + "fieldtype": "Select", + "hide_days": 1, + "hide_seconds": 1, + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Status", + "options": "Pending\nInstalling\nUpdating\nActive\nInactive\nBroken\nArchived\nSuspended", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "admin_password", + "fieldtype": "Password", + "hide_days": 1, + "hide_seconds": 1, + "label": "Administrator Password" + }, + { + "fieldname": "column_break_3", + "fieldtype": "Column Break", + "hide_days": 1, + "hide_seconds": 1 + }, + { + "default": "{}", + "fieldname": "config", + "fieldtype": "Code", + "hide_days": 1, + "hide_seconds": 1, + "label": "Preview", + "options": "JSON", + "read_only": 1 + }, + { + "fieldname": "subdomain", + "fieldtype": "Data", + "hide_days": 1, + "hide_seconds": 1, + "label": "Subdomain", + "reqd": 1, + "search_index": 1 + }, + { + "fieldname": "team", + "fieldtype": "Link", + "hide_days": 1, + "hide_seconds": 1, + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Team", + "options": "Team", + "reqd": 1 + }, + { + "collapsible": 1, + "fieldname": "deploy_section", + "fieldtype": "Section Break", + "hide_days": 1, + "hide_seconds": 1, + "label": "Deploy" + }, + { + "fieldname": "timezone", + "fieldtype": "Data", + "hide_days": 1, + "hide_seconds": 1, + "label": "Timezone" + }, + { + "default": "0", + "fieldname": "setup_wizard_complete", + "fieldtype": "Check", + "hide_days": 1, + "hide_seconds": 1, + "label": "Setup Wizard Complete", + "read_only": 1 + }, + { + "default": "0", + "description": "If checked, this site's usage data won't be sent to Stripe", + "fieldname": "free", + "fieldtype": "Check", + "hide_days": 1, + "hide_seconds": 1, + "label": "Free Site" + }, + { + "fieldname": "host_name", + "fieldtype": "Data", + "label": "Host Name" + }, + { + "fieldname": "status_before_update", + "fieldtype": "Data", + "hidden": 1, + "label": "Status Before Update" + }, + { + "fieldname": "remote_database_file", + "fieldtype": "Link", + "label": "Remote Database File", + "options": "Remote File" + }, + { + "fieldname": "remote_private_file", + "fieldtype": "Link", + "label": "Remote Private File", + "options": "Remote File" + }, + { + "fieldname": "remote_public_file", + "fieldtype": "Link", + "label": "Remote Public File", + "options": "Remote File" + }, + { + "fieldname": "remote_config_file", + "fieldtype": "Link", + "label": "Remote Config File", + "options": "Remote File" + }, + { + "fieldname": "configuration", + "fieldtype": "Table", + "label": "Configuration", + "options": "Site Config" + }, + { + "fieldname": "_keys_removed_in_last_update", + "fieldtype": "Data", + "hidden": 1, + "label": "Keys Removed in Last Config Change" + }, + { + "fieldname": "apps", + "fieldtype": "Table", + "hide_days": 1, + "hide_seconds": 
1, + "label": "Apps", + "options": "Site App", + "reqd": 1 + }, + { + "fieldname": "domain", + "fieldtype": "Link", + "label": "Domain", + "options": "Root Domain" + }, + { + "fieldname": "_site_usages", + "fieldtype": "Data", + "hidden": 1, + "label": "Site Usage" + }, + { + "fieldname": "current_cpu_usage", + "fieldtype": "Int", + "hidden": 1 + }, + { + "fieldname": "current_database_usage", + "fieldtype": "Int", + "hidden": 1 + }, + { + "fieldname": "current_disk_usage", + "fieldtype": "Int", + "hidden": 1 + }, + { + "fetch_from": "bench.cluster", + "fieldname": "cluster", + "fieldtype": "Link", + "label": "Cluster", + "options": "Cluster", + "read_only": 1, + "reqd": 1 + }, + { + "fetch_from": "bench.group", + "fieldname": "group", + "fieldtype": "Link", + "label": "Release Group", + "options": "Release Group", + "read_only": 1, + "reqd": 1, + "search_index": 1 + }, + { + "fieldname": "column_break_29", + "fieldtype": "Column Break" + }, + { + "fieldname": "column_break_34", + "fieldtype": "Column Break" + }, + { + "fieldname": "column_break_15", + "fieldtype": "Column Break" + }, + { + "fieldname": "trial_end_date", + "fieldtype": "Date", + "label": "Trial End Date" + }, + { + "default": "0", + "fieldname": "is_standby", + "fieldtype": "Check", + "label": "Is Standby" + }, + { + "fieldname": "account_request", + "fieldtype": "Link", + "label": "Account Request", + "options": "Account Request" + }, + { + "default": "0", + "fieldname": "is_jerp_setup", + "fieldtype": "Check", + "label": "Is JERP Setup" + }, + { + "fieldname": "jerp_consultant", + "fieldtype": "Link", + "label": "JERP Consultant", + "options": "JERP Consultant" + }, + { + "fieldname": "plan", + "fieldtype": "Link", + "label": "Plan", + "options": "Site Plan", + "read_only": 1 + }, + { + "default": "0", + "fieldname": "staging", + "fieldtype": "Check", + "label": "Staging" + }, + { + "default": "0", + "fieldname": "skip_auto_updates", + "fieldtype": "Check", + "label": "Skip Auto Updates" + }, + { + "fieldname": "notify_email", + "fieldtype": "Data", + "label": "Notify Email" + }, + { + "collapsible": 1, + "fieldname": "notifications_section", + "fieldtype": "Section Break", + "label": "Notifications" + }, + { + "default": "0", + "description": "Only used while first restore", + "fieldname": "skip_failing_patches", + "fieldtype": "Check", + "hidden": 1, + "label": "Skip Failing Patches", + "read_only": 1 + }, + { + "collapsible": 1, + "fieldname": "auto_updates_section", + "fieldtype": "Section Break", + "label": "Auto Updates" + }, + { + "fieldname": "auto_update_last_triggered_on", + "fieldtype": "Datetime", + "label": "Auto Update Last Triggered On" + }, + { + "default": "Daily", + "fieldname": "update_trigger_frequency", + "fieldtype": "Select", + "label": "Update Trigger Frequency", + "options": "Daily\nWeekly\nMonthly" + }, + { + "default": "Sunday", + "depends_on": "eval:pg.update_trigger_frequency === 'Weekly'", + "fieldname": "update_on_weekday", + "fieldtype": "Select", + "label": "Update on Weekday", + "options": "Sunday\nMonday\nTuesday\nWednesday\nThursday\nFriday\nSaturday" + }, + { + "default": "0", + "depends_on": "eval:pg.update_trigger_frequency === 'Monthly'", + "fieldname": "update_end_of_month", + "fieldtype": "Check", + "label": "Update End of Month" + }, + { + "default": "1", + "depends_on": "eval:(pg.update_trigger_frequency === 'Monthly' && pg.update_end_of_month === 0)", + "fieldname": "update_on_day_of_month", + "fieldtype": "Int", + "label": "Update on Day of month", + "non_negative": 1 + }, + 
{ + "default": "15:30:00", + "fieldname": "update_trigger_time", + "fieldtype": "Time", + "label": "Update Trigger Time" + }, + { + "fieldname": "column_break_12", + "fieldtype": "Column Break" + }, + { + "fieldname": "database_name", + "fieldtype": "Data", + "label": "Database Name", + "read_only": 1 + }, + { + "collapsible": 1, + "fieldname": "database_section", + "fieldtype": "Section Break", + "label": "Database Access" + }, + { + "fieldname": "standby_for", + "fieldtype": "Link", + "label": "Standby For", + "options": "Marketplace App" + }, + { + "fieldname": "column_break_57", + "fieldtype": "Column Break" + }, + { + "collapsible": 1, + "fieldname": "saas_section", + "fieldtype": "Section Break", + "label": "SaaS" + }, + { + "fieldname": "column_break_63", + "fieldtype": "Column Break" + }, + { + "fieldname": "hybrid_saas_pool", + "fieldtype": "Link", + "label": "Hybrid Saas Pool", + "options": "Hybrid Saas Pool" + }, + { + "fieldname": "billing_tab", + "fieldtype": "Tab Break", + "label": "Billing" + }, + { + "fieldname": "config_tab", + "fieldtype": "Tab Break", + "label": "Config" + }, + { + "fieldname": "remote_files_tab", + "fieldtype": "Tab Break", + "label": "Remote Files" + }, + { + "fieldname": "tab_break_46", + "fieldtype": "Tab Break", + "label": "Miscellaneous" + }, + { + "fieldname": "apps_tab", + "fieldtype": "Tab Break", + "label": "Apps" + }, + { + "fieldname": "column_break_53", + "fieldtype": "Column Break" + }, + { + "default": "0", + "fieldname": "archive_failed", + "fieldtype": "Check", + "label": "Archive Failed", + "read_only": 1 + }, + { + "default": "0", + "description": "Hide site config tab from dashboard", + "fieldname": "hide_config", + "fieldtype": "Check", + "label": "Hide Config" + }, + { + "collapsible": 1, + "fieldname": "backups_section", + "fieldtype": "Section Break", + "label": "Backups" + }, + { + "default": "0", + "fieldname": "skip_scheduled_backups", + "fieldtype": "Check", + "label": "Skip Scheduled Backups" + }, + { + "collapsible": 1, + "fieldname": "tags_section", + "fieldtype": "Section Break", + "label": "Tags" + }, + { + "fieldname": "tags", + "fieldtype": "Table", + "label": "Tags", + "options": "Resource Tag" + }, + { + "fieldname": "standby_for_product", + "fieldtype": "Link", + "label": "Standby for Product", + "options": "Product Trial" + }, + { + "fieldname": "backup_time", + "fieldtype": "Time", + "label": "Backup Time" + }, + { + "fieldname": "column_break_zgig", + "fieldtype": "Column Break" + }, + { + "default": "0", + "fieldname": "only_update_at_specified_time", + "fieldtype": "Check", + "label": "Only update at specified time" + }, + { + "default": "0", + "fieldname": "additional_system_user_created", + "fieldtype": "Check", + "label": "Additional System User Created" + }, + { + "description": "This key will be used to validate requests from saas site for billing & configuration purpose", + "fieldname": "saas_communication_secret", + "fieldtype": "Data", + "label": "SaaS Communication Secret" + }, + { + "collapsible": 1, + "fieldname": "setup_wizard_status_section", + "fieldtype": "Section Break", + "label": "Setup Wizard Status" + }, + { + "fieldname": "column_break_vbgj", + "fieldtype": "Column Break" + }, + { + "default": "0", + "fieldname": "setup_wizard_status_check_retries", + "fieldtype": "Int", + "label": "Retries" + }, + { + "fieldname": "setup_wizard_status_check_next_retry_on", + "fieldtype": "Datetime", + "label": "Next Retry On" + }, + { + "default": "16", + "fieldname": "database_access_connection_limit", + 
"fieldtype": "Int", + "label": "Database Access Connection Limit" + }, + { + "description": "Set for sites created through SaaS flow", + "fieldname": "label", + "fieldtype": "Data", + "label": "Label" + }, + { + "fieldname": "signup_time", + "fieldtype": "Datetime", + "label": "Signup Time" + }, + { + "fieldname": "site_end_date", + "fieldtype": "Date", + "in_list_view": 1, + "label": "Site End Date" + } + ], + "grid_page_length": 50, + "links": [ + { + "group": "Usage", + "link_pagetype": "Site Usage", + "link_fieldname": "site" + }, + { + "group": "Usage", + "link_pagetype": "Remote File", + "link_fieldname": "site" + }, + { + "group": "Related Documents", + "link_pagetype": "Site Domain", + "link_fieldname": "site" + }, + { + "group": "Related Documents", + "link_pagetype": "Site Activity", + "link_fieldname": "site" + }, + { + "group": "Related Documents", + "link_pagetype": "Site Plan Change", + "link_fieldname": "site" + }, + { + "group": "Related Documents", + "link_pagetype": "Subscription", + "link_fieldname": "document_name" + }, + { + "group": "Logs", + "link_pagetype": "Agent Job", + "link_fieldname": "site" + }, + { + "group": "Logs", + "link_pagetype": "Site Backup", + "link_fieldname": "site" + }, + { + "group": "Logs", + "link_pagetype": "Site Update", + "link_fieldname": "site" + }, + { + "group": "Related Documents", + "link_pagetype": "JERP Site Settings", + "link_fieldname": "site" + }, + { + "group": "Related Documents", + "link_pagetype": "Site Migration", + "link_fieldname": "site" + }, + { + "group": "Related Documents", + "link_pagetype": "Marketplace App Subscription", + "link_fieldname": "site" + }, + { + "group": "Related Documents", + "link_pagetype": "Site Access Token", + "link_fieldname": "site" + }, + { + "group": "Related Documents", + "link_pagetype": "Site Database User", + "link_fieldname": "site" + } + ], + "modified": "2025-03-29 18:22:28.685911", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Site", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + }, + { + "create": 1, + "read": 1, + "role": "Jcloud Admin", + "write": 1 + }, + { + "create": 1, + "read": 1, + "role": "Jcloud Member", + "write": 1 + }, + { + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "Jcloud Support Agent", + "share": 1 + } + ], + "sort_field": "creation", + "sort_order": "DESC", + "states": [], + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/site/site.py b/jcloud/jcloud/pagetype/site/site.py new file mode 100644 index 0000000..aa8f35f --- /dev/null +++ b/jcloud/jcloud/pagetype/site/site.py @@ -0,0 +1,3884 @@ +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + +from __future__ import annotations + +import contextlib +import json +from collections import defaultdict +from contextlib import suppress +from functools import cached_property, wraps +from typing import Any + +import dateutil.parser +import jingrow +import jingrow.data +import jingrow.utils +import pytz +import requests +from jingrow import _ +from jingrow.core.utils import find +from jingrow.jingrowclient import JingrowClient, JingrowException +from jingrow.model.document import Document +from jingrow.model.naming import append_number_if_name_exists +from jingrow.utils import ( + add_to_date, + cint, + comma_and, + cstr, + flt, + get_datetime, 
+ get_url, + now_datetime, + sbool, + time_diff_in_hours, +) + +from jcloud.exceptions import ( + CannotChangePlan, + InsufficientSpaceOnServer, + SiteAlreadyArchived, + SiteUnderMaintenance, + VolumeResizeLimitError, +) +from jcloud.marketplace.pagetype.marketplace_app_plan.marketplace_app_plan import ( + MarketplaceAppPlan, +) +from jcloud.utils.webhook import create_webhook_event + +try: + from jingrow.utils import convert_utc_to_user_timezone +except ImportError: + from jingrow.utils import ( + convert_utc_to_system_timezone as convert_utc_to_user_timezone, + ) + +from typing import TYPE_CHECKING + +from jingrow.utils.password import get_decrypted_password +from jingrow.utils.user import is_system_user + +from jcloud.agent import Agent, AgentRequestSkippedException +from jcloud.api.client import dashboard_whitelist +from jcloud.api.site import check_dns, get_updates_between_current_and_next_apps +from jcloud.overrides import get_permission_query_conditions_for_pagetype +from jcloud.jcloud.pagetype.marketplace_app.marketplace_app import ( + get_plans_for_app, + marketplace_app_hook, +) +from jcloud.jcloud.pagetype.resource_tag.tag_helpers import TagHelpers +from jcloud.jcloud.pagetype.server.server import is_dedicated_server +from jcloud.jcloud.pagetype.site_activity.site_activity import log_site_activity +from jcloud.jcloud.pagetype.site_analytics.site_analytics import create_site_analytics +from jcloud.jcloud.pagetype.site_plan.site_plan import UNLIMITED_PLANS, get_plan_config +from jcloud.jcloud.report.mariadb_slow_queries.mariadb_slow_queries import ( + get_pagetype_name, +) +from jcloud.utils import ( + convert, + fmt_timedelta, + get_client_blacklisted_keys, + get_current_team, + guess_type, + human_readable, + log_error, + unique, + validate_subdomain, +) +from jcloud.utils.dns import _change_dns_record, create_dns_record +from jcloud.api.aliyun_sms import send_renew_sms + +if TYPE_CHECKING: + from datetime import datetime + + from jingrow.types.DF import Table + + from jcloud.jcloud.pagetype.bench.bench import Bench + from jcloud.jcloud.pagetype.bench_app.bench_app import BenchApp + from jcloud.jcloud.pagetype.database_server.database_server import DatabaseServer + from jcloud.jcloud.pagetype.deploy_candidate.deploy_candidate import DeployCandidate + from jcloud.jcloud.pagetype.release_group.release_group import ReleaseGroup + from jcloud.jcloud.pagetype.server.server import BaseServer, Server + +DOCTYPE_SERVER_TYPE_MAP = { + "Server": "Application", + "Database Server": "Database", + "Proxy Server": "Proxy", +} + + +class Site(Document, TagHelpers): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + from jcloud.jcloud.pagetype.resource_tag.resource_tag import ResourceTag + from jcloud.jcloud.pagetype.site_app.site_app import SiteApp + from jcloud.jcloud.pagetype.site_config.site_config import SiteConfig + + _keys_removed_in_last_update: DF.Data | None + _site_usages: DF.Data | None + account_request: DF.Link | None + additional_system_user_created: DF.Check + admin_password: DF.Password | None + apps: DF.Table[SiteApp] + archive_failed: DF.Check + auto_update_last_triggered_on: DF.Datetime | None + backup_time: DF.Time | None + bench: DF.Link + cluster: DF.Link + config: DF.Code | None + configuration: DF.Table[SiteConfig] + current_cpu_usage: DF.Int + current_database_usage: DF.Int + current_disk_usage: DF.Int + database_access_connection_limit: DF.Int + database_name: DF.Data | None + domain: DF.Link | None + jerp_consultant: DF.Link | None + free: DF.Check + group: DF.Link + hide_config: DF.Check + host_name: DF.Data | None + hybrid_saas_pool: DF.Link | None + is_jerp_setup: DF.Check + is_standby: DF.Check + label: DF.Data | None + notify_email: DF.Data | None + only_update_at_specified_time: DF.Check + plan: DF.Link | None + remote_config_file: DF.Link | None + remote_database_file: DF.Link | None + remote_private_file: DF.Link | None + remote_public_file: DF.Link | None + saas_communication_secret: DF.Data | None + server: DF.Link + setup_wizard_complete: DF.Check + setup_wizard_status_check_next_retry_on: DF.Datetime | None + setup_wizard_status_check_retries: DF.Int + signup_time: DF.Datetime | None + site_end_date: DF.Date | None + skip_auto_updates: DF.Check + skip_failing_patches: DF.Check + skip_scheduled_backups: DF.Check + staging: DF.Check + standby_for: DF.Link | None + standby_for_product: DF.Link | None + status: DF.Literal["Pending", "Installing", "Updating", "Active", "Inactive", "Broken", "Archived", "Suspended"] + status_before_update: DF.Data | None + subdomain: DF.Data + tags: DF.Table[ResourceTag] + team: DF.Link + timezone: DF.Data | None + trial_end_date: DF.Date | None + update_end_of_month: DF.Check + update_on_day_of_month: DF.Int + update_on_weekday: DF.Literal["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"] + update_trigger_frequency: DF.Literal["Daily", "Weekly", "Monthly"] + update_trigger_time: DF.Time | None + # end: auto-generated types + + PAGETYPE = "Site" + + dashboard_fields = ( + "ip", + "status", + "group", + "notify_email", + "team", + "plan", + "setup_wizard_complete", + "archive_failed", + "cluster", + "bench", + "group", + "database_access_connection_limit", + "site_end_date", + "trial_end_date", + "tags", + "server", + "host_name", + "skip_auto_updates", + "additional_system_user_created", + "label", + "signup_time", + ) + + @staticmethod + def get_list_query(query, filters=None, **list_args): + from jcloud.jcloud.pagetype.site_update.site_update import ( + benches_with_available_update, + ) + + Site = jingrow.qb.PageType("Site") + + status = filters.get("status") + if status == "Archived": + sites = query.where(Site.status == status).run(as_dict=1) + else: + benches_with_available_update = benches_with_available_update() + sites = query.where(Site.status != "Archived").select(Site.bench).run(as_dict=1) + + for site in sites: + if site.bench in benches_with_available_update: + site.status = "Update Available" + + return sites + + @staticmethod + def on_not_found(name): + # If name is a custom domain then redirect to 
the site name + site_name = jingrow.db.get_value("Site Domain", name, "site") + if site_name: + jingrow.response.message = { + "redirect": f"/dashboard/sites/{site_name}", + } + raise + + def get_pg(self, pg): + from jcloud.api.client import get + + group = jingrow.db.get_value( + "Release Group", + self.group, + ["title", "public", "team", "central_bench", "version"], + as_dict=1, + ) + pg.group_title = group.title + pg.version = group.version + pg.group_team = group.team + pg.group_public = group.public or group.central_bench + pg.latest_jingrow_version = jingrow.db.get_value( + "Jingrow Version", {"status": "Stable", "public": True}, order_by="name desc" + ) + pg.eol_versions = jingrow.db.get_all( + "Jingrow Version", + filters={"status": "End of Life"}, + fields=["name"], + order_by="name desc", + pluck="name", + ) + pg.owner_email = jingrow.db.get_value("Team", self.team, "user") + pg.current_usage = self.current_usage + pg.current_plan = get("Site Plan", self.plan) if self.plan else None + pg.last_updated = self.last_updated + pg.has_scheduled_updates = bool( + jingrow.db.exists("Site Update", {"site": self.name, "status": "Scheduled"}) + ) + pg.update_information = self.get_update_information() + pg.actions = self.get_actions() + server = jingrow.get_value("Server", self.server, ["ip", "proxy_server", "team", "title"], as_dict=1) + pg.cluster = jingrow.db.get_value("Cluster", self.cluster, ["title", "image"], as_dict=1) + pg.outbound_ip = server.ip + pg.server_team = server.team + pg.server_title = server.title + pg.inbound_ip = self.inbound_ip + pg.is_dedicated_server = is_dedicated_server(self.server) + + if broken_domain_tls_certificate := jingrow.db.get_value( + "Site Domain", {"site": self.name, "status": "Broken"}, "tls_certificate" + ): + pg.broken_domain_error = jingrow.db.get_value( + "TLS Certificate", broken_domain_tls_certificate, "error" + ) + + return pg + + def site_action(allowed_status: list[str]): + def outer_wrapper(func): + @wraps(func) + def wrapper(inst, *args, **kwargs): + user_type = jingrow.session.data.user_type or jingrow.get_cached_value( + "User", jingrow.session.user, "user_type" + ) + if user_type == "System User": + return func(inst, *args, **kwargs) + status = jingrow.get_value(inst.pagetype, inst.name, "status", for_update=True) + if status not in allowed_status: + jingrow.throw( + f"Site action not allowed for site with status: {jingrow.bold(status)}.\nAllowed status are: {jingrow.bold(comma_and(allowed_status))}." 
+ ) + return func(inst, *args, **kwargs) + + return wrapper + + return outer_wrapper + + def _get_site_name(self, subdomain: str): + """Get full site domain name given subdomain.""" + if not self.domain: + self.domain = jingrow.db.get_single_value("Jcloud Settings", "domain") + return f"{subdomain}.{self.domain}" + + def autoname(self): + self.name = self._get_site_name(self.subdomain) + + def validate(self): + if self.has_value_changed("subdomain"): + self.validate_site_name() + self.validate_bench() + self.set_site_admin_password() + self.validate_installed_apps() + self.validate_host_name() + self.validate_site_config() + self.validate_auto_update_fields() + self.validate_site_plan() + + def before_insert(self): + if not self.bench and self.group: + self.set_latest_bench() + # initialize site.config based on plan + self._update_configuration(self.get_plan_config(), save=False) + if not self.notify_email and self.team != "Administrator": + self.notify_email = jingrow.db.get_value("Team", self.team, "notify_email") + if not self.setup_wizard_status_check_next_retry_on: + self.setup_wizard_status_check_next_retry_on = now_datetime() + + def validate_site_name(self): + validate_subdomain(self.subdomain) + + def set_site_admin_password(self): + # set site.admin_password if doesn't exist + if not self.admin_password: + self.admin_password = jingrow.generate_hash(length=16) + + def validate_bench(self): + if ( + self.status not in ("Broken", "Archived") + and jingrow.db.get_value("Bench", self.bench, "status", for_update=True) == "Archived" + ): + jingrow.throw( + f"Bench {self.bench} is not active. Please try again if you've deployed a new bench." + ) + + bench_group = jingrow.db.get_value("Bench", self.bench, "group") + if bench_group != self.group: + jingrow.throw( + f"Bench release group {bench_group} is not the same as site release group {self.group}." 
+ ) + + bench_server = jingrow.db.get_value("Bench", self.bench, "server") + if bench_server != self.server: + jingrow.throw(f"Bench server {bench_server} is not the same as site server {self.server}.") + + def validate_installed_apps(self): + # validate apps to be installed on site + bench_apps: Table[BenchApp] = jingrow.get_pg("Bench", self.bench).apps + for app in self.apps: + if not find(bench_apps, lambda x: x.app == app.app): + jingrow.throw(f"app {app.app} is not available on Bench {self.bench}.") + + if self.apps[0].app != "jingrow": + jingrow.throw("First app to be installed on site must be jingrow.") + + site_apps = [app.app for app in self.apps] + if len(site_apps) != len(set(site_apps)): + jingrow.throw("Can't install same app twice.") + + # Install apps in the same order as bench + if self.is_new(): + self.sort_apps(bench_apps) + + def sort_apps(self, bench_apps: Table[BenchApp]): + bench_app_names = [app.app for app in bench_apps] + self.apps.sort(key=lambda x: bench_app_names.index(x.app)) + for idx, app in enumerate(self.apps): + app.idx = idx + 1 + + def validate_host_name(self): + # set or update site.host_name + if self.is_new(): + self.host_name = self.name + self._update_configuration({"host_name": f"https://{self.host_name}"}, save=False) + elif self.has_value_changed("host_name"): + self._validate_host_name() + + def validate_site_config(self): + # update site._keys_removed_in_last_update value + old_keys = json.loads(self.config) + new_keys = [x.key for x in self.configuration] + self._keys_removed_in_last_update = json.dumps([x for x in old_keys if x not in new_keys]) + + # generate site.config from site.configuration + self.update_config_preview() + + # create an agent request if config has been updated + # if not self.is_new() and self.has_value_changed("config"): + # Agent(self.server).update_site_config(self) + + def validate_auto_update_fields(self): + # Validate day of month + if not (1 <= self.update_on_day_of_month <= 31): + jingrow.throw("Day of the month must be between 1 and 31 (included)!") + # If site is on public bench, don't allow to disable auto updates + is_group_public = jingrow.get_cached_value("Release Group", self.group, "public") + if self.skip_auto_updates and is_group_public: + jingrow.throw("Auto updates can't be disabled for sites on public benches!") + + def validate_site_plan(self): + if hasattr(self, "subscription_plan") and self.subscription_plan: + """ + If `release_groups` in site plan is empty, then site can be deployed in any release group. + Otherwise, site can only be deployed in the clusters mentioned in the release groups. + """ + release_groups = jingrow.db.get_all( + "Site Plan Release Group", + pluck="release_group", + filters={ + "parenttype": "Site Plan", + "parentfield": "release_groups", + "parent": self.subscription_plan, + }, + ) + clusters = jingrow.db.get_all("Bench", pluck="cluster", filters={"group": ("in", release_groups)}) + is_valid = len(clusters) == 0 or self.cluster in clusters + if not is_valid: + jingrow.throw(f"In {self.subscription_plan}, you can't deploy site in {self.cluster} cluster") + + """ + If `allowed_apps` in site plan is empty, then site can be deployed with any apps. + Otherwise, site can only be deployed with the apps mentioned in the site plan. 
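For example (illustrative plan contents, not actual records): if the plan's
allowed_apps rows contain only "jingrow" and "hrms", a site that selects
"ecommerce" is rejected by the membership check below:

    allowed_apps = ["jingrow", "hrms"]
    selected_apps = ["jingrow", "ecommerce"]
    blocked = [app for app in selected_apps if app not in allowed_apps]  # ["ecommerce"]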
+ """ + allowed_apps = jingrow.db.get_all( + "Site Plan Allowed App", + pluck="app", + filters={ + "parenttype": "Site Plan", + "parentfield": "allowed_apps", + "parent": self.subscription_plan, + }, + ) + if allowed_apps: + selected_apps = [app.app for app in self.apps] + + for app in selected_apps: + if app not in allowed_apps: + jingrow.throw(f"In {self.subscription_plan}, you can't deploy site with {app} app") + + is_dedicated_server_plan = jingrow.db.get_value( + "Site Plan", self.subscription_plan, "dedicated_server_plan" + ) + is_site_on_public_server = jingrow.db.get_value("Server", self.server, "public") + + # If site is on public server, don't allow unlimited plans + if is_site_on_public_server and is_dedicated_server_plan: + self.subscription_plan = jingrow.db.get_value( + "Site Plan", + { + "private_benches": 1, + "dedicated_server_plan": 0, + "document_type": "Site", + "price_cny": ["!=", 0], + }, + order_by="price_cny asc", + ) + + # If site is on dedicated server, set unlimited plan + elif not is_dedicated_server_plan and not is_site_on_public_server: + self.subscription_plan = jingrow.db.get_value( + "Site Plan", + { + "dedicated_server_plan": 1, + "document_type": "Site", + "support_included": 0, + }, + ) + + def capture_signup_event(self, event: str): + team = jingrow.get_pg("Team", self.team) + if jingrow.db.count("Site", {"team": team.name}) <= 1 and team.account_request: + from jcloud.utils.telemetry import capture + + account_request = jingrow.get_pg("Account Request", team.account_request) + if not (account_request.is_saas_signup() or account_request.invited_by_parent_team): + capture(event, "fc_signup", team.user) + + def on_update(self): + if self.status == "Active" and self.has_value_changed("host_name"): + self.update_site_config({"host_name": f"https://{self.host_name}"}) + self._update_redirects_for_all_site_domains() + jingrow.db.set_value("Site Domain", self.host_name, "redirect_to_primary", False) + + self.update_subscription() + + if self.has_value_changed("team"): + jingrow.db.set_value("Site Domain", {"site": self.name}, "team", self.team) + jingrow.db.delete("Jcloud Role Permission", {"site": self.name}) + + if self.status not in [ + "Pending", + "Archived", + "Suspended", + ] and (self.has_value_changed("subdomain") or self.has_value_changed("domain")): + self.rename(self._get_site_name(self.subdomain)) + + # Telemetry: Send event if first site status changed to Active + if self.status == "Active" and self.has_value_changed("status"): + self.capture_signup_event("first_site_status_changed_to_active") + + if self.has_value_changed("status"): + create_site_status_update_webhook_event(self.name) + + def generate_saas_communication_secret(self, create_agent_job=False, save=True): + if not self.standby_for and not self.standby_for_product: + return + if not self.saas_communication_secret: + self.saas_communication_secret = jingrow.generate_hash(length=32) + config = { + "fc_communication_secret": self.saas_communication_secret, + } + if create_agent_job: + self.update_site_config(config) + else: + self._update_configuration(config=config, save=save) + + def rename_upstream(self, new_name: str): + proxy_server = jingrow.db.get_value("Server", self.server, "proxy_server") + agent = Agent(proxy_server, server_type="Proxy Server") + site_domains = jingrow.get_all( + "Site Domain", {"site": self.name, "name": ("!=", self.name)}, pluck="name" + ) + agent.rename_upstream_site(self.server, self, new_name, site_domains) + + def set_apps(self, apps: list): + self.apps 
= [] + bench_apps = jingrow.get_pg("Bench", self.bench).apps + for app in apps: + if not find(bench_apps, lambda x: x.app == app): + continue + self.append("apps", {"app": app}) + self.save() + + @jingrow.whitelist() + def sync_apps(self): + agent = Agent(self.server) + apps_list = agent.get_site_apps(site=self) + self.set_apps(apps_list) + + @jingrow.whitelist() + def retry_rename(self): + """Retry rename with current subdomain""" + if self.name != self._get_site_name(self.subdomain): + self.rename(self._get_site_name(self.subdomain)) + else: + jingrow.throw("Please choose a different subdomain") + + @jingrow.whitelist() + def retry_archive(self): + """Retry archive with subdomain+domain name of site""" + site_name = self.subdomain + "." + self.domain + if jingrow.db.exists("Site", {"name": site_name, "bench": self.bench}): + jingrow.throw(f"Another site already exists in {self.bench} with name: {site_name}") + self.archive(site_name=site_name, reason="Retry Archive") + + def check_duplicate_site(self): + if jingrow.db.exists( + "Site", + { + "subdomain": self.subdomain, + "domain": self.domain, + "status": ("!=", "Archived"), + "name": ("!=", self.name), + }, + ): + jingrow.throw("Site with same subdomain already exists") + + def rename(self, new_name: str): + self.check_duplicate_site() + create_dns_record(pg=self, record_name=self._get_site_name(self.subdomain)) + agent = Agent(self.server) + if self.standby_for_product or self.standby_for: + # if standby site, rename site and create first user for trial signup + create_user = self.get_user_details() + # update the subscription config while renaming the standby site + self.update_config_preview() + site_config = json.loads(self.config) + subscription_config = site_config.get("subscription", {}) + job = agent.rename_site(self, new_name, create_user, config={"subscription": subscription_config}) + self.flags.rename_site_agent_job_name = job.name + else: + agent.rename_site(self, new_name) + self.rename_upstream(new_name) + self.status = "Pending" + self.save() + + try: + # remove old dns record from route53 after rename + domain = jingrow.get_pg("Root Domain", self.domain) + proxy_server = jingrow.get_value("Server", self.server, "proxy_server") + self.remove_dns_record(domain, proxy_server, self.name) + except Exception: + log_error("Removing Old Site from Route53 Failed") + + def update_config_preview(self): + """Regenerates site.config on each site.validate from the site.configuration child table data""" + new_config = {} + + # Update from site.configuration + for row in self.configuration: + # update internal flag from master + row.internal = jingrow.db.get_value("Site Config Key", row.key, "internal") + key_type = row.type or row.get_type() + row.type = key_type + + if key_type == "Number": + key_value = int(row.value) if isinstance(row.value, (float, int)) else json.loads(row.value) + elif key_type == "Boolean": + key_value = ( + row.value if isinstance(row.value, bool) else bool(sbool(json.loads(cstr(row.value)))) + ) + elif key_type == "JSON": + key_value = json.loads(cstr(row.value)) + else: + key_value = row.value + + new_config[row.key] = key_value + + self.config = json.dumps(new_config, indent=4) + + def install_marketplace_conf(self, app: str, plan: str | None = None): + if plan: + MarketplaceAppPlan.create_marketplace_app_subscription(self.name, app, plan, self.team) + marketplace_app_hook(app=app, site=self, op="install") + + def uninstall_marketplace_conf(self, app: str): + marketplace_app_hook(app=app, site=self, 
op="uninstall") + + # disable marketplace plan if it exists + marketplace_app_name = jingrow.db.get_value("Marketplace App", {"app": app}) + app_subscription = jingrow.db.exists( + "Subscription", + { + "team": self.team, + "site": self.name, + "document_type": "Marketplace App", + "document_name": marketplace_app_name, + }, + ) + if marketplace_app_name and app_subscription: + jingrow.db.set_value("Subscription", app_subscription, "enabled", 0) + + def check_marketplace_app_installable(self, plan: str | None = None): + if not plan: + return + if ( + not jingrow.db.get_value("Marketplace App Plan", plan, "price_usd") <= 0 + and not jingrow.local.team().can_install_paid_apps() + ): + jingrow.throw( + "You cannot install a Paid app on Free Credits. Please buy credits before trying to install again." + ) + + # TODO: check if app is available and can be installed + + @dashboard_whitelist() + @site_action(["Active"]) + def install_app(self, app: str, plan: str | None = None) -> str: + self.check_marketplace_app_installable(plan) + + if find(self.apps, lambda x: x.app == app): + return None + + agent = Agent(self.server) + job = agent.install_app_site(self, app) + log_site_activity(self.name, "Install App", app, job.name) + self.status = "Pending" + self.save() + self.install_marketplace_conf(app, plan) + + return job.name + + @dashboard_whitelist() + @site_action(["Active"]) + def uninstall_app(self, app: str) -> str: + agent = Agent(self.server) + job = agent.uninstall_app_site(self, app) + + log_site_activity(self.name, "Uninstall App", app, job.name) + + self.uninstall_marketplace_conf(app) + self.status = "Pending" + self.save() + + return job.name + + def _create_default_site_domain(self): + """Create Site Domain with Site name.""" + return jingrow.get_pg( + { + "pagetype": "Site Domain", + "site": self.name, + "domain": self.name, + "status": "Active", + "retry_count": 0, + "dns_type": "CNAME", + } + ).insert(ignore_if_duplicate=True) + + def after_insert(self): + from jcloud.jcloud.pagetype.jcloud_role.jcloud_role import ( + add_permission_for_newly_created_pg, + ) + + self.capture_signup_event("created_first_site") + + if hasattr(self, "subscription_plan") and self.subscription_plan: + # create subscription + self.create_subscription(self.subscription_plan) + self.reload() + + if hasattr(self, "app_plans") and self.app_plans: + for app, plan in self.app_plans.items(): + MarketplaceAppPlan.create_marketplace_app_subscription( + self.name, app, plan["name"], self.team, True + ) + + # log activity + log_site_activity(self.name, "Create") + self._create_default_site_domain() + create_dns_record(self, record_name=self._get_site_name(self.subdomain)) + self.create_agent_request() + + if hasattr(self, "share_details_consent") and self.share_details_consent: + # create partner lead + jingrow.get_pg(pagetype="Partner Lead", team=self.team, site=self.name).insert( + ignore_permissions=True + ) + + if self.backup_time: + self.backup_time = None # because FF by default sets it to current time + self.save() + add_permission_for_newly_created_pg(self) + + create_site_status_update_webhook_event(self.name) + + def remove_dns_record(self, domain: Document, proxy_server: str, site: str): + """Remove dns record of site pointing to proxy.""" + _change_dns_record(method="DELETE", domain=domain, proxy_server=proxy_server, record_name=site) + + def is_version_14_or_higher(self) -> bool: + group: ReleaseGroup = jingrow.get_cached_pg("Release Group", self.group) + return group.is_version_14_or_higher() + + 
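# --- Editor's note: illustrative sketch, not part of the codebase -------------
# The @site_action guard used on the dashboard actions above boils down to a
# status check before the wrapped method runs. A minimal generic version of that
# pattern (simplified: no system-user bypass, no Jingrow API), with a made-up
# method as the usage example:

from functools import wraps


def require_status(allowed_status):
    def decorator(func):
        @wraps(func)
        def wrapper(site, *args, **kwargs):
            # Refuse the action unless the site is currently in an allowed state.
            if site.status not in allowed_status:
                raise PermissionError(f"Action not allowed while site is {site.status}")
            return func(site, *args, **kwargs)
        return wrapper
    return decorator


@require_status(["Active"])
def install_demo_app(site, app):
    return f"Installing {app} on {site.name}"
# ------------------------------------------------------------------------------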
@property + def space_required_on_app_server(self): + db_size, public_size, private_size = ( + jingrow.get_pg("Remote File", file_name).size if file_name else 0 + for file_name in ( + self.remote_database_file, + self.remote_public_file, + self.remote_private_file, + ) + ) + space_for_download = db_size + public_size + private_size + space_for_extracted_files = ( + (0 if self.is_version_14_or_higher() else (8 * db_size)) + public_size + private_size + ) # 8 times db size for extraction; estimated + return space_for_download + space_for_extracted_files + + @property + def space_required_on_db_server(self): + db_size = jingrow.get_pg("Remote File", self.remote_database_file).size + return 8 * db_size * 2 # double extracted size for binlog + + def check_and_increase_disk(self, server: "BaseServer", space_required: int): + mountpoint = server.guess_data_disk_mountpoint() + free_space = server.free_space(mountpoint) + if (diff := free_space - space_required) <= 0: + msg = f"Insufficient estimated space on {DOCTYPE_SERVER_TYPE_MAP[server.pagetype]} server to create site. Required: {human_readable(space_required)}, Available: {human_readable(free_space)} (Need {human_readable(abs(diff))})." + if server.public: + self.try_increasing_disk(server, mountpoint, diff, msg) + else: + jingrow.throw(msg, InsufficientSpaceOnServer) + + def try_increasing_disk(self, server: "BaseServer", mountpoint: str, diff: int, err_msg: str): + try: + server.calculated_increase_disk_size(mountpoint=mountpoint, additional=diff / 1024 / 1024 // 1024) + except VolumeResizeLimitError: + jingrow.throw( + f"{err_msg} Please wait {fmt_timedelta(server.time_to_wait_before_updating_volume)} before trying again.", + InsufficientSpaceOnServer, + ) + + def check_enough_space_on_server(self): + app: "Server" = jingrow.get_pg("Server", self.server) + self.check_and_increase_disk(app, self.space_required_on_app_server) + + if app.database_server: + db: "DatabaseServer" = jingrow.get_pg("Database Server", app.database_server) + self.check_and_increase_disk(db, self.space_required_on_db_server) + + def create_agent_request(self): + agent = Agent(self.server) + if self.remote_database_file: + agent.new_site_from_backup(self, skip_failing_patches=self.skip_failing_patches) + else: + """ + If the site is creating for saas / product trial purpose, + Create a system user with password at the time of site creation. 
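Editor's note (illustrative): a caller could suppress that extra user by setting
the flag checked just below before inserting the Site. Field values are made up;
only the flag name comes from this method:

    site = jingrow.get_pg({"pagetype": "Site", "subdomain": "demo-trial"})
    # ...other required fields (bench, team, apps, etc.) omitted for brevity
    site.flags.ignore_additional_system_user_creation = True
    site.insert()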
+ + If `ignore_additional_system_user_creation` is set, don't create additional system user + """ + if (self.standby_for_product or self.standby_for) and not self.is_standby: + user_details = self.get_user_details() + if self.flags.get("ignore_additional_system_user_creation", False): + user_details = None + self.flags.new_site_agent_job_name = agent.new_site(self, create_user=user_details).name + else: + self.flags.new_site_agent_job_name = agent.new_site(self).name + + server = jingrow.get_all("Server", filters={"name": self.server}, fields=["proxy_server"], limit=1)[0] + + agent = Agent(server.proxy_server, server_type="Proxy Server") + agent.new_upstream_file(server=self.server, site=self.name) + + @dashboard_whitelist() + @site_action(["Active", "Broken"]) + def reinstall(self): + agent = Agent(self.server) + job = agent.reinstall_site(self) + log_site_activity(self.name, "Reinstall", job=job.name) + self.status = "Pending" + self.save() + return job.name + + @dashboard_whitelist() + @site_action(["Active", "Broken"]) + def migrate(self, skip_failing_patches=False): + agent = Agent(self.server) + activate = True + if self.status in ("Inactive", "Suspended"): + activate = False + self.status_before_update = self.status + elif self.status == "Broken" and self.status_before_update in ( + "Inactive", + "Suspended", + ): + activate = False + job = agent.migrate_site(self, skip_failing_patches=skip_failing_patches, activate=activate) + log_site_activity(self.name, "Migrate", job=job.name) + self.status = "Pending" + self.save() + + @jingrow.whitelist() + def last_migrate_failed(self): + """Returns `True` if the last site update's(`Migrate` deploy type) migrate site job step failed, `False` otherwise""" + + site_update = jingrow.get_all( + "Site Update", + filters={"site": self.name}, + fields=["status", "update_job", "deploy_type"], + limit=1, + order_by="creation desc", + ) + + if not (site_update and site_update[0].deploy_type == "Migrate"): + return False + site_update = site_update[0] + + if site_update.status == "Recovered": + migrate_site_step = jingrow.get_all( + "Agent Job Step", + filters={ + "step_name": "Migrate Site", + "agent_job": site_update.update_job, + }, + fields=["status"], + limit=1, + ) + + if migrate_site_step and migrate_site_step[0].status == "Failure": + return True + + return False + + @jingrow.whitelist() + def restore_tables(self): + self.status_before_update = self.status + agent = Agent(self.server) + agent.restore_site_tables(self) + self.status = "Pending" + self.save() + + @dashboard_whitelist() + def clear_site_cache(self): + agent = Agent(self.server) + job = agent.clear_site_cache(self) + + log_site_activity(self.name, "Clear Cache", job=job.name) + + @dashboard_whitelist() + @site_action(["Active", "Broken"]) + def restore_site(self, skip_failing_patches=False): + if not jingrow.get_pg("Remote File", self.remote_database_file).exists(): + raise Exception(f"Remote File {self.remote_database_file} is unavailable on S3") + + agent = Agent(self.server) + job = agent.restore_site(self, skip_failing_patches=skip_failing_patches) + log_site_activity(self.name, "Restore", job=job.name) + self.status = "Pending" + self.save() + return job.name + + @dashboard_whitelist() + @site_action(["Active", "Broken"]) + def restore_site_from_files(self, files, skip_failing_patches=False): + self.remote_database_file = files["database"] + self.remote_public_file = files["public"] + self.remote_private_file = files["private"] + self.save() + self.reload() + return 
self.restore_site(skip_failing_patches=skip_failing_patches) + + @jingrow.whitelist() + def physical_backup(self): + return self.backup(physical=True) + + @dashboard_whitelist() + def backup(self, with_files=False, offsite=False, force=False, physical=False): + if self.status == "Suspended": + activity = jingrow.db.get_all( + "Site Activity", + filters={"site": self.name, "action": "Suspend Site"}, + order_by="creation desc", + limit=1, + ) + suspension_time = jingrow.get_pg("Site Activity", activity[0]).creation + + if ( + jingrow.db.count( + "Site Backup", + filters=dict( + site=self.name, + status="Success", + creation=(">=", suspension_time), + ), + ) + > 3 + ): + jingrow.throw("You cannot take more than 3 backups after site suspension") + + return jingrow.get_pg( + { + "pagetype": "Site Backup", + "site": self.name, + "with_files": with_files, + "offsite": offsite, + "force": force, + "physical": physical, + } + ).insert() + + @dashboard_whitelist() + def get_backup_download_link(self, backup, file): + from botocore.exceptions import ClientError + + if file not in ["database", "public", "private", "config"]: + jingrow.throw("Invalid file type") + + try: + remote_file = jingrow.db.get_value( + "Site Backup", + {"name": backup, "site": self.name}, + f"remote_{file}_file", + ) + return jingrow.get_pg("Remote File", remote_file).download_link + except ClientError: + log_error(title="Offsite Backup Response Exception") + + def site_migration_scheduled(self): + return jingrow.db.get_value( + "Site Migration", {"site": self.name, "status": "Scheduled"}, "scheduled_time" + ) + + def site_update_scheduled(self): + return jingrow.db.get_value( + "Site Update", {"site": self.name, "status": "Scheduled"}, "scheduled_time" + ) + + def check_move_scheduled(self): + if time := self.site_migration_scheduled(): + jingrow.throw(f"Site Migration is scheduled for {self.name} at {time}") + if time := self.site_update_scheduled(): + jingrow.throw(f"Site Update is scheduled for {self.name} at {time}") + + def ready_for_move(self): + if self.status in ["Updating", "Pending", "Installing"]: + jingrow.throw(f"Site is in {self.status} state. Cannot Update", SiteUnderMaintenance) + elif self.status == "Archived": + jingrow.throw("Site is archived. 
Cannot Update", SiteAlreadyArchived) + self.check_move_scheduled() + + self.status_before_update = self.status + self.status = "Pending" + self.save() + + @dashboard_whitelist() + @site_action(["Active", "Inactive", "Suspended"]) + def schedule_update( + self, + skip_failing_patches: bool = False, + skip_backups: bool = False, + physical_backup: bool = False, + scheduled_time: str | None = None, + ): + log_site_activity(self.name, "Update") + + pg = jingrow.get_pg( + { + "pagetype": "Site Update", + "site": self.name, + "backup_type": "Physical" if physical_backup else "Logical", + "skipped_failing_patches": skip_failing_patches, + "skipped_backups": skip_backups, + "status": "Scheduled" if scheduled_time else "Pending", + "scheduled_time": scheduled_time, + } + ).insert() + return pg.name + + @dashboard_whitelist() + def edit_scheduled_update( + self, + name, + skip_failing_patches: bool = False, + skip_backups: bool = False, + scheduled_time: str | None = None, + ): + pg = jingrow.get_pg("Site Update", name) + pg.skipped_failing_patches = skip_failing_patches + pg.skipped_backups = skip_backups + pg.scheduled_time = scheduled_time + pg.save() + return pg.name + + @dashboard_whitelist() + def cancel_scheduled_update(self, site_update: str): + if status := jingrow.db.get_value("Site Update", site_update, "status") != "Scheduled": + jingrow.throw(f"Cannot cancel a Site Update with status {status}") + + # TODO: Set status to cancelled instead of deleting the pg + jingrow.delete_pg("Site Update", site_update) + + @jingrow.whitelist() + def move_to_group(self, group, skip_failing_patches=False, skip_backups=False): + log_site_activity(self.name, "Update") + + return jingrow.get_pg( + { + "pagetype": "Site Update", + "site": self.name, + "destination_group": group, + "skipped_failing_patches": skip_failing_patches, + "skipped_backups": skip_backups, + "ignore_past_failures": True, + } + ).insert() + + @jingrow.whitelist() + def move_to_bench(self, bench, deactivate=True, skip_failing_patches=False): + jingrow.only_for("System Manager") + self.ready_for_move() + + if bench == self.bench: + jingrow.throw("Site is already on the selected bench.") + + agent = Agent(self.server) + job = agent.move_site_to_bench(self, bench, deactivate, skip_failing_patches) + log_site_activity(self.name, "Update", job=job.name) + + return job + + def reset_previous_status(self, fix_broken=False): + if self.status == "Archived": + return + self.status = self.status_before_update + self.status_before_update = None + if not self.status or (self.status == "Broken" and fix_broken): + status_map = {402: "Suspended", 503: "Inactive"} + try: + response = requests.get(f"https://{self.name}") + self.status = status_map.get(response.status_code, "Active") + except Exception: + log_error("Site Status Fetch Error", site=self.name) + self.save() + + @jingrow.whitelist() + @site_action(["Active"]) + def update_without_backup(self): + log_site_activity(self.name, "Update") + + jingrow.get_pg( + { + "pagetype": "Site Update", + "site": self.name, + "skipped_backups": 1, + } + ).insert() + + @dashboard_whitelist() + def add_domain(self, domain): + domain = domain.lower().strip(".") + response = check_dns(self.name, domain) + if response["matched"]: + if jingrow.db.exists("Site Domain", {"domain": domain}): + jingrow.throw(f"The domain {jingrow.bold(domain)} is already used by a site") + + log_site_activity(self.name, "Add Domain") + jingrow.get_pg( + { + "pagetype": "Site Domain", + "status": "Pending", + "site": self.name, + 
"domain": domain, + "dns_type": response["type"], + "dns_response": json.dumps(response, indent=4, default=str), + } + ).insert() + + def add_domain_for_product_site(self, domain): + domain = domain.lower().strip(".") + log_site_activity(self.name, "Add Domain") + create_dns_record(pg=self, record_name=domain) + jingrow.get_pg( + { + "pagetype": "Site Domain", + "status": "Pending", + "site": self.name, + "domain": domain, + "dns_type": "CNAME", + } + ).insert() + + @jingrow.whitelist() + def create_dns_record(self): + domains = jingrow.db.get_list( + "Site Domain", filters={"site": self.name}, fields=["domain"], pluck="domain" + ) + for domain in domains: + if bool(jingrow.db.exists("Root Domain", domain.split(".", 1)[1], "name")): + create_dns_record(pg=self, record_name=domain) + + @jingrow.whitelist() + def update_dns_record(self, value): + domain = jingrow.get_pg("Root Domain", self.domain) + record_name = self._get_site_name(self.subdomain) + _change_dns_record("UPSERT", domain, value, record_name) + + def get_config_value_for_key(self, key: str) -> Any: + """ + Get site config value configuration child table for given key. + + :returns: None if key not in config. + """ + key_obj = find(self.configuration, lambda x: x.key == key) + if key_obj: + return json.loads(key_obj.get("value")) + return None + + def add_domain_to_config(self, domain: str): + domains = self.get_config_value_for_key("domains") or [] + domains.append(domain) + self._update_configuration({"domains": domains}) + agent = Agent(self.server) + agent.add_domain(self, domain) + + def remove_domain_from_config(self, domain): + domains = self.get_config_value_for_key("domains") or [] + if domain not in domains: + return + domains.remove(domain) + self._update_configuration({"domains": domains}) + agent = Agent(self.server) + agent.remove_domain(self, domain) + + @dashboard_whitelist() + def remove_domain(self, domain): + if domain == self.name: + jingrow.throw("Cannot delete default site_domain") + site_domain = jingrow.get_all("Site Domain", filters={"site": self.name, "domain": domain})[0] + site_domain = jingrow.delete_pg("Site Domain", site_domain.name) + + def retry_add_domain(self, domain): + if check_dns(self.name, domain)["matched"]: + site_domain = jingrow.get_all( + "Site Domain", + filters={ + "site": self.name, + "domain": domain, + "status": ("!=", "Active"), + "retry_count": ("<=", 5), + }, + )[0] + site_domain = jingrow.get_pg("Site Domain", site_domain.name) + site_domain.retry() + + def _check_if_domain_belongs_to_site(self, domain: str): + if not jingrow.db.exists({"pagetype": "Site Domain", "site": self.name, "domain": domain}): + jingrow.throw( + msg=f"Site Domain {domain} for site {self.name} does not exist", + exc=jingrow.exceptions.LinkValidationError, + ) + + def _check_if_domain_is_active(self, domain: str): + status = jingrow.get_value("Site Domain", domain, "status") + if status != "Active": + jingrow.throw(msg="Only active domains can be primary", exc=jingrow.LinkValidationError) + + def _validate_host_name(self): + """Perform checks for primary domain.""" + self._check_if_domain_belongs_to_site(self.host_name) + self._check_if_domain_is_active(self.host_name) + + @dashboard_whitelist() + def set_host_name(self, domain: str): + """Set host_name/primary domain of site.""" + self.host_name = domain + self.save() + + def _get_redirected_domains(self) -> list[str]: + """Get list of redirected site domains for site.""" + return jingrow.get_all( + "Site Domain", + filters={"site": self.name, 
"redirect_to_primary": True}, + pluck="name", + ) + + def _update_redirects_for_all_site_domains(self): + domains = self._get_redirected_domains() + if domains: + return self.set_redirects_in_proxy(domains) + return None + + def _remove_redirects_for_all_site_domains(self): + domains = self._get_redirected_domains() + if domains: + self.unset_redirects_in_proxy(domains) + + def set_redirects_in_proxy(self, domains: list[str]): + target = self.host_name + proxy_server = jingrow.db.get_value("Server", self.server, "proxy_server") + agent = Agent(proxy_server, server_type="Proxy Server") + return agent.setup_redirects(self.name, domains, target) + + def unset_redirects_in_proxy(self, domains: list[str]): + proxy_server = jingrow.db.get_value("Server", self.server, "proxy_server") + agent = Agent(proxy_server, server_type="Proxy Server") + agent.remove_redirects(self.name, domains) + + @dashboard_whitelist() + def set_redirect(self, domain: str): + """Enable redirect to primary for domain.""" + self._check_if_domain_belongs_to_site(domain) + site_domain = jingrow.get_pg("Site Domain", domain) + site_domain.setup_redirect() + + @dashboard_whitelist() + def unset_redirect(self, domain: str): + """Disable redirect to primary for domain.""" + self._check_if_domain_belongs_to_site(domain) + site_domain = jingrow.get_pg("Site Domain", domain) + site_domain.remove_redirect() + + @dashboard_whitelist() + @site_action(["Active", "Broken", "Suspended"]) + def archive(self, site_name=None, reason=None, force=False, skip_reload=False): + agent = Agent(self.server) + self.status = "Pending" + self.save() + job = agent.archive_site(self, site_name, force) + log_site_activity(self.name, "Archive", reason, job.name) + + server = jingrow.get_all("Server", filters={"name": self.server}, fields=["proxy_server"], limit=1)[0] + + agent = Agent(server.proxy_server, server_type="Proxy Server") + agent.remove_upstream_file( + server=self.server, + site=self.name, + site_name=site_name, + skip_reload=skip_reload, + ) + + self.db_set("host_name", None) + + self.delete_offsite_backups() + jingrow.db.set_value( + "Site Backup", + {"site": self.name, "offsite": False}, + "files_availability", + "Unavailable", + ) + self.disable_subscription() + self.disable_marketplace_subscriptions() + + self.archive_site_database_users() + + @jingrow.whitelist() + def cleanup_after_archive(self): + site_cleanup_after_archive(self.name) + + def delete_offsite_backups(self): + from jcloud.jcloud.pagetype.remote_file.remote_file import ( + delete_remote_backup_objects, + ) + + log_site_activity(self.name, "Drop Offsite Backups") + + sites_remote_files = [] + site_backups = jingrow.get_all( + "Site Backup", + filters={ + "site": self.name, + "offsite": True, + "files_availability": "Available", + }, + pluck="name", + order_by="creation desc", + )[1:] # Keep latest backup + for backup_files in jingrow.get_all( + "Site Backup", + filters={"name": ("in", site_backups)}, + fields=[ + "remote_database_file", + "remote_public_file", + "remote_private_file", + ], + as_list=True, + order_by="creation desc", + ignore_ifnull=True, + ): + sites_remote_files += backup_files + + if not sites_remote_files: + return None + + jingrow.db.set_value( + "Site Backup", + {"name": ("in", site_backups), "offsite": True}, + "files_availability", + "Unavailable", + ) + + return delete_remote_backup_objects(sites_remote_files) + + @dashboard_whitelist() + def send_change_team_request(self, team_mail_id: str, reason: str): + """Send email to team to accept site 
transfer request""" + + if self.team != get_current_team(): + jingrow.throw( + "You should belong to the team owning the site to initiate a site ownership transfer." + ) + + if not jingrow.db.exists("Team", {"user": team_mail_id, "enabled": 1}): + jingrow.throw("No Active Team record found.") + + old_team = jingrow.db.get_value("Team", self.team, "user") + + if old_team == team_mail_id: + jingrow.throw(f"Site is already owned by the team {team_mail_id}") + + key = jingrow.generate_hash("Site Transfer Link", 20) + jingrow.get_pg( + { + "pagetype": "Team Change", + "document_type": "Site", + "document_name": self.name, + "to_team": jingrow.db.get_value("Team", {"user": team_mail_id, "enabled": 1}), + "from_team": self.team, + "reason": reason, + "key": key, + } + ).insert() + + link = get_url(f"/api/method/jcloud.api.site.confirm_site_transfer?key={key}") + + if jingrow.conf.developer_mode: + print(f"\nSite transfer link for {team_mail_id}\n{link}\n") + + jingrow.sendmail( + recipients=team_mail_id, + subject="Transfer Site Ownership Confirmation", + template="transfer_team_confirmation", + args={ + "name": self.host_name or self.name, + "type": "site", + "old_team": old_team, + "new_team": team_mail_id, + "transfer_url": link, + }, + ) + + @dashboard_whitelist() + @site_action(["Active", "Broken"]) + def login_as_admin(self, reason=None): + sid = self.login(reason=reason) + return f"https://{self.host_name or self.name}/desk?sid={sid}" + + @dashboard_whitelist() + @site_action(["Active"]) + def login_as_team(self, reason=None): + if self.additional_system_user_created: + team_user = jingrow.db.get_value("Team", self.team, "user") + sid = self.get_login_sid(user=team_user) + if self.standby_for_product: + redirect_route = ( + jingrow.db.get_value("Product Trial", self.standby_for_product, "redirect_to_after_login") + or "/desk" + ) + else: + redirect_route = "/desk" + return f"https://{self.host_name or self.name}{redirect_route}?sid={sid}" + + jingrow.throw("No additional system user created for this site") + return None + + @site_action(["Active"]) + def login_as_user(self, user_email, reason=None): + try: + sid = self.get_login_sid(user=user_email) + if self.standby_for_product: + redirect_route = ( + jingrow.db.get_value("Product Trial", self.standby_for_product, "redirect_to_after_login") + or "/desk" + ) + else: + redirect_route = "/desk" + return f"https://{self.host_name or self.name}{redirect_route}?sid={sid}" + except Exception as e: + jingrow.throw(str(e)) + + @jingrow.whitelist() + def login(self, reason=None): + log_site_activity(self.name, "Login as Administrator", reason=reason) + return self.get_login_sid() + + def create_user_with_team_info(self): + team_user = jingrow.db.get_value("Team", self.team, "user") + user = jingrow.get_pg("User", team_user) + return self.create_user(user.email, user.first_name or "", user.last_name or "") + + def create_user(self, email, first_name, last_name, password=None): + if self.additional_system_user_created: + return None + agent = Agent(self.server) + return agent.create_user(self, email, first_name, last_name, password) + + @jingrow.whitelist() + def show_admin_password(self): + jingrow.msgprint(self.get_password("admin_password"), title="Password", indicator="green") + + def get_connection_as_admin(self): + password = get_decrypted_password("Site", self.name, "admin_password") + return JingrowClient(f"https://{self.name}", "Administrator", password) + + def get_sid_from_agent(self, user: str) -> str | None: + try: + agent = 
Agent(self.server) + return agent.get_site_sid(self, user) + except requests.HTTPError as e: + if "validate_ip_address" in str(e): + jingrow.throw( + f"Login with {user}'s credentials is IP restricted. Please remove the same and try again.", + jingrow.ValidationError, + ) + elif f"User {user} does not exist" in str(e): + jingrow.throw(f"User {user} does not exist in the site", jingrow.ValidationError) + elif jingrow.db.exists( + "Incident", + { + "server": self.server, + "status": ("not in", ["Resolved", "Auto-Resolved", "Jcloud-Resolved"]), + }, + ): + jingrow.throw( + "Server appears to be unresponsive. Please try again in some time.", + jingrow.ValidationError, + ) + else: + raise e + except AgentRequestSkippedException: + jingrow.throw( + "Server is unresponsive. Please try again in some time.", + jingrow.ValidationError, + ) + + def get_login_sid(self, user: str = "Administrator"): + sid = None + if user == "Administrator": + password = get_decrypted_password("Site", self.name, "admin_password") + response = requests.post( + f"https://{self.name}/api/method/login", + data={"usr": user, "pwd": password}, + ) + sid = response.cookies.get("sid") + if not sid or sid == "Guest": + sid = self.get_sid_from_agent(user) + if not sid or sid == "Guest": + jingrow.throw(f"Could not login as {user}", jingrow.ValidationError) + return sid + + def fetch_info(self): + agent = Agent(self.server) + return agent.get_site_info(self) + + def fetch_analytics(self): + agent = Agent(self.server) + if agent.should_skip_requests(): + return None + return agent.get_site_analytics(self) + + def get_disk_usages(self): + try: + last_usage = jingrow.get_last_pg("Site Usage", {"site": self.name}) + except jingrow.DoesNotExistError: + return defaultdict(lambda: None) + + return { + "database": last_usage.database, + "database_free": last_usage.database_free, + "backups": last_usage.backups, + "public": last_usage.public, + "private": last_usage.private, + "creation": last_usage.creation, + } + + def _sync_config_info(self, fetched_config: dict) -> bool: + """Update site pg config with the fetched_config values. + + :fetched_config: Generally data passed is the config part of the agent info response + :returns: True if value has changed + """ + config = { + key: fetched_config[key] for key in fetched_config if key not in get_client_blacklisted_keys() + } + new_config = {**json.loads(self.config or "{}"), **config} + current_config = json.dumps(new_config, indent=4) + + if self.config != current_config: + self._update_configuration(new_config, save=False) + return True + return False + + def _sync_usage_info(self, fetched_usage: dict): + """Generate a Site Usage pg for the site using the fetched_usage data. 
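+        Accepts either a single usage dict or a list of usage dicts; each entry is inserted via _insert_site_usage.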
+
+        :fetched_usage: Requires backups, database, public, private keys with numeric values
+        """
+
+        if isinstance(fetched_usage, list):
+            for usage in fetched_usage:
+                self._insert_site_usage(usage)
+        else:
+            self._insert_site_usage(fetched_usage)
+
+    def _insert_site_usage(self, usage: dict):
+        current_usages = self.get_disk_usages()
+        site_usage_data = {
+            "site": self.name,
+            "backups": usage["backups"],
+            "database": usage["database"],
+            "database_free": usage.get("database_free", 0),
+            "database_free_tables": json.dumps(usage.get("database_free_tables", []), indent=1),
+            "public": usage["public"],
+            "private": usage["private"],
+        }
+
+        same_as_last_usage = (
+            current_usages["backups"] == site_usage_data["backups"]
+            and current_usages["database"] == site_usage_data["database"]
+            and current_usages["public"] == site_usage_data["public"]
+            and current_usages["private"] == site_usage_data["private"]
+            and current_usages["database_free"] == site_usage_data["database_free"]
+        )
+
+        if same_as_last_usage:
+            return
+
+        equivalent_site_time = None
+        if usage.get("timestamp"):
+            equivalent_site_time = convert_utc_to_user_timezone(
+                dateutil.parser.parse(usage["timestamp"])
+            ).replace(tzinfo=None)
+            if jingrow.db.exists("Site Usage", {"site": self.name, "creation": equivalent_site_time}):
+                return
+            if current_usages["creation"] and equivalent_site_time < current_usages["creation"]:
+                return
+
+        site_usage = jingrow.get_pg({"pagetype": "Site Usage", **site_usage_data}).insert()
+
+        if equivalent_site_time:
+            site_usage.db_set("creation", equivalent_site_time)
+
+    def _sync_timezone_info(self, timezone: str) -> bool:
+        """Update site pg timezone with the passed value of timezone.
+
+        :timezone: Timezone passed in part of the agent info response
+        :returns: True if value has changed
+        """
+        # Validate timezone string
+        # Empty string is fine, since we default to IST
+        if timezone:
+            try:
+                pytz.timezone(timezone)
+            except pytz.exceptions.UnknownTimeZoneError:
+                return False
+
+        if self.timezone != timezone:
+            self.timezone = timezone
+            return True
+        return False
+
+    def _sync_database_name(self, config):
+        database_name = config.get("db_name")
+        if self.database_name != database_name:
+            self.database_name = database_name
+            return True
+        return False
+
+    @jingrow.whitelist()
+    def sync_info(self, data=None):
+        """Updates Site Usage, site.config and timezone details for site."""
+        if not data:
+            data = self.fetch_info()
+
+        if not data:
+            return
+
+        fetched_usage = data["usage"]
+        fetched_config = data["config"]
+        fetched_timezone = data["timezone"]
+
+        self._sync_usage_info(fetched_usage)
+        to_save = self._sync_config_info(fetched_config)
+        to_save |= self._sync_timezone_info(fetched_timezone)
+        to_save |= self._sync_database_name(fetched_config)
+
+        if to_save:
+            self.save()
+
+    def sync_analytics(self, analytics=None):
+        if not analytics:
+            analytics = self.fetch_analytics()
+        if analytics:
+            create_site_analytics(self.name, analytics)
+
+    def create_sync_user_webhook(self):
+        """
+        Create 3 webhook records in the site to sync the user with jcloud
+        - One for user record creation
+        - One for user record update
+        - One for user record deletion
+        """
+        conn = self.get_connection_as_admin()
+        pagetype_data = {
+            "pagetype": "Webhook",
+            "webhook_pagetype": "User",
+            "enabled": 1,
+            "request_url": "https://jingrow.com/api/method/jcloud.api.site_login.sync_product_site_user",
+            "request_method": "POST",
+            "request_structure": "JSON",
+            "webhook_json": """{ "user_info": { "email": "{{pg.email}}",
"enabled": "{{pg.enabled}}" } }""", + "webhook_headers": [ + {"key": "x-site", "value": self.name}, + {"key": "Content-Type", "value": "application/json"}, + {"key": "x-site-token", "value": self.saas_communication_secret}, + ], + } + + webhook_data = [ + { + "name": "Sync User records with Jingrow on create", + "webhook_docevent": "after_insert", + }, + { + "name": "Sync User records with Jingrow on update", + "webhook_docevent": "on_update", + "condition": """pg.has_value_changed("enabled")""", + }, + { + "name": "Sync User records with Jingrow on delete", + "webhook_docevent": "on_trash", + }, + ] + + for webhook in webhook_data: + try: + conn.insert({**pagetype_data, **webhook}) + except JingrowException as ex: + if "jingrow.exceptions.DuplicateEntryError" not in str(ex): + raise ex + + def sync_users_to_product_site(self, analytics=None): + from jcloud.jcloud.pagetype.site_user.site_user import create_user_for_product_site + + if self.is_standby: + return + if not analytics: + analytics = self.fetch_analytics() + if analytics: + create_user_for_product_site(self.name, analytics) + + @dashboard_whitelist() + def is_setup_wizard_complete(self): + if self.setup_wizard_complete: + return True + + sid = self.get_login_sid() + conn = JingrowClient(f"https://{self.name}?sid={sid}") + + try: + value = conn.get_value("System Settings", "setup_complete", "System Settings") + except json.JSONDecodeError: + # the proxy might be down or network failure + # that's why the response is blank and get_value try to parse the json + # and raise json.JSONDecodeError + return False + except Exception: + if self.ping().status_code == requests.codes.ok: + # Site is up but setup status fetch failed + log_error("Fetching Setup Status Failed", pg=self) + return False + + setup_complete = cint(value["setup_complete"]) + if not setup_complete: + return False + + self.reload() + self.setup_wizard_complete = 1 + + self.team = ( + jingrow.db.get_value( + "Team", + {"user": jingrow.db.get_value("Account Request", self.account_request, "email")}, + "name", + ) + if self.team == "Administrator" + else self.team + ) + + self.save() + + # Telemetry: Send event if first site status changed to Active + if self.setup_wizard_complete: + self.capture_signup_event("first_site_setup_wizard_completed") + + return setup_complete + + def fetch_setup_wizard_complete_status(self): + with suppress(Exception): + # max retries = 18, backoff time = 10s, with exponential backoff it will try for 30 days + if self.setup_wizard_status_check_retries >= 18: + return + is_completed = self.is_setup_wizard_complete() + if not is_completed: + self.setup_wizard_status_check_retries += 1 + exponential_backoff_duration = 10 * (2**self.setup_wizard_status_check_retries) + self.setup_wizard_status_check_next_retry_on = add_to_date( + now_datetime(), seconds=exponential_backoff_duration + ) + self.save() + + @jingrow.whitelist() + def set_status_based_on_ping(self): + if self.status in ("Active", "Archived", "Inactive", "Suspended"): + return + try: + response = self.ping() + except Exception: + return + else: + if response.status_code == requests.codes.ok: + self.status = "Active" + self.save() + + def ping(self): + return requests.get(f"https://{self.name}/api/method/ping") + + def _set_configuration(self, config: list[dict]): + """Similar to _update_configuration but will replace full configuration at once + This is necessary because when you update site config from the UI, you can update the key, + update the value, remove the key. 
All of this can be handled by setting the full configuration at once. + + Args: + config (list): List of dicts with key, value, and type + """ + blacklisted_config = [x for x in self.configuration if x.key in get_client_blacklisted_keys()] + self.configuration = [] + + # Maintain keys that aren't accessible to Dashboard user + for i, _config in enumerate(blacklisted_config): + _config.idx = i + 1 + self.configuration.append(_config) + + for d in config: + d = jingrow._dict(d) + if isinstance(d.value, (dict, list)): + value = json.dumps(d.value) + else: + value = d.value + # Value is mandatory, skip None and empty strings + if value is None or cstr(value).strip() == "": + continue + self.append("configuration", {"key": d.key, "value": value, "type": d.type}) + self.save() + + def _update_configuration(self, config, save=True): + """Updates site.configuration, runs site.save which updates site.config + + Args: + config (dict): Python dict for any suitable jingrow.conf + """ + existing_keys = {x.key: i for i, x in enumerate(self.configuration)} + for key, value in config.items(): + _type = jingrow.get_value("Site Config Key", {"key": key}, "type") or guess_type(value) + converted_value = convert(value) + if converted_value is None or cstr(converted_value).strip() == "": + continue + if key in existing_keys: + self.configuration[existing_keys[key]].value = converted_value + self.configuration[existing_keys[key]].type = _type + else: + self.append( + "configuration", + {"key": key, "value": converted_value, "type": _type}, + ) + + if save: + self.save() + + @dashboard_whitelist() + @site_action(["Active"]) + def update_config(self, config=None): + """Updates site.configuration, meant for dashboard and API users""" + if config is None: + return + # config = {"key1": value1, "key2": value2} + config = jingrow.parse_json(config) + + sanitized_config = {} + for key, value in config.items(): + if key in get_client_blacklisted_keys(): + jingrow.throw(_(f"The key {key} is blacklisted or internal and cannot be updated")) + + _type = self._site_config_key_type(key, value) + + if _type == "Number": + value = flt(value) + elif _type == "Boolean": + value = bool(sbool(value)) + elif _type == "JSON": + value = jingrow.parse_json(value) + elif _type == "Password" and value == "*******": + value = jingrow.get_value("Site Config", {"key": key, "parent": self.name}, "value") + sanitized_config[key] = value + + self.update_site_config(sanitized_config) + + def _site_config_key_type(self, key, value): + if jingrow.db.exists("Site Config Key", key): + return jingrow.db.get_value("Site Config Key", key, "type") + + if isinstance(value, (dict, list)): + return "JSON" + if isinstance(value, bool): + return "Boolean" + if isinstance(value, (int, float)): + return "Number" + return "String" + + @dashboard_whitelist() + @site_action(["Active"]) + def delete_config(self, key): + """Deletes a key from site configuration, meant for dashboard and API users""" + if key in get_client_blacklisted_keys(): + return None + + updated_config = [] + for row in self.configuration: + if row.key != key and not row.internal: + updated_config.append({"key": row.key, "value": row.value, "type": row.type}) + + return self.update_site_config(updated_config) + + def delete_multiple_config(self, keys: list[str]): + # relies on self._keys_removed_in_last_update in self.validate + # used by https://jingrow.com/app/marketplace-app/email_delivery_service + config_list: list[dict] = [] + for key in self.configuration: + config = {} + if key.key in 
keys: + continue + config["key"] = key.key + config["value"] = key.value + config["type"] = key.type + config_list.append(config) + self.update_site_config(config_list) + + @jingrow.whitelist() + def update_site_config(self, config=None): + """Updates site.configuration, site.config and runs site.save which initiates an Agent Request + This checks for the blacklisted config keys via Jingrow Validations, but not for internal usages. + Don't expose this directly to an external API. Pass through `jcloud.utils.sanitize_config` or use + `jcloud.api.site.update_config` instead. + + Args: + config (dict): Python dict for any suitable jingrow.conf + """ + if config is None: + config = {} + if isinstance(config, list): + self._set_configuration(config) + else: + self._update_configuration(config) + return Agent(self.server).update_site_config(self) + + def update_site(self): + log_site_activity(self.name, "Update") + + def create_subscription(self, plan): + # create a site plan change log + self._create_initial_site_plan_change(plan) + + def update_subscription(self): + if self.status in ["Archived", "Broken", "Suspended"]: + self.disable_subscription() + else: + self.enable_subscription() + + if self.has_value_changed("team"): + subscription = self.subscription + if subscription: + subscription.team = self.team + subscription.save(ignore_permissions=True) + + def enable_subscription(self): + subscription = self.subscription + if subscription: + subscription.enable() + + def disable_subscription(self): + subscription = self.subscription + if subscription: + jingrow.db.set_value("Subscription", subscription.name, "enabled", False) + + def disable_marketplace_subscriptions(self): + app_subscriptions = jingrow.get_all( + "Marketplace App Subscription", + filters={"site": self.name, "status": "Active"}, + pluck="name", + ) + + for subscription in app_subscriptions: + subscription_pg = jingrow.get_pg("Marketplace App Subscription", subscription) + subscription_pg.disable() + + subscriptions = jingrow.get_all("Subscription", {"site": self.name, "enabled": 1}, pluck="name") + for subscription in subscriptions: + subscription_pg = jingrow.get_pg("Subscription", subscription) + subscription_pg.disable() + + def can_change_plan(self, ignore_card_setup): + if is_system_user(jingrow.session.user): + return + + if ignore_card_setup: + # ignore card setup for prepaid app payments + return + + if bool(jingrow.db.get_value("Cluster", self.cluster, "hybrid")): + # skip validation if site is on hybrid server + return + + team = jingrow.get_pg("Team", self.team) + + if team.parent_team: + team = jingrow.get_pg("Team", team.parent_team) + + if team.payment_mode == "Paid By Partner" and team.billing_team: + team = jingrow.get_pg("Team", team.billing_team) + + if team.is_defaulter(): + jingrow.throw("Cannot change plan because you have unpaid invoices", CannotChangePlan) + + if not (team.default_payment_method or team.get_balance()): + jingrow.throw( + "Cannot change plan because you haven't added a card and not have enough balance", + CannotChangePlan, + ) + + # TODO: rename to change_plan and remove the need for ignore_card_setup param + @dashboard_whitelist() + def set_plan(self, plan): + from jcloud.api.site import validate_plan + + validate_plan(self.server, plan) + self.change_plan(plan) + + def change_plan(self, plan, ignore_card_setup=False): + self.can_change_plan(ignore_card_setup) + plan_config = self.get_plan_config(plan) + + self._update_configuration(plan_config) + ret = jingrow.get_pg( + { + "pagetype": 
"Site Plan Change", + "site": self.name, + "from_plan": self.plan, + "to_plan": plan, + } + ).insert() + + self.reload() + if self.status == "Suspended": + self.unsuspend_if_applicable() + else: + # trigger agent job only once + self.update_site_config(plan_config) + + if self.trial_end_date: + self.reload() + self.trial_end_date = "" + self.save() + + jingrow.enqueue_pg( + self.pagetype, + self.name, + "revoke_database_access_on_plan_change", + enqueue_after_commit=True, + ) + return ret + + def archive_site_database_users(self): + db_users = jingrow.get_all( + "Site Database User", + filters={ + "site": self.name, + "status": ("!=", "Archived"), + }, + pluck="name", + ) + + for db_user in db_users: + jingrow.get_pg("Site Database User", db_user).archive( + raise_error=False, skip_remove_db_user_step=True + ) + + def revoke_database_access_on_plan_change(self): + # If the new plan doesn't have database access, disable it + if jingrow.db.get_value("Site Plan", self.plan, "database_access"): + return + + self.archive_site_database_users() + + def unsuspend_if_applicable(self): + try: + usage = jingrow.get_last_pg("Site Usage", {"site": self.name}) + except jingrow.DoesNotExistError: + # If no pg is found, it means the site was created a few moments before + # team was suspended, potentially due to failure in payment. Don't unsuspend + # site in that case. team.unsuspend_sites should handle that, then. + return + + plan_name = self.plan + # get plan from subscription + if not plan_name: + subscription = self.subscription + if not subscription: + return + plan_name = subscription.plan + + plan = jingrow.get_pg("Site Plan", plan_name) + + disk_usage = usage.public + usage.private + if usage.database < plan.max_database_usage and disk_usage < plan.max_storage_usage: + self.current_database_usage = (usage.database / plan.max_database_usage) * 100 + self.current_disk_usage = ((usage.public + usage.private) / plan.max_storage_usage) * 100 + self.unsuspend(reason="Plan Upgraded") + + @dashboard_whitelist() + @site_action(["Active", "Broken"]) + def deactivate(self): + plan = jingrow.db.get_value("Site Plan", self.plan, ["is_jingrow_plan", "is_trial_plan"], as_dict=True) + if self.plan and plan.is_trial_plan: + jingrow.throw(_("Cannot deactivate site on a trial plan")) + + if self.plan and plan.is_jingrow_plan: + jingrow.throw(_("Cannot deactivate site on a Jingrow plan")) + + log_site_activity(self.name, "Deactivate Site") + self.status = "Inactive" + self.update_site_config({"maintenance_mode": 1}) + self.update_site_status_on_proxy("deactivated") + + @dashboard_whitelist() + @site_action(["Inactive", "Broken"]) + def activate(self): + log_site_activity(self.name, "Activate Site") + self.status = "Active" + self.update_site_config({"maintenance_mode": 0}) + self.update_site_status_on_proxy("activated") + self.reactivate_app_subscriptions() + + @jingrow.whitelist() + def suspend(self, reason=None, skip_reload=False): + log_site_activity(self.name, "Suspend Site", reason) + self.status = "Suspended" + self.update_site_config({"maintenance_mode": 1}) + self.update_site_status_on_proxy("suspended", skip_reload=skip_reload) + self.deactivate_app_subscriptions() + + def deactivate_app_subscriptions(self): + jingrow.db.set_value( + "Marketplace App Subscription", + {"status": "Active", "site": self.name}, + {"status": "Inactive"}, + ) + + def reactivate_app_subscriptions(self): + jingrow.db.set_value( + "Marketplace App Subscription", + {"status": "Inactive", "site": self.name}, + {"status": "Active"}, + 
) + + @jingrow.whitelist() + @site_action(["Suspended"]) + def unsuspend(self, reason=None, skip_reload=False): + log_site_activity(self.name, "Unsuspend Site", reason) + self.status = "Active" + self.update_site_config({"maintenance_mode": 0}) + self.update_site_status_on_proxy("activated", skip_reload=skip_reload) + self.reactivate_app_subscriptions() + + @jingrow.whitelist() + def reset_site_usage(self): + agent = Agent(self.server) + agent.reset_site_usage(self) + + def update_site_status_on_proxy(self, status, skip_reload=False): + proxy_server = jingrow.db.get_value("Server", self.server, "proxy_server") + agent = Agent(proxy_server, server_type="Proxy Server") + agent.update_site_status(self.server, self.name, status, skip_reload) + + def get_user_details(self): + if jingrow.db.get_value("Team", self.team, "user") == "Administrator" and self.account_request: + ar = jingrow.get_pg("Account Request", self.account_request) + user_email = ar.email + user_first_name = ar.first_name + user_last_name = ar.last_name + else: + user_email = jingrow.db.get_value("Team", self.team, "user") + user = jingrow.db.get_value( + "User", {"email": user_email}, ["first_name", "last_name"], as_dict=True + ) + user_first_name = user.first_name if (user and user.first_name) else "" + user_last_name = user.last_name if (user and user.last_name) else "" + payload = { + "email": user_email, + "first_name": user_first_name or "", + "last_name": user_last_name or "", + } + """ + If the site is created for product trial, + we might have collected the password from end-user for his site + """ + if self.account_request and self.standby_for_product and not self.is_standby: + with contextlib.suppress(jingrow.DoesNotExistError): + # fetch the product trial request + product_trial_request = jingrow.get_pg( + "Product Trial Request", + { + "account_request": self.account_request, + "product_trial": self.standby_for_product, + "site": self.name, + }, + ) + setup_wizard_completion_mode = jingrow.get_value( + "Product Trial", product_trial_request.product_trial, "setup_wizard_completion_mode" + ) + if setup_wizard_completion_mode == "manual": + password = product_trial_request.get_user_login_password_from_signup_details() + if password: + payload["password"] = password + + return payload + + def setup_jerp(self): + account_request = jingrow.get_pg("Account Request", self.account_request) + agent = Agent(self.server) + user = { + "email": account_request.email, + "first_name": account_request.first_name, + "last_name": account_request.last_name, + } + config = { + "setup_config": { + "country": account_request.country, + "timezone": account_request.timezone, + "domain": account_request.domain, + "currency": account_request.currency, + "language": account_request.language, + "company": account_request.company, + } + } + agent.setup_jerp(self, user, config) + + @property + def subscription(self): + name = jingrow.db.get_value("Subscription", {"document_type": "Site", "document_name": self.name}) + return jingrow.get_pg("Subscription", name) if name else None + + def can_charge_for_subscription(self, subscription=None): + today = jingrow.utils.getdate() + return ( + self.status not in ["Archived", "Suspended"] + and self.team + and self.team != "Administrator" + and not self.free + and (today > get_datetime(self.trial_end_date).date() if self.trial_end_date else True) + ) + + def get_plan_name(self, plan=None): + if not plan: + plan = self.subscription_plan if hasattr(self, "subscription_plan") else self.plan + if plan and 
not isinstance(plan, str): + jingrow.throw("Site.subscription_plan must be a string") + return plan + + def get_plan_config(self, plan=None): + plan = self.get_plan_name(plan) + config = get_plan_config(plan) + if plan in UNLIMITED_PLANS: + # PERF: do not enable usage tracking on unlimited sites. + config.pop("rate_limit", None) + return config + + def set_latest_bench(self): + from pypika.terms import PseudoColumn + + if not (self.domain and self.cluster and self.group): + jingrow.throw("domain, cluster and group are required to create site") + + proxy_servers_names = jingrow.db.get_all( + "Proxy Server Domain", {"domain": self.domain}, pluck="parent" + ) + proxy_servers = jingrow.db.get_all( + "Proxy Server", + {"status": "Active", "name": ("in", proxy_servers_names)}, + pluck="name", + ) + + """ + For restricted plans, just choose any bench from the release groups and clusters combination + For others, don't allow to deploy on those specific release group benches, choose anything except that + """ + + release_group_names = [] + if self.get_plan_name(): + release_group_names = jingrow.db.get_all( + "Site Plan Release Group", + pluck="release_group", + filters={ + "parenttype": "Site Plan", + "parentfield": "release_groups", + "parent": self.get_plan_name(), + }, + ) + + Bench = jingrow.qb.PageType("Bench") + Server = jingrow.qb.PageType("Server") + + bench_query = ( + jingrow.qb.from_(Bench) + .select( + Bench.name, + Bench.server, + Bench.group, + PseudoColumn(f"`tabBench`.`cluster` = '{self.cluster}' `in_primary_cluster`"), + ) + .left_join(Server) + .on(Bench.server == Server.name) + .where(Server.proxy_server.isin(proxy_servers)) + .where(Bench.status == "Active") + .orderby(PseudoColumn("in_primary_cluster"), order=jingrow.qb.desc) + .orderby(Server.use_for_new_sites, order=jingrow.qb.desc) + .orderby(Bench.creation, order=jingrow.qb.desc) + .limit(1) + ) + if release_group_names: + bench_query = bench_query.where(Bench.group.isin(release_group_names)) + else: + restricted_release_group_names = jingrow.db.get_all( + "Site Plan Release Group", + pluck="release_group", + filters={"parenttype": "Site Plan", "parentfield": "release_groups"}, + ) + if self.group in restricted_release_group_names: + jingrow.throw(f"Site can't be deployed on this release group {self.group} due to restrictions") + bench_query = bench_query.where(Bench.group == self.group) + if self.server: + bench_query = bench_query.where(Server.name == self.server) + + result = bench_query.run(as_dict=True) + if len(result) == 0: + jingrow.throw("No bench available to deploy this site") + return + + self.bench = result[0].name + self.server = result[0].server + if release_group_names: + self.group = result[0].group + + def _create_initial_site_plan_change(self, plan): + jingrow.get_pg( + { + "pagetype": "Site Plan Change", + "site": self.name, + "from_plan": "", + "to_plan": plan, + "type": "Initial Plan", + "timestamp": self.creation, + } + ).insert(ignore_permissions=True) + + def check_db_access_enabling(self): + if jingrow.db.get_value( + "Agent Job", + filters={ + "site": self.name, + "job_type": "Add User to ProxySQL", + "status": ["in", ["Running", "Pending"]], + }, + for_update=True, + ): + jingrow.throw("Database Access is already being enabled on this site. 
Please check after a while.") + + def get_auto_update_info(self): + fields = [ + "auto_updates_scheduled", + "auto_update_last_triggered_on", + "update_trigger_frequency", + "update_trigger_time", + "update_on_weekday", + "update_end_of_month", + "update_on_day_of_month", + ] + return {field: self.get(field) for field in fields} + + def get_update_information(self): + from jcloud.jcloud.pagetype.site_update.site_update import ( + benches_with_available_update, + ) + + out = jingrow._dict() + out.update_available = self.bench in benches_with_available_update(site=self.name) + if not out.update_available: + return out + + bench: "Bench" = jingrow.get_pg("Bench", self.bench) + source = bench.candidate + destinations = jingrow.get_all( + "Deploy Candidate Difference", + filters={"source": source}, + limit=1, + pluck="destination", + ) + if not destinations: + out.update_available = False + return out + + destination = destinations[0] + + destination_candidate: "DeployCandidate" = jingrow.get_pg("Deploy Candidate", destination) + + current_apps = bench.apps + next_apps = destination_candidate.apps + out.apps = get_updates_between_current_and_next_apps(current_apps, next_apps) + + out.installed_apps = self.apps + out.update_available = any([app["update_available"] for app in out.apps]) + return out + + def fetch_running_optimize_tables_job(self): + return jingrow.db.exists( + "Agent Job", + { + "site": self.name, + "job_type": "Optimize Tables", + "status": ["in", ["Undelivered", "Running", "Pending"]], + }, + ) + + @dashboard_whitelist() + def optimize_tables(self, ignore_checks: bool = False): + if not ignore_checks: + # check for running `Optimize Tables` agent job + if job := self.fetch_running_optimize_tables_job(): + return { + "success": True, + "message": "Optimize Tables job is already running on this site.", + "job_name": job, + } + # check if `Optimize Tables` has run in last 1 hour + recent_agent_job_name = jingrow.db.exists( + "Agent Job", + { + "site": self.name, + "job_type": "Optimize Tables", + "status": ["not in", ["Failure", "Delivery Failure"]], + "creation": [">", jingrow.utils.add_to_date(jingrow.utils.now_datetime(), hours=-1)], + }, + ) + if recent_agent_job_name: + return { + "success": False, + "message": "Optimize Tables job has already run in the last 1 hour. 
Try later.", + "job_name": None, + } + + agent = Agent(self.server) + job_name = agent.optimize_tables(self).name + return { + "success": True, + "message": "Optimize Tables has been triggered on this site.", + "job_name": job_name, + } + + @dashboard_whitelist() + def get_database_performance_report(self): + from jcloud.jcloud.report.mariadb_slow_queries.mariadb_slow_queries import get_data as get_slow_queries + + agent = Agent(self.server) + # fetch slow queries of last 7 days + slow_queries = get_slow_queries( + jingrow._dict( + { + "database": self.database_name, + "start_datetime": jingrow.utils.add_to_date(None, days=-7), + "stop_datetime": jingrow.utils.now_datetime(), + "search_pattern": ".*", + "max_lines": 2000, + "normalize_queries": True, + } + ) + ) + # convert all the float to int + for query in slow_queries: + for key, value in query.items(): + if isinstance(value, float): + query[key] = int(value) + + # Sort by duration + slow_queries.sort(key=lambda x: x["duration"], reverse=True) + + is_performance_schema_enabled = False + if database_server := jingrow.db.get_value("Server", self.server, "database_server"): + is_performance_schema_enabled = jingrow.db.get_value( + "Database Server", + database_server, + "is_performance_schema_enabled", + ) + result = None + if is_performance_schema_enabled: + with suppress(Exception): + # for larger table or if database has any locks, fetching perf report will be failed + result = agent.get_summarized_performance_report_of_database(self) + # remove `parent` & `creation` indexes from unused_indexes + result["unused_indexes"] = [ + index + for index in result.get("unused_indexes", []) + if index["index_name"] not in ["parent", "creation"] + ] + + if not result: + result = {} + result["unused_indexes"] = [] + result["redundant_indexes"] = [] + result["top_10_time_consuming_queries"] = [] + result["top_10_queries_with_full_table_scan"] = [] + + # sort the slow queries by `rows_examined` + result["slow_queries"] = sorted(slow_queries, key=lambda x: x["rows_examined"], reverse=True) + result["is_performance_schema_enabled"] = is_performance_schema_enabled + return result + + @property + def server_logs(self): + return Agent(self.server).get(f"benches/{self.bench}/sites/{self.name}/logs") + + def get_server_log(self, log): + return Agent(self.server).get(f"benches/{self.bench}/sites/{self.name}/logs/{log}") + + def get_server_log_for_log_browser(self, log): + return Agent(self.server).get(f"benches/{self.bench}/sites/{self.name}/logs_v2/{log}") + + @property + def has_paid(self) -> bool: + """Has the site been paid for by customer.""" + invoice_items = jingrow.get_all( + "Invoice Item", + { + "document_type": self.pagetype, + "document_name": self.name, + "Amount": (">", 0), + }, + pluck="parent", + ) + today = jingrow.utils.getdate() + today_last_month = jingrow.utils.add_to_date(today, months=-1) + last_month_last_date = jingrow.utils.get_last_day(today_last_month) + return jingrow.db.exists( + "Invoice", + { + "status": "Paid", + "name": ("in", invoice_items or ["NULL"]), + "period_end": (">=", last_month_last_date), + # this month's or last month's invoice has been paid for + }, + ) + + @property + def inbound_ip(self): + server = jingrow.db.get_value( + "Server", + self.server, + ["ip", "is_standalone", "proxy_server", "team"], + as_dict=True, + ) + if server.is_standalone: + ip = server.ip + else: + ip = jingrow.db.get_value("Proxy Server", server.proxy_server, "ip") + return ip + + @property + def current_usage(self): + from 
jcloud.api.analytics import get_current_cpu_usage + + result = jingrow.db.get_all( + "Site Usage", + fields=["database", "public", "private"], + filters={"site": self.name}, + order_by="creation desc", + limit=1, + ) + usage = result[0] if result else {} + + # number of hours until cpu usage resets + now = jingrow.utils.now_datetime() + today_end = now.replace(hour=23, minute=59, second=59) + hours_left_today = flt(time_diff_in_hours(today_end, now), 2) + + return { + "cpu": flt(get_current_cpu_usage(self.name) / (3.6 * (10**9)), 5), + "storage": usage.get("public", 0) + usage.get("private", 0), + "database": usage.get("database", 0), + "hours_until_cpu_usage_resets": hours_left_today, + } + + @property + def last_updated(self): + result = jingrow.db.get_all( + "Site Activity", + filters={"site": self.name, "action": "Update"}, + order_by="creation desc", + limit=1, + pluck="creation", + ) + return result[0] if result else None + + @classmethod + def get_sites_with_backup_time(cls) -> list[dict]: + sites = jingrow.qb.PageType(cls.PAGETYPE) + return ( + jingrow.qb.from_(sites) + .select(sites.name, sites.backup_time) + .where(sites.backup_time.isnotnull()) + .where(sites.status == "Active") + .where(sites.skip_scheduled_backups == 0) + .run(as_dict=True) + ) + + @classmethod + def get_sites_for_backup(cls, interval: int): + sites = cls.get_sites_without_backup_in_interval(interval) + servers_with_backups = jingrow.get_all( + "Server", + {"status": "Active", "skip_scheduled_backups": False}, + pluck="name", + ) + return jingrow.get_all( + "Site", + { + "name": ("in", sites), + "skip_scheduled_backups": False, + "backup_time": ("is", "not set"), + "server": ("in", servers_with_backups), + }, + ["name", "timezone", "server"], + order_by="server", + ignore_ifnull=True, + ) + + @classmethod + def get_sites_without_backup_in_interval(cls, interval: int) -> list[str]: + """Return active sites that haven't had backup taken in interval hours.""" + interval_hrs_ago = jingrow.utils.add_to_date(None, hours=-interval) + all_sites = set( + jingrow.get_all( + "Site", + { + "status": "Active", + "creation": ("<=", interval_hrs_ago), + "is_standby": False, + "plan": ("not like", "%Trial"), + }, + pluck="name", + ) + ) + return list( + all_sites + - set(cls.get_sites_with_backup_in_interval(interval_hrs_ago)) + - set(cls.get_sites_with_pending_backups(interval_hrs_ago)) + ) + # TODO: query using creation time of account request for actual new sites <03-09-21, Balamurali M> # + + @classmethod + def get_sites_with_pending_backups(cls, interval_hrs_ago: datetime) -> list[str]: + return jingrow.get_all( + "Site Backup", + { + "status": ("in", ["Running", "Pending"]), + "creation": (">=", interval_hrs_ago), + }, + pluck="site", + ) + + @classmethod + def get_sites_with_backup_in_interval(cls, interval_hrs_ago) -> list[str]: + return jingrow.get_all( + "Site Backup", + { + "creation": (">", interval_hrs_ago), + "status": ("!=", "Failure"), + "owner": "Administrator", + }, + pluck="site", + ignore_ifnull=True, + ) + + @classmethod + def exists(cls, subdomain, domain) -> bool: + """Check if subdomain is available""" + banned_domains = jingrow.get_all("Blocked Domain", {"block_for_all": 1}, pluck="name") + if banned_domains and subdomain in banned_domains: + return True + return bool( + jingrow.db.exists("Blocked Domain", {"name": subdomain, "root_domain": domain}) + or jingrow.db.exists( + "Site", + { + "subdomain": subdomain, + "domain": domain, + "status": ("!=", "Archived"), + }, + ) + ) + + @jingrow.whitelist() 
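+    # Asks the site's app server agent to run the post-migrate steps for this site.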
+ def run_after_migrate_steps(self): + agent = Agent(self.server) + agent.run_after_migrate_steps(self) + + @jingrow.whitelist() + def get_actions(self): + is_group_public = jingrow.get_cached_value("Release Group", self.group, "public") + + actions = [ + { + "action": "激活站点", + "description": "激活站点使其可以在互联网上被访问", + "button_label": "激活", + "condition": self.status in ["Inactive", "Broken"], + "pg_method": "activate", + }, + { + "action": "管理数据库用户", + "description": "管理您站点数据库的用户和权限", + "button_label": "管理", + "pg_method": "dummy", + "condition": not self.hybrid_site, + }, + { + "action": "计划备份", + "description": "为此站点计划一个备份", + "button_label": "计划", + "pg_method": "schedule_backup", + }, + { + "action": "转移站点", + "description": "将此站点的所有权转移给另一个团队", + "button_label": "转移", + "pg_method": "send_change_team_request", + }, + { + "action": "版本升级", + "description": "将您的站点升级到主要版本", + "button_label": "升级", + "pg_method": "upgrade", + "condition": self.status == "Active", + }, + { + "action": "更改区域", + "description": "将您的站点迁移到不同的区域", + "button_label": "更改", + "pg_method": "change_region", + "condition": self.status == "Active", + }, + { + "action": "更改工作台组", + "description": "将您的站点迁移到不同的工作台组", + "button_label": "更改", + "pg_method": "change_bench", + "condition": self.status == "Active", + }, + { + "action": "更改服务器", + "description": "将您的站点迁移到不同的服务器", + "button_label": "更改", + "pg_method": "change_server", + "condition": self.status == "Active" and not is_group_public, + }, + { + "action": "清除缓存", + "description": "清除站点上的缓存", + "button_label": "清除", + "pg_method": "clear_site_cache", + }, + { + "action": "停用站点", + "description": "停用的站点在互联网上无法访问", + "button_label": "停用", + "condition": self.status == "Active", + "pg_method": "deactivate", + }, + { + "action": "更新数据库", + "description": "在您的站点上运行更新数据库命令", + "button_label": "更新", + "pg_method": "migrate", + "group": "危险操作", + }, + { + "action": "使用文件恢复", + "description": "使用数据库、公共和私有文件进行恢复", + "button_label": "恢复", + "pg_method": "restore_site_from_files", + "group": "危险操作", + }, + { + "action": "从现有站点恢复", + "description": "从另一个站点恢复数据库、公共和私有文件", + "button_label": "恢复", + "pg_method": "restore_site_from_files", + "group": "危险操作", + }, + { + "action": "重置站点", + "description": "将您的站点数据库重置为干净状态", + "button_label": "重置", + "pg_method": "reinstall", + "group": "危险操作", + }, + { + "action": "删除站点", + "description": "当您删除站点时,所有站点数据将永久删除", + "button_label": "删除", + "pg_method": "archive", + "group": "危险操作", + }, + ] + + return [d for d in actions if d.get("condition", True)] + + @property + def hybrid_site(self) -> bool: + return bool(jingrow.get_cached_value("Server", self.server, "is_self_hosted")) + + @property + def pending_for_long(self) -> bool: + if self.status != "Pending": + return False + return (jingrow.utils.now_datetime() - self.modified).total_seconds() > 60 * 60 * 4 # 4 hours + + @jingrow.whitelist() + def fetch_bench_from_agent(self): + agent = Agent(self.server) + benches_with_this_site = [] + for bench in agent.get("server")["benches"].values(): + if self.name in bench["sites"]: + benches_with_this_site.append(bench["name"]) + if len(benches_with_this_site) == 1: + jingrow.db.set_value("Site", self.name, "bench", benches_with_this_site[0]) + + @cached_property + def is_on_dedicated_plan(self): + return bool(jingrow.db.get_value("Site Plan", self.plan, "dedicated_server_plan")) + + @jingrow.whitelist() + def forcefully_remove_site(self, bench): + """Bypass all agent/jcloud callbacks and just remove this site from the target bench/server""" + from 
jcloud.utils import get_mariadb_root_password + + jingrow.only_for("System Manager") + + if bench == self.bench: + jingrow.throw("Use Archive Site action to remove site from current bench") + + # Mimic archive_site method in the agent.py + server = jingrow.db.get_value("Bench", bench, ["server"]) + data = { + "mariadb_root_password": get_mariadb_root_password(self), + "force": True, + } + + response = {"server": server, "bench": bench} + agent = Agent(server) + result = agent.request("POST", f"benches/{bench}/sites/{self.name}/archive", data, raises=False) + if "job" in result: + job = result["job"] + response["job"] = job + else: + response["error"] = result["error"] + self.add_comment( + text=f"{jingrow.session.user} attempted to forcefully remove site from {bench}.
+{json.dumps(response, indent=1)}
+"
+        )
+        return response
+
+    @dashboard_whitelist()
+    def fetch_database_table_schema(self, reload=False):
+        """
+        Store dump in redis cache
+        """
+        key_for_schema = f"database_table_schema__data:{self.name}"
+        key_for_schema_status = (
+            f"database_table_schema__status:{self.name}"  # 1 - loading, 2 - done, None - not available
+        )
+
+        if reload:
+            jingrow.cache().delete_value(key_for_schema)
+            jingrow.cache().delete_value(key_for_schema_status)
+
+        status = jingrow.utils.cint(jingrow.cache().get_value(key_for_schema_status))
+        if status:
+            if status == 1:
+                return {
+                    "loading": True,
+                    "data": [],
+                }
+            if status == 2:
+                return {
+                    "loading": False,
+                    "data": json.loads(jingrow.cache().get_value(key_for_schema)),
+                }
+
+        # Check if any agent job is created within 5 minutes and in pending/running condition
+        # Checks to prevent duplicate agent job creation due to race condition
+        if not jingrow.db.exists(
+            "Agent Job",
+            {
+                "job_type": "Fetch Database Table Schema",
+                "site": self.name,
+                "status": ["in", ["Undelivered", "Pending", "Running"]],
+                "creation": (">", jingrow.utils.add_to_date(None, minutes=-5)),
+            },
+        ):
+            # create the agent job and put it in loading state
+            jingrow.cache().set_value(key_for_schema_status, 1, expires_in_sec=600)
+            Agent(self.server).fetch_database_table_schema(
+                self, include_index_info=True, include_table_size=True
+            )
+        return {
+            "loading": True,
+            "data": [],
+        }
+
+    @dashboard_whitelist()
+    def fetch_database_processes(self):
+        agent = Agent(self.server)
+        if agent.should_skip_requests():
+            return None
+        return agent.fetch_database_processes(self)
+
+    @dashboard_whitelist()
+    def kill_database_process(self, id):
+        agent = Agent(self.server)
+        if agent.should_skip_requests():
+            return None
+        processes = agent.fetch_database_processes(self)
+        if not processes:
+            return None
+        # start from False so the kill request is only sent when the pid is actually present
+        is_found_pid = False
+        for process in processes:
+            if str(process["id"]) == str(id):
+                is_found_pid = True
+                break
+        if not is_found_pid:
+            return None
+        return agent.kill_database_process(self, id)
+
+    @dashboard_whitelist()
+    def run_sql_query_in_database(self, query: str, commit: bool):
+        if not query:
+            return {"success": False, "output": "SQL Query cannot be empty"}
+        pg = jingrow.get_pg(
+            {
+                "pagetype": "SQL Playground Log",
+                "site": self.name,
+                "team": self.team,
+                "query": query,
+                "committed": commit,
+            }
+        )
+        response = Agent(self.server).run_sql_query_in_database(self, query, commit)
+        pg.is_successful = response.get("success", False)
+        pg.insert(ignore_permissions=True)
+        return response
+
+    @dashboard_whitelist()
+    def suggest_database_indexes(self):
+        from jcloud.jcloud.report.mariadb_slow_queries.mariadb_slow_queries import get_data as get_slow_queries
+
+        existing_agent_job_name = jingrow.db.exists(
+            "Agent Job",
+            {
+                "site": self.name,
+                "status": ("not in", ("Failure", "Delivery Failure")),
+                "job_type": "Analyze Slow Queries",
+                "creation": (
+                    ">",
+                    jingrow.utils.add_to_date(None, minutes=-30),
+                ),
+                "retry_count": 0,
+            },
+        )
+
+        if existing_agent_job_name:
+            existing_agent_job = jingrow.get_pg("Agent Job", existing_agent_job_name)
+            if existing_agent_job.status == "Success":
+                return {
+                    "loading": False,
+                    "data": json.loads(existing_agent_job.data).get("result", []),
+                }
+            return {
+                "loading": True,
+                "data": [],
+            }
+
+        # fetch slow queries of last 7 days
+        slow_queries = get_slow_queries(
+            jingrow._dict(
+                {
+                    "database": self.database_name,
+                    "start_datetime": jingrow.utils.add_to_date(None, days=-7),
+                    "stop_datetime": jingrow.utils.now_datetime(),
+ "search_pattern": ".*", + "max_lines": 1000, + "normalize_queries": True, + } + ) + ) + slow_queries = [{"example": x["example"], "normalized": x["query"]} for x in slow_queries] + if len(slow_queries) == 0: + return { + "loading": False, + "data": [], + } + agent = Agent(self.server) + agent.analyze_slow_queries(self, slow_queries) + + return { + "loading": True, + "data": [], + } + + @dashboard_whitelist() + def add_database_index(self, table, column): + record = jingrow.db.exists( + "Agent Job", + { + "site": self.name, + "status": ["in", ["Undelivered", "Running", "Pending"]], + "job_type": "Add Database Index", + }, + ) + if record: + return { + "success": False, + "message": "There is already a job running for adding database index. Please wait until finished.", + "job_name": record, + } + pagetype = get_pagetype_name(table) + agent = Agent(self.server) + job = agent.add_database_index(self, pagetype=pagetype, columns=[column]) + return { + "success": True, + "message": "Database index will be added on site.", + "job_name": job.name, + } + + +def site_cleanup_after_archive(site): + delete_site_domains(site) + delete_site_subdomain(site) + release_name(site) + + +def delete_site_subdomain(site): + site_pg = jingrow.get_pg("Site", site) + domain = jingrow.get_pg("Root Domain", site_pg.domain) + is_standalone = jingrow.get_value("Server", site_pg.server, "is_standalone") + if is_standalone: + proxy_server = site_pg.server + else: + proxy_server = jingrow.get_value("Server", site_pg.server, "proxy_server") + site_pg.remove_dns_record(domain, proxy_server, site) + + +def delete_site_domains(site): + domains = jingrow.get_all("Site Domain", {"site": site}) + jingrow.db.set_value("Site", site, "host_name", None) + for domain in domains: + jingrow.delete_pg("Site Domain", domain.name) + + +def release_name(name): + if ".archived" in name: + return + new_name = f"{name}.archived" + new_name = append_number_if_name_exists("Site", new_name, separator=".") + jingrow.rename_pg("Site", name, new_name) + + +def process_fetch_database_table_schema_job_update(job): + key_for_schema = f"database_table_schema__data:{job.site}" + key_for_schema_status = ( + f"database_table_schema__status:{job.site}" # 1 - loading, 2 - done, None - not available + ) + + if job.status in ["Failure", "Delivery Failure"]: + jingrow.cache().delete_value(key_for_schema) + jingrow.cache().delete_value(key_for_schema_status) + return + + if job.status == "Success": + """ + Support old agent versions + Remove this once all agents are updated + """ + data = json.loads(job.data) + is_old_agent = False + + if len(data) > 0 and isinstance(data[next(iter(data.keys()))], list): + is_old_agent = True + + if is_old_agent: + data_copy = data.copy() + data = {} + for key, value in data_copy.items(): + data[key] = { + "columns": value, + "size": { + "data_length": 0, + "index_length": 0, + "total_size": 0, + }, # old agent api doesn't have size info + } + for column in data[key]["columns"]: + column["index_info"] = { + "index_usage": {x: 0 for x in column["indexes"]}, # just fill some dummy value + "indexes": column["indexes"], + "is_indexed": len(column["indexes"]) > 0, + } + + jingrow.cache().set_value(key_for_schema, json.dumps(data), expires_in_sec=6000) + jingrow.cache().set_value(key_for_schema_status, 2, expires_in_sec=6000) + + +def process_new_site_job_update(job): # noqa: C901 + site_status = jingrow.get_value("Site", job.site, "status", for_update=True) + + other_job_types = { + "Add Site to Upstream": ("New Site", "New Site 
from Backup"), + "New Site": ("Add Site to Upstream",), + "New Site from Backup": ("Add Site to Upstream",), + }[job.job_type] + + first = job.status + second = jingrow.get_value( + "Agent Job", + {"job_type": ("in", other_job_types), "site": job.site}, + "status", + for_update=True, + ) + + backup_tests = jingrow.get_all( + "Backup Restoration Test", + dict(test_site=job.site, status="Running"), + pluck="name", + ) + + if "Success" == first == second: + updated_status = "Active" + marketplace_app_hook(site=Site("Site", job.site), op="install") + elif "Failure" in (first, second) or "Delivery Failure" in (first, second): + updated_status = "Broken" + elif "Running" in (first, second): + updated_status = "Installing" + else: + updated_status = "Pending" + + status_map = { + "Active": "Success", + "Broken": "Failure", + "Installing": "Running", + "Pending": "Running", + } + + if updated_status != site_status: + if backup_tests: + jingrow.db.set_value( + "Backup Restoration Test", + backup_tests[0], + "status", + status_map[updated_status], + ) + jingrow.db.commit() + + jingrow.db.set_value("Site", job.site, "status", updated_status) + create_site_status_update_webhook_event(job.site) + + if job.status == "Success": + request_data = json.loads(job.request_data) + if "create_user" in request_data: + jingrow.db.set_value("Site", job.site, "additional_system_user_created", True) + jingrow.db.commit() + + # Update in product trial request + if job.job_type in ("New Site", "Add Site to Upstream") and updated_status in ( + "Active", + "Broken", + ): + update_product_trial_request_status_based_on_site_status(job.site, updated_status == "Active") + + # check if new bench related to a site group deploy + site_group_deploy = jingrow.db.get_value( + "Site Group Deploy", + { + "site": job.site, + "status": "Creating Site", + }, + ) + if site_group_deploy: + jingrow.get_pg("Site Group Deploy", site_group_deploy).update_site_group_deploy_on_process_job(job) + + +def update_product_trial_request_status_based_on_site_status(site, is_site_active): + records = jingrow.get_list("Product Trial Request", filters={"site": site}, fields=["name"]) + if not records: + return + product_trial_request = jingrow.get_pg("Product Trial Request", records[0].name, for_update=True) + if is_site_active: + mode = jingrow.get_value( + "Product Trial", product_trial_request.product_trial, "setup_wizard_completion_mode" + ) + if mode != "auto": + product_trial_request.status = "Site Created" + product_trial_request.site_creation_completed_on = now_datetime() + product_trial_request.save(ignore_permissions=True) + else: + product_trial_request.complete_setup_wizard() + else: + product_trial_request.status = "Error" + product_trial_request.save(ignore_permissions=True) + + +def process_complete_setup_wizard_job_update(job): + records = jingrow.get_list("Product Trial Request", filters={"site": job.site}, fields=["name"]) + if not records: + return + product_trial_request = jingrow.get_pg("Product Trial Request", records[0].name, for_update=True) + if job.status == "Success": + jingrow.db.set_value("Site", job.site, "additional_system_user_created", True) + if jingrow.get_all("Site Domain", filters={"site": job.site, "status": ["!=", "Active"]}): + product_trial_request.status = "Adding Domain" + else: + product_trial_request.status = "Site Created" + product_trial_request.site_creation_completed_on = now_datetime() + product_trial_request.save(ignore_permissions=True) + elif job.status in ("Failure", "Delivery Failure"): + 
product_trial_request.status = "Error" + product_trial_request.save(ignore_permissions=True) + + +def process_add_domain_job_update(job): + records = jingrow.get_list("Product Trial Request", filters={"site": job.site}, fields=["name"]) + if not records: + return + + product_trial_request = jingrow.get_pg("Product Trial Request", records[0].name, for_update=True) + if job.status == "Success": + if jingrow.get_all( + "Agent Job", + filters={"site": job.site, "job_type": "Complete Setup Wizard", "status": ["!=", "Success"]}, + ): + product_trial_request.status = "Completing Setup Wizard" + else: + product_trial_request.status = "Site Created" + product_trial_request.site_creation_completed_on = now_datetime() + + product_trial_request.save(ignore_permissions=True) + + site_domain = json.loads(job.request_data).get("domain") + site = jingrow.get_pg("Site", job.site) + auto_generated_domain = site.host_name + site.host_name = site_domain + site.save() + site.set_redirect(auto_generated_domain) + + elif job.status in ("Failure", "Delivery Failure"): + product_trial_request.status = "Error" + product_trial_request.save(ignore_permissions=True) + + +def get_remove_step_status(job): + remove_step_name = { + "Archive Site": "Archive Site", + "Remove Site from Upstream": "Remove Site File from Upstream Directory", + }[job.job_type] + + return jingrow.db.get_value( + "Agent Job Step", + {"step_name": remove_step_name, "agent_job": job.name}, + "status", + for_update=True, + ) + + +def process_archive_site_job_update(job): + site_status = jingrow.get_value("Site", job.site, "status", for_update=True) + + other_job_type = { + "Remove Site from Upstream": "Archive Site", + "Archive Site": "Remove Site from Upstream", + }[job.job_type] + + try: + other_job = jingrow.get_last_pg( + "Agent Job", + filters={"job_type": other_job_type, "site": job.site}, + for_update=True, + ) + except jingrow.DoesNotExistError: + # Site is already renamed, the other job beat us to it + # Our work is done + return + + first = get_remove_step_status(job) + second = get_remove_step_status(other_job) + + if ( + ("Success" == first == second) + or ("Skipped" == first == second) + or sorted(("Success", "Skipped")) == sorted((first, second)) + ): + updated_status = "Archived" + elif "Failure" in (first, second): + updated_status = "Broken" + elif "Delivery Failure" == first == second: + updated_status = "Active" + elif "Delivery Failure" in (first, second): + updated_status = "Broken" + else: + updated_status = "Pending" + + if updated_status != site_status: + jingrow.db.set_value( + "Site", + job.site, + {"status": updated_status, "archive_failed": updated_status != "Archived"}, + ) + if updated_status == "Archived": + site_cleanup_after_archive(job.site) + + +def process_install_app_site_job_update(job): + updated_status = { + "Pending": "Pending", + "Running": "Installing", + "Success": "Active", + "Failure": "Active", + "Delivery Failure": "Active", + }[job.status] + + site_status = jingrow.get_value("Site", job.site, "status") + if updated_status != site_status: + if job.status == "Success": + site = jingrow.get_pg("Site", job.site) + app = json.loads(job.request_data).get("name") + app_pg = find(site.apps, lambda x: x.app == app) + if not app_pg: + site.append("apps", {"app": app}) + site.save() + jingrow.db.set_value("Site", job.site, "status", updated_status) + create_site_status_update_webhook_event(job.site) + + +def process_uninstall_app_site_job_update(job): + updated_status = { + "Pending": "Pending", + "Running": 
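+        # while the uninstall job runs the dashboard shows the site as "Installing";
+        # once the job ends (even on failure) the site itself remains usable, hence "Active"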
"Installing", + "Success": "Active", + "Failure": "Active", + "Delivery Failure": "Active", + }[job.status] + + site_status = jingrow.get_value("Site", job.site, "status") + if updated_status != site_status: + if job.status == "Success": + site = jingrow.get_pg("Site", job.site) + app = job.request_path.rsplit("/", 1)[-1] + app_pg = find(site.apps, lambda x: x.app == app) + if app_pg: + site.remove(app_pg) + site.save() + jingrow.db.set_value("Site", job.site, "status", updated_status) + create_site_status_update_webhook_event(job.site) + + +def process_marketplace_hooks_for_backup_restore(apps_from_backup: set[str], site: Site): + site_apps = set([app.app for app in site.apps]) + apps_to_install = apps_from_backup - site_apps + apps_to_uninstall = site_apps - apps_from_backup + for app in apps_to_install: + if ( + jingrow.get_cached_value("Marketplace App", app, "subscription_type") == "Free" + ): # like china_compliance; no need to check subscription + marketplace_app_hook(app=app, site=site, op="install") + for app in apps_to_uninstall: + if ( + jingrow.get_cached_value("Marketplace App", app, "subscription_type") == "Free" + ): # like china_compliance; no need to check subscription + marketplace_app_hook(app=app, site=site, op="uninstall") + + +def process_restore_job_update(job, force=False): + """ + force: force updates apps table sync + """ + updated_status = { + "Pending": "Pending", + "Running": "Installing", + "Success": "Active", + "Failure": "Broken", + "Delivery Failure": "Active", + }[job.status] + + site_status = jingrow.get_value("Site", job.site, "status") + if force or updated_status != site_status: + if job.status == "Success": + apps_from_backup: list[str] = [line.split()[0] for line in job.output.splitlines() if line] + site = Site("Site", job.site) + process_marketplace_hooks_for_backup_restore(set(apps_from_backup), site) + site.set_apps(apps_from_backup) + jingrow.db.set_value("Site", job.site, "status", updated_status) + create_site_status_update_webhook_event(job.site) + + +def process_reinstall_site_job_update(job): + updated_status = { + "Pending": "Pending", + "Running": "Installing", + "Success": "Active", + "Failure": "Broken", + "Delivery Failure": "Active", + }[job.status] + + site_status = jingrow.get_value("Site", job.site, "status") + if updated_status != site_status: + jingrow.db.set_value("Site", job.site, "status", updated_status) + create_site_status_update_webhook_event(job.site) + if job.status == "Success": + jingrow.db.set_value("Site", job.site, "setup_wizard_complete", 0) + + +def process_migrate_site_job_update(job): + updated_status = { + "Pending": "Pending", + "Running": "Updating", + "Success": "Active", + "Failure": "Broken", + "Delivery Failure": "Active", + }[job.status] + + if updated_status == "Active": + site: Site = jingrow.get_pg("Site", job.site) + if site.status_before_update: + site.reset_previous_status(fix_broken=True) + return + site_status = jingrow.get_value("Site", job.site, "status") + if updated_status != site_status: + jingrow.db.set_value("Site", job.site, "status", updated_status) + create_site_status_update_webhook_event(job.site) + + +def get_rename_step_status(job): + rename_step_name = { + "Rename Site": "Rename Site", + "Rename Site on Upstream": "Rename Site File in Upstream Directory", + }[job.job_type] + + return jingrow.db.get_value( + "Agent Job Step", + {"step_name": rename_step_name, "agent_job": job.name}, + "status", + for_update=True, + ) + + +def process_rename_site_job_update(job): # noqa: C901 + 
site_status = jingrow.get_value("Site", job.site, "status", for_update=True) + + other_job_type = { + "Rename Site": "Rename Site on Upstream", + "Rename Site on Upstream": "Rename Site", + }[job.job_type] + + if job.job_type == "Rename Site" and job.status == "Success": + request_data = json.loads(job.request_data) + if "create_user" in request_data: + jingrow.db.set_value("Site", job.site, "additional_system_user_created", True) + + try: + other_job = jingrow.get_last_pg( + "Agent Job", + filters={"job_type": other_job_type, "site": job.site}, + for_update=True, + ) + except jingrow.DoesNotExistError: + # Site is already renamed, he other job beat us to it + # Our work is done + return + + first = get_rename_step_status(job) + second = get_rename_step_status(other_job) + + if "Success" == first == second: + update_records_for_rename(job) + # update job obj with new name + job.reload() + updated_status = "Active" + from jcloud.jcloud.pagetype.site.pool import create as create_pooled_sites + + create_pooled_sites() + + elif "Failure" in (first, second): + updated_status = "Broken" + elif "Delivery Failure" == first == second: + updated_status = "Active" + elif "Delivery Failure" in (first, second): + updated_status = "Broken" + elif "Running" in (first, second): + updated_status = "Updating" + else: + updated_status = "Pending" + + if updated_status != site_status: + jingrow.db.set_value("Site", job.site, "status", updated_status) + create_site_status_update_webhook_event(job.site) + + +def process_move_site_to_bench_job_update(job): + updated_status = { + "Pending": "Pending", + "Running": "Updating", + "Failure": "Broken", + }.get(job.status) + if job.status in ("Success", "Failure"): + dest_bench = json.loads(job.request_data).get("target") + dest_group = jingrow.db.get_value("Bench", dest_bench, "group") + + move_site_step_status = jingrow.db.get_value( + "Agent Job Step", + {"step_name": "Move Site", "agent_job": job.name}, + "status", + ) + if move_site_step_status == "Success": + jingrow.db.set_value("Site", job.site, "bench", dest_bench) + jingrow.db.set_value("Site", job.site, "group", dest_group) + if updated_status: + jingrow.db.set_value("Site", job.site, "status", updated_status) + create_site_status_update_webhook_event(job.site) + return + if job.status == "Success": + site = jingrow.get_pg("Site", job.site) + site.reset_previous_status(fix_broken=True) + + +def update_records_for_rename(job): + """Update jcloud records for successful site rename.""" + data = json.loads(job.request_data) + new_name = data["new_name"] + if new_name == job.site: # idempotency + return + + site = jingrow.get_pg("Site", job.site, for_update=True) + if site.host_name == job.site: + # Host name already updated in f server, no need to create another job + site._update_configuration({"host_name": f"https://{new_name}"}) + site.db_set("host_name", new_name) + + jingrow.rename_pg("Site", job.site, new_name) + jingrow.rename_pg("Site Domain", job.site, new_name) + + +def process_restore_tables_job_update(job): + updated_status = { + "Pending": "Pending", + "Running": "Updating", + "Success": "Active", + "Failure": "Broken", + }[job.status] + + site_status = jingrow.get_value("Site", job.site, "status") + if updated_status != site_status: + if updated_status == "Active": + jingrow.get_pg("Site", job.site).reset_previous_status(fix_broken=True) + else: + jingrow.db.set_value("Site", job.site, "status", updated_status) + create_site_status_update_webhook_event(job.site) + + +def 
process_create_user_job_update(job): + if job.status == "Success": + jingrow.db.set_value("Site", job.site, "additional_system_user_created", True) + update_product_trial_request_status_based_on_site_status(job.site, True) + elif job.status in ("Failure", "Delivery Failure"): + update_product_trial_request_status_based_on_site_status(job.site, False) + + +get_permission_query_conditions = get_permission_query_conditions_for_pagetype("Site") + + +def prepare_site(site: str, subdomain: str | None = None) -> dict: + # prepare site details + pg = jingrow.get_pg("Site", site) + site_name = subdomain if subdomain else "brt-" + pg.subdomain + app_plans = [app.app for app in pg.apps] + backups = jingrow.get_all( + "Site Backup", + dict(status="Success", site=site, files_availability="Available", offsite=1), + pluck="name", + ) + if not backups: + jingrow.throw("Backup Files not found.") + backup = jingrow.get_pg("Site Backup", backups[0]) + + files = { + "config": backup.remote_config_file, + "database": backup.remote_database_file, + "public": backup.remote_public_file, + "private": backup.remote_private_file, + } + return { + "domain": jingrow.db.get_single_value("Jcloud Settings", "domain"), + "plan": pg.plan, + "name": site_name, + "group": pg.group, + "selected_app_plans": {}, + "apps": app_plans, + "files": files, + } + + +@jingrow.whitelist() +def options_for_new(group: str | None = None, selected_values=None) -> dict: + domain = jingrow.db.get_single_value("Jcloud Settings", "domain") + selected_values = jingrow.parse_json(selected_values) if selected_values else jingrow._dict() + + versions = [] + bench = None + apps = [] + clusters = [] + + versions_filters = {"public": True} + if not group: + versions_filters.update({"status": ("!=", "End of Life")}) + + versions = jingrow.db.get_all( + "Jingrow Version", + ["name", "default", "status", "number"], + versions_filters, + order_by="number desc", + ) + for v in versions: + v.label = v.name + v.value = v.name + + if selected_values.version: + bench = _get_bench_for_new(selected_values.version) + apps = _get_apps_of_bench(selected_values.version, bench) if bench else [] + cluster_names = unique( + jingrow.db.get_all( + "Bench", + filters={"candidate": jingrow.db.get_value("Bench", bench, "candidate")}, + pluck="cluster", + ) + ) + clusters = jingrow.db.get_all( + "Cluster", + filters={"name": ("in", cluster_names), "public": True}, + fields=["name", "title", "image", "beta"], + ) + for cluster in clusters: + cluster.label = cluster.title + cluster.value = cluster.name + + return { + "domain": domain, + "bench": bench, + "versions": versions, + "apps": apps, + "clusters": clusters, + } + + +def _get_bench_for_new(version): + restricted_release_group_names = jingrow.db.get_all( + "Site Plan Release Group", + pluck="release_group", + filters={"parenttype": "Site Plan", "parentfield": "release_groups"}, + ) + release_group = jingrow.db.get_value( + "Release Group", + fieldname=["name", "`default`", "title"], + filters={ + "enabled": 1, + "public": 1, + "version": version, + "name": ("not in", restricted_release_group_names), + }, + order_by="creation desc", + as_dict=1, + ) + if not release_group: + return None + + return jingrow.db.get_value( + "Bench", + filters={"status": "Active", "group": release_group.name}, + order_by="creation desc", + ) + + +def _get_apps_of_bench(version, bench): + team = jingrow.local.team().name + bench_apps = jingrow.db.get_all("Bench App", {"parent": bench}, pluck="source") + app_sources = jingrow.get_all( + "App 
Source", + [ + "name", + "app", + "repository_url", + "repository", + "repository_owner", + "branch", + "team", + "public", + "app_title", + "jingrow", + ], + filters={"name": ("in", bench_apps), "jingrow": 0}, + or_filters={"public": True, "team": team}, + ) + for app in app_sources: + app.label = app.app_title + app.value = app.app + apps = sorted(app_sources, key=lambda x: bench_apps.index(x.name)) + marketplace_apps = jingrow.db.get_all( + "Marketplace App", + fields=["title", "image", "description", "app", "route"], + filters={"app": ("in", [app.app for app in apps])}, + ) + for app in apps: + marketplace_details = find(marketplace_apps, lambda x: x.app == app.app) + if marketplace_details: + app.update(marketplace_details) + app.plans = get_plans_for_app(app.app, version) + return apps + + +def sync_sites_setup_wizard_complete_status(): + team_name = jingrow.get_value("Team", {"user": "Administrator"}, "name") + sites = jingrow.get_all( + "Site", + filters={ + "status": "Active", + "setup_wizard_complete": 0, + "setup_wizard_status_check_retries": ("<", 18), + "setup_wizard_status_check_next_retry_on": ("<=", jingrow.utils.now()), + "team": ("!=", team_name), + }, + pluck="name", + order_by="RAND()", + limit=100, + ) + for site in sites: + jingrow.enqueue( + "jcloud.jcloud.pagetype.site.site.fetch_setup_wizard_complete_status_if_site_exists", + site=site, + queue="sync", + job_id=f"fetch_setup_wizard_complete_status:{site}", + deduplicate=True, + ) + + +def fetch_setup_wizard_complete_status_if_site_exists(site): + if not jingrow.db.exists("Site", site): + return + with suppress(jingrow.DoesNotExistError): + jingrow.get_pg("Site", site).fetch_setup_wizard_complete_status() + + +def create_site_status_update_webhook_event(site: str): + record = jingrow.get_pg("Site", site) + if record.team == "Administrator": + return + create_webhook_event("Site Status Update", record, record.team) + + +def send_renew_notification(): + """ + 发送站点续费通知给用户: + - 到期前60天 + - 到期前30天 + - 到期前7天 + - 到期当天 + 通知方式包括邮件和站内信 + """ + today = jingrow.utils.today() + + # 计算关键日期 + sixty_days_later = jingrow.utils.add_days(today, 60) # 60天后 + thirty_days_later = jingrow.utils.add_days(today, 30) # 30天后 + seven_days_later = jingrow.utils.add_days(today, 7) # 7天后 + + # 查找今天需要发送通知的站点 + sites = jingrow.get_all( + "Site", + filters={ + "status": ["in", ["Active", "Suspended"]], + "site_end_date": ["in", [sixty_days_later, thirty_days_later, seven_days_later, today]] + }, + fields=["name", "host_name", "team", "site_end_date", "notify_email"] + ) + + for site in sites: + # 获取团队信息(不包括phone字段,因为Team文档类型中没有这个字段) + team_info = jingrow.db.get_value("Team", site.team, ["user", "notify_email", "billing_email"], as_dict=True) + if not team_info: + continue + + site_name = site.host_name or site.name + + # 优先使用站点的notify_email,如果没有则使用团队的notify_email,再没有则使用团队的billing_email + email_recipient = site.notify_email or team_info.notify_email or team_info.billing_email + if not email_recipient: + email_recipient = jingrow.db.get_value("User", team_info.user, "email") + + # 从User文档中获取手机号码用于发送短信 + phone_number = jingrow.db.get_value("User", team_info.user, "mobile_no") + + if not email_recipient and not phone_number: + continue + + # 初始化变量 + subject = "" + message = "" + days_remaining = 0 + + # 根据到期日确定通知类型 + if str(site.site_end_date) == str(sixty_days_later): + subject = f"续费通知 {site_name}" + message = f"您的站点 {site_name} 将在60天后【{site.site_end_date}】到期,请及时续费以确保服务不中断。" + days_remaining = 60 + elif str(site.site_end_date) == 
str(thirty_days_later): + subject = f"续费通知 {site_name}" + message = f"您的站点 {site_name} 将在30天后【{site.site_end_date}】到期,请及时续费以确保服务不中断。" + days_remaining = 30 + elif str(site.site_end_date) == str(seven_days_later): + subject = f"续费通知 {site_name}" + message = f"您的站点 {site_name} 将在7天后【{site.site_end_date}】到期,请尽快续费以确保服务不中断。" + days_remaining = 7 + elif str(site.site_end_date) == str(today): + subject = f"续费通知 {site_name}" + message = f"您的站点 {site_name} 今天({site.site_end_date})到期,如需继续使用,请尽快完成续费。" + days_remaining = 0 + + if not subject: + continue + + try: + # 发送邮件通知 + if email_recipient: + jingrow.sendmail( + recipients=email_recipient, + subject=subject, + message=message, + reference_pagetype="Site", + reference_name=site.name + ) + + # 发送短信通知 + if phone_number: + try: + # 格式化日期为字符串 + formatted_end_date = str(site.site_end_date) + + # 发送短信 + send_renew_sms( + phone_number, + days_remaining, + formatted_end_date + ) + + except Exception as sms_error: + jingrow.log_error(f"站点 {site.name} 发送续费短信通知失败: {str(sms_error)}", "Renewal SMS Notification Error") + + # 直接创建站内通知,不检查重复 + notification = jingrow.get_pg({ + "pagetype": "Jcloud Notification", + "team": site.team, + "title": subject, + "type": "Site Update", + "document_type": "Site", + "document_name": site.name, + "message": message, + "reference_pagetype": "Site", + "reference_name": site.name, + "read": 0, + "is_actionable": True + }).insert() + + # 触发实时事件 + jingrow.publish_realtime( + "jcloud_notification", + pagetype="Jcloud Notification", + message={"team": site.team} + ) + except Exception as e: + jingrow.log_error(f"站点 {site.name} 发送续费通知失败: {str(e)}", "Renewal Notification Error") diff --git a/jcloud/jcloud/pagetype/site/site_usages.py b/jcloud/jcloud/pagetype/site/site_usages.py new file mode 100644 index 0000000..3e58fb8 --- /dev/null +++ b/jcloud/jcloud/pagetype/site/site_usages.py @@ -0,0 +1,146 @@ +import functools + +import rq +import jingrow + +from jcloud.api.analytics import get_current_cpu_usage_for_sites_on_server +from jcloud.jcloud.pagetype.site_plan.site_plan import get_plan_config +from jcloud.utils import log_error + + +@functools.lru_cache(maxsize=128) +def get_cpu_limit(plan): + return jingrow.db.get_value("Site Plan", plan, "cpu_time_per_day") * 3600 * 1000_000 + + +@functools.lru_cache(maxsize=128) +def get_cpu_limits(plan): + return get_config(plan).get("rate_limit", {}).get("limit", 1) * 1000_000 + + +@functools.lru_cache(maxsize=128) +def get_disk_limits(plan): + return jingrow.db.get_value( + "Site Plan", plan, ["max_database_usage", "max_storage_usage"] + ) + + +@functools.lru_cache(maxsize=128) +def get_config(plan): + return get_plan_config(plan) + + +def update_cpu_usages(): + """Update CPU Usages field Site.current_cpu_usage across all Active sites from Site Request Log""" + servers = jingrow.get_all( + "Server", filters={"status": "Active", "is_primary": True}, pluck="name" + ) + for server in servers: + jingrow.enqueue( + "jcloud.jcloud.pagetype.site.site_usages.update_cpu_usage_server", + server=server, + queue="long", + deduplicate=True, + job_id=f"update_cpu_usages:{server}", + ) + + +def update_cpu_usage_server(server): + usage = get_current_cpu_usage_for_sites_on_server(server) + sites = jingrow.get_all( + "Site", + filters={"status": "Active", "server": server}, + fields=["name", "plan", "current_cpu_usage"], + ) + + for site in sites: + if site.name not in usage: + continue + try: + cpu_usage = usage[site.name] + cpu_limit = get_cpu_limits(site.plan) + latest_cpu_usage = int((cpu_usage / cpu_limit) * 
100) + + if site.current_cpu_usage != latest_cpu_usage: + site_pg = jingrow.get_pg("Site", site.name) + site_pg.current_cpu_usage = latest_cpu_usage + site_pg.save() + jingrow.db.commit() + except rq.timeouts.JobTimeoutException: + jingrow.db.rollback() + return + except Exception: + log_error( + "Site CPU Usage Update Error", site=site, cpu_usage=cpu_usage, cpu_limit=cpu_limit + ) + jingrow.db.rollback() + + +def update_disk_usages(): + """Update Storage and Database Usages fields Site.current_database_usage and Site.current_disk_usage for sites that have Site Usage documents""" + + latest_disk_usages = jingrow.db.sql( + """WITH disk_usage AS ( + SELECT + `site`, + `database`, + `public` + `private` as disk, + ROW_NUMBER() OVER (PARTITION BY `site` ORDER BY `creation` DESC) AS 'rank' + FROM + `tabSite Usage` + WHERE + `creation` > %s + ), + joined AS ( + SELECT + u.site, + site.current_database_usage, + site.current_disk_usage, + CAST(u.database / plan.max_database_usage * 100 AS INTEGER) AS latest_database_usage, + CAST(u.disk / plan.max_storage_usage * 100 AS INTEGER) AS latest_disk_usage + FROM + disk_usage u + INNER JOIN + `tabSubscription` s + ON + u.site = s.document_name + LEFT JOIN + `tabSite` site + ON + u.site = site.name + LEFT JOIN + `tabSite Plan` plan + ON + s.plan = plan.name + WHERE + `rank` = 1 AND + s.`document_type` = 'Site' AND + s.`enabled` AND + site.`status` != "Archived" + ) + SELECT + j.site, + j.latest_database_usage, + j.latest_disk_usage + FROM + joined j + WHERE + ABS(j.latest_database_usage - j.current_database_usage ) > 1 OR + ABS(j.latest_disk_usage - j.current_disk_usage) > 1 + """, + values=(jingrow.utils.add_to_date(jingrow.utils.now(), hours=-12),), + as_dict=True, + ) + + for usage in latest_disk_usages: + try: + site = jingrow.get_pg("Site", usage.site, for_update=True) + site.current_database_usage = usage.latest_database_usage + site.current_disk_usage = usage.latest_disk_usage + site.save() + jingrow.db.commit() + except jingrow.DoesNotExistError: + jingrow.db.rollback() + except Exception: + log_error("Site Disk Usage Update Error", usage=usage) + jingrow.db.rollback() diff --git a/jcloud/jcloud/pagetype/site/sync.py b/jcloud/jcloud/pagetype/site/sync.py new file mode 100644 index 0000000..931620f --- /dev/null +++ b/jcloud/jcloud/pagetype/site/sync.py @@ -0,0 +1,27 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt + +import jingrow + + +def sync_setup_wizard_status(): + sites = jingrow.get_all( + "Site", + { + "status": "Active", + "setup_wizard_complete": False, + "is_standby": False, + "domain": ("in", ("jingrow.com", "jingrow.cloud", "jingrowhr.com", "jingrowdesk.com")), + }, + pluck="name", + order_by="RAND()", + limit=20, + ) + + for site_name in sites: + site = jingrow.get_pg("Site", site_name) + try: + site.is_setup_wizard_complete() + jingrow.db.commit() + except Exception: + jingrow.db.rollback() diff --git a/jcloud/jcloud/pagetype/site/test_backups.py b/jcloud/jcloud/pagetype/site/test_backups.py new file mode 100644 index 0000000..594739b --- /dev/null +++ b/jcloud/jcloud/pagetype/site/test_backups.py @@ -0,0 +1,149 @@ +from datetime import timedelta +from unittest.mock import MagicMock, Mock, patch + +import jingrow +from jingrow.tests.utils import JingrowTestCase + +from jcloud.jcloud.pagetype.agent_job.agent_job import AgentJob +from jcloud.jcloud.pagetype.site.backups import ( + ScheduledBackupJob, + schedule_for_sites_with_backup_time, +) +from jcloud.jcloud.pagetype.site.site import Site +from 
jcloud.jcloud.pagetype.site.test_site import create_test_site +from jcloud.jcloud.pagetype.site_backup.test_site_backup import create_test_site_backup + + +@patch("jcloud.jcloud.pagetype.site.backups.jingrow.db.commit", new=MagicMock) +@patch("jcloud.jcloud.pagetype.site.backups.jingrow.db.rollback", new=MagicMock) +@patch.object(AgentJob, "after_insert", new=Mock()) +class TestScheduledBackupJob(JingrowTestCase): + def tearDown(self): + jingrow.db.rollback() + + def _offsite_count(self, site: str): + return jingrow.db.count("Site Backup", {"site": site, "offsite": True}) + + def _with_files_count(self, site: str): + return jingrow.db.count("Site Backup", {"site": site, "with_files": True}) + + def setUp(self): + self.interval = 6 + jingrow.db.set_single_value("Jcloud Settings", "backup_interval", 6) + + def _interval_hrs_ago(self): + return jingrow.utils.now_datetime() - timedelta(hours=self.interval) + + def _create_site_requiring_backup(self, **kwargs): + return create_test_site( + creation=self._interval_hrs_ago() - timedelta(hours=1), **kwargs + ) + + @patch.object( + ScheduledBackupJob, + "is_backup_hour", + new=lambda self, x: True, # always backup hour + ) + @patch.object( + ScheduledBackupJob, + "take_offsite", + new=lambda self, x, y: True, # take offsite anyway + ) + def test_offsite_taken_once_per_day(self): + site = self._create_site_requiring_backup() + job = ScheduledBackupJob() + + offsite_count_before = self._offsite_count(site.name) + job.start() + jingrow.get_last_pg("Site Backup", dict(site=site.name)).db_set("status", "Success") + offsite_count_after = self._offsite_count(site.name) + self.assertEqual(offsite_count_after, offsite_count_before + 1) + + offsite_count_before = self._offsite_count(site.name) + job = ScheduledBackupJob() + job.start() + offsite_count_after = self._offsite_count(site.name) + self.assertEqual(offsite_count_after, offsite_count_before) + + @patch.object( + ScheduledBackupJob, + "is_backup_hour", + new=lambda self, x: True, # always backup hour + ) + def test_with_files_taken_once_per_day(self): + site = self._create_site_requiring_backup() + job = ScheduledBackupJob() + + offsite_count_before = self._with_files_count(site.name) + job.start() + jingrow.get_last_pg("Site Backup", dict(site=site.name)).db_set("status", "Success") + offsite_count_after = self._with_files_count(site.name) + self.assertEqual(offsite_count_after, offsite_count_before + 1) + + offsite_count_before = self._with_files_count(site.name) + job = ScheduledBackupJob() + job.start() + offsite_count_after = self._with_files_count(site.name) + self.assertEqual(offsite_count_after, offsite_count_before) + + def _create_x_sites_on_1_bench(self, x): + site = self._create_site_requiring_backup() + bench = site.bench + for i in range(x - 1): + self._create_site_requiring_backup(bench=bench) + + def test_limit_number_of_sites_backed_up(self): + self._create_x_sites_on_1_bench(1) + self._create_x_sites_on_1_bench(2) + limit = 3 + + job = ScheduledBackupJob() + sites_num_old = len(job.sites) + + job.limit = limit + job.start() + sites_for_backup = [site.name for site in job.sites] + jingrow.db.set_value( + "Site Backup", + {"site": ("in", sites_for_backup)}, + "status", + "Success", # fake succesful backup + ) + + job = ScheduledBackupJob() + sites_num_new = len(job.sites) + + self.assertLess(sites_num_new, sites_num_old) + self.assertEqual(sites_num_old - sites_num_new, limit) + + def test_sites_considered_for_backup(self): + """Ensure sites with succesful or pending backups in past 
interval are skipped.""" + sites = Site.get_sites_for_backup(self.interval) + self.assertEqual(sites, []) + + site_1 = self._create_site_requiring_backup() + create_test_site_backup(site_1.name, status="Pending") + site_2 = self._create_site_requiring_backup() + create_test_site_backup(site_2.name, status="Failure") + site_3 = self._create_site_requiring_backup() + create_test_site_backup(site_3.name, status="Success") + site_4 = self._create_site_requiring_backup() + create_test_site_backup(site_4.name, status="Running") + + sites = Site.get_sites_for_backup(self.interval) + self.assertEqual(len(sites), 1) + + sites_for_backup = [site.name for site in sites] + self.assertIn(site_2.name, sites_for_backup) + + @patch.object(Site, "backup") + def test_site_with_backup_time_taken_at_right_time(self, mock_backup): + self._create_site_requiring_backup(backup_time="00:00:00") + with self.freeze_time("2021-01-01 01:00"): + schedule_for_sites_with_backup_time() + mock_backup.assert_not_called() + with self.freeze_time("2021-01-01 00:00"): + schedule_for_sites_with_backup_time() + mock_backup.assert_called_once() + job = ScheduledBackupJob() + self.assertEqual(len(job.sites), 0) # site with backup time should be skipped diff --git a/jcloud/jcloud/pagetype/site/test_site.py b/jcloud/jcloud/pagetype/site/test_site.py new file mode 100644 index 0000000..88f2b65 --- /dev/null +++ b/jcloud/jcloud/pagetype/site/test_site.py @@ -0,0 +1,523 @@ +# Copyright (c) 2019, JINGROW +# See license.txt +from __future__ import annotations + +import json +import typing +import unittest +from unittest.mock import Mock, patch + +import jingrow +import responses +from jingrow.model.naming import make_autoname + +from jcloud.exceptions import InsufficientSpaceOnServer +from jcloud.jcloud.pagetype.agent_job.agent_job import AgentJob +from jcloud.jcloud.pagetype.app.test_app import create_test_app +from jcloud.jcloud.pagetype.database_server.test_database_server import ( + create_test_database_server, +) +from jcloud.jcloud.pagetype.release_group.test_release_group import ( + create_test_release_group, +) +from jcloud.jcloud.pagetype.remote_file.remote_file import RemoteFile +from jcloud.jcloud.pagetype.remote_file.test_remote_file import ( + create_test_remote_file, +) +from jcloud.jcloud.pagetype.server.server import BaseServer, Server +from jcloud.jcloud.pagetype.site.site import Site, process_rename_site_job_update +from jcloud.telegram_utils import Telegram +from jcloud.utils import get_current_team + +if typing.TYPE_CHECKING: + from datetime import datetime + + from jcloud.jcloud.pagetype.bench.bench import Bench + from jcloud.jcloud.pagetype.release_group.release_group import ReleaseGroup + + +def create_test_bench( + user: str | None = None, + group: ReleaseGroup = None, + server: str | None = None, + apps: list[dict] | None = None, + creation: datetime | None = None, +) -> "Bench": + """ + Create test Bench pg. + + API call to agent will be faked when creating the pg. 
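+
+    A minimal usage sketch (both test factories are defined in this module):
+
+        bench = create_test_bench()  # builds test proxy/database/app servers and a release group on the fly
+        site = create_test_site("some-subdomain", bench=bench.name)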
+ """ + from jcloud.jcloud.pagetype.proxy_server.test_proxy_server import create_test_proxy_server + from jcloud.jcloud.pagetype.server.test_server import create_test_server + + creation = creation or jingrow.utils.now_datetime() + user = user or jingrow.session.user + if not server: + proxy_server = create_test_proxy_server() + database_server = create_test_database_server() + server = create_test_server(proxy_server.name, database_server.name).name + + if not group: + app = create_test_app() + group = create_test_release_group([app], user) + + name = jingrow.mock("name") + candidate = group.create_deploy_candidate() + candidate.db_set("docker_image", jingrow.mock("url")) + bench = jingrow.get_pg( + { + "name": f"Test Bench{name}", + "pagetype": "Bench", + "status": "Active", + "background_workers": 1, + "gunicorn_workers": 2, + "group": group.name, + "apps": apps, + "candidate": candidate.name, + "server": server, + } + ).insert(ignore_if_duplicate=True) + bench.db_set("creation", creation) + bench.reload() + return bench + + +@patch.object(AgentJob, "enqueue_http_request", new=Mock()) +def create_test_site( + subdomain: str = "", + new: bool = False, + creation: datetime | None = None, + bench: str | None = None, + server: str | None = None, + team: str | None = None, + standby_for: str | None = None, + apps: list[str] | None = None, + remote_database_file=None, + remote_public_file=None, + remote_private_file=None, + remote_config_file=None, + backup_time=None, + **kwargs, +) -> Site: + """Create test Site pg. + + Installs all apps present in bench. + """ + creation = creation or jingrow.utils.now_datetime() + subdomain = subdomain or make_autoname("test-site-.#####") + apps = [{"app": app} for app in apps] if apps else None + if not bench: + bench = create_test_bench(server=server) + else: + bench = jingrow.get_pg("Bench", bench) + group = jingrow.get_pg("Release Group", bench.group) + + status = "Pending" if new else "Active" + # on_update checks won't be triggered if not Active + + site = jingrow.get_pg( + { + "pagetype": "Site", + "status": status, + "subdomain": subdomain, + "server": bench.server, + "bench": bench.name, + "team": team or get_current_team(), + "apps": apps or [{"app": app.app} for app in group.apps], + "admin_password": "admin", + "standby_for": standby_for, + "remote_database_file": remote_database_file, + "remote_public_file": remote_public_file, + "remote_private_file": remote_private_file, + "remote_config_file": remote_config_file, + } + ) + site.update(kwargs) + site.insert() + site.db_set("creation", creation) + site.db_set("backup_time", backup_time) + site.reload() + return site + + +@patch.object(AgentJob, "enqueue_http_request", new=Mock()) +@patch("jcloud.jcloud.pagetype.site.site._change_dns_record", new=Mock()) +class TestSite(unittest.TestCase): + """Tests for Site Document methods.""" + + def setUp(self): + jingrow.db.truncate("Agent Request Failure") + + def tearDown(self): + jingrow.db.rollback() + + def test_host_name_updates_perform_checks_on_host_name(self): + """Ensure update of host name triggers verification of host_name.""" + site = create_test_site("testsubdomain") + site.host_name = "balu.codes" # domain that doesn't exist + self.assertRaises(jingrow.exceptions.ValidationError, site.save) + + def test_site_has_default_site_domain_on_create(self): + """Ensure site has default site domain on create.""" + site = create_test_site("testsubdomain") + self.assertEqual(site.name, site.host_name) + self.assertTrue(jingrow.db.exists("Site Domain", 
{"domain": site.name})) + + def test_new_sites_set_host_name_in_site_config(self): + """Ensure new sites set host_name in site config in f server.""" + with patch.object(Site, "_update_configuration") as mock_update_config: + site = create_test_site("testsubdomain", new=True) + mock_update_config.assert_called_with({"host_name": f"https://{site.name}"}, save=False) + + def test_rename_updates_name(self): + """Ensure rename changes name of site.""" + domain = jingrow.db.get_single_value("Jcloud Settings", "domain") + site = create_test_site("old-name") + new_name = f"new-name.{domain}" + site.rename(new_name) + + rename_job = self._fake_succeed_rename_jobs() + process_rename_site_job_update(rename_job) + + self.assertFalse(jingrow.db.exists("Site", {"name": f"old-name.{domain}"})) + self.assertTrue(jingrow.db.exists("Site", {"name": new_name})) + + def test_rename_creates_2_agent_jobs(self): + """Ensure rename creates 2 agent jobs (for f & n).""" + domain = jingrow.db.get_single_value("Jcloud Settings", "domain") + site = create_test_site("old-name") + new_name = f"new-name.{domain}" + + rename_jobs_count_before = jingrow.db.count("Agent Job", {"job_type": "Rename Site"}) + rename_upstream_jobs_count_before = jingrow.db.count( + "Agent Job", {"job_type": "Rename Site on Upstream"} + ) + + site.rename(new_name) + + rename_jobs_count_after = jingrow.db.count("Agent Job", {"job_type": "Rename Site"}) + rename_upstream_jobs_count_after = jingrow.db.count( + "Agent Job", {"job_type": "Rename Site on Upstream"} + ) + + self.assertEqual(rename_jobs_count_after - rename_jobs_count_before, 1) + self.assertEqual(rename_upstream_jobs_count_after - rename_upstream_jobs_count_before, 1) + + def test_subdomain_update_renames_site(self): + """Ensure updating subdomain renames site.""" + site = create_test_site("old-name") + new_subdomain_name = "new-name" + + rename_jobs_count_before = jingrow.db.count("Agent Job", {"job_type": "Rename Site"}) + rename_upstream_jobs_count_before = jingrow.db.count( + "Agent Job", {"job_type": "Rename Site on Upstream"} + ) + + site.subdomain = new_subdomain_name + site.save() + + rename_jobs_count_after = jingrow.db.count("Agent Job", {"job_type": "Rename Site"}) + rename_upstream_jobs_count_after = jingrow.db.count( + "Agent Job", {"job_type": "Rename Site on Upstream"} + ) + + self.assertEqual(rename_jobs_count_after - rename_jobs_count_before, 1) + self.assertEqual(rename_upstream_jobs_count_after - rename_upstream_jobs_count_before, 1) + + def _fake_succeed_rename_jobs(self): + rename_step_name_map = { + "Rename Site": "Rename Site", + "Rename Site on Upstream": "Rename Site File in Upstream Directory", + } + rename_job = jingrow.get_last_pg("Agent Job", {"job_type": "Rename Site"}) + rename_upstream_job = jingrow.get_last_pg("Agent Job", {"job_type": "Rename Site on Upstream"}) + jingrow.db.set_value( + "Agent Job Step", + { + "step_name": rename_step_name_map[rename_job.job_type], + "agent_job": rename_job.name, + }, + "status", + "Success", + ) + jingrow.db.set_value( + "Agent Job Step", + { + "step_name": rename_step_name_map[rename_upstream_job.job_type], + "agent_job": rename_upstream_job.name, + }, + "status", + "Success", + ) + return rename_job + + def test_default_domain_is_renamed_along_with_site(self): + """Ensure default domains are renamed when site is renamed.""" + site = create_test_site("old-name") + old_name = site.name + new_name = "new-name.fc.dev" + + self.assertTrue(jingrow.db.exists("Site Domain", site.name)) + site.rename(new_name) + + 
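+        # The rename is reconciled only after both "Rename Site" and "Rename Site on Upstream"
+        # agent jobs report success; the helper below fakes that outcome.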
rename_job = self._fake_succeed_rename_jobs() + process_rename_site_job_update(rename_job) + + self.assertFalse(jingrow.db.exists("Site Domain", old_name)) + self.assertTrue(jingrow.db.exists("Site Domain", new_name)) + + def test_site_becomes_active_after_successful_rename(self): + """Ensure site becomes active after successful rename.""" + site = create_test_site("old-name") + new_name = "new-name.fc.dev" + site.rename(new_name) + + rename_job = self._fake_succeed_rename_jobs() + process_rename_site_job_update(rename_job) + + site = jingrow.get_pg("Site", new_name) + self.assertEqual(site.status, "Active") + + @patch.object(Site, "rename") + def test_rename_site_not_called_for_new_site(self, mock_rename): + """Rename Site job isn't created for new site.""" + create_test_site("some-name", new=True) + mock_rename.assert_not_called() + + def test_site_rename_update_site_config(self): + """Ensure site configuration child table is updated after rename.""" + site = create_test_site("old-name") + new_name = "new-name.fc.dev" + site.rename(new_name) + + rename_job = self._fake_succeed_rename_jobs() + process_rename_site_job_update(rename_job) + site = jingrow.get_pg("Site", new_name) + if site.configuration[0].key == "host_name": + config_host = site.configuration[0].value + self.assertEqual(config_host, f"https://{new_name}") + + def test_no_new_jobs_after_rename(self): + """Ensure no new jobs are created after rename.""" + site = create_test_site("old-name") + new_name = "new-name.fc.dev" + site.rename(new_name) + + rename_job = self._fake_succeed_rename_jobs() + job_count_before = jingrow.db.count("Agent Job") + process_rename_site_job_update(rename_job) + job_count_after = jingrow.db.count("Agent Job") + self.assertEqual(job_count_before, job_count_after) + + def test_add_domain_to_config_adds_domains_key_to_site_configuration(self): + site = create_test_site("testsubdomain") + domain = "prod.jingrow.dev" + + site.add_domain_to_config(domain) + site.reload() + + domains = site.get_config_value_for_key("domains") + self.assertIn(domain, domains) + + def test_add_domain_to_config_updates_config_for_existing_domains_key(self): + site = create_test_site("testsubdomain") + domain = "prod.jingrow.dev" + domain_2 = "prod2.jingrow.dev" + site._update_configuration({"domains": [domain]}) + + site.add_domain_to_config(domain_2) + site.reload() + + domains = site.get_config_value_for_key("domains") + self.assertIn(domain, domains) + self.assertIn(domain_2, domains) + + def test_add_remove_domain_from_config_updates_domains_key(self): + site = create_test_site("testsubdomain") + domain = "prod.jingrow.dev" + domain_2 = "prod2.jingrow.dev" + site._update_configuration({"domains": [domain, domain_2]}) + + site.remove_domain_from_config(domain) + site.reload() + + domains = site.get_config_value_for_key("domains") + self.assertNotIn(domain, domains) + self.assertIn(domain_2, domains) + + def test_site_rename_doesnt_update_host_name_for_custom_domain(self): + """Ensure site configuration isn't updated after rename when custom domain is host_name.""" + from jcloud.jcloud.pagetype.site_domain.test_site_domain import create_test_site_domain + + site = create_test_site("old-name") + site_domain1 = create_test_site_domain(site.name, "sitedomain1.com") + site.set_host_name(site_domain1.name) + new_name = "new-name.fc.dev" + site.rename(new_name) + + rename_job = self._fake_succeed_rename_jobs() + process_rename_site_job_update(rename_job) + site = jingrow.get_pg("Site", new_name) + if site.configuration[0].key == 
"host_name": + config_host = site.configuration[0].value + self.assertEqual(config_host, f"https://{site_domain1.name}") + + def test_suspend_without_reload_creates_agent_job_with_skip_reload(self): + site = create_test_site("testsubdomain") + site.suspend(skip_reload=True) + + job = jingrow.get_pg("Agent Job", {"site": site.name}) + self.assertTrue(json.loads(job.request_data).get("skip_reload")) + + def test_suspend_without_skip_reload_creates_agent_job_without_skip_reload(self): + site = create_test_site("testsubdomain") + site.suspend() + + job = jingrow.get_pg("Agent Job", {"site": site.name}) + self.assertFalse(json.loads(job.request_data).get("skip_reload")) + + def test_archive_with_skip_reload_creates_agent_job_with_skip_reload(self): + site = create_test_site("testsubdomain") + site.archive(skip_reload=True) + + job = jingrow.get_pg("Agent Job", {"site": site.name}) + self.assertTrue(json.loads(job.request_data).get("skip_reload")) + + def test_archive_without_skip_reload_creates_agent_job_without_skip_reload(self): + site = create_test_site("testsubdomain") + site.archive() + + job = jingrow.get_pg("Agent Job", {"site": site.name}) + self.assertFalse(json.loads(job.request_data).get("skip_reload")) + + @patch.object(RemoteFile, "download_link", new="http://test.com") + @patch.object(RemoteFile, "get_content", new=lambda x: {"a": "test"}) # type: ignore + def test_new_site_with_backup_files(self): + # no asserts here, just checking if it doesn't fail + database = create_test_remote_file().name + public = create_test_remote_file().name + private = create_test_remote_file().name + config = create_test_remote_file().name + plan = jingrow.get_pg( + pagetype="Site Plan", + name="Plan-10", + document_type="Site", + interval="Daily", + price_usd=30, + price_cny=30, + period=30, + ).insert() + create_test_site( + "test-site-restore", + remote_database_file=database, + remote_public_file=public, + remote_private_file=private, + remote_config_file=config, + subscription_plan=plan.name, + ) + + @patch.object(Telegram, "send", new=Mock()) + @patch.object(BaseServer, "disk_capacity", new=Mock(return_value=100)) + @patch.object(RemoteFile, "download_link", new="http://test.com") + @patch.object(RemoteFile, "get_content", new=lambda _: {"a": "test"}) + @patch.object(RemoteFile, "exists", lambda _: True) + @patch.object(BaseServer, "increase_disk_size") + @patch.object(BaseServer, "create_subscription_for_storage", new=Mock()) + def test_restore_site_adds_storage_if_no_sufficient_storage_available_on_public_server( + self, mock_increase_disk_size: Mock + ): + """Ensure restore site adds storage if no sufficient storage available.""" + site = create_test_site() + site.remote_database_file = create_test_remote_file(file_size=1024).name + site.remote_public_file = create_test_remote_file(file_size=1024).name + site.remote_private_file = create_test_remote_file(file_size=1024).name + db_server = jingrow.get_value("Server", site.server, "database_server") + + jingrow.db.set_value("Server", site.server, "public", True) + jingrow.db.set_value( + "Database Server", + db_server, + "public", + True, + ) + with patch.object(BaseServer, "free_space", new=Mock(return_value=500 * 1024 * 1024 * 1024)): + site.restore_site() + mock_increase_disk_size.assert_not_called() + + with patch.object(BaseServer, "free_space", new=Mock(return_value=0)): + site.restore_site() + mock_increase_disk_size.assert_called() + + jingrow.db.set_value("Server", site.server, "public", False) + jingrow.db.set_value( + "Database 
Server", + db_server, + "public", + False, + ) + with patch.object(Server, "free_space", new=Mock(return_value=0)): + self.assertRaises(InsufficientSpaceOnServer, site.restore_site) + + def test_user_cannot_disable_auto_update_if_site_in_public_release_group(self): + rg = create_test_release_group([create_test_app()], public=True) + bench = create_test_bench(group=rg) + site = create_test_site("testsite", bench=bench) + site.skip_auto_updates = True + with self.assertRaises(jingrow.exceptions.ValidationError) as context: + site.save(ignore_permissions=True) + self.assertTrue( + "Auto updates can't be disabled for sites on public benches" in str(context.exception) + ) + + def test_user_can_disable_auto_update_if_site_in_private_bench(self): + rg = create_test_release_group([create_test_app()], public=False) + bench = create_test_bench(group=rg) + site = create_test_site("testsite", bench=bench) + site.skip_auto_updates = True + site.save(ignore_permissions=True) + + @responses.activate + def test_sync_apps_updates_apps_child_table(self): + app1 = create_test_app() + app2 = create_test_app("jerp", "JERP") + group = create_test_release_group([app1, app2]) + bench = create_test_bench(group=group) + site = create_test_site(bench=bench) + responses.get( + f"https://{site.server}:443/agent/benches/{site.bench}/sites/{site.name}/apps", + json.dumps({"data": "jingrow\njerp"}), + ) + site.sync_apps() + self.assertEqual(site.apps[0].app, "jingrow") + self.assertEqual(site.apps[1].app, "jerp") + self.assertEqual(len(site.apps), 2) + + def test_delete_multiple_config_creates_job_to_remove_multiple_site_config_keys(self): + site = create_test_site() + site._set_configuration( + [ + {"key": "key1", "value": "value1", "type": "String"}, + {"key": "key2", "value": "value2", "type": "String"}, + ] + ) + site.delete_multiple_config(["key1", "key2"]) + update_job = jingrow.get_last_pg( + "Agent Job", {"job_type": "Update Site Configuration", "site": site.name} + ) + self.assertEqual( + json.loads(update_job.request_data).get("remove"), + ["key1", "key2"], + ) + + def test_apps_are_reordered_to_follow_bench_order(self): + app1 = create_test_app() + app2 = create_test_app("jerp", "JERP") + app3 = create_test_app("crm", "Jingrow CRM") + group = create_test_release_group([app1, app2, app3]) + bench = create_test_bench(group=group) + site = create_test_site(bench=bench.name, apps=["jingrow", "crm", "jerp"]) + site.reload() + self.assertEqual(site.apps[0].app, "jingrow") + self.assertEqual(site.apps[1].app, "jerp") + self.assertEqual(site.apps[2].app, "crm") diff --git a/jcloud/jcloud/pagetype/site_activity/__init__.py b/jcloud/jcloud/pagetype/site_activity/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/site_activity/site_activity.js b/jcloud/jcloud/pagetype/site_activity/site_activity.js new file mode 100644 index 0000000..8d0f287 --- /dev/null +++ b/jcloud/jcloud/pagetype/site_activity/site_activity.js @@ -0,0 +1,7 @@ +// Copyright (c) 2020, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Site Activity', { + // refresh: function(frm) { + // } +}); diff --git a/jcloud/jcloud/pagetype/site_activity/site_activity.json b/jcloud/jcloud/pagetype/site_activity/site_activity.json new file mode 100644 index 0000000..a99773c --- /dev/null +++ b/jcloud/jcloud/pagetype/site_activity/site_activity.json @@ -0,0 +1,90 @@ +{ + "actions": [], + "creation": "2022-01-28 20:07:38.369240", + "pagetype": "PageType", + "editable_grid": 1, + "engine": 
"InnoDB", + "field_order": [ + "site", + "team", + "action", + "reason", + "job" + ], + "fields": [ + { + "fieldname": "site", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Site", + "options": "Site", + "read_only": 1, + "reqd": 1, + "search_index": 1 + }, + { + "fieldname": "action", + "fieldtype": "Select", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Action", + "options": "Activate Site\nAdd Domain\nArchive\nBackup\nCreate\nClear Cache\nDeactivate Site\nInstall App\nLogin as Administrator\nMigrate\nReinstall\nRestore\nSuspend Site\nUninstall App\nUnsuspend Site\nUpdate\nUpdate Configuration\nDrop Offsite Backups\nEnable Database Access\nDisable Database Access\nCreate Database User\nRemove Database User\nModify Database User Permissions", + "read_only": 1, + "reqd": 1, + "search_index": 1 + }, + { + "fieldname": "reason", + "fieldtype": "Small Text", + "label": "Reason" + }, + { + "fetch_from": "site.team", + "fieldname": "team", + "fieldtype": "Link", + "label": "Team", + "options": "Team" + }, + { + "depends_on": "eval: pg.job", + "fieldname": "job", + "fieldtype": "Link", + "label": "Job", + "options": "Agent Job" + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2024-11-26 11:53:47.035359", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Site Activity", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + }, + { + "create": 1, + "role": "Jcloud Admin" + }, + { + "create": 1, + "role": "Jcloud Member" + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/site_activity/site_activity.py b/jcloud/jcloud/pagetype/site_activity/site_activity.py new file mode 100644 index 0000000..a9a90dc --- /dev/null +++ b/jcloud/jcloud/pagetype/site_activity/site_activity.py @@ -0,0 +1,70 @@ +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt +from __future__ import annotations + +import jingrow +from jingrow.model.document import Document + + +class SiteActivity(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + action: DF.Literal[ + "Activate Site", + "Add Domain", + "Archive", + "Backup", + "Create", + "Clear Cache", + "Deactivate Site", + "Install App", + "Login as Administrator", + "Migrate", + "Reinstall", + "Restore", + "Suspend Site", + "Uninstall App", + "Unsuspend Site", + "Update", + "Update Configuration", + "Drop Offsite Backups", + "Enable Database Access", + "Disable Database Access", + "Create Database User", + "Remove Database User", + "Modify Database User Permissions", + ] + job: DF.Link | None + reason: DF.SmallText | None + site: DF.Link + team: DF.Link | None + # end: auto-generated types + + dashboard_fields = ("action", "reason", "site", "job") + + def after_insert(self): + if self.action == "Login as Administrator" and self.reason: + d = jingrow.get_all("Site", {"name": self.site}, ["notify_email", "team"])[0] + recipient = d.notify_email or jingrow.get_pg("Team", d.team).user + if recipient: + team = jingrow.get_pg("Team", d.team) + team.notify_with_email( + [recipient], + subject="Administrator login to your site", + template="admin_login", + args={"site": self.site, "user": self.owner, "reason": self.reason}, + reference_pagetype=self.pagetype, + reference_name=self.name, + ) + + +def log_site_activity(site, action, reason=None, job=None): + return jingrow.get_pg( + {"pagetype": "Site Activity", "site": site, "action": action, "reason": reason, "job": job} + ).insert() diff --git a/jcloud/jcloud/pagetype/site_activity/test_site_activity.py b/jcloud/jcloud/pagetype/site_activity/test_site_activity.py new file mode 100644 index 0000000..4eb62b5 --- /dev/null +++ b/jcloud/jcloud/pagetype/site_activity/test_site_activity.py @@ -0,0 +1,11 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# See license.txt + + +# import jingrow +import unittest + + +class TestSiteActivity(unittest.TestCase): + pass diff --git a/jcloud/jcloud/pagetype/site_analytics/__init__.py b/jcloud/jcloud/pagetype/site_analytics/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/site_analytics/site_analytics.js b/jcloud/jcloud/pagetype/site_analytics/site_analytics.js new file mode 100644 index 0000000..9894cc3 --- /dev/null +++ b/jcloud/jcloud/pagetype/site_analytics/site_analytics.js @@ -0,0 +1,7 @@ +// Copyright (c) 2022, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Site Analytics', { + // refresh: function(frm) { + // } +}); diff --git a/jcloud/jcloud/pagetype/site_analytics/site_analytics.json b/jcloud/jcloud/pagetype/site_analytics/site_analytics.json new file mode 100644 index 0000000..c69eb74 --- /dev/null +++ b/jcloud/jcloud/pagetype/site_analytics/site_analytics.json @@ -0,0 +1,220 @@ +{ + "actions": [], + "creation": "2022-05-02 23:26:52.559658", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "site", + "column_break_2", + "timestamp", + "jingrow_section", + "country", + "language", + "time_zone", + "setup_complete", + "scheduler_enabled", + "column_break_8", + "emails_sent", + "space_used", + "database_size", + "backup_size", + "files_size", + "section_break_17", + "users", + "last_logins", + "last_active", + "installed_apps", + "jerp_section", + "activation_level", + "company", + "domain", + "column_break_26", + "sales_data" + ], + "fields": [ + { + "fieldname": "site", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Site", + "options": "Site", + "read_only": 1, + 
"reqd": 1 + }, + { + "fieldname": "timestamp", + "fieldtype": "Datetime", + "in_list_view": 1, + "label": "Timestamp", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "column_break_2", + "fieldtype": "Column Break" + }, + { + "fieldname": "jingrow_section", + "fieldtype": "Section Break", + "label": "Jingrow" + }, + { + "fieldname": "jerp_section", + "fieldtype": "Section Break", + "label": "JERP" + }, + { + "fieldname": "country", + "fieldtype": "Data", + "label": "Country", + "read_only": 1 + }, + { + "fieldname": "language", + "fieldtype": "Data", + "label": "Language", + "read_only": 1 + }, + { + "fieldname": "time_zone", + "fieldtype": "Data", + "label": "Time Zone", + "read_only": 1 + }, + { + "fieldname": "column_break_8", + "fieldtype": "Column Break" + }, + { + "default": "0", + "fieldname": "scheduler_enabled", + "fieldtype": "Check", + "label": "Scheduler Enabled", + "read_only": 1 + }, + { + "default": "0", + "fieldname": "setup_complete", + "fieldtype": "Check", + "label": "Setup Complete", + "read_only": 1 + }, + { + "fieldname": "emails_sent", + "fieldtype": "Int", + "label": "Emails Sent", + "read_only": 1 + }, + { + "fieldname": "space_used", + "fieldtype": "Int", + "label": "Space Used", + "read_only": 1 + }, + { + "fieldname": "database_size", + "fieldtype": "Int", + "label": "Database Size", + "read_only": 1 + }, + { + "fieldname": "backup_size", + "fieldtype": "Int", + "label": "Backup Size", + "read_only": 1 + }, + { + "fieldname": "files_size", + "fieldtype": "Int", + "label": "Files Size", + "read_only": 1 + }, + { + "fieldname": "section_break_17", + "fieldtype": "Section Break" + }, + { + "fieldname": "users", + "fieldtype": "Table", + "label": "Users", + "options": "Site Analytics User", + "read_only": 1 + }, + { + "fieldname": "last_logins", + "fieldtype": "Table", + "label": "Last Logins", + "options": "Site Analytics Login", + "read_only": 1 + }, + { + "fieldname": "installed_apps", + "fieldtype": "Table", + "label": "Installed Apps", + "options": "Site Analytics App", + "read_only": 1 + }, + { + "fieldname": "activation_level", + "fieldtype": "Int", + "label": "Activation Level", + "read_only": 1 + }, + { + "fieldname": "sales_data", + "fieldtype": "Table", + "label": "Sales Data", + "options": "Site Analytics PageType", + "read_only": 1 + }, + { + "fieldname": "company", + "fieldtype": "Data", + "label": "Company", + "read_only": 1 + }, + { + "fieldname": "domain", + "fieldtype": "Data", + "label": "Domain", + "read_only": 1 + }, + { + "fieldname": "column_break_26", + "fieldtype": "Column Break" + }, + { + "fieldname": "last_active", + "fieldtype": "Table", + "label": "Last Active", + "options": "Site Analytics Active", + "read_only": 1 + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2022-09-29 18:13:51.797874", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Site Analytics", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "title_field": "site" +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/site_analytics/site_analytics.py b/jcloud/jcloud/pagetype/site_analytics/site_analytics.py new file mode 100644 index 0000000..edfe909 --- /dev/null +++ b/jcloud/jcloud/pagetype/site_analytics/site_analytics.py @@ -0,0 +1,133 @@ +# Copyright (c) 2022, JINGROW +# For 
license information, please see license.txt + +import jingrow +from jingrow.model.document import Document +from jingrow.query_builder import Interval +from jingrow.query_builder.functions import Now + + +class SiteAnalytics(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + from jcloud.jcloud.pagetype.site_analytics_active.site_analytics_active import ( + SiteAnalyticsActive, + ) + from jcloud.jcloud.pagetype.site_analytics_app.site_analytics_app import SiteAnalyticsApp + from jcloud.jcloud.pagetype.site_analytics_pagetype.site_analytics_pagetype import ( + SiteAnalyticsDocType, + ) + from jcloud.jcloud.pagetype.site_analytics_login.site_analytics_login import ( + SiteAnalyticsLogin, + ) + from jcloud.jcloud.pagetype.site_analytics_user.site_analytics_user import ( + SiteAnalyticsUser, + ) + + activation_level: DF.Int + backup_size: DF.Int + company: DF.Data | None + country: DF.Data | None + database_size: DF.Int + domain: DF.Data | None + emails_sent: DF.Int + files_size: DF.Int + installed_apps: DF.Table[SiteAnalyticsApp] + language: DF.Data | None + last_active: DF.Table[SiteAnalyticsActive] + last_logins: DF.Table[SiteAnalyticsLogin] + sales_data: DF.Table[SiteAnalyticsDocType] + scheduler_enabled: DF.Check + setup_complete: DF.Check + site: DF.Link + space_used: DF.Int + time_zone: DF.Data | None + timestamp: DF.Datetime + users: DF.Table[SiteAnalyticsUser] + # end: auto-generated types + + @staticmethod + def clear_old_logs(days=30): + tables = [ + "Site Analytics", + "Site Analytics User", + "Site Analytics Login", + "Site Analytics App", + "Site Analytics PageType", + "Site Analytics Active", + ] + for table in tables: + table = jingrow.qb.PageType(table) + jingrow.db.delete(table, filters=(table.modified < (Now() - Interval(days=days)))) + jingrow.db.commit() + + +def create_site_analytics(site, data): + def get_last_logins(analytics): + last_logins = [] + for login in analytics.get("last_logins", []): + last_logins.append( + { + "user": login["user"], + "full_name": login["full_name"], + "timestamp": login["creation"], + } + ) + return last_logins + + def get_sales_data(analytics): + sales_data = [] + for row in analytics.get("activation", {}).get("sales_data", []): + pagetype, count = tuple(row.items())[0] + if count: + sales_data.append( + { + "document_type": pagetype, + "count": count, + } + ) + return sales_data + + def get_last_active(analytics): + last_active = [] + for user in analytics.get("users", []): + if user and user.get("enabled") == 1: + last_active.append(user) + + return last_active + + timestamp = data["timestamp"] + analytics = data["analytics"] + if not jingrow.db.exists("Site Analytics", {"site": site, "timestamp": timestamp}): + pg = jingrow.get_pg( + { + "pagetype": "Site Analytics", + "site": site, + "timestamp": timestamp, + "country": analytics.get("country"), + "time_zone": analytics.get("time_zone"), + "language": analytics.get("language"), + "scheduler_enabled": analytics.get("scheduler_enabled"), + "setup_complete": analytics.get("setup_complete"), + "space_used": analytics.get("space_used"), + "backup_size": analytics.get("backup_size"), + "database_size": analytics.get("database_size"), + "files_size": analytics.get("files_size"), + "emails_sent": analytics.get("emails_sent"), + "installed_apps": analytics.get("installed_apps", []), + "users": analytics.get("users", []), + "last_logins": 
get_last_logins(analytics), + "last_active": get_last_active(analytics), + "company": analytics.get("company"), + "domain": analytics.get("domain"), + "activation_level": analytics.get("activation", {}).get("activation_level"), + "sales_data": get_sales_data(analytics), + } + ) + pg.insert() diff --git a/jcloud/jcloud/pagetype/site_analytics/test_site_analytics.py b/jcloud/jcloud/pagetype/site_analytics/test_site_analytics.py new file mode 100644 index 0000000..5d9a6f9 --- /dev/null +++ b/jcloud/jcloud/pagetype/site_analytics/test_site_analytics.py @@ -0,0 +1,9 @@ +# Copyright (c) 2022, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests.utils import JingrowTestCase + + +class TestSiteAnalytics(JingrowTestCase): + pass diff --git a/jcloud/jcloud/pagetype/site_analytics_active/__init__.py b/jcloud/jcloud/pagetype/site_analytics_active/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/site_analytics_active/site_analytics_active.json b/jcloud/jcloud/pagetype/site_analytics_active/site_analytics_active.json new file mode 100644 index 0000000..89c6c83 --- /dev/null +++ b/jcloud/jcloud/pagetype/site_analytics_active/site_analytics_active.json @@ -0,0 +1,78 @@ +{ + "actions": [], + "autoname": "autoincrement", + "creation": "2022-09-29 17:09:45.285414", + "pagetype": "PageType", + "engine": "InnoDB", + "field_order": [ + "email", + "full_name", + "last_active", + "last_login", + "enabled", + "is_system_manager", + "language", + "time_zone" + ], + "fields": [ + { + "fieldname": "email", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Email" + }, + { + "fieldname": "full_name", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Full Name" + }, + { + "fieldname": "last_active", + "fieldtype": "Datetime", + "in_list_view": 1, + "label": "Last Active" + }, + { + "fieldname": "last_login", + "fieldtype": "Datetime", + "in_list_view": 1, + "label": "Last Login" + }, + { + "default": "0", + "fieldname": "enabled", + "fieldtype": "Check", + "label": "Enabled" + }, + { + "default": "0", + "fieldname": "is_system_manager", + "fieldtype": "Check", + "label": "Is System Manager" + }, + { + "fieldname": "language", + "fieldtype": "Data", + "label": "Language" + }, + { + "fieldname": "time_zone", + "fieldtype": "Data", + "label": "Time Zone" + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2022-09-30 11:41:01.033086", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Site Analytics Active", + "naming_rule": "Autoincrement", + "owner": "Administrator", + "permissions": [], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/site_analytics_active/site_analytics_active.py b/jcloud/jcloud/pagetype/site_analytics_active/site_analytics_active.py new file mode 100644 index 0000000..22d2f3a --- /dev/null +++ b/jcloud/jcloud/pagetype/site_analytics_active/site_analytics_active.py @@ -0,0 +1,31 @@ +# Copyright (c) 2022, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class SiteAnalyticsActive(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
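# --- Illustrative sketch (not part of the source tree) ----------------------
# Shape of the `data` argument that create_site_analytics() above appears to
# expect, inferred only from the keys it reads. Every concrete value below is
# made up for illustration; only the key names come from the code.
example_payload = {
    "timestamp": "2022-09-29 18:00:00",
    "analytics": {
        "country": "India",
        "language": "en",
        "time_zone": "Asia/Kolkata",
        "scheduler_enabled": 1,
        "setup_complete": 1,
        "space_used": 2048,
        "backup_size": 512,
        "database_size": 1024,
        "files_size": 512,
        "emails_sent": 42,
        "installed_apps": [],  # rows for the Site Analytics App child table
        "users": [{"email": "a@example.com", "enabled": 1}],
        "last_logins": [
            {"user": "a@example.com", "full_name": "A User", "creation": "2022-09-29 17:55:00"}
        ],
        "company": "Example Co",
        "domain": "example.com",
        "activation": {
            "activation_level": 3,
            # each sales_data row is a single-key mapping of doctype -> count
            "sales_data": [{"Sales Invoice": 12}, {"Customer": 0}],
        },
    },
}

# get_sales_data() unpacks those single-key rows and drops zero counts;
# the same logic in plain Python:
sales_rows = [
    {"document_type": doctype, "count": count}
    for row in example_payload["analytics"]["activation"]["sales_data"]
    for doctype, count in [tuple(row.items())[0]]
    if count
]
assert sales_rows == [{"document_type": "Sales Invoice", "count": 12}]
# -----------------------------------------------------------------------------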
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + email: DF.Data | None + enabled: DF.Check + full_name: DF.Data | None + is_system_manager: DF.Check + language: DF.Data | None + last_active: DF.Datetime | None + last_login: DF.Datetime | None + name: DF.Int | None + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + time_zone: DF.Data | None + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/site_analytics_app/__init__.py b/jcloud/jcloud/pagetype/site_analytics_app/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/site_analytics_app/site_analytics_app.json b/jcloud/jcloud/pagetype/site_analytics_app/site_analytics_app.json new file mode 100644 index 0000000..1e09fe1 --- /dev/null +++ b/jcloud/jcloud/pagetype/site_analytics_app/site_analytics_app.json @@ -0,0 +1,45 @@ +{ + "actions": [], + "autoname": "autoincrement", + "creation": "2022-05-02 23:37:43.357398", + "pagetype": "PageType", + "engine": "InnoDB", + "field_order": [ + "app_name", + "version", + "branch" + ], + "fields": [ + { + "fieldname": "app_name", + "fieldtype": "Data", + "in_list_view": 1, + "label": "App Name" + }, + { + "fieldname": "version", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Version" + }, + { + "fieldname": "branch", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Branch" + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2022-05-05 09:27:45.993311", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Site Analytics App", + "naming_rule": "Autoincrement", + "owner": "Administrator", + "permissions": [], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/site_analytics_app/site_analytics_app.py b/jcloud/jcloud/pagetype/site_analytics_app/site_analytics_app.py new file mode 100644 index 0000000..fe3cf0a --- /dev/null +++ b/jcloud/jcloud/pagetype/site_analytics_app/site_analytics_app.py @@ -0,0 +1,26 @@ +# Copyright (c) 2022, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class SiteAnalyticsApp(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + app_name: DF.Data | None + branch: DF.Data | None + name: DF.Int | None + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + version: DF.Data | None + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/site_analytics_login/__init__.py b/jcloud/jcloud/pagetype/site_analytics_login/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/site_analytics_login/site_analytics_login.json b/jcloud/jcloud/pagetype/site_analytics_login/site_analytics_login.json new file mode 100644 index 0000000..b74e587 --- /dev/null +++ b/jcloud/jcloud/pagetype/site_analytics_login/site_analytics_login.json @@ -0,0 +1,45 @@ +{ + "actions": [], + "autoname": "autoincrement", + "creation": "2022-05-02 23:36:42.966427", + "pagetype": "PageType", + "engine": "InnoDB", + "field_order": [ + "user", + "full_name", + "timestamp" + ], + "fields": [ + { + "fieldname": "user", + "fieldtype": "Data", + "in_list_view": 1, + "label": "User" + }, + { + "fieldname": "full_name", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Full Name" + }, + { + "fieldname": "timestamp", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Timestamp" + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2022-05-05 09:27:37.972446", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Site Analytics Login", + "naming_rule": "Autoincrement", + "owner": "Administrator", + "permissions": [], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/site_analytics_login/site_analytics_login.py b/jcloud/jcloud/pagetype/site_analytics_login/site_analytics_login.py new file mode 100644 index 0000000..5e8f4ad --- /dev/null +++ b/jcloud/jcloud/pagetype/site_analytics_login/site_analytics_login.py @@ -0,0 +1,26 @@ +# Copyright (c) 2022, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class SiteAnalyticsLogin(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + full_name: DF.Data | None + name: DF.Int | None + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + timestamp: DF.Data | None + user: DF.Data | None + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/site_analytics_pagetype/__init__.py b/jcloud/jcloud/pagetype/site_analytics_pagetype/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/site_analytics_pagetype/site_analytics_pagetype.json b/jcloud/jcloud/pagetype/site_analytics_pagetype/site_analytics_pagetype.json new file mode 100644 index 0000000..5150a74 --- /dev/null +++ b/jcloud/jcloud/pagetype/site_analytics_pagetype/site_analytics_pagetype.json @@ -0,0 +1,38 @@ +{ + "actions": [], + "autoname": "autoincrement", + "creation": "2022-05-02 23:39:27.920170", + "pagetype": "PageType", + "engine": "InnoDB", + "field_order": [ + "document_type", + "count" + ], + "fields": [ + { + "fieldname": "document_type", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Document Type" + }, + { + "fieldname": "count", + "fieldtype": "Int", + "in_list_view": 1, + "label": "Count" + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2022-05-05 09:27:41.077818", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Site Analytics PageType", + "naming_rule": "Autoincrement", + "owner": "Administrator", + "permissions": [], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/site_analytics_pagetype/site_analytics_pagetype.py b/jcloud/jcloud/pagetype/site_analytics_pagetype/site_analytics_pagetype.py new file mode 100644 index 0000000..8f7b446 --- /dev/null +++ b/jcloud/jcloud/pagetype/site_analytics_pagetype/site_analytics_pagetype.py @@ -0,0 +1,25 @@ +# Copyright (c) 2022, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class SiteAnalyticsDocType(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + count: DF.Int + document_type: DF.Data | None + name: DF.Int | None + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/site_analytics_user/__init__.py b/jcloud/jcloud/pagetype/site_analytics_user/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/site_analytics_user/site_analytics_user.json b/jcloud/jcloud/pagetype/site_analytics_user/site_analytics_user.json new file mode 100644 index 0000000..4ab0ccd --- /dev/null +++ b/jcloud/jcloud/pagetype/site_analytics_user/site_analytics_user.json @@ -0,0 +1,83 @@ +{ + "actions": [], + "autoname": "autoincrement", + "creation": "2022-05-02 23:35:08.922911", + "pagetype": "PageType", + "engine": "InnoDB", + "field_order": [ + "email", + "full_name", + "last_active", + "last_login", + "column_break_4", + "enabled", + "is_system_manager", + "language", + "time_zone" + ], + "fields": [ + { + "fieldname": "email", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Email" + }, + { + "fieldname": "full_name", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Full Name" + }, + { + "fieldname": "last_active", + "fieldtype": "Datetime", + "label": "Last Active" + }, + { + "fieldname": "column_break_4", + "fieldtype": "Column Break" + }, + { + "default": "0", + "fieldname": "enabled", + "fieldtype": "Check", + "in_list_view": 1, + "label": "Enabled" + }, + { + "default": "0", + "fieldname": "is_system_manager", + "fieldtype": "Check", + "in_list_view": 1, + "label": "Is System Manager" + }, + { + "fieldname": "language", + "fieldtype": "Data", + "label": "Language" + }, + { + "fieldname": "time_zone", + "fieldtype": "Data", + "label": "Time Zone" + }, + { + "fieldname": "last_login", + "fieldtype": "Datetime", + "label": "Last Login" + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2022-05-05 09:27:43.694054", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Site Analytics User", + "naming_rule": "Autoincrement", + "owner": "Administrator", + "permissions": [], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/site_analytics_user/site_analytics_user.py b/jcloud/jcloud/pagetype/site_analytics_user/site_analytics_user.py new file mode 100644 index 0000000..5338d9a --- /dev/null +++ b/jcloud/jcloud/pagetype/site_analytics_user/site_analytics_user.py @@ -0,0 +1,31 @@ +# Copyright (c) 2022, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class SiteAnalyticsUser(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + email: DF.Data | None + enabled: DF.Check + full_name: DF.Data | None + is_system_manager: DF.Check + language: DF.Data | None + last_active: DF.Datetime | None + last_login: DF.Datetime | None + name: DF.Int | None + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + time_zone: DF.Data | None + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/site_app/__init__.py b/jcloud/jcloud/pagetype/site_app/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/site_app/site_app.json b/jcloud/jcloud/pagetype/site_app/site_app.json new file mode 100644 index 0000000..6bc92ab --- /dev/null +++ b/jcloud/jcloud/pagetype/site_app/site_app.json @@ -0,0 +1,32 @@ +{ + "actions": [], + "creation": "2020-01-14 14:14:41.801519", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "app" + ], + "fields": [ + { + "fieldname": "app", + "fieldtype": "Link", + "in_list_view": 1, + "label": "App", + "options": "App", + "reqd": 1 + } + ], + "istable": 1, + "links": [], + "modified": "2020-12-15 22:39:14.441837", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Site App", + "owner": "Administrator", + "permissions": [], + "quick_entry": 1, + "sort_field": "modified", + "sort_order": "DESC", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/site_app/site_app.py b/jcloud/jcloud/pagetype/site_app/site_app.py new file mode 100644 index 0000000..8fa054b --- /dev/null +++ b/jcloud/jcloud/pagetype/site_app/site_app.py @@ -0,0 +1,51 @@ +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +import jingrow +from jingrow.model.document import Document +from jingrow.utils import cstr + +from jcloud.api.site import get_installed_apps + + +class SiteApp(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + app: DF.Link + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + # end: auto-generated types + + @staticmethod + def get_list_query(query, filters=None, **list_args): + site = cstr(filters.get("parent", "")) if filters else None + if not site: + return None + + site_pg = jingrow.get_pg("Site", site) + installed_apps = get_installed_apps(site_pg, filters) + + # Apply is_app_patched flag to installed_apps + app_names = [a.app for a in site_pg.apps] + patched_apps = jingrow.get_all( + "App Patch", + fields=["app"], + filters={ + "bench": site_pg.bench, + "app": ["in", app_names], + }, + pluck="app", + ) + for app in installed_apps: + if app.app in patched_apps: + app.is_app_patched = True + + return installed_apps diff --git a/jcloud/jcloud/pagetype/site_backup/__init__.py b/jcloud/jcloud/pagetype/site_backup/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/site_backup/site_backup.js b/jcloud/jcloud/pagetype/site_backup/site_backup.js new file mode 100644 index 0000000..6c022dd --- /dev/null +++ b/jcloud/jcloud/pagetype/site_backup/site_backup.js @@ -0,0 +1,7 @@ +// Copyright (c) 2020, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Site Backup', { + // refresh: function(frm) { + // } +}); diff --git a/jcloud/jcloud/pagetype/site_backup/site_backup.json b/jcloud/jcloud/pagetype/site_backup/site_backup.json new file mode 100644 index 0000000..8f05b31 --- /dev/null +++ b/jcloud/jcloud/pagetype/site_backup/site_backup.json @@ -0,0 +1,320 @@ +{ + "actions": [], + "creation": "2023-06-06 19:43:02.419970", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "status", + "site", + "database_name", + "team", + "column_break_obsx", + "job", + "files_availability", + "physical", + "with_files", + "offsite", + "data_7", + "database_file", + "database_url", + "remote_database_file", + "database_size", + "column_break_wiuq", + "config_file", + "config_file_url", + "remote_config_file", + "config_file_size", + "data_12", + "public_file", + "public_url", + "remote_public_file", + "public_size", + "column_break_16", + "private_file", + "private_url", + "remote_private_file", + "private_size", + "section_break_21", + "offsite_backup", + "section_break_hiaw", + "database_snapshot", + "column_break_hksx", + "snapshot_request_key" + ], + "fields": [ + { + "fieldname": "site", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Site", + "options": "Site", + "reqd": 1, + "set_only_once": 1 + }, + { + "default": "Pending", + "fieldname": "status", + "fieldtype": "Select", + "in_list_view": 1, + "label": "Status", + "options": "Pending\nRunning\nSuccess\nFailure", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "job", + "fieldtype": "Link", + "label": "Job", + "options": "Agent Job", + "read_only": 1, + "search_index": 1 + }, + { + "default": "0", + "depends_on": "eval: pg.physical != 1", + "fieldname": "with_files", + "fieldtype": "Check", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "With Files", + "set_only_once": 1 + }, + { + "fieldname": "database_file", + "fieldtype": "Data", + "label": "Database File", + "read_only": 1 + }, + { + "fieldname": "database_size", + "fieldtype": "Data", + "label": "Database Size", + "read_only": 1 + }, + { + "fieldname": "database_url", + "fieldtype": "Text", + "label": "Database URL", + "read_only": 1 + }, + { 
+ "fieldname": "public_file", + "fieldtype": "Data", + "label": "Public File", + "read_only": 1 + }, + { + "fieldname": "public_size", + "fieldtype": "Data", + "label": "Public Size", + "read_only": 1 + }, + { + "fieldname": "public_url", + "fieldtype": "Text", + "label": "Public URL", + "read_only": 1 + }, + { + "fieldname": "private_file", + "fieldtype": "Data", + "label": "Private File", + "read_only": 1 + }, + { + "fieldname": "private_size", + "fieldtype": "Data", + "label": "Private Size", + "read_only": 1 + }, + { + "fieldname": "private_url", + "fieldtype": "Text", + "label": "Private URL", + "read_only": 1 + }, + { + "depends_on": "eval: pg.physical != 1", + "fieldname": "data_7", + "fieldtype": "Section Break" + }, + { + "depends_on": "eval: pg.physical != 1", + "fieldname": "data_12", + "fieldtype": "Section Break" + }, + { + "fieldname": "column_break_16", + "fieldtype": "Column Break" + }, + { + "default": "0", + "depends_on": "eval: pg.physical != 1", + "fieldname": "offsite", + "fieldtype": "Check", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Offsite", + "set_only_once": 1 + }, + { + "depends_on": "eval: pg.physical != 1 && pg.offsite == 1", + "fieldname": "section_break_21", + "fieldtype": "Section Break" + }, + { + "fieldname": "offsite_backup", + "fieldtype": "Code", + "label": "Offsite Backup", + "read_only": 1 + }, + { + "fieldname": "remote_database_file", + "fieldtype": "Link", + "label": "Remote File", + "options": "Remote File", + "read_only": 1 + }, + { + "fieldname": "remote_public_file", + "fieldtype": "Link", + "label": "Remote File", + "options": "Remote File", + "read_only": 1 + }, + { + "fieldname": "remote_private_file", + "fieldtype": "Link", + "label": "Remote File", + "options": "Remote File", + "read_only": 1 + }, + { + "fieldname": "files_availability", + "fieldtype": "Select", + "in_filter": 1, + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Files Availability", + "options": "\nAvailable\nUnavailable", + "read_only": 1 + }, + { + "fieldname": "column_break_wiuq", + "fieldtype": "Column Break" + }, + { + "fieldname": "config_file", + "fieldtype": "Data", + "label": "Config File", + "read_only": 1 + }, + { + "fieldname": "config_file_url", + "fieldtype": "Text", + "label": "Config File URL", + "read_only": 1 + }, + { + "fieldname": "config_file_size", + "fieldtype": "Data", + "label": "Config File Size", + "read_only": 1 + }, + { + "fieldname": "remote_config_file", + "fieldtype": "Link", + "label": "Remote File", + "options": "Remote File", + "read_only": 1 + }, + { + "fetch_from": "site.team", + "fieldname": "team", + "fieldtype": "Link", + "label": "Team", + "options": "Team", + "read_only": 1 + }, + { + "default": "0", + "fieldname": "physical", + "fieldtype": "Check", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Physical", + "set_only_once": 1 + }, + { + "collapsible_depends_on": "1", + "depends_on": "eval: pg.physical == 1", + "fieldname": "section_break_hiaw", + "fieldtype": "Section Break", + "label": "Physical Backup" + }, + { + "fieldname": "column_break_hksx", + "fieldtype": "Column Break" + }, + { + "fieldname": "snapshot_request_key", + "fieldtype": "Data", + "label": "Snapshot Request Key", + "read_only": 1 + }, + { + "depends_on": "eval: pg.physical", + "fieldname": "database_snapshot", + "fieldtype": "Link", + "label": "Database Snapshot", + "options": "Virtual Disk Snapshot", + "read_only": 1, + "search_index": 1 + }, + { + "depends_on": "eval: pg.database_name", + "fieldname": "database_name", 
+ "fieldtype": "Data", + "label": "Database Name", + "read_only": 1 + }, + { + "fieldname": "column_break_obsx", + "fieldtype": "Column Break" + } + ], + "links": [], + "modified": "2025-02-06 13:21:10.061731", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Site Backup", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + }, + { + "create": 1, + "role": "Jcloud Admin" + }, + { + "create": 1, + "role": "Jcloud Member" + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/site_backup/site_backup.py b/jcloud/jcloud/pagetype/site_backup/site_backup.py new file mode 100644 index 0000000..2cedc40 --- /dev/null +++ b/jcloud/jcloud/pagetype/site_backup/site_backup.py @@ -0,0 +1,391 @@ +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + +from __future__ import annotations + +import json +import time +from typing import TYPE_CHECKING + +import jingrow +import jingrow.utils +from jingrow.desk.pagetype.tag.tag import add_tag +from jingrow.model.document import Document + +from jcloud.agent import Agent +from jcloud.jcloud.pagetype.ansible_console.ansible_console import AnsibleAdHoc + +if TYPE_CHECKING: + from datetime import datetime + + +class SiteBackup(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + config_file: DF.Data | None + config_file_size: DF.Data | None + config_file_url: DF.Text | None + database_file: DF.Data | None + database_name: DF.Data | None + database_size: DF.Data | None + database_snapshot: DF.Link | None + database_url: DF.Text | None + files_availability: DF.Literal["", "Available", "Unavailable"] + job: DF.Link | None + offsite: DF.Check + offsite_backup: DF.Code | None + physical: DF.Check + private_file: DF.Data | None + private_size: DF.Data | None + private_url: DF.Text | None + public_file: DF.Data | None + public_size: DF.Data | None + public_url: DF.Text | None + remote_config_file: DF.Link | None + remote_database_file: DF.Link | None + remote_private_file: DF.Link | None + remote_public_file: DF.Link | None + site: DF.Link + snapshot_request_key: DF.Data | None + status: DF.Literal["Pending", "Running", "Success", "Failure"] + team: DF.Link | None + with_files: DF.Check + # end: auto-generated types + + dashboard_fields = ( + "job", + "status", + "database_url", + "public_url", + "private_url", + "config_file_url", + "site", + "database_size", + "public_size", + "private_size", + "with_files", + "offsite", + "files_availability", + "remote_database_file", + "remote_public_file", + "remote_private_file", + "remote_config_file", + "physical", + ) + + @property + def database_server(self): + server = jingrow.get_value("Site", self.site, "server") + return jingrow.get_value("Server", server, "database_server") + + @staticmethod + def get_list_query(query): + results = query.run(as_dict=True) + return [result for result in results if not result.get("physical")] + + def validate(self): + if self.physical and self.with_files: + jingrow.throw("Physical backups cannot be taken with files") + if self.physical and self.offsite: + jingrow.throw("Physical and offsite logical backups cannot be taken together") 
+ + def before_insert(self): + if getattr(self, "force", False): + if self.physical: + jingrow.throw("Physical backups cannot be forcefully triggered") + return + # For backups, check if there are too many pending backups + two_hours_ago = jingrow.utils.add_to_date(None, hours=-2) + if jingrow.db.count( + "Site Backup", + { + "site": self.site, + "status": ("in", ["Running", "Pending"]), + "creation": (">", two_hours_ago), + }, + ): + jingrow.throw("Too many pending backups") + + if self.physical: + # Set some default values + site = jingrow.get_pg("Site", self.site) + if not site.database_name: + site.sync_info() + site.reload() + if not site.database_name: + jingrow.throw("Database name is missing in the site") + self.database_name = site.database_name + self.snapshot_request_key = jingrow.generate_hash(length=32) + + def after_insert(self): + if self.physical: + jingrow.enqueue_pg( + pagetype=self.pagetype, + name=self.name, + method="_create_physical_backup", + enqueue_after_commit=True, + ) + else: + site = jingrow.get_pg("Site", self.site) + agent = Agent(site.server) + job = agent.backup_site(site, self) + jingrow.db.set_value("Site Backup", self.name, "job", job.name) + + def after_delete(self): + if self.job: + jingrow.delete_pg_if_exists("Agent Job", self.job) + + def on_update(self): + if self.physical and self.has_value_changed("status") and self.status in ["Success", "Failure"]: + site_update_pg_name = jingrow.db.exists("Site Update", {"site_backup": self.name}) + if site_update_pg_name: + """ + If site backup was trigerred for Site Update, + Then, trigger Site Update to proceed with the next steps + """ + site_update = jingrow.get_pg("Site Update", site_update_pg_name) + if self.status == "Success": + site_update.create_update_site_agent_request() + elif self.status == "Failure": + site_update.activate_site() + + """ + Rollback the permission changes made to the database directory + Change it back to 770 from 700 + + Check `_create_physical_backup` method for more information + """ + success = self.run_ansible_command_in_database_server( + f"chmod 700 /var/lib/mysql/{self.database_name}" + ) + if not success: + """ + Don't throw an error here, Because the backup is already created + And keeping the permission as 770 will not cause issue in database operations + """ + jingrow.log_error( + "Failed to rollback the permission changes of the database directory", + reference_pagetype=self.pagetype, + reference_name=self.name, + ) + + def _create_physical_backup(self): + site = jingrow.get_pg("Site", self.site) + """ + Change the /var/lib/mysql/ directory's permission to 700 from 770 + The files inside that directory will have 660 permission, So no need to change the permission of the files + + `jingrow` user on server is already part of `mysql` group. 
+ So `jingrow` user can read-write the files inside that directory + """ + success = self.run_ansible_command_in_database_server( + f"chmod 770 /var/lib/mysql/{self.database_name}" + ) + if not success: + jingrow.db.set_value("Site Backup", self.name, "status", "Failure") + return + agent = Agent(self.database_server, "Database Server") + job = agent.physical_backup_database(site, self) + jingrow.db.set_value("Site Backup", self.name, "job", job.name) + + def run_ansible_command_in_database_server(self, command: str) -> bool: + virtual_machine_ip = jingrow.db.get_value( + "Virtual Machine", + jingrow.get_value("Database Server", self.database_server, "virtual_machine"), + "public_ip_address", + ) + result = AnsibleAdHoc(sources=f"{virtual_machine_ip},").run(command, self.name)[0] + success = result.get("status") == "Success" + if not success: + pretty_result = json.dumps(result, indent=2, sort_keys=True, default=str) + jingrow.log_error( + "During physical backup creation, failed to execute command in database server", + message=pretty_result, + reference_pagetype=self.pagetype, + reference_name=self.name, + ) + comment = f"
{command}
{pretty_result}
" + self.add_comment(text=comment) + return success + + def create_database_snapshot(self): + if self.database_snapshot: + # Snapshot already exists, So no need to create a new one + return + + server = jingrow.get_value("Site", self.site, "server") + database_server = jingrow.get_value("Server", server, "database_server") + virtual_machine = jingrow.get_pg( + "Virtual Machine", jingrow.get_value("Database Server", database_server, "virtual_machine") + ) + + cache_key = f"volume_active_snapshot:{self.database_server}" + + max_retries = 3 + while max_retries > 0: + is_ongoing_snapshot = jingrow.utils.cint(jingrow.cache.get_value(cache_key, expires=True)) + if not is_ongoing_snapshot: + break + time.sleep(2) + max_retries -= 1 + + if jingrow.cache.get_value(cache_key, expires=True): + raise OngoingSnapshotError("Snapshot creation per volume rate exceeded") + + jingrow.cache.set_value( + cache_key, + 1, + expires_in_sec=15, + ) + + virtual_machine.create_snapshots(exclude_boot_volume=True, created_for_site_update=True) + if len(virtual_machine.flags.created_snapshots) == 0: + jingrow.throw("Failed to create a snapshot for the database server") + jingrow.db.set_value( + "Site Backup", self.name, "database_snapshot", virtual_machine.flags.created_snapshots[0] + ) + + @classmethod + def offsite_backup_exists(cls, site: str, day: datetime.date) -> bool: + return cls.backup_exists(site, day, {"offsite": True}) + + @classmethod + def backup_exists(cls, site: str, day: datetime.date, filters: dict): + base_filters = { + "creation": ("between", [day, day]), + "site": site, + "status": "Success", + } + return jingrow.get_all("Site Backup", {**base_filters, **filters}) + + @classmethod + def file_backup_exists(cls, site: str, day: datetime.date) -> bool: + return cls.backup_exists(site, day, {"with_files": True}) + + +class OngoingSnapshotError(Exception): + """Exception raised when other snapshot creation is ongoing""" + + pass + + +def track_offsite_backups(site: str, backup_data: dict, offsite_backup_data: dict) -> tuple: + remote_files = {"database": None, "site_config": None, "public": None, "private": None} + + if offsite_backup_data: + bucket = get_backup_bucket(jingrow.db.get_value("Site", site, "cluster")) + for type, backup in backup_data.items(): + file_name, file_size = backup["file"], backup["size"] + file_path = offsite_backup_data.get(file_name) + + file_types = { + "database": "application/x-gzip", + "site_config": "application/json", + } + if file_path: + remote_file = jingrow.get_pg( + { + "pagetype": "Remote File", + "site": site, + "file_name": file_name, + "file_path": file_path, + "file_size": file_size, + "file_type": file_types.get(type, "application/x-tar"), + "bucket": bucket, + } + ) + remote_file.save() + add_tag("Offsite Backup", remote_file.pagetype, remote_file.name) + remote_files[type] = remote_file.name + + return ( + remote_files["database"], + remote_files["site_config"], + remote_files["public"], + remote_files["private"], + ) + + +def process_backup_site_job_update(job): + backups = jingrow.get_all("Site Backup", fields=["name", "status"], filters={"job": job.name}, limit=1) + if not backups: + return + backup = backups[0] + if job.status != backup.status: + status = job.status + if job.status == "Delivery Failure": + status = "Failure" + + if job.status == "Success": + if jingrow.get_value("Site Backup", backup.name, "physical"): + pg: SiteBackup = jingrow.get_pg("Site Backup", backup.name) + pg.files_availability = "Available" + pg.status = "Success" + pg.save() 
+ else: + jingrow.db.set_value("Site Backup", backup.name, "status", status) + job_data = json.loads(job.data) + backup_data, offsite_backup_data = job_data["backups"], job_data["offsite"] + ( + remote_database, + remote_config_file, + remote_public, + remote_private, + ) = track_offsite_backups(job.site, backup_data, offsite_backup_data) + + site_backup_dict = { + "files_availability": "Available", + "database_size": backup_data["database"]["size"], + "database_url": backup_data["database"]["url"], + "database_file": backup_data["database"]["file"], + "remote_database_file": remote_database, + } + + if "site_config" in backup_data: + site_backup_dict.update( + { + "config_file_size": backup_data["site_config"]["size"], + "config_file_url": backup_data["site_config"]["url"], + "config_file": backup_data["site_config"]["file"], + "remote_config_file": remote_config_file, + } + ) + + if "private" in backup_data and "public" in backup_data: + site_backup_dict.update( + { + "private_size": backup_data["private"]["size"], + "private_url": backup_data["private"]["url"], + "private_file": backup_data["private"]["file"], + "remote_public_file": remote_public, + "public_size": backup_data["public"]["size"], + "public_url": backup_data["public"]["url"], + "public_file": backup_data["public"]["file"], + "remote_private_file": remote_private, + } + ) + + jingrow.db.set_value("Site Backup", backup.name, site_backup_dict) + else: + site_backup: SiteBackup = jingrow.get_pg("Site Backup", backup.name) + site_backup.status = status + site_backup.save() + + +def get_backup_bucket(cluster, region=False): + bucket_for_cluster = jingrow.get_all("Backup Bucket", {"cluster": cluster}, ["name", "region"], limit=1) + default_bucket = jingrow.db.get_single_value("Jcloud Settings", "aws_s3_bucket") + + if region: + return bucket_for_cluster[0] if bucket_for_cluster else default_bucket + return bucket_for_cluster[0]["name"] if bucket_for_cluster else default_bucket + + +def on_pagetype_update(): + jingrow.db.add_index("Site Backup", ["files_availability", "job"]) diff --git a/jcloud/jcloud/pagetype/site_backup/test_site_backup.py b/jcloud/jcloud/pagetype/site_backup/test_site_backup.py new file mode 100644 index 0000000..fdfabb4 --- /dev/null +++ b/jcloud/jcloud/pagetype/site_backup/test_site_backup.py @@ -0,0 +1,232 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# See license.txt + + +import json +from datetime import datetime +from unittest.mock import Mock, patch + +import jingrow +from jingrow.tests.utils import JingrowTestCase + +from jcloud.jcloud.pagetype.agent_job.agent_job import AgentJob, process_job_updates +from jcloud.jcloud.pagetype.remote_file.test_remote_file import create_test_remote_file +from jcloud.jcloud.pagetype.site.test_site import create_test_site + + +@patch.object(AgentJob, "enqueue_http_request", new=Mock()) +def create_test_site_backup( + site: str, + creation: datetime = None, + files_availability: str = "Available", + offsite: bool = True, + status: str = "Success", +): + """ + Create test site backup pg for required timestamp. + + Makes offsite backups by default along with remote files. 
+ """ + creation = creation or jingrow.utils.now_datetime() + params_dict = { + "pagetype": "Site Backup", + "status": status, + "site": site, + "files_availability": files_availability, + "offsite": offsite, + } + if offsite: + params_dict["remote_public_file"] = create_test_remote_file(site, creation).name + params_dict["remote_private_file"] = create_test_remote_file(site, creation).name + params_dict["remote_database_file"] = create_test_remote_file(site, creation).name + site_backup = jingrow.get_pg(params_dict).insert(ignore_if_duplicate=True) + + site_backup.db_set("creation", creation) + site_backup.reload() + return site_backup + + +class TestSiteBackup(JingrowTestCase): + def setUp(self): + self.site = create_test_site(subdomain="breadshop") + self.site_backup = create_test_site_backup( + site=self.site.name, + files_availability="Unavailable", + offsite=False, + status="Pending", + ) + self.job = jingrow.get_pg("Agent Job", self.site_backup.job) + + def tearDown(self): + jingrow.db.rollback() + + def test_backup_job_callback_with_only_database(self): + self.job.db_set("status", "Success") + self.job.db_set( + "data", + json.dumps( + { + "backups": { + "database": { + "file": "breadshop_database.sql.gz", + "path": "/benches/breadshop_database.sql.gz", + "size": 12345, + "url": "https://breadshop.com/backups/breadshop-database.sql.gz", + }, + }, + "offsite": {}, + } + ), + ) + process_job_updates(self.job.name) + self.site_backup.reload() + self.assertEqual(self.site_backup.status, "Success") + self.assertEqual(self.site_backup.database_file, "breadshop_database.sql.gz") + self.assertEqual(self.site_backup.database_size, "12345") + self.assertEqual( + self.site_backup.database_url, + "https://breadshop.com/backups/breadshop-database.sql.gz", + ) + + def test_backup_job_callback_with_config(self): + self.job.db_set("status", "Success") + self.job.db_set( + "data", + json.dumps( + { + "backups": { + "database": { + "file": "breadshop_database.sql.gz", + "path": "/benches/breadshop_database.sql.gz", + "size": 12345, + "url": "https://breadshop.com/backups/breadshop-database.sql.gz", + }, + "site_config": { + "file": "breadshop_config.json", + "path": "/benches/breadshop_config.json", + "size": 12345, + "url": "https://breadshop.com/backups/breadshop-config.json", + }, + }, + "offsite": {}, + } + ), + ) + process_job_updates(self.job.name) + self.site_backup.reload() + self.assertEqual(self.site_backup.status, "Success") + self.assertEqual(self.site_backup.database_file, "breadshop_database.sql.gz") + self.assertEqual(self.site_backup.database_size, "12345") + self.assertEqual( + self.site_backup.database_url, + "https://breadshop.com/backups/breadshop-database.sql.gz", + ) + self.assertEqual(self.site_backup.config_file, "breadshop_config.json") + self.assertEqual(self.site_backup.config_file_size, "12345") + self.assertEqual( + self.site_backup.config_file_url, + "https://breadshop.com/backups/breadshop-config.json", + ) + + def test_backup_job_callback_with_files(self): + self.job.db_set("status", "Success") + self.job.db_set( + "data", + json.dumps( + { + "backups": { + "database": { + "file": "breadshop_database.sql.gz", + "path": "/benches/breadshop_database.sql.gz", + "size": 12345, + "url": "https://breadshop.com/backups/breadshop-database.sql.gz", + }, + "public": { + "file": "breadshop_public_files.tar", + "path": "/benches/breadshop_public_files.tar", + "size": 12345, + "url": "https://breadshop.com/backups/breadshop-public-files.tar", + }, + "private": { + "file": 
"breadshop_private_files.tar", + "path": "/benches/breadshop_private_files.tar", + "size": 12345, + "url": "https://breadshop.com/backups/breadshop-private-files.tar", + }, + }, + "offsite": {}, + } + ), + ) + process_job_updates(self.job.name) + self.site_backup.reload() + self.assertEqual(self.site_backup.status, "Success") + self.assertEqual(self.site_backup.database_file, "breadshop_database.sql.gz") + self.assertEqual(self.site_backup.database_size, "12345") + self.assertEqual( + self.site_backup.database_url, + "https://breadshop.com/backups/breadshop-database.sql.gz", + ) + self.assertEqual(self.site_backup.public_file, "breadshop_public_files.tar") + self.assertEqual(self.site_backup.public_size, "12345") + self.assertEqual( + self.site_backup.public_url, + "https://breadshop.com/backups/breadshop-public-files.tar", + ) + self.assertEqual(self.site_backup.private_file, "breadshop_private_files.tar") + self.assertEqual(self.site_backup.private_size, "12345") + self.assertEqual( + self.site_backup.private_url, + "https://breadshop.com/backups/breadshop-private-files.tar", + ) + + def test_backup_job_callback_with_offsite(self): + self.job.db_set("status", "Success") + self.job.db_set( + "data", + json.dumps( + { + "backups": { + "database": { + "file": "breadshop_database.sql.gz", + "path": "/benches/breadshop_database.sql.gz", + "size": 12345, + "url": "https://breadshop.com/backups/breadshop-database.sql.gz", + }, + "site_config": { + "file": "breadshop_config.json", + "path": "/benches/breadshop_config.json", + "size": 12345, + "url": "https://breadshop.com/backups/breadshop-config.json", + }, + "public": { + "file": "breadshop_public_files.tar", + "path": "/benches/breadshop_public_files.tar", + "size": 12345, + "url": "https://breadshop.com/backups/breadshop-public-files.tar", + }, + "private": { + "file": "breadshop_private_files.tar", + "path": "/benches/breadshop_private_files.tar", + "size": 12345, + "url": "https://breadshop.com/backups/breadshop-private-files.tar", + }, + }, + "offsite": { + "breadshop_database.sql.gz": "offsite.dev/breadshop_database.sql.gz", + "breadshop_config.json": "offsite.dev/breadshop_config.json", + "breadshop_public_files.tar": "offsite.dev/breadshop_public_files.tar", + "breadshop_private_files.tar": "offsite.dev/breadshop_private_files.tar", + }, + } + ), + ) + + process_job_updates(self.job.name) + self.site_backup.reload() + self.assertEqual(self.site_backup.status, "Success") + self.assertTrue(self.site_backup.remote_database_file) + self.assertTrue(self.site_backup.remote_public_file) + self.assertTrue(self.site_backup.remote_private_file) + self.assertTrue(self.site_backup.remote_config_file) diff --git a/jcloud/jcloud/pagetype/site_config/__init__.py b/jcloud/jcloud/pagetype/site_config/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/site_config/site_config.json b/jcloud/jcloud/pagetype/site_config/site_config.json new file mode 100644 index 0000000..06e3385 --- /dev/null +++ b/jcloud/jcloud/pagetype/site_config/site_config.json @@ -0,0 +1,57 @@ +{ + "actions": [], + "creation": "2020-09-11 11:55:45.987702", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "key", + "value", + "type", + "internal" + ], + "fields": [ + { + "fieldname": "key", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Key", + "reqd": 1 + }, + { + "fieldname": "value", + "fieldtype": "Code", + "in_list_view": 1, + "label": "Value", + "reqd": 1 + }, + { + "allow_in_quick_entry": 
1, + "default": "0", + "fieldname": "internal", + "fieldtype": "Check", + "in_list_view": 1, + "label": "Internal Usage", + "read_only": 1 + }, + { + "fieldname": "type", + "fieldtype": "Select", + "label": "Type", + "options": "\nString\nPassword\nNumber\nBoolean\nJSON" + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2024-02-23 09:28:25.746695", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Site Config", + "owner": "Administrator", + "permissions": [], + "quick_entry": 1, + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/site_config/site_config.py b/jcloud/jcloud/pagetype/site_config/site_config.py new file mode 100644 index 0000000..37634aa --- /dev/null +++ b/jcloud/jcloud/pagetype/site_config/site_config.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +import jingrow +from jingrow.model.document import Document + + +class Config(Document): + dashboard_fields = ["key", "type", "value"] + + def get_type(self): + return jingrow.db.get_value("Site Config Key", self.key, "type") + + def format_config_for_list(configs): + config_key_titles = jingrow.db.get_all( + "Site Config Key", + fields=["key", "title"], + filters={"key": ["in", [c.key for c in configs]]}, + ) + secret_keys = jingrow.get_all( + "Site Config Key", filters={"type": "Password"}, pluck="key" + ) + for config in configs: + if config.key in secret_keys: + config.value = "*******" + config.title = next((c.title for c in config_key_titles if c.key == config.key), "") + return configs + + +class SiteConfig(Config): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + internal: DF.Check + key: DF.Data + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + type: DF.Literal["", "String", "Password", "Number", "Boolean", "JSON"] + value: DF.Code + # end: auto-generated types + + @staticmethod + def get_list_query(query, filters=None, **list_args): + Config = jingrow.qb.PageType("Site Config") + query = query.where(Config.internal == 0) + configs = query.run(as_dict=True) + return SiteConfig.format_config_for_list(configs) diff --git a/jcloud/jcloud/pagetype/site_config_key/__init__.py b/jcloud/jcloud/pagetype/site_config_key/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/site_config_key/site_config_key.js b/jcloud/jcloud/pagetype/site_config_key/site_config_key.js new file mode 100644 index 0000000..bdbd39b --- /dev/null +++ b/jcloud/jcloud/pagetype/site_config_key/site_config_key.js @@ -0,0 +1,7 @@ +// Copyright (c) 2020, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Site Config Key', { + // refresh: function(frm) { + // } +}); diff --git a/jcloud/jcloud/pagetype/site_config_key/site_config_key.json b/jcloud/jcloud/pagetype/site_config_key/site_config_key.json new file mode 100644 index 0000000..9a9830a --- /dev/null +++ b/jcloud/jcloud/pagetype/site_config_key/site_config_key.json @@ -0,0 +1,84 @@ +{ + "actions": [], + "autoname": "field:key", + "creation": "2020-09-15 12:24:29.013894", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "key", + "type", + "title", + "description", + "internal" + ], + "fields": [ + { + "fieldname": "key", + "fieldtype": "Data", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Key", + "reqd": 1, + "unique": 1 + }, + { + "allow_in_quick_entry": 1, + "fieldname": "type", + "fieldtype": "Select", + "in_list_view": 1, + "in_preview": 1, + "in_standard_filter": 1, + "label": "Type", + "options": "Password\nString\nNumber\nBoolean\nJSON" + }, + { + "fieldname": "title", + "fieldtype": "Data", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Title", + "translatable": 1 + }, + { + "fieldname": "description", + "fieldtype": "Small Text", + "label": "Description" + }, + { + "default": "0", + "description": "If set, this key won't be allowed to be set via external APIs.", + "fieldname": "internal", + "fieldtype": "Check", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Internal Usage" + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2021-02-15 20:21:36.837382", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Site Config Key", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "quick_entry": 1, + "sort_field": "modified", + "sort_order": "DESC", + "title_field": "title", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/site_config_key/site_config_key.py b/jcloud/jcloud/pagetype/site_config_key/site_config_key.py new file mode 100644 index 0000000..0587959 --- /dev/null +++ b/jcloud/jcloud/pagetype/site_config_key/site_config_key.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +from jingrow.model.document import Document + + +class 
SiteConfigKey(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + description: DF.SmallText | None + internal: DF.Check + key: DF.Data + title: DF.Data | None + type: DF.Literal["Password", "String", "Number", "Boolean", "JSON"] + # end: auto-generated types + + dashboard_fields = ["key", "title", "description", "type", "internal"] + + def validate(self): + import jingrow + + if not self.title: + self.title = self.key.replace("_", " ").title() + + if not self.internal and jingrow.db.exists( + "Site Config Key Blacklist", {"key": self.key} + ): + jingrow.throw( + f"Key {self.key} is Blacklisted. Please contact Administrators to enable it." + ) diff --git a/jcloud/jcloud/pagetype/site_config_key/test_site_config_key.py b/jcloud/jcloud/pagetype/site_config_key/test_site_config_key.py new file mode 100644 index 0000000..70e52c6 --- /dev/null +++ b/jcloud/jcloud/pagetype/site_config_key/test_site_config_key.py @@ -0,0 +1,11 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# See license.txt + + +# import jingrow +import unittest + + +class TestSiteConfigKey(unittest.TestCase): + pass diff --git a/jcloud/jcloud/pagetype/site_config_key_blacklist/__init__.py b/jcloud/jcloud/pagetype/site_config_key_blacklist/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/site_config_key_blacklist/site_config_key_blacklist.js b/jcloud/jcloud/pagetype/site_config_key_blacklist/site_config_key_blacklist.js new file mode 100644 index 0000000..cfaffb4 --- /dev/null +++ b/jcloud/jcloud/pagetype/site_config_key_blacklist/site_config_key_blacklist.js @@ -0,0 +1,7 @@ +// Copyright (c) 2020, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Site Config Key Blacklist', { + // refresh: function(frm) { + // } +}); diff --git a/jcloud/jcloud/pagetype/site_config_key_blacklist/site_config_key_blacklist.json b/jcloud/jcloud/pagetype/site_config_key_blacklist/site_config_key_blacklist.json new file mode 100644 index 0000000..bcda77b --- /dev/null +++ b/jcloud/jcloud/pagetype/site_config_key_blacklist/site_config_key_blacklist.json @@ -0,0 +1,54 @@ +{ + "actions": [], + "autoname": "field:key", + "creation": "2020-09-15 12:39:37.408188", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "key", + "reason" + ], + "fields": [ + { + "fieldname": "key", + "fieldtype": "Data", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Key", + "reqd": 1, + "unique": 1 + }, + { + "fieldname": "reason", + "fieldtype": "Small Text", + "label": "Reason" + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2021-02-15 20:20:47.860074", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Site Config Key Blacklist", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "quick_entry": 1, + "sort_field": "modified", + "sort_order": "DESC", + "title_field": "key", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/site_config_key_blacklist/site_config_key_blacklist.py b/jcloud/jcloud/pagetype/site_config_key_blacklist/site_config_key_blacklist.py new file mode 100644 index 0000000..a8d389e --- /dev/null +++ 
b/jcloud/jcloud/pagetype/site_config_key_blacklist/site_config_key_blacklist.py @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +from jingrow.model.document import Document + + +class SiteConfigKeyBlacklist(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + key: DF.Data + reason: DF.SmallText | None + # end: auto-generated types + + def validate(self): + import jingrow + + if jingrow.db.exists("Site Config Key", {"key": self.key, "enabled": True}): + jingrow.msgprint(f"Key {self.key} exists in Site Config Key. This means that the key is already in use and blacklisting it may affect existing Site Configs.") diff --git a/jcloud/jcloud/pagetype/site_config_key_blacklist/test_site_config_key_blacklist.py b/jcloud/jcloud/pagetype/site_config_key_blacklist/test_site_config_key_blacklist.py new file mode 100644 index 0000000..70aec7f --- /dev/null +++ b/jcloud/jcloud/pagetype/site_config_key_blacklist/test_site_config_key_blacklist.py @@ -0,0 +1,11 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# See license.txt + + +# import jingrow +import unittest + + +class TestSiteConfigKeyBlacklist(unittest.TestCase): + pass diff --git a/jcloud/jcloud/pagetype/site_database_table_permission/__init__.py b/jcloud/jcloud/pagetype/site_database_table_permission/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/site_database_table_permission/site_database_table_permission.json b/jcloud/jcloud/pagetype/site_database_table_permission/site_database_table_permission.json new file mode 100644 index 0000000..41b902b --- /dev/null +++ b/jcloud/jcloud/pagetype/site_database_table_permission/site_database_table_permission.json @@ -0,0 +1,72 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2024-10-31 17:08:37.280675", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "table", + "column_break_fbqg", + "mode", + "section_break_rswb", + "allow_all_columns", + "selected_columns" + ], + "fields": [ + { + "fieldname": "table", + "fieldtype": "Data", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Table", + "reqd": 1 + }, + { + "fieldname": "mode", + "fieldtype": "Select", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Mode", + "options": "read_only\nread_write", + "reqd": 1 + }, + { + "fieldname": "column_break_fbqg", + "fieldtype": "Column Break" + }, + { + "fieldname": "section_break_rswb", + "fieldtype": "Section Break" + }, + { + "default": "1", + "fieldname": "allow_all_columns", + "fieldtype": "Check", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Allow All Columns" + }, + { + "depends_on": "eval: !pg.allow_all_columns", + "description": "Comma separated column names", + "fieldname": "selected_columns", + "fieldtype": "Small Text", + "label": "Selected Columns", + "mandatory_depends_on": "eval: !pg.allow_all_columns", + "not_nullable": 1 + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2024-10-31 17:17:51.606102", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Site Database Table Permission", + "owner": "Administrator", + "permissions": [], + "sort_field": "creation", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/site_database_table_permission/site_database_table_permission.py 
b/jcloud/jcloud/pagetype/site_database_table_permission/site_database_table_permission.py new file mode 100644 index 0000000..9297660 --- /dev/null +++ b/jcloud/jcloud/pagetype/site_database_table_permission/site_database_table_permission.py @@ -0,0 +1,26 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class SiteDatabaseTablePermission(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + allow_all_columns: DF.Check + mode: DF.Literal["read_only", "read_write"] + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + selected_columns: DF.SmallText + table: DF.Data + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/site_database_user/__init__.py b/jcloud/jcloud/pagetype/site_database_user/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/site_database_user/site_database_user.js b/jcloud/jcloud/pagetype/site_database_user/site_database_user.js new file mode 100644 index 0000000..e6af91d --- /dev/null +++ b/jcloud/jcloud/pagetype/site_database_user/site_database_user.js @@ -0,0 +1,65 @@ +// Copyright (c) 2024, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Site Database User', { + refresh(frm) { + [ + [__('Apply Changes'), 'apply_changes', true], + [ + __('Create User in Database'), + 'create_user', + !frm.pg.user_created_in_database, + ], + [ + __('Remove User from Database'), + 'remove_user', + frm.pg.user_created_in_database, + ], + [ + __('Add User to ProxySQL'), + 'add_user_to_proxysql', + !frm.pg.user_added_in_proxysql, + ], + [ + __('Remove User from ProxySQL'), + 'remove_user_from_proxysql', + frm.pg.user_added_in_proxysql, + ], + [__('Archive User'), 'archive', frm.pg.status !== 'Archived'], + ].forEach(([label, method, condition]) => { + if (typeof condition === 'undefined' || condition) { + frm.add_custom_button( + label, + () => { + jingrow.confirm( + `Are you sure you want to ${label.toLowerCase()} this site?`, + () => frm.call(method).then((r) => frm.refresh()), + ); + }, + __('Actions'), + ); + } + }); + + frm.add_custom_button( + __('Show Credential'), + () => + frm.call('get_credential').then((r) => { + let message = `Host: ${r.message.host} + +Port: ${r.message.port} + +Database: ${r.message.database} + +Username: ${r.message.username} + +Password: ${r.message.password} + +\`\`\`\nmysql -u ${r.message.username} -p${r.message.password} -h ${r.message.host} -P ${r.message.port} --ssl --ssl-verify-server-cert\n\`\`\``; + + jingrow.msgprint(jingrow.markdown(message), 'Database Credentials'); + }), + __('Actions'), + ); + }, +}); diff --git a/jcloud/jcloud/pagetype/site_database_user/site_database_user.json b/jcloud/jcloud/pagetype/site_database_user/site_database_user.json new file mode 100644 index 0000000..c9cc821 --- /dev/null +++ b/jcloud/jcloud/pagetype/site_database_user/site_database_user.json @@ -0,0 +1,200 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2024-10-31 16:54:56.752608", + "pagetype": "PageType", + "engine": "InnoDB", + "field_order": [ + "status", + "label", + "mode", + "site", + "team", + "column_break_udtx", + "max_connections", + "username", + "password", + "user_created_in_database", + "user_added_in_proxysql", + "section_break_cpbg", + "permissions", + "section_break_ubkn", + 
"column_break_rczb", + "failed_agent_job", + "failure_reason" + ], + "fields": [ + { + "fieldname": "site", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Site", + "options": "Site", + "reqd": 1, + "search_index": 1 + }, + { + "fieldname": "mode", + "fieldtype": "Select", + "in_list_view": 1, + "label": "Mode", + "options": "read_only\nread_write\ngranular", + "reqd": 1 + }, + { + "fieldname": "column_break_udtx", + "fieldtype": "Column Break" + }, + { + "fieldname": "username", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Username", + "not_nullable": 1, + "read_only": 1, + "set_only_once": 1, + "unique": 1 + }, + { + "fieldname": "password", + "fieldtype": "Password", + "label": "Password", + "not_nullable": 1, + "read_only": 1, + "set_only_once": 1 + }, + { + "fieldname": "section_break_cpbg", + "fieldtype": "Section Break" + }, + { + "depends_on": "eval: pg.mode == \"granular\"", + "fieldname": "permissions", + "fieldtype": "Table", + "label": "Permissions", + "options": "Site Database Table Permission" + }, + { + "default": "Pending", + "fieldname": "status", + "fieldtype": "Select", + "in_list_view": 1, + "label": "Status", + "options": "Pending\nActive\nFailed\nArchived", + "read_only": 1, + "reqd": 1 + }, + { + "default": "0", + "fieldname": "user_added_in_proxysql", + "fieldtype": "Check", + "label": "User Added in ProxySQL", + "read_only": 1 + }, + { + "default": "0", + "fieldname": "user_created_in_database", + "fieldtype": "Check", + "label": "User Created in Database", + "read_only": 1 + }, + { + "depends_on": "eval: pg.status === \"Failed\"", + "fieldname": "section_break_ubkn", + "fieldtype": "Section Break" + }, + { + "depends_on": "eval: pg.status === \"Failed\"", + "fieldname": "column_break_rczb", + "fieldtype": "Column Break" + }, + { + "fieldname": "failed_agent_job", + "fieldtype": "Link", + "label": "Failed Agent Job", + "options": "Agent Job" + }, + { + "fieldname": "failure_reason", + "fieldtype": "Small Text", + "label": "Failure Reason", + "not_nullable": 1 + }, + { + "fieldname": "team", + "fieldtype": "Link", + "label": "Team", + "options": "Team", + "reqd": 1, + "search_index": 1 + }, + { + "default": "16", + "fieldname": "max_connections", + "fieldtype": "Int", + "label": "Max Connections", + "set_only_once": 1 + }, + { + "fieldname": "label", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Label", + "reqd": 1 + } + ], + "index_web_pages_for_search": 1, + "links": [ + { + "group": "Related Documents", + "link_pagetype": "Agent Job", + "link_fieldname": "reference_name" + } + ], + "modified": "2024-11-29 16:29:57.632579", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Site Database User", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + }, + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "Jcloud Admin", + "share": 1, + "write": 1 + }, + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "Jcloud Member", + "share": 1, + "write": 1 + } + ], + "sort_field": "creation", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/site_database_user/site_database_user.py b/jcloud/jcloud/pagetype/site_database_user/site_database_user.py new file mode 100644 index 0000000..ed8a057 --- /dev/null 
+++ b/jcloud/jcloud/pagetype/site_database_user/site_database_user.py @@ -0,0 +1,381 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt +from __future__ import annotations + +import re +from collections import Counter + +import jingrow +from jingrow.model.document import Document + +from jcloud.agent import Agent +from jcloud.api.client import dashboard_whitelist +from jcloud.overrides import get_permission_query_conditions_for_pagetype +from jcloud.jcloud.pagetype.site_activity.site_activity import log_site_activity + + +class SiteDatabaseUser(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + from jcloud.jcloud.pagetype.site_database_table_permission.site_database_table_permission import ( + SiteDatabaseTablePermission, + ) + + failed_agent_job: DF.Link | None + failure_reason: DF.SmallText + label: DF.Data + max_connections: DF.Int + mode: DF.Literal["read_only", "read_write", "granular"] + password: DF.Password + permissions: DF.Table[SiteDatabaseTablePermission] + site: DF.Link + status: DF.Literal["Pending", "Active", "Failed", "Archived"] + team: DF.Link + user_added_in_proxysql: DF.Check + user_created_in_database: DF.Check + username: DF.Data + # end: auto-generated types + + dashboard_fields = ( + "label", + "status", + "site", + "username", + "team", + "mode", + "failed_agent_job", + "failure_reason", + "permissions", + "max_connections", + ) + + def validate(self): + if not self.has_value_changed("status"): + self._raise_error_if_archived() + # remove permissions if not granular mode + if self.mode != "granular": + self.permissions.clear() + + if not self.is_new() and self.has_value_changed("max_connections"): + jingrow.throw("You can't update the max database connections. Archive it and create a new one.") + + if not self.max_connections: + jingrow.throw( + "Max database connections can't be zero. You need to opt for at least one connection." + ) + + def before_insert(self): + site = jingrow.get_pg("Site", self.site) + if not site.has_permission(): + jingrow.throw("You don't have permission to create database user") + if not jingrow.db.get_value("Site Plan", site.plan, "database_access"): + jingrow.throw(f"Database Access is not available on {site.plan} plan") + + # validate connection limit + exists_db_users_connection_limit = jingrow.db.get_all( + "Site Database User", + {"site": self.site, "status": ("!=", "Archived")}, + pluck="max_connections", + ) + total_used_connections = sum(exists_db_users_connection_limit) + allowed_max_connections_for_site = site.database_access_connection_limit - total_used_connections + if self.max_connections > allowed_max_connections_for_site: + jingrow.throw( + f"Your site has quota of {site.database_access_connection_limit} database connections.\nYou can't allocate more than {allowed_max_connections_for_site} connections for new user. You can drop other database users to allocate more connections." 
+ ) + + self.status = "Pending" + if not self.username: + self.username = jingrow.generate_hash(length=15) + if not self.password: + self.password = jingrow.generate_hash(length=20) + + def after_insert(self): + log_site_activity( + self.site, + "Create Database User", + reason=f"Created user {self.username} with {self.mode} permission", + ) + if hasattr(self.flags, "ignore_after_insert_hooks") and self.flags.ignore_after_insert_hooks: + """ + Added to make it easy to migrate records of db access users from the site pagetype to site database user + """ + return + self.apply_changes() + + def on_update(self): + if self.has_value_changed("status") and self.status == "Archived": + log_site_activity( + self.site, + "Remove Database User", + reason=f"Removed user {self.username} with {self.mode} permission", + ) + + def _raise_error_if_archived(self): + if self.status == "Archived": + jingrow.throw("User has been deleted and no further changes can be made") + + def _get_database_name(self): + site = jingrow.get_pg("Site", self.site) + db_name = site.fetch_info().get("config", {}).get("db_name") + if not db_name: + jingrow.throw("Failed to fetch database name of site") + return db_name + + @dashboard_whitelist() + def save_and_apply_changes(self, label: str, mode: str, permissions: list): # noqa: C901 + if self.status == "Pending" or self.status == "Archived": + jingrow.throw(f"You can't modify information in {self.status} state. Please try again later.") + + self.label = label + is_db_user_configuration_changed = self.mode != mode or self._is_permissions_changed(permissions) + if is_db_user_configuration_changed: + self.mode = mode + new_permissions = permissions + new_permission_tables = [p["table"] for p in new_permissions] + current_permission_tables = [p.table for p in self.permissions] + # add new permissions + for permission in new_permissions: + if permission["table"] not in current_permission_tables: + self.append("permissions", permission) + # modify permissions + for permission in self.permissions: + for new_permission in new_permissions: + if permission.table == new_permission["table"]: + permission.update(new_permission) + break + # delete permissions which are not in the modified list + self.permissions = [p for p in self.permissions if p.table in new_permission_tables] + + self.save() + if is_db_user_configuration_changed: + self.apply_changes() + + def _is_permissions_changed(self, new_permissions): + if len(new_permissions) != len(self.permissions): + return True + + for permission in new_permissions: + for p in self.permissions: + if permission["table"] == p.table and ( + permission["mode"] != p.mode + or permission["allow_all_columns"] != p.allow_all_columns + or Counter(permission["selected_columns"]) != Counter(p.selected_columns) + ): + return True + + return False + + @jingrow.whitelist() + def apply_changes(self): + if not self.user_created_in_database: + self.create_user() + elif not self.user_added_in_proxysql: + self.add_user_to_proxysql() + else: + self.modify_permissions() + + self.status = "Pending" + self.save(ignore_permissions=True) + + @jingrow.whitelist() + def create_user(self): + self._raise_error_if_archived() + agent = Agent(jingrow.db.get_value("Site", self.site, "server")) + agent.create_database_user( + jingrow.get_pg("Site", self.site), self.username, self.get_password("password"), self.name + ) + + @jingrow.whitelist() + def remove_user(self): + self._raise_error_if_archived() + agent = Agent(jingrow.db.get_value("Site", self.site, "server")) + 
agent.remove_database_user( + jingrow.get_pg("Site", self.site), + self.username, + self.name, + ) + + @jingrow.whitelist() + def add_user_to_proxysql(self): + self._raise_error_if_archived() + database = self._get_database_name() + server = jingrow.db.get_value("Site", self.site, "server") + proxy_server = jingrow.db.get_value("Server", server, "proxy_server") + database_server_name = jingrow.db.get_value( + "Bench", jingrow.db.get_value("Site", self.site, "bench"), "database_server" + ) + database_server = jingrow.get_pg("Database Server", database_server_name) + agent = Agent(proxy_server, server_type="Proxy Server") + agent.add_proxysql_user( + jingrow.get_pg("Site", self.site), + database, + self.username, + self.get_password("password"), + self.max_connections, + database_server, + reference_pagetype="Site Database User", + reference_name=self.name, + ) + + @jingrow.whitelist() + def remove_user_from_proxysql(self): + self._raise_error_if_archived() + server = jingrow.db.get_value("Site", self.site, "server") + proxy_server = jingrow.db.get_value("Server", server, "proxy_server") + agent = Agent(proxy_server, server_type="Proxy Server") + agent.remove_proxysql_user( + jingrow.get_pg("Site", self.site), + self.username, + reference_pagetype="Site Database User", + reference_name=self.name, + ) + + @jingrow.whitelist() + def modify_permissions(self): + self._raise_error_if_archived() + log_site_activity( + self.site, + "Modify Database User Permissions", + reason=f"Modified user {self.username} with {self.mode} permission", + ) + server = jingrow.db.get_value("Site", self.site, "server") + agent = Agent(server) + table_permissions = {} + + if self.mode == "granular": + for x in self.permissions: + table_permissions[x.table] = { + "mode": x.mode, + "columns": "*" + if x.allow_all_columns + else [c.strip() for c in x.selected_columns.splitlines() if c.strip()], + } + + agent.modify_database_user_permissions( + jingrow.get_pg("Site", self.site), + self.username, + self.mode, + table_permissions, + self.name, + ) + + @dashboard_whitelist() + def get_credential(self): + server = jingrow.db.get_value("Site", self.site, "server") + proxy_server = jingrow.db.get_value("Server", server, "proxy_server") + database = self._get_database_name() + return { + "host": proxy_server, + "port": 3306, + "database": database, + "username": self.username, + "password": self.get_password("password"), + "mode": self.mode, + "max_connections": self.max_connections, + } + + @dashboard_whitelist() + def archive(self, raise_error: bool = True, skip_remove_db_user_step: bool = False): + if not raise_error and self.status == "Archived": + return + self._raise_error_if_archived() + self.status = "Pending" + self.save() + + if self.user_created_in_database and not skip_remove_db_user_step: + """ + If we are dropping the database, there is no need to drop + db users separately. 
+ In those cases, use `skip_remove_db_user_step` param to skip it + """ + self.remove_user() + else: + self.user_created_in_database = False + self.save() + + if self.user_added_in_proxysql: + self.remove_user_from_proxysql() + + if not self.user_created_in_database and not self.user_added_in_proxysql: + self.status = "Archived" + self.save() + + @staticmethod + def process_job_update(job): # noqa: C901 + if job.status not in ("Success", "Failure"): + return + + if not job.reference_name or not jingrow.db.exists("Site Database User", job.reference_name): + return + + pg: SiteDatabaseUser = jingrow.get_pg("Site Database User", job.reference_name) + + if job.status == "Failure": + pg.status = "Failed" + pg.failed_agent_job = job.name + if job.job_type == "Modify Database User Permissions": + pg.failure_reason = SiteDatabaseUser.user_addressable_error_from_stacktrace(job.traceback) + pg.save(ignore_permissions=True) + return + + if job.job_type == "Create Database User": + pg.user_created_in_database = True + if not pg.user_added_in_proxysql: + pg.add_user_to_proxysql() + if job.job_type == "Remove Database User": + pg.user_created_in_database = False + elif job.job_type == "Add User to ProxySQL": + pg.user_added_in_proxysql = True + pg.modify_permissions() + elif job.job_type == "Remove User from ProxySQL": + pg.user_added_in_proxysql = False + elif job.job_type == "Modify Database User Permissions": + pg.status = "Active" + + pg.save(ignore_permissions=True) + pg.reload() + + if ( + job.job_type in ("Remove Database User", "Remove User from ProxySQL") + and not pg.user_added_in_proxysql + and not pg.user_created_in_database + ): + pg.archive() + + @staticmethod + def user_addressable_error_from_stacktrace(stacktrace: str): + pattern = r"peewee\.\w+Error: (.*)?" + default_error_msg = "Unknown error. Please try again.\nIf the error persists, please contact support." + + matches = re.findall(pattern, stacktrace) + if len(matches) == 0: + return default_error_msg + data = matches[0].strip().replace("(", "").replace(")", "").split(",", 1) + if len(data) != 2: + return default_error_msg + + if data[0] == "1054": + pattern = r"Unknown column '(.*)' in '(.*)'\"*?" + matches = re.findall(pattern, data[1]) + if len(matches) == 1 and len(matches[0]) == 2: + return f"Column '{matches[0][0]}' doesn't exist in '{matches[0][1]}' table.\nPlease remove the column from permissions configuration and apply changes." + + elif data[0] == "1146": + pattern = r"Table '(.*)' doesn't exist" + matches = re.findall(pattern, data[1]) + if len(matches) == 1 and isinstance(matches[0], str): + table_name = matches[0] + table_name = table_name.split(".")[-1] + return f"Table '{table_name}' doesn't exist.\nPlease remove it from permissions table and apply changes." 
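+ # Illustrative sketch, not taken from the source: the two branches above map raw
+ # peewee/MySQL errors to user-facing hints. Assuming the agent traceback contains a
+ # line such as
+ #   peewee.OperationalError: (1054, "Unknown column 'owner' in 'field list'")
+ # the 1054 branch would return
+ #   "Column 'owner' doesn't exist in 'field list' table. ..."
+ # and for an assumed line such as
+ #   peewee.ProgrammingError: (1146, "Table '_abc123.tabToDo' doesn't exist")
+ # the 1146 branch strips the database prefix and returns
+ #   "Table 'tabToDo' doesn't exist. ..."
+ # Any traceback that doesn't match these shapes falls through to the default message below.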
+ + return default_error_msg + + +get_permission_query_conditions = get_permission_query_conditions_for_pagetype("Site Database User") diff --git a/jcloud/jcloud/pagetype/site_database_user/test_site_database_user.py b/jcloud/jcloud/pagetype/site_database_user/test_site_database_user.py new file mode 100644 index 0000000..4e03873 --- /dev/null +++ b/jcloud/jcloud/pagetype/site_database_user/test_site_database_user.py @@ -0,0 +1,20 @@ +# Copyright (c) 2024, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests import UnitTestCase + +# On IntegrationTestCase, the pagetype test records and all +# link-field test record depdendencies are recursively loaded +# Use these module variables to add/remove to/from that list +EXTRA_TEST_RECORD_DEPENDENCIES = [] # eg. ["User"] +IGNORE_TEST_RECORD_DEPENDENCIES = [] # eg. ["User"] + + +class TestSiteDatabaseUser(UnitTestCase): + """ + Unit tests for SiteDatabaseUser. + Use this class for testing individual functions and methods. + """ + + pass diff --git a/jcloud/jcloud/pagetype/site_domain/__init__.py b/jcloud/jcloud/pagetype/site_domain/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/site_domain/site_domain.js b/jcloud/jcloud/pagetype/site_domain/site_domain.js new file mode 100644 index 0000000..6fe01dc --- /dev/null +++ b/jcloud/jcloud/pagetype/site_domain/site_domain.js @@ -0,0 +1,10 @@ +// Copyright (c) 2020, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Site Domain', { + refresh: function (frm) { + frm.add_custom_button('Create DNS Record', () => { + frm.call('create_dns_record').then((r) => frm.refresh()); + }); + }, +}); diff --git a/jcloud/jcloud/pagetype/site_domain/site_domain.json b/jcloud/jcloud/pagetype/site_domain/site_domain.json new file mode 100644 index 0000000..5e5af12 --- /dev/null +++ b/jcloud/jcloud/pagetype/site_domain/site_domain.json @@ -0,0 +1,149 @@ +{ + "actions": [], + "allow_rename": 1, + "autoname": "field:domain", + "creation": "2020-03-17 16:35:11.520003", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "status", + "site", + "team", + "domain", + "column_break_faqy", + "tls_certificate", + "retry_count", + "redirect_to_primary", + "dns_section", + "dns_type", + "dns_response" + ], + "fields": [ + { + "fieldname": "site", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Site", + "options": "Site", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "domain", + "fieldtype": "Data", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Domain", + "read_only": 1, + "reqd": 1, + "unique": 1 + }, + { + "default": "CNAME", + "fieldname": "dns_type", + "fieldtype": "Select", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "DNS Type", + "options": "A\nNS\nCNAME", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "tls_certificate", + "fieldtype": "Link", + "label": "TLS Certificate", + "options": "TLS Certificate" + }, + { + "default": "Pending", + "fieldname": "status", + "fieldtype": "Select", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Status", + "options": "Pending\nIn Progress\nActive\nBroken", + "reqd": 1 + }, + { + "default": "0", + "fieldname": "retry_count", + "fieldtype": "Int", + "label": "Retry Count", + "read_only": 1, + "reqd": 1 + }, + { + "default": "0", + "fieldname": "redirect_to_primary", + "fieldtype": "Check", + "label": "Redirect To Primary" + }, + { + "fetch_from": "site.team", + "fieldname": "team", 
+ "fieldtype": "Link", + "label": "Team", + "options": "Team", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "column_break_faqy", + "fieldtype": "Column Break" + }, + { + "fieldname": "dns_section", + "fieldtype": "Section Break", + "label": "DNS" + }, + { + "fieldname": "dns_response", + "fieldtype": "Code", + "label": "DNS Response", + "read_only": 1 + } + ], + "links": [], + "modified": "2024-06-21 15:19:41.758948", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Site Domain", + "naming_rule": "By fieldname", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + }, + { + "create": 1, + "delete": 1, + "read": 1, + "role": "Jcloud Admin", + "write": 1 + }, + { + "create": 1, + "delete": 1, + "read": 1, + "role": "Jcloud Member", + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "title_field": "domain", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/site_domain/site_domain.py b/jcloud/jcloud/pagetype/site_domain/site_domain.py new file mode 100644 index 0000000..aa44175 --- /dev/null +++ b/jcloud/jcloud/pagetype/site_domain/site_domain.py @@ -0,0 +1,279 @@ +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt +from __future__ import annotations + +import json +from typing import ClassVar + +import jingrow +import rq +from jingrow.model.document import Document + +from jcloud.agent import Agent +from jcloud.api.site import check_dns +from jcloud.exceptions import ( + AAAARecordExists, + ConflictingCAARecord, + ConflictingDNSRecord, + MultipleARecords, + MultipleCNAMERecords, +) +from jcloud.overrides import get_permission_query_conditions_for_pagetype +from jcloud.utils import log_error +from jcloud.utils.dns import create_dns_record +from jcloud.utils.jobs import has_job_timeout_exceeded + + +class SiteDomain(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + dns_response: DF.Code | None + dns_type: DF.Literal["A", "NS", "CNAME"] + domain: DF.Data + redirect_to_primary: DF.Check + retry_count: DF.Int + site: DF.Link + status: DF.Literal["Pending", "In Progress", "Active", "Broken"] + team: DF.Link + tls_certificate: DF.Link | None + # end: auto-generated types + + dashboard_fields: ClassVar = ["domain", "status", "dns_type", "site", "redirect_to_primary"] + + @staticmethod + def get_list_query(query, filters=None, **list_args): + domains = query.run(as_dict=1) + if filters.site: + host_name = jingrow.db.get_value("Site", filters.site, "host_name") + for domain in domains: + if domain.domain == host_name: + domain.primary = True + break + domains.sort(key=lambda domain: not domain.primary) + return domains + return None + + def after_insert(self): + if self.default: + return + + if self.has_root_tls_certificate: + server = jingrow.db.get_value("Site", self.site, "server") + proxy_server = jingrow.db.get_value("Server", server, "proxy_server") + + agent = Agent(server=proxy_server, server_type="Proxy Server") + agent.add_domain_to_upstream(server=server, site=self.site, domain=self.domain) + return + + self.create_tls_certificate() + + def validate(self): + if self.has_value_changed("redirect_to_primary"): + if self.redirect_to_primary: + self.setup_redirect_in_proxy() + elif not self.is_new(): + self.remove_redirect_in_proxy() + + @jingrow.whitelist() + def create_dns_record(self): + site = jingrow.get_pg("Site", self.site) + if not self.domain.endswith(site.domain): + return + create_dns_record(site, self.domain) + + @property + def default(self): + return self.domain == self.site + + @property + def has_root_tls_certificate(self): + return bool(jingrow.db.exists("Root Domain", self.domain.split(".", 1)[1], "name")) + + def setup_redirect_in_proxy(self): + site = jingrow.get_pg("Site", self.site) + target = site.host_name + if target == self.name: + jingrow.throw("Primary domain can't be redirected.", exc=jingrow.exceptions.ValidationError) + site.set_redirects_in_proxy([self.name]) + + def remove_redirect_in_proxy(self): + site = jingrow.get_pg("Site", self.site) + site.unset_redirects_in_proxy([self.name]) + + def setup_redirect(self): + self.redirect_to_primary = True + self.save() + + def remove_redirect(self): + self.redirect_to_primary = False + self.save() + + def create_tls_certificate(self): + certificate = jingrow.get_pg( + { + "pagetype": "TLS Certificate", + "wildcard": False, + "domain": self.domain, + "team": self.team, + } + ).insert() + self.tls_certificate = certificate.name + self.save() + + def process_tls_certificate_update(self): + certificate = jingrow.db.get_value( + "TLS Certificate", self.tls_certificate, ["status", "creation"], as_dict=True + ) + if certificate.status == "Active": + if jingrow.utils.add_days(None, -1) > certificate.creation: + # This is an old (older than 1 day) certificate, we are renewing it. + # NGINX likely has a valid certificate, no need to reload. 
+ skip_reload = True + else: + skip_reload = False + self.create_agent_request(skip_reload=skip_reload) + elif certificate.status == "Failure": + self.status = "Broken" + self.save() + + def create_agent_request(self, skip_reload=False): + server = jingrow.db.get_value("Site", self.site, "server") + is_standalone = jingrow.db.get_value("Server", server, "is_standalone") + if is_standalone: + agent = Agent(server, server_type="Server") + else: + proxy_server = jingrow.db.get_value("Server", server, "proxy_server") + agent = Agent(proxy_server, server_type="Proxy Server") + agent.new_host(self, skip_reload=skip_reload) + + def create_remove_host_agent_request(self): + server = jingrow.db.get_value("Site", self.site, "server") + is_standalone = jingrow.db.get_value("Server", server, "is_standalone") + if is_standalone: + agent = Agent(server, server_type="Server") + else: + proxy_server = jingrow.db.get_value("Server", server, "proxy_server") + agent = Agent(proxy_server, server_type="Proxy Server") + agent.remove_host(self) + + def retry(self): + self.status = "Pending" + self.retry_count += 1 + self.save() + if self.tls_certificate: + certificate = jingrow.get_pg("TLS Certificate", self.tls_certificate) + certificate.obtain_certificate() + else: + self.create_tls_certificate() + + def on_trash(self): + if self.domain == jingrow.db.get_value("Site", self.site, "host_name"): + jingrow.throw(msg="Primary domain cannot be deleted", exc=jingrow.exceptions.LinkExistsError) + + self.disavow_agent_jobs() + if not self.default or self.redirect_to_primary: + self.create_remove_host_agent_request() + if self.status == "Active": + self.remove_domain_from_site_config() + + def after_delete(self): + self.delete_tls_certificate() + + def delete_tls_certificate(self): + jingrow.delete_pg("TLS Certificate", self.tls_certificate) + + def disavow_agent_jobs(self): + jobs = jingrow.get_all("Agent Job", filters={"host": self.name}) + for job in jobs: + jingrow.db.set_value("Agent Job", job.name, "host", None) + + def remove_domain_from_site_config(self): + site_pg = jingrow.get_pg("Site", self.site) + if site_pg.status == "Archived": + return + site_pg.remove_domain_from_config(self.domain) + + +def process_new_host_job_update(job): + domain_status = jingrow.get_value("Site Domain", job.host, "status") + + updated_status = { + "Pending": "Pending", + "Running": "In Progress", + "Success": "Active", + "Failure": "Broken", + "Delivery Failure": "Broken", + }[job.status] + + if updated_status != domain_status: + jingrow.db.set_value("Site Domain", job.host, "status", updated_status) + if updated_status == "Active": + jingrow.get_pg("Site", job.site).add_domain_to_config(job.host) + + +def process_add_domain_to_upstream_job_update(job): + request_data = json.loads(job.request_data) + domain = request_data.get("domain") + domain_status = jingrow.get_value("Site Domain", domain, "status") + + updated_status = { + "Pending": "Pending", + "Running": "In Progress", + "Success": "Active", + "Failure": "Broken", + "Delivery Failure": "Broken", + }[job.status] + + if updated_status != domain_status: + jingrow.db.set_value("Site Domain", domain, "status", updated_status) + + if job.status in ["Failure", "Delivery Failure"]: + jingrow.db.set_value( + "Product Trial Request", {"domain": request_data.get("domain")}, "status", "Error" + ) + + +def update_dns_type(): # noqa: C901 + domains = jingrow.get_all( + "Site Domain", + filters={"tls_certificate": ("is", "set")}, # Don't query wildcard subdomains + fields=["name", 
"domain", "dns_type", "site"], + ) + for domain in domains: + if has_job_timeout_exceeded(): + return + try: + response = check_dns(domain.site, domain.domain) + if response["matched"] and response["type"] != domain.dns_type: + jingrow.db.set_value( + "Site Domain", domain.name, "dns_type", response["type"], update_modified=False + ) + pretty_response = json.dumps(response, indent=4, default=str) + jingrow.db.set_value( + "Site Domain", domain.name, "dns_response", pretty_response, update_modified=False + ) + jingrow.db.commit() + except AAAARecordExists: + pass + except ConflictingCAARecord: + pass + except ConflictingDNSRecord: + pass + except MultipleARecords: + pass + except MultipleCNAMERecords: + pass + except rq.timeouts.JobTimeoutException: + return + except Exception: + jingrow.db.rollback() + log_error("DNS Check Failed", domain=domain) + + +get_permission_query_conditions = get_permission_query_conditions_for_pagetype("Site Domain") diff --git a/jcloud/jcloud/pagetype/site_domain/test_site_domain.py b/jcloud/jcloud/pagetype/site_domain/test_site_domain.py new file mode 100644 index 0000000..9199c3b --- /dev/null +++ b/jcloud/jcloud/pagetype/site_domain/test_site_domain.py @@ -0,0 +1,255 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# See license.txt + + +import unittest +from unittest.mock import Mock, call, patch + +import jingrow + +from jcloud.agent import Agent +from jcloud.jcloud.pagetype.agent_job.agent_job import AgentJob +from jcloud.jcloud.pagetype.site.site import site_cleanup_after_archive +from jcloud.jcloud.pagetype.site.test_site import create_test_site +from jcloud.jcloud.pagetype.site_domain.site_domain import SiteDomain +from jcloud.jcloud.pagetype.tls_certificate.tls_certificate import TLSCertificate + + +def create_test_site_domain( + site: str, domain: str, status: str = "Active" +) -> SiteDomain: + """Create test Site Domain pg.""" + with patch.object(TLSCertificate, "obtain_certificate"): + site_domain = jingrow.get_pg( + { + "pagetype": "Site Domain", + "site": site, + "domain": domain, + "status": status, + "retry_count": 1, + "dns_type": "A", + } + ).insert(ignore_if_duplicate=True) + site_domain.reload() + return site_domain + + +@patch.object(AgentJob, "after_insert", new=Mock()) +@patch("jcloud.jcloud.pagetype.site.site._change_dns_record", new=Mock()) +class TestSiteDomain(unittest.TestCase): + """Tests for Site Domain Document methods.""" + + def tearDown(self): + jingrow.db.rollback() + + def setUp(self): + self.site_subdomain = "testsubdomain" + + def test_set_host_name(self): + """Test set_host_name() method of Site pagetype sets host_name property.""" + site = create_test_site(self.site_subdomain) + domain_name = jingrow.mock("domain_name") + + site_domain = create_test_site_domain(site.name, domain_name) + site.set_host_name(site_domain.name) + self.assertEqual(site.host_name, domain_name) + + def test_only_active_site_domain_can_be_primary(self): + """Ensure only active site domains can be primary.""" + site = create_test_site(self.site_subdomain) + domain_name = jingrow.mock("domain_name") + + site_domain = create_test_site_domain(site.name, domain_name, "Pending") + self.assertRaises( + jingrow.exceptions.LinkValidationError, site.set_host_name, site_domain.name + ) + + def test_default_host_name_is_site_subdomain(self): + """Ensure subdomain+domain is default primary host_name.""" + site = create_test_site(self.site_subdomain) + self.assertEqual(site.host_name, site.name) + + def 
test_default_site_domain_cannot_be_deleted(self): + """Ensure default site domain for a site cannot be deleted.""" + site = create_test_site(self.site_subdomain) + default_domain = jingrow.get_pg( + {"pagetype": "Site Domain", "site": site.name, "name": site.name} + ) + site_domain2 = create_test_site_domain(site.name, "hellohello.com") + site.set_host_name(site_domain2.name) + self.assertRaises(Exception, site.remove_domain, default_domain.name) + + def test_only_site_domains_can_be_host_names(self): + """Ensure error is thrown if string other than site domain name is passed.""" + site = create_test_site(self.site_subdomain) + self.assertRaises( + jingrow.exceptions.LinkValidationError, + site.set_host_name, + "site-domain-name-that-doesnt-exist", + ) + + def test_site_domain_for_other_site_cant_be_primary(self): + """Ensure host_name cannot be set to site domain for another site.""" + site1 = create_test_site(self.site_subdomain) + site2 = create_test_site("testing-another") + site_domain = create_test_site_domain(site2.name, "hellohello.com") + self.assertRaises( + jingrow.exceptions.LinkValidationError, site1.set_host_name, site_domain.name + ) + + def test_set_host_name_removes_redirect_of_domain(self): + """Ensure set_host_name removes redirect of domain.""" + site = create_test_site(self.site_subdomain) + site_domain = create_test_site_domain(site.name, "hellohello.com") + site_domain.redirect_to_primary = True + site_domain.save() + site.set_host_name(site_domain.domain) + site_domain.reload() + self.assertFalse(site_domain.redirect_to_primary) + + def test_primary_domain_cannot_have_redirect_to_primary_checked(self): + """Ensure primary domain cannot have redirect_to_primary checked.""" + site = create_test_site(self.site_subdomain) + site_domain = create_test_site_domain(site.name, "hellohello.com") + site.set_host_name(site_domain.domain) + site_domain.reload() + site_domain.redirect_to_primary = True + self.assertRaises(jingrow.exceptions.ValidationError, site_domain.save) + + def test_all_redirects_updated_on_updating_host_name(self): + """ + Ensure all redirects are updated when host_name of site is updated. + + (At least agent method is called.) + """ + site = create_test_site(self.site_subdomain) + site_domain1 = create_test_site_domain(site.name, "sitedomain1.com") + site_domain2 = create_test_site_domain(site.name, "sitedomain2.com") + site_domain3 = create_test_site_domain(site.name, "sitedomain3.com") + + site_domain2.setup_redirect() + site_domain3.setup_redirect() + + with patch.object(Agent, "setup_redirects") as mock_set_redirects: + site.set_host_name(site_domain1.name) + + mock_set_redirects.assert_called() + + def test_setup_redirect_updates_redirect_in_agent(self): + """ + Ensure setting redirect_to_primary in pg updates agent. + + (At least agent method is called.) + """ + site = create_test_site(self.site_subdomain) + site_domain = create_test_site_domain(site.name, "hellohello.com") + + with patch.object(Agent, "setup_redirects") as mock_setup_redirects: + site_domain.setup_redirect() + mock_setup_redirects.assert_called_with(site.name, [site_domain.name], site.name) + + def test_remove_redirect_updates_redirect_in_agent(self): + """ + Ensure removing redirect_to_primary in pg updates agent. + + (At least agent method is called.) 
+ """ + site = create_test_site(self.site_subdomain) + site_domain = create_test_site_domain(site.name, "hellohello.com") + site_domain.setup_redirect() + + with patch.object(Agent, "remove_redirects") as mock_remove_redirects: + site_domain.remove_redirect() + mock_remove_redirects.assert_called_with(site.name, [site_domain.name]) + + def test_making_pg_with_redirect_to_primary_true_updates_agent(self): + """Ensure agent is updated when redirected site domain is created.""" + site = create_test_site(self.site_subdomain) + with patch.object(Agent, "setup_redirects") as mock_setup_redirects: + site_domain = jingrow.get_pg( + { + "pagetype": "Site Domain", + "site": site.name, + "domain": "hellohello.com", + "status": "Active", + "retry_count": 1, + "dns_type": "A", + "redirect_to_primary": True, + } + ).insert(ignore_if_duplicate=True) + mock_setup_redirects.assert_called_with(site.name, [site_domain.name], site.name) + + def test_site_archive_removes_all_site_domains(self): + """Ensure site archive removes all site domains.""" + site = create_test_site(self.site_subdomain) + site_domain = create_test_site_domain(site.name, "hellohello.com") + + site.archive() + with patch("jcloud.jcloud.pagetype.site.site.jingrow.delete_pg") as mock_jingrow_del: + site_cleanup_after_archive(site.name) + mock_jingrow_del.assert_has_calls( + [call("Site Domain", site.name), call("Site Domain", site_domain.name)], + any_order=True, + ) + + def test_tls_certificate_isnt_created_for_default_domain(self): + """Ensure TLS Certificate isn't created for default domain.""" + with patch.object(SiteDomain, "create_tls_certificate") as mock_create_tls: + create_test_site(self.site_subdomain) + mock_create_tls.assert_not_called() + + def test_remove_host_called_for_site_domains_on_trash(self): + """Ensure remove host agent job is created when site domain is deleted.""" + site = create_test_site(self.site_subdomain) + site_domain = create_test_site_domain(site.name, "hellohello.com") + site.add_domain_to_config(site_domain.name) + + with patch.object(SiteDomain, "create_remove_host_agent_request") as mock_remove_host: + site_domain.on_trash() + mock_remove_host.assert_called() + + def test_remove_host_called_for_default_domain_only_on_redirect(self): + """ + Ensure remove host agent job isn't always created for default domain. + + Default domain host should be removed only if redirect exists. 
+ """ + site = create_test_site(self.site_subdomain) + def_domain = jingrow.get_pg("Site Domain", site.name) + site_domain = create_test_site_domain(site.name, "hellohello.com") + site.set_host_name(site_domain.name) + + with patch.object(SiteDomain, "create_remove_host_agent_request") as mock_remove_host: + # fake archive + site.db_set("status", "Archived") + def_domain.on_trash() + mock_remove_host.assert_not_called() + + def_domain.setup_redirect() + with patch.object(SiteDomain, "create_remove_host_agent_request") as mock_remove_host: + def_domain.on_trash() + mock_remove_host.assert_called() + + def test_domains_other_than_default_get_sent_for_rename(self): + """Ensure site domains are sent for rename.""" + site = create_test_site(self.site_subdomain) + site_domain1 = create_test_site_domain(site.name, "sitedomain1.com") + site_domain2 = create_test_site_domain(site.name, "sitedomain2.com") + new_name = "new-name.fc.dev" + with patch.object(Agent, "rename_upstream_site") as mock_rename_upstream_site: + site.rename(new_name) + args, kwargs = mock_rename_upstream_site.call_args + from collections import Counter + + self.assertEqual(Counter(args[-1]), Counter([site_domain1.name, site_domain2.name])) + + def test_primary_domain_cannot_be_deleted(self): + site = create_test_site("old-name") + site_domain = create_test_site_domain(site.name, "sitedomain1.com") + site.add_domain_to_config(site_domain.name) + + site.set_host_name(site_domain.name) + + self.assertRaises(jingrow.exceptions.LinkExistsError, site_domain.delete) + self.assertTrue(jingrow.db.exists("Site Domain", {"name": site_domain.name})) diff --git a/jcloud/jcloud/pagetype/site_group_deploy/__init__.py b/jcloud/jcloud/pagetype/site_group_deploy/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/site_group_deploy/site_group_deploy.js b/jcloud/jcloud/pagetype/site_group_deploy/site_group_deploy.js new file mode 100644 index 0000000..bda260b --- /dev/null +++ b/jcloud/jcloud/pagetype/site_group_deploy/site_group_deploy.js @@ -0,0 +1,8 @@ +// Copyright (c) 2024, JINGROW +// For license information, please see license.txt + +// jingrow.ui.form.on("Site Group Deploy", { +// refresh(frm) { + +// }, +// }); diff --git a/jcloud/jcloud/pagetype/site_group_deploy/site_group_deploy.json b/jcloud/jcloud/pagetype/site_group_deploy/site_group_deploy.json new file mode 100644 index 0000000..4330cac --- /dev/null +++ b/jcloud/jcloud/pagetype/site_group_deploy/site_group_deploy.json @@ -0,0 +1,158 @@ +{ + "actions": [], + "allow_rename": 1, + "autoname": "hash", + "creation": "2024-08-28 14:35:33.621134", + "pagetype": "PageType", + "engine": "InnoDB", + "field_order": [ + "status", + "team", + "site_section", + "site", + "subdomain", + "release_group_section", + "release_group", + "version", + "column_break_smfr", + "cluster", + "bench", + "apps_section", + "apps_column", + "apps" + ], + "fields": [ + { + "fieldname": "site_section", + "fieldtype": "Section Break", + "label": "Site" + }, + { + "fieldname": "site", + "fieldtype": "Link", + "label": "Site", + "options": "Site" + }, + { + "fieldname": "release_group_section", + "fieldtype": "Section Break", + "label": "Release Group" + }, + { + "fieldname": "subdomain", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Subdomain", + "reqd": 1 + }, + { + "fieldname": "release_group", + "fieldtype": "Link", + "label": "Release Group", + "options": "Release Group" + }, + { + "fieldname": "apps_section", + "fieldtype": "Section Break", + "label": "Apps" + 
}, + { + "fieldname": "version", + "fieldtype": "Link", + "label": "Version", + "options": "Jingrow Version" + }, + { + "fieldname": "apps_column", + "fieldtype": "Column Break" + }, + { + "fieldname": "apps", + "fieldtype": "Table", + "label": "Apps", + "options": "Site Group Deploy App", + "reqd": 1 + }, + { + "fieldname": "cluster", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Cluster", + "options": "Cluster", + "reqd": 1 + }, + { + "default": "Pending", + "fieldname": "status", + "fieldtype": "Select", + "label": "Status", + "options": "Pending\nDeploying Bench\nBench Deployed\nBench Deploy Failed\nCreating Site\nSite Created\nSite Creation Failed" + }, + { + "fieldname": "column_break_smfr", + "fieldtype": "Column Break" + }, + { + "fieldname": "bench", + "fieldtype": "Link", + "label": "Bench", + "options": "Bench" + }, + { + "fieldname": "team", + "fieldtype": "Link", + "label": "Team", + "options": "Team", + "reqd": 1 + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2024-09-02 10:30:41.897673", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Site Group Deploy", + "naming_rule": "Random", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + }, + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "Jcloud Admin", + "share": 1, + "write": 1 + }, + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "Jcloud Member", + "share": 1, + "write": 1 + } + ], + "sort_field": "creation", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/site_group_deploy/site_group_deploy.py b/jcloud/jcloud/pagetype/site_group_deploy/site_group_deploy.py new file mode 100644 index 0000000..6b98f95 --- /dev/null +++ b/jcloud/jcloud/pagetype/site_group_deploy/site_group_deploy.py @@ -0,0 +1,154 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt +from __future__ import annotations + +import jingrow +from jingrow.model.document import Document + + +class SiteGroupDeploy(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + from jcloud.jcloud.pagetype.site_group_deploy_app.site_group_deploy_app import ( + SiteGroupDeployApp, + ) + + apps: DF.Table[SiteGroupDeployApp] + bench: DF.Link | None + cluster: DF.Link + release_group: DF.Link | None + site: DF.Link | None + status: DF.Literal[ + "Pending", + "Deploying Bench", + "Bench Deployed", + "Bench Deploy Failed", + "Creating Site", + "Site Created", + "Site Creation Failed", + ] + subdomain: DF.Data + team: DF.Link + version: DF.Link | None + # end: auto-generated types + + dashboard_fields = ("status", "site", "release_group") + + def before_insert(self): + self.set_latest_version() + self.check_if_rg_or_site_exists() + + def after_insert(self): + if self.release_group: + return + + group = self.create_release_group() + + self.status = "Deploying Bench" + self.save() + + group.initial_deploy() + + def set_latest_version(self): + if self.version: + return + + self.version = jingrow.db.get_value("Jingrow Version", {"status": "stable"}, order_by="number desc") + + def check_if_rg_or_site_exists(self): + from jcloud.jcloud.pagetype.site.site import Site + + if jingrow.db.exists("Release Group", {"title": self.subdomain}): + jingrow.throw(f"Release Group with title {self.subdomain} already exists") + + domain = jingrow.db.get_single_value("Jcloud Settings", "domain") + if Site.exists(self.subdomain, domain): + jingrow.throw(f"Site with subdomain {self.subdomain} already exists") + + def create_release_group(self): + from jcloud.jcloud.pagetype.release_group.release_group import new_release_group + + apps = [{"app": app.app, "source": app.source} for app in self.apps] + + group = new_release_group( + title=self.subdomain, + version=self.version, + apps=apps, + team=self.team, + cluster=self.cluster, + ) + + self.release_group = group.name + self.save() + + return group + + def create_site(self): + cheapest_private_bench_plan = jingrow.db.get_value( + "Site Plan", + { + "private_benches": 1, + "document_type": "Site", + "price_cny": ["!=", 0], + "price_usd": ["!=", 0], + }, + order_by="price_cny asc", + ) + + apps = [{"app": app.app} for app in self.apps] + app_plan_map = {app.app: {"name": app.plan} for app in self.apps if app.plan} + + try: + site = jingrow.get_pg( + { + "pagetype": "Site", + "team": self.team, + "subdomain": self.subdomain, + "apps": apps, + "cluster": self.cluster, + "release_group": self.release_group, + "bench": self.bench, + "domain": jingrow.db.get_single_value("Jcloud Settings", "domain"), + "subscription_plan": cheapest_private_bench_plan, + "app_plans": app_plan_map, + } + ).insert() + + self.site = site.name + self.status = "Creating Site" + + except jingrow.exceptions.ValidationError: + self.status = "Site Creation Failed" + + self.save() + + def update_site_group_deploy_on_deploy_failure(self, deploy): + if deploy and deploy.status == "Failure": + self.status = "Bench Deploy Failed" + self.save() + + def update_site_group_deploy_on_process_job(self, job): + if job.job_type == "New Bench": + if job.status == "Success": + self.bench = job.bench + self.status = "Bench Deployed" + self.save() + self.create_site() + + elif job.status == "Failure": + self.status = "Bench Deploy Failed" + self.save() + + elif job.job_type == "New Site": + if job.status == "Success": + self.status = "Site Created" + self.save() + elif job.status == "Failure": + self.status = "Site Creation Failed" + self.save() diff --git 
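Taken together, SiteGroupDeploy behaves as a small state machine: before_insert fills in the latest stable Jingrow Version and guards against an existing Release Group or Site with the same subdomain, after_insert creates the Release Group and triggers the first bench deploy, and update_site_group_deploy_on_process_job advances the status as the "New Bench" and "New Site" agent jobs report back. A minimal sketch of driving it from a script, with hypothetical team, cluster and app values:

import jingrow

# All values below are placeholders; any existing Team, Cluster and App records would do.
deploy = jingrow.get_pg(
    {
        "pagetype": "Site Group Deploy",
        "team": "team@example.com",
        "subdomain": "acme",
        "cluster": "Default",
        "apps": [{"app": "jingrow"}],
    }
).insert()  # after_insert creates the Release Group and moves status to "Deploying Bench"

# Agent job callbacks then move the status along:
# Pending -> Deploying Bench -> Bench Deployed -> Creating Site -> Site Created
print(deploy.status)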
a/jcloud/jcloud/pagetype/site_group_deploy/test_site_group_deploy.py b/jcloud/jcloud/pagetype/site_group_deploy/test_site_group_deploy.py new file mode 100644 index 0000000..10e223b --- /dev/null +++ b/jcloud/jcloud/pagetype/site_group_deploy/test_site_group_deploy.py @@ -0,0 +1,9 @@ +# Copyright (c) 2024, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests.utils import JingrowTestCase + + +class TestSiteGroupDeploy(JingrowTestCase): + pass diff --git a/jcloud/jcloud/pagetype/site_group_deploy_app/__init__.py b/jcloud/jcloud/pagetype/site_group_deploy_app/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/site_group_deploy_app/site_group_deploy_app.json b/jcloud/jcloud/pagetype/site_group_deploy_app/site_group_deploy_app.json new file mode 100644 index 0000000..5478fb7 --- /dev/null +++ b/jcloud/jcloud/pagetype/site_group_deploy_app/site_group_deploy_app.json @@ -0,0 +1,52 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2024-08-28 16:22:16.613931", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "app", + "source", + "title", + "plan" + ], + "fields": [ + { + "fieldname": "app", + "fieldtype": "Link", + "label": "App", + "options": "App" + }, + { + "fieldname": "source", + "fieldtype": "Link", + "label": "Source", + "options": "App Source" + }, + { + "fetch_from": "app.title", + "fieldname": "title", + "fieldtype": "Data", + "label": "Title" + }, + { + "fieldname": "plan", + "fieldtype": "Link", + "label": "Plan", + "options": "Marketplace App Plan" + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2024-08-28 20:11:22.334225", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Site Group Deploy App", + "owner": "Administrator", + "permissions": [], + "sort_field": "creation", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/site_group_deploy_app/site_group_deploy_app.py b/jcloud/jcloud/pagetype/site_group_deploy_app/site_group_deploy_app.py new file mode 100644 index 0000000..fbe0999 --- /dev/null +++ b/jcloud/jcloud/pagetype/site_group_deploy_app/site_group_deploy_app.py @@ -0,0 +1,26 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class SiteGroupDeployApp(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + app: DF.Link | None + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + plan: DF.Link | None + source: DF.Link | None + title: DF.Data | None + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/site_migration/__init__.py b/jcloud/jcloud/pagetype/site_migration/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/site_migration/site_migration.js b/jcloud/jcloud/pagetype/site_migration/site_migration.js new file mode 100644 index 0000000..a60f4e7 --- /dev/null +++ b/jcloud/jcloud/pagetype/site_migration/site_migration.js @@ -0,0 +1,58 @@ +// Copyright (c) 2021, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Site Migration', { + refresh: function (frm) { + frm.set_query('site', () => { + return { + filters: { + status: 'Active', + }, + }; + }); + frm.set_query('source_bench', () => { + return { + filters: { + status: 'Active', + }, + }; + }); + frm.set_query('destination_bench', () => { + return { + filters: { + status: 'Active', + }, + }; + }); + if (frm.pg.status === 'Failure') { + frm.add_custom_button(__('Continue'), () => { + jingrow.confirm( + `Are you sure you want to continue from next Pending step?
+ + Note: This could cause data loss if you don't know what you're doing`, + () => frm.call('continue_from_next_pending'), + ); + }); + } else if (frm.pg.status === 'Scheduled') { + frm.add_custom_button(__('Start'), () => { + jingrow.confirm( + `Are you sure you want to start the migration?
+ + Note: This will cause downtime`, () => frm.call('start'), ); }); } else if (frm.pg.status === 'Running') { frm.add_custom_button(__('Cleanup and fail'), () => { jingrow.confirm( `Are you sure you want to skip pending steps and fail the migration?
+ + This will attempt to stop the migration and restore everything to its original state.
+ + Note: This could cause data loss if you don't know what you're doing`, + () => frm.call('cleanup_and_fail'), + ); + }); + } + }, +}); diff --git a/jcloud/jcloud/pagetype/site_migration/site_migration.json b/jcloud/jcloud/pagetype/site_migration/site_migration.json new file mode 100644 index 0000000..482982a --- /dev/null +++ b/jcloud/jcloud/pagetype/site_migration/site_migration.json @@ -0,0 +1,214 @@ +{ + "actions": [], + "creation": "2021-01-19 15:10:34.077966", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "site", + "source_bench", + "scheduled_time", + "migration_type", + "column_break_3", + "status", + "destination_bench", + "backup", + "skip_failing_patches", + "section_break_5", + "source_server", + "source_cluster", + "column_break_5", + "destination_server", + "destination_cluster", + "section_break_13", + "steps" + ], + "fields": [ + { + "fieldname": "site", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Site", + "options": "Site", + "reqd": 1, + "search_index": 1 + }, + { + "fetch_from": "site.bench", + "fetch_if_empty": 1, + "fieldname": "source_bench", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Source Bench", + "options": "Bench", + "read_only": 1, + "reqd": 1, + "set_only_once": 1 + }, + { + "fetch_from": "site.server", + "fetch_if_empty": 1, + "fieldname": "source_server", + "fieldtype": "Link", + "label": "Source Server", + "options": "Server", + "read_only": 1, + "reqd": 1 + }, + { + "fetch_from": "site.cluster", + "fetch_if_empty": 1, + "fieldname": "source_cluster", + "fieldtype": "Link", + "label": "Source Cluster", + "options": "Cluster", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "column_break_5", + "fieldtype": "Column Break" + }, + { + "fieldname": "destination_bench", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Destination Bench", + "options": "Bench", + "reqd": 1 + }, + { + "fetch_from": "destination_bench.server", + "fieldname": "destination_server", + "fieldtype": "Link", + "label": "Destination Server", + "options": "Server", + "read_only": 1, + "reqd": 1 + }, + { + "fetch_from": "destination_bench.cluster", + "fieldname": "destination_cluster", + "fieldtype": "Link", + "label": "Destination Cluster", + "options": "Cluster", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "migration_type", + "fieldtype": "Select", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Migration Type", + "options": "\nBench\nServer\nCluster", + "read_only": 1 + }, + { + "fieldname": "status", + "fieldtype": "Select", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Status", + "options": "Scheduled\nPending\nRunning\nSuccess\nFailure", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "column_break_3", + "fieldtype": "Column Break" + }, + { + "fieldname": "section_break_5", + "fieldtype": "Section Break" + }, + { + "fieldname": "section_break_13", + "fieldtype": "Section Break" + }, + { + "fieldname": "steps", + "fieldtype": "Table", + "label": "Steps", + "options": "Site Migration Step" + }, + { + "fieldname": "backup", + "fieldtype": "Link", + "label": "Backup", + "options": "Site Backup", + "read_only": 1 + }, + { + "fieldname": "scheduled_time", + "fieldtype": "Datetime", + "label": "Scheduled Time" + }, + { + "default": "0", + "fieldname": "skip_failing_patches", + "fieldtype": "Check", + "label": "Skip Failing Patches" + } + ], + "index_web_pages_for_search": 1, + "links": [], 
+ "modified": "2024-11-15 16:53:44.667863", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Site Migration", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + }, + { + "create": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "Site Manager", + "share": 1, + "write": 1 + }, + { + "create": 1, + "email": 1, + "export": 1, + "print": 1, + "report": 1, + "role": "Jcloud Admin", + "share": 1, + "write": 1 + }, + { + "create": 1, + "email": 1, + "export": 1, + "print": 1, + "report": 1, + "role": "Jcloud Member", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "title_field": "site", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/site_migration/site_migration.py b/jcloud/jcloud/pagetype/site_migration/site_migration.py new file mode 100644 index 0000000..b1768f1 --- /dev/null +++ b/jcloud/jcloud/pagetype/site_migration/site_migration.py @@ -0,0 +1,719 @@ +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + +from __future__ import annotations + +from typing import TYPE_CHECKING + +import jingrow +from jingrow.core.utils import find +from jingrow.model.document import Document + +from jcloud.agent import Agent +from jcloud.exceptions import ( + CannotChangePlan, + InactiveDomains, + InsufficientSpaceOnServer, + MissingAppsInBench, + OngoingAgentJob, + SiteAlreadyArchived, +) +from jcloud.jcloud.pagetype.jcloud_notification.jcloud_notification import ( + create_new_notification, +) +from jcloud.jcloud.pagetype.site_backup.site_backup import ( + SiteBackup, + process_backup_site_job_update, +) +from jcloud.utils import log_error +from jcloud.utils.dns import create_dns_record + +if TYPE_CHECKING: + from jingrow.types.DF import Link + + from jcloud.jcloud.pagetype.agent_job.agent_job import AgentJob + from jcloud.jcloud.pagetype.server.server import Server + from jcloud.jcloud.pagetype.site.site import Site + + +def get_ongoing_migration(site: Link, scheduled=False): + """ + Return ongoing Site Migration for site. + + Used to redirect agent job callbacks + """ + ongoing_statuses = ["Pending", "Running"] + if scheduled: + ongoing_statuses.append("Scheduled") + return jingrow.db.exists("Site Migration", {"site": site, "status": ("in", ongoing_statuses)}) + + +class SiteMigration(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + from jcloud.jcloud.pagetype.site_migration_step.site_migration_step import SiteMigrationStep + + backup: DF.Link | None + destination_bench: DF.Link + destination_cluster: DF.Link + destination_server: DF.Link + migration_type: DF.Literal["", "Bench", "Server", "Cluster"] + scheduled_time: DF.Datetime | None + site: DF.Link + skip_failing_patches: DF.Check + source_bench: DF.Link + source_cluster: DF.Link + source_server: DF.Link + status: DF.Literal["Scheduled", "Pending", "Running", "Success", "Failure"] + steps: DF.Table[SiteMigrationStep] + # end: auto-generated types + + def before_insert(self): + self.validate_apps() + self.validate_bench() + self.check_for_inactive_domains() + self.check_enough_space_on_destination_server() + if get_ongoing_migration(self.site, scheduled=True): + jingrow.throw(f"Ongoing/Scheduled Site Migration for the site {jingrow.bold(self.site)} exists.") + site: Site = jingrow.get_pg("Site", self.site) + site.check_move_scheduled() + + def validate_bench(self): + if jingrow.db.get_value("Bench", self.destination_bench, "status", for_update=True) != "Active": + jingrow.throw("Destination bench does not exist") + + def check_enough_space_on_destination_server(self): + try: + backup: SiteBackup = jingrow.get_last_pg( # approximation with last backup + "Site Backup", + { + "site": self.site, + "with_files": True, + "offsite": True, + "status": "Success", + "files_availability": "Available", + }, + ) + except jingrow.DoesNotExistError: + pass + else: + site: "Site" = jingrow.get_pg("Site", self.site) + site.server = self.destination_server + site.remote_database_file = backup.remote_database_file + site.remote_public_file = backup.remote_public_file + site.remote_private_file = backup.remote_private_file + site.check_enough_space_on_server() + + def after_insert(self): + self.set_migration_type() + self.add_steps() + self.save() + + def validate_apps(self): + site_apps = [app.app for app in jingrow.get_pg("Site", self.site).apps] + bench_apps = [app.app for app in jingrow.get_pg("Bench", self.destination_bench).apps] + + if diff := set(site_apps) - set(bench_apps): + jingrow.throw( + f"Bench {self.destination_bench} doesn't have some of the apps installed on {self.site}: {', '.join(diff)}", + MissingAppsInBench, + ) + + def check_for_inactive_domains(self): + if domains := jingrow.db.get_all( + "Site Domain", {"site": self.site, "status": ("!=", "Active")}, pluck="name" + ): + jingrow.throw( + f"Inactive custom domains exist: {','.join(domains)}. 
Please remove or fix the same.", + InactiveDomains, + ) + + @jingrow.whitelist() + def start(self): + self.status = "Pending" + self.save() + self.check_for_ongoing_agent_jobs() + self.check_for_inactive_domains() + self.validate_apps() + self.check_enough_space_on_destination_server() + site: Site = jingrow.get_pg("Site", self.site) + try: + site.ready_for_move() + except SiteAlreadyArchived: + self.status = "Failure" + self.save() + return + self.run_next_step() + + @jingrow.whitelist() + def continue_from_next_pending(self): + self.remove_archive_on_destination_step_if_exists() + self.run_next_step() + + def remove_archive_on_destination_step_if_exists(self): + """Remove Archive on Destination step if exists""" + archive_on_destination_step = find( + self.steps, + lambda x: x.method_name == self.archive_site_on_destination_server.__name__, + ) + if archive_on_destination_step: + self.steps.remove(archive_on_destination_step) + + def check_for_ongoing_agent_jobs(self): + if jingrow.db.exists( + "Agent Job", + { + "status": ("in", ["Pending", "Running"]), + "site": self.site, + "creation": (">", jingrow.utils.add_to_date(None, hours=-24)), + }, + ): + jingrow.throw("Ongoing Agent Job for site exists", OngoingAgentJob) + + def set_migration_type(self): + if self.source_cluster != self.destination_cluster: + migration_type = "Cluster" + elif self.source_server != self.destination_server: + migration_type = "Server" + else: + migration_type = "Bench" + self.migration_type = migration_type + + def add_steps(self): + """Populate steps child table with steps for migration.""" + if self.migration_type == "Cluster": + self.add_steps_for_cluster_migration() + self.add_steps_for_domains() + elif self.migration_type == "Server": + source_db = jingrow.db.get_value("Server", self.source_server, "database_server") + destination_db = jingrow.db.get_value("Server", self.destination_server, "database_server") + if source_db == destination_db: + raise NotImplementedError + # TODO: switch order of steps here (archive before restore) + self.add_steps_for_server_migration() + else: + # TODO: Call site update for bench only migration with popup with link to site update job + raise NotImplementedError + + def remove_domain_hosts_from_source(self): + """Remove domain hosts from source""" + domains = jingrow.get_all("Site Domain", {"site": self.site}, pluck="name") + + proxy_server = jingrow.db.get_value("Server", self.source_server, "proxy_server") + agent = Agent(proxy_server, server_type="Proxy Server") + + for domain in domains: + site_domain = jingrow.get_pg("Site Domain", domain) + agent.remove_host(site_domain) + + def _add_remove_host_from_source_proxy_step(self, domain: str): + step = { + "step_title": f"Remove host {domain} from source proxy", + "status": "Pending", + "method_name": self.remove_host_from_source_proxy.__name__, + "method_arg": domain, + } + self.append("steps", step) + + def _add_add_host_to_destination_proxy_step(self, domain: str): + step = { + "step_title": f"Add host {domain} to destination proxy", + "status": "Pending", + "method_name": self.add_host_to_destination_proxy.__name__, + "method_arg": domain, + } + self.append("steps", step) + + def add_host_to_destination_proxy(self, domain): + site_domain = jingrow.get_pg("Site Domain", domain) + proxy_server = jingrow.db.get_value("Server", self.destination_server, "proxy_server") + agent = Agent(proxy_server, server_type="Proxy Server") + + if site_domain.has_root_tls_certificate: + return agent.add_domain_to_upstream( + 
server=self.destination_server, site=site_domain.site, domain=site_domain.domain + ) + + return agent.new_host(site_domain) + + def remove_host_from_source_proxy(self, domain): + site_domain = jingrow.get_pg("Site Domain", domain) + proxy_server = jingrow.db.get_value("Server", self.source_server, "proxy_server") + agent = Agent(proxy_server, server_type="Proxy Server") + return agent.remove_host(site_domain) + + def _add_setup_redirects_step(self): + step = { + "step_title": self.setup_redirects.__pg__, + "status": "Pending", + "method_name": self.setup_redirects.__name__, + } + self.append("steps", step) + + def setup_redirects(self): + """Setup redirects of site in proxy""" + site: "Site" = jingrow.get_pg("Site", self.site) + ret = site._update_redirects_for_all_site_domains() + if ret: + # could be no jobs + return ret + self.update_next_step_status("Skipped") + return self.run_next_step() + + def add_steps_for_domains(self): + domains = jingrow.get_all("Site Domain", {"site": self.site}, pluck="name") + for domain in domains: + site_domain = jingrow.get_pg("Site Domain", domain) + if site_domain.default: + continue + self._add_remove_host_from_source_proxy_step(domain) + self._add_add_host_to_destination_proxy_step(domain) + if len(domains) > 1: + self._add_setup_redirects_step() + + @property + def next_step(self) -> SiteMigrationStep | None: + """Get next step to execute or update.""" + return find(self.steps, lambda step: step.status in ["Pending", "Running"]) + + @jingrow.whitelist() + def run_next_step(self): + self.status = "Running" + + next_step = self.next_step + if not next_step: + self.succeed() + return + next_method: str = next_step.method_name + # right now only single argument possible + method_arg: str = next_step.method_arg + method = getattr(self, next_method) + + if method_arg: + next_step.step_job = getattr(method(method_arg), "name", None) + else: + next_step.step_job = getattr(method(), "name", None) + self.save() + + def update_next_step_status(self, status: str): + self.next_step.status = status + self.save() + + @property + def possibly_archived_site_on_source(self) -> bool: + return find(self.steps, lambda x: x.method_name == self.archive_site_on_source.__name__).status in [ + "Success", + "Failure", + ] + + def set_pending_steps_to_skipped(self): + for step in self.steps: + if step.status == "Pending": + step.status = "Skipped" + self.save() + + @property + def restore_on_destination_happened(self) -> bool: + return find( + self.steps, + lambda x: x.method_name == self.restore_site_on_destination_server.__name__, + ).status in ["Success", "Failure"] + + def cleanup_if_appropriate(self): + self.set_pending_steps_to_skipped() + if self.possibly_archived_site_on_source or not self.restore_on_destination_happened: + return False + self.append( + "steps", + { + "step_title": self.archive_site_on_destination_server.__pg__, + "method_name": self.archive_site_on_destination_server.__name__, + "status": "Pending", + }, + ) + self.run_next_step() + return True + + @jingrow.whitelist() + def cleanup_and_fail(self, *args, **kwargs): + if self.cleanup_if_appropriate(): + return # callback will trigger fail + self.fail(*args, **kwargs) + + def fail(self, reason: str | None = None, force_activate: bool = False): + self.status = "Failure" + self.save() + self.send_fail_notification(reason) + self.activate_site_if_appropriate(force=force_activate) + + @property + def failed_step(self): + return find(self.steps, lambda x: x.status == "Failure") + + def 
activate_site_if_appropriate(self, force=False): + site: "Site" = jingrow.get_pg("Site", self.site) + failed_step_method_name = (self.failed_step or {}).get("method_name", "__NOT_SET__") + if force or ( + failed_step_method_name + in [ + self.backup_source_site.__name__, + self.restore_site_on_destination_server.__name__, + self.restore_site_on_destination_proxy.__name__, + ] + and site.status_before_update != "Inactive" + ): + site.activate() + if self.migration_type == "Cluster": + site.create_dns_record() + + def send_fail_notification(self, reason: str | None = None): + from jcloud.jcloud.pagetype.agent_job.agent_job_notifications import create_job_failed_notification + + site = jingrow.get_pg("Site", self.site) + message = f"Site Migration ({self.migration_type}) for site {site.host_name} failed" + if reason: + message += f" due to {reason}" + agent_job_id = None + + create_new_notification( + site.team, + "Site Migrate", + "Agent Job", + agent_job_id, + message, + ) + else: + agent_job_id = find(self.steps, lambda x: x.status == "Failure").get("step_job") + + job = jingrow.get_pg("Agent Job", agent_job_id) + create_job_failed_notification(job, site.team, "Site Migrate", "Site Migrate", message) + + def succeed(self): + self.status = "Success" + self.save() + self.send_success_notification() + + def send_success_notification(self): + site = jingrow.get_pg("Site", self.site) + + message = ( + f"Site Migration ({self.migration_type}) for site {site.host_name} completed successfully" + ) + agent_job_id = find(self.steps, lambda x: x.step_title == "Restore site on destination").step_job + + create_new_notification( + site.team, + "Site Migrate", + "Agent Job", + agent_job_id, + message, + ) + + def add_steps_for_cluster_migration(self): + steps = [ + { + "step_title": self.deactivate_site_on_source_server.__pg__, + "method_name": self.deactivate_site_on_source_server.__name__, + "status": "Pending", + }, + { + "step_title": self.backup_source_site.__pg__, + "method_name": self.backup_source_site.__name__, + "status": "Pending", + }, + { + "step_title": self.restore_site_on_destination_server.__pg__, + "method_name": self.restore_site_on_destination_server.__name__, + "status": "Pending", + }, + { + "step_title": self.restore_site_on_destination_proxy.__pg__, + "method_name": self.restore_site_on_destination_proxy.__name__, + "status": "Pending", + }, + { + "step_title": self.remove_site_from_source_proxy.__pg__, + "method_name": self.remove_site_from_source_proxy.__name__, + "status": "Pending", + }, + { + "step_title": self.archive_site_on_source.__pg__, + "method_name": self.archive_site_on_source.__name__, + "status": "Pending", + }, + { + "step_title": self.update_site_record_fields.__pg__, + "method_name": self.update_site_record_fields.__name__, + "status": "Pending", + }, + { + "step_title": self.reset_site_status_on_destination.__pg__, + "method_name": self.reset_site_status_on_destination.__name__, + "status": "Pending", + }, + { + "step_title": self.adjust_plan_if_required.__pg__, + "method_name": self.adjust_plan_if_required.__name__, + "status": "Pending", + }, + ] + for step in steps: + self.append("steps", step) + + def add_steps_for_server_migration(self): + steps = [ + { + "step_title": self.deactivate_site_on_source_server.__pg__, + "method_name": self.deactivate_site_on_source_server.__name__, + "status": "Pending", + }, + { + "step_title": self.backup_source_site.__pg__, + "method_name": self.backup_source_site.__name__, + "status": "Pending", + }, + { + "step_title": 
self.restore_site_on_destination_server.__pg__, + "method_name": self.restore_site_on_destination_server.__name__, + "status": "Pending", + }, + { + "step_title": self.archive_site_on_source.__pg__, + "method_name": self.archive_site_on_source.__name__, + "status": "Pending", + }, + { + "step_title": self.remove_site_from_source_proxy.__pg__, + "method_name": self.remove_site_from_source_proxy.__name__, + "status": "Pending", + }, + { + "step_title": self.restore_site_on_destination_proxy.__pg__, + "method_name": self.restore_site_on_destination_proxy.__name__, + "status": "Pending", + }, + { + "step_title": self.update_site_record_fields.__pg__, + "method_name": self.update_site_record_fields.__name__, + "status": "Pending", + }, + { + "step_title": self.reset_site_status_on_destination.__pg__, + "method_name": self.reset_site_status_on_destination.__name__, + "status": "Pending", + }, + { + "step_title": self.adjust_plan_if_required.__pg__, + "method_name": self.adjust_plan_if_required.__name__, + "status": "Pending", + }, + ] + for step in steps: + self.append("steps", step) + + def deactivate_site_on_source_server(self): + """Deactivate site on source""" + site: Site = jingrow.get_pg("Site", self.site) + site.status = "Pending" + return site.update_site_config({"maintenance_mode": 1}) # saves pg + + def deactivate_site_on_source_proxy(self): + """Deactivate site on source proxy""" + site = jingrow.get_pg("Site", self.site) + return site.update_site_status_on_proxy("deactivated") + + def backup_source_site(self): + """Backup site on source""" + site = jingrow.get_pg("Site", self.site) + + backup = site.backup(with_files=True, offsite=True, force=True) + backup.reload() + self.backup = backup.name + self.save() + + return jingrow.get_pg("Agent Job", backup.job) + + def archive_site_on_destination_server(self): + """Archive site on destination (case of failure)""" + agent = Agent(self.destination_server) + site = jingrow.get_pg("Site", self.site) + site.bench = self.destination_bench + return agent.archive_site(site, force=True) + + def restore_site_on_destination_server(self): + """Restore site on destination""" + agent = Agent(self.destination_server) + site = jingrow.get_pg("Site", self.site) + backup = jingrow.get_pg("Site Backup", self.backup) + site.remote_database_file = backup.remote_database_file + site.remote_public_file = backup.remote_public_file + site.remote_private_file = backup.remote_private_file + site.remote_config_file = "" # Use site config from jcloud only + site.bench = self.destination_bench + site.cluster = self.destination_cluster + site.server = self.destination_server + if self.migration_type == "Cluster": + create_dns_record(site, record_name=site._get_site_name(site.subdomain)) + domain = jingrow.get_pg("Root Domain", site.domain) + if self.destination_cluster == domain.default_cluster: + source_proxy = jingrow.db.get_value("Server", self.source_server, "proxy_server") + site.remove_dns_record(domain, source_proxy, site.name) + return agent.new_site_from_backup(site, skip_failing_patches=self.skip_failing_patches) + + def restore_site_on_destination_proxy(self): + """Restore site on destination proxy""" + proxy_server = jingrow.db.get_value("Server", self.destination_server, "proxy_server") + agent = Agent(proxy_server, server_type="Proxy Server") + return agent.new_upstream_file(server=self.destination_server, site=self.site) + + def remove_site_from_source_proxy(self): + """Remove site from source proxy""" + proxy_server = jingrow.db.get_value("Server", 
self.source_server, "proxy_server") + agent = Agent(proxy_server, server_type="Proxy Server") + return agent.remove_upstream_file(server=self.source_server, site=self.site) + + def archive_site_on_source(self): + """Archive site on source""" + agent = Agent(self.source_server) + site = jingrow.get_pg("Site", self.site) + site.bench = self.source_bench # for sanity + return agent.archive_site(site) + + def update_site_record_fields(self): + """Update fields of original site record""" + site = jingrow.get_pg("Site", self.site) + site.db_set("bench", self.destination_bench) + site.db_set("server", self.destination_server) + site.db_set("cluster", self.destination_cluster) + self.update_next_step_status("Success") + self.run_next_step() + + def reset_site_status_on_destination(self): + """Reset site status on destination""" + site = jingrow.get_pg("Site", self.site) + if site.status_before_update in ["Inactive", "Suspended"]: + self.update_next_step_status("Skipped") + job = None + else: + job = site.update_site_config({"maintenance_mode": 0}) # will do run_next_step in callback + site.reload() + site.status = site.status_before_update or "Active" + site.status_before_update = None + site.save() + if job: + return job + return self.run_next_step() + + def activate_site_on_destination_proxy(self): + """Activate site on destination proxy""" + site = jingrow.get_pg("Site", self.site) + return site.update_site_status_on_proxy("activated") + + @property + def scheduled_by_consultant(self): + return self.owner.endswith("@jingrow.com") or self.owner.endswith("@jingrow.com") + + def upgrade_plan(self, site: "Site", dest_server: Server): + if not dest_server.public and site.team == dest_server.team and not site.is_on_dedicated_plan: + return site.change_plan( + "Unlimited", + ignore_card_setup=self.scheduled_by_consultant, + ) + return None + + def downgrade_plan(self, site: "Site", dest_server: Server): + if dest_server.public and site.team != dest_server.team and site.is_on_dedicated_plan: + return site.change_plan( + "USD 100", + ignore_card_setup=self.scheduled_by_consultant, + ) + return None + + def adjust_plan_if_required(self): + """Update site plan from/to Unlimited""" + site: "Site" = jingrow.get_pg("Site", self.site) + dest_server: Server = jingrow.get_pg("Server", self.destination_server) + plan_change = None + try: + plan_change = self.upgrade_plan(site, dest_server) or self.downgrade_plan(site, dest_server) + except CannotChangePlan: + self.update_next_step_status("Failure") + + if plan_change: + self.update_next_step_status("Success") + else: + self.update_next_step_status("Skipped") + self.run_next_step() + + def is_cleanup_done(self, job: "AgentJob") -> bool: + return (job.job_type == "Archive Site" and job.bench == self.destination_bench) and ( + job.status == "Success" + or ( + job.status == "Failure" and f"KeyError: '{self.site}'" in str(job.traceback) + ) # sometimes site may not even get created in destination to clean it up + ) + + +def process_required_job_callbacks(job): + if job.job_type == "Backup Site": + process_backup_site_job_update(job) + + +def job_matches_site_migration(job, site_migration_name: str): + site_migration = SiteMigration("Site Migration", site_migration_name) + next = site_migration.next_step + return job.name == next.step_job if next else False + + +def process_site_migration_job_update(job, site_migration_name: str): + site_migration = SiteMigration("Site Migration", site_migration_name) + if job.name != site_migration.next_step.step_job: + 
log_error("Extra Job found during Site Migration", job=job.as_dict()) + return + + process_required_job_callbacks(job) + site_migration.update_next_step_status(job.status) + + if site_migration.is_cleanup_done(job): + site_migration.fail() + return + + if job.status == "Success": + try: + site_migration.run_next_step() + except Exception as e: + log_error("Site Migration Step Error", pg=site_migration) + site_migration.cleanup_and_fail(reason=str(e), force_activate=True) + elif job.status in ["Failure", "Delivery Failure"]: + site_migration.cleanup_and_fail() + + +def run_scheduled_migrations(): + migrations = jingrow.get_all( + "Site Migration", + {"scheduled_time": ("<=", jingrow.utils.now()), "status": "Scheduled"}, + ) + for migration in migrations: + site_migration = SiteMigration("Site Migration", migration) + try: + site_migration.start() + except OngoingAgentJob: + pass # ongoing jobs will finish in some time + except MissingAppsInBench as e: + site_migration.cleanup_and_fail(reason=str(e), force_activate=True) + except InsufficientSpaceOnServer as e: + site_migration.cleanup_and_fail(reason=str(e), force_activate=True) + except InactiveDomains as e: + site_migration.cleanup_and_fail(reason=str(e), force_activate=True) + except Exception as e: + log_error("Site Migration Start Error", exception=e) + + +def on_pagetype_update(): + jingrow.db.add_index("Site Migration", ["site"]) diff --git a/jcloud/jcloud/pagetype/site_migration/test_site_migration.py b/jcloud/jcloud/pagetype/site_migration/test_site_migration.py new file mode 100644 index 0000000..6c994f9 --- /dev/null +++ b/jcloud/jcloud/pagetype/site_migration/test_site_migration.py @@ -0,0 +1,225 @@ +# Copyright (c) 2021, JINGROW +# See license.txt + +from unittest.mock import patch + +import jingrow +from jingrow.core.utils import find +from jingrow.tests.utils import JingrowTestCase + +from jcloud.jcloud.pagetype.agent_job.agent_job import poll_pending_jobs +from jcloud.jcloud.pagetype.agent_job.test_agent_job import fake_agent_job +from jcloud.jcloud.pagetype.app.test_app import create_test_app +from jcloud.jcloud.pagetype.release_group.test_release_group import ( + create_test_release_group, +) +from jcloud.jcloud.pagetype.remote_file.remote_file import RemoteFile +from jcloud.jcloud.pagetype.site.site import Site +from jcloud.jcloud.pagetype.site.test_site import create_test_bench, create_test_site +from jcloud.jcloud.pagetype.site_migration.site_migration import ( + SiteMigration, + run_scheduled_migrations, +) + +BACKUP_JOB_RES = { + "backups": { + "database": { + "file": "a.sql.gz", + "path": "/home/jingrow/a.sql.gz", + "size": 1674818, + "url": "https://a.com/a.sql.gz", + }, + "public": { + "file": "b.tar", + "path": "/home/jingrow/b.tar", + "size": 1674818, + "url": "https://a.com/b.tar", + }, + "private": { + "file": "a.tar", + "path": "/home/jingrow/a.tar", + "size": 1674818, + "url": "https://a.com/a.tar", + }, + "site_config": { + "file": "a.json", + "path": "/home/jingrow/a.json", + "size": 595, + "url": "https://a.com/json", + }, + }, + "offsite": { + "a.sql.gz": "bucket.jingrow.cloud/2023-10-10/a.sql.gz", + "a.tar": "bucket.jingrow.cloud/2023-10-10/a.tar", + "b.tar": "bucket.jingrow.cloud/2023-10-10/b.tar", + "a.json": "bucket.jingrow.cloud/2023-10-10/a.json", + }, +} + + +@patch.object(RemoteFile, "download_link", new="http://test.com") +class TestSiteMigration(JingrowTestCase): + def tearDown(self): + jingrow.db.rollback() + + def test_in_cluster_site_migration_goes_through_all_steps_and_updates_site(self): + 
with patch.object(Site, "after_insert"), patch.object(Site, "on_update"): + """Patching these methods as its creating issue with duplicate agent job check""" + site = create_test_site() + + bench = create_test_bench() + site_migration = jingrow.get_pg( + { + "pagetype": "Site Migration", + "site": site.name, + "destination_bench": bench.name, + } + ).insert() + + with fake_agent_job("Update Site Configuration", "Success"), fake_agent_job( + "Backup Site", + data=BACKUP_JOB_RES, + ), fake_agent_job("New Site from Backup"), fake_agent_job("Archive Site"), fake_agent_job( + "Remove Site from Upstream" + ), fake_agent_job("Add Site to Upstream"), fake_agent_job("Update Site Configuration"): + site_migration.start() + poll_pending_jobs() + poll_pending_jobs() + poll_pending_jobs() + site_migration.reload() + self.assertEqual(site_migration.status, "Running") + + poll_pending_jobs() + poll_pending_jobs() + poll_pending_jobs() + site_migration.reload() + poll_pending_jobs() + site_migration.reload() + self.assertEqual(site_migration.status, "Success") + site.reload() + self.assertEqual(site.status, "Active") + self.assertEqual(site.bench, bench.name) + self.assertEqual(site.server, bench.server) + + def test_site_is_activated_on_failure_when_possible(self): + with patch.object(Site, "after_insert"), patch.object(Site, "on_update"): + """Patching these methods as its creating issue with duplicate agent job check""" + site = create_test_site() + bench = create_test_bench() + site_migration: SiteMigration = jingrow.get_pg( + { + "pagetype": "Site Migration", + "site": site.name, + "destination_bench": bench.name, + } + ).insert() + + with fake_agent_job("Update Site Configuration"), fake_agent_job( + "Backup Site", + data=BACKUP_JOB_RES, + ), fake_agent_job("New Site from Backup", "Failure"), fake_agent_job("Archive Site"): + site_migration.start() + poll_pending_jobs() + poll_pending_jobs() + poll_pending_jobs() + poll_pending_jobs() + site_migration.reload() + self.assertEqual(site_migration.status, "Failure") + site.reload() + self.assertEqual(site.status, "Active") + + def test_site_archived_on_destination_on_failure(self): + site = create_test_site() + bench = create_test_bench() + site_migration: SiteMigration = jingrow.get_pg( + { + "pagetype": "Site Migration", + "site": site.name, + "destination_bench": bench.name, + } + ).insert() + + with fake_agent_job("Update Site Configuration"), fake_agent_job( + "Backup Site", + data=BACKUP_JOB_RES, + ), fake_agent_job("New Site from Backup", "Failure"), fake_agent_job( + "Archive Site", + ), fake_agent_job("Update Site Configuration"): + site_migration.start() + poll_pending_jobs() + poll_pending_jobs() + poll_pending_jobs() + self.assertEqual(site_migration.status, "Running") + poll_pending_jobs() # restore on destination + site_migration.reload() + self.assertEqual(site_migration.status, "Failure") + archive_job_count = jingrow.db.count( + "Agent Job", {"job_type": "Archive Site", "site": site.name, "server": bench.server} + ) + self.assertEqual(archive_job_count, 1) + + def test_site_not_archived_on_destination_on_failure_if_site_archived_on_source(self): + site = create_test_site() + bench = create_test_bench() + site_migration: SiteMigration = jingrow.get_pg( + { + "pagetype": "Site Migration", + "site": site.name, + "destination_bench": bench.name, + } + ).insert() + + with fake_agent_job("Update Site Configuration"), fake_agent_job( + "Backup Site", + data=BACKUP_JOB_RES, + ), fake_agent_job("New Site from Backup"), fake_agent_job( + "Archive 
Site", # both archives + ), fake_agent_job("Remove Site from Upstream"), fake_agent_job("Add Site to Upstream", "Failure"): + site_migration.start() + poll_pending_jobs() + poll_pending_jobs() + poll_pending_jobs() + poll_pending_jobs() # restore on destination + poll_pending_jobs() # archive on source + poll_pending_jobs() # remove from source proxy + poll_pending_jobs() # restore on dest proxy + + site_migration.reload() + self.assertEqual(site_migration.status, "Failure") + self.assertEqual( + find( + site_migration.steps, + lambda x: x.method_name == SiteMigration.restore_site_on_destination_proxy.__name__, + ).status, + "Failure", + ) # step after archive site on source passed + self.assertFalse( + jingrow.db.exists( + "Agent Job", {"job_type": "Archive Site", "site": site.name, "server": bench.server} + ), + ) + + def test_missing_apps_in_bench_cause_site_migration_to_fail(self): + app1 = create_test_app("jingrow") + app2 = create_test_app("jerp") + + group = create_test_release_group([app1, app2]) + bench = create_test_bench(group=group) + site = create_test_site(bench=bench.name, apps=[app1.name]) + + dest_bench = create_test_bench() + site_migration: SiteMigration = jingrow.get_pg( + { + "pagetype": "Site Migration", + "site": site.name, + "destination_bench": dest_bench.name, + "scheduled_time": jingrow.utils.now_datetime(), + } + ).insert() + + site.append("apps", {"app": app2.name}) + site.save() + + run_scheduled_migrations() + site_migration.reload() + self.assertEqual(site_migration.status, "Failure") diff --git a/jcloud/jcloud/pagetype/site_migration_step/__init__.py b/jcloud/jcloud/pagetype/site_migration_step/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/site_migration_step/site_migration_step.js b/jcloud/jcloud/pagetype/site_migration_step/site_migration_step.js new file mode 100644 index 0000000..2174b0d --- /dev/null +++ b/jcloud/jcloud/pagetype/site_migration_step/site_migration_step.js @@ -0,0 +1,7 @@ +// Copyright (c) 2021, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Site Migration Step', { + // refresh: function(frm) { + // } +}); diff --git a/jcloud/jcloud/pagetype/site_migration_step/site_migration_step.json b/jcloud/jcloud/pagetype/site_migration_step/site_migration_step.json new file mode 100644 index 0000000..f29f973 --- /dev/null +++ b/jcloud/jcloud/pagetype/site_migration_step/site_migration_step.json @@ -0,0 +1,63 @@ +{ + "actions": [], + "creation": "2021-05-03 13:57:25.931529", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "step_title", + "step_job", + "status", + "method_name", + "method_arg" + ], + "fields": [ + { + "default": "Pending", + "fieldname": "status", + "fieldtype": "Select", + "in_list_view": 1, + "label": "Status", + "options": "Pending\nRunning\nSuccess\nFailure\nSkipped\nDelivery Failure", + "reqd": 1 + }, + { + "fieldname": "method_name", + "fieldtype": "Data", + "label": "Method Name", + "reqd": 1 + }, + { + "fieldname": "step_title", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Step Title", + "reqd": 1 + }, + { + "fieldname": "step_job", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Step Job", + "options": "Agent Job" + }, + { + "fieldname": "method_arg", + "fieldtype": "Data", + "label": "Method Arg" + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2024-01-12 16:30:57.497115", + "modified_by": "Administrator", + "module": "Jcloud", + "name": 
"Site Migration Step", + "owner": "Administrator", + "permissions": [], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/site_migration_step/site_migration_step.py b/jcloud/jcloud/pagetype/site_migration_step/site_migration_step.py new file mode 100644 index 0000000..99f5076 --- /dev/null +++ b/jcloud/jcloud/pagetype/site_migration_step/site_migration_step.py @@ -0,0 +1,31 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + + +# import jingrow +from jingrow.model.document import Document + + +class SiteMigrationStep(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + method_arg: DF.Data | None + method_name: DF.Data + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + status: DF.Literal[ + "Pending", "Running", "Success", "Failure", "Skipped", "Delivery Failure" + ] + step_job: DF.Link | None + step_title: DF.Data + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/site_migration_step/test_site_migration_step.py b/jcloud/jcloud/pagetype/site_migration_step/test_site_migration_step.py new file mode 100644 index 0000000..1606896 --- /dev/null +++ b/jcloud/jcloud/pagetype/site_migration_step/test_site_migration_step.py @@ -0,0 +1,11 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2021, JINGROW +# See license.txt + + +# import jingrow +import unittest + + +class TestSiteMigrationStep(unittest.TestCase): + pass diff --git a/jcloud/jcloud/pagetype/site_plan/__init__.py b/jcloud/jcloud/pagetype/site_plan/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/site_plan/plan.py b/jcloud/jcloud/pagetype/site_plan/plan.py new file mode 100644 index 0000000..50c2876 --- /dev/null +++ b/jcloud/jcloud/pagetype/site_plan/plan.py @@ -0,0 +1,49 @@ +import jingrow +from jingrow.model.document import Document +from jingrow.utils import rounded + +from jcloud.utils import group_children_in_result + + +class Plan(Document): + def get_price_for_interval(self, interval, currency): + price_per_day = self.get_price_per_day(currency) + + if interval == "Daily": + return price_per_day + + if interval == "Monthly": + return rounded(price_per_day * 30) + + def get_price_per_day(self, currency): + price = self.price_cny if currency == "CNY" else self.price_usd + price_per_day = rounded(price / self.period, 2) + return price_per_day + + @property + def period(self): + return jingrow.utils.get_last_day(None).day + + @classmethod + def get_plans(cls, pagetype, fields=["*"], filters=None): + filters = filters or {} + fields.append("`tabHas Role`.role") + filters.update({"enabled": True}) + plans = jingrow.get_all( + pagetype, filters=filters, fields=fields, order_by="price_usd asc" + ) + plans = filter_by_roles(plans) + + return plans + + +def filter_by_roles(plans): + plans = group_children_in_result(plans, {"role": "roles"}) + + out = [] + for plan in plans: + if jingrow.utils.has_common(plan["roles"], jingrow.get_roles()): + plan.pop("roles", "") + out.append(plan) + + return out diff --git a/jcloud/jcloud/pagetype/site_plan/site_plan.js b/jcloud/jcloud/pagetype/site_plan/site_plan.js new file mode 100644 index 0000000..c18e5c3 --- /dev/null +++ b/jcloud/jcloud/pagetype/site_plan/site_plan.js @@ -0,0 +1,8 @@ +// Copyright (c) 2024, 
JINGROW +// For license information, please see license.txt + +// jingrow.ui.form.on("Site Plan", { +// refresh(frm) { + +// }, +// }); diff --git a/jcloud/jcloud/pagetype/site_plan/site_plan.json b/jcloud/jcloud/pagetype/site_plan/site_plan.json new file mode 100644 index 0000000..461e5a0 --- /dev/null +++ b/jcloud/jcloud/pagetype/site_plan/site_plan.json @@ -0,0 +1,288 @@ +{ + "actions": [], + "allow_rename": 1, + "autoname": "Prompt", + "creation": "2022-01-28 20:07:37.055861", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "enabled", + "section_break_2", + "document_type", + "plan_title", + "interval", + "column_break_5", + "price_cny", + "price_usd", + "allow_downgrading_from_other_plan", + "features_section", + "cpu_time_per_day", + "max_database_usage", + "max_storage_usage", + "column_break_13", + "is_trial_plan", + "offsite_backups", + "private_benches", + "database_access", + "monitor_access", + "support_included", + "dedicated_server_plan", + "is_jingrow_plan", + "customization_for_bench_section", + "release_groups", + "allowed_apps", + "servers", + "cluster", + "instance_type", + "column_break_21", + "vcpu", + "memory", + "disk", + "roles_section", + "roles" + ], + "fields": [ + { + "default": "1", + "fieldname": "enabled", + "fieldtype": "Check", + "label": "Enabled" + }, + { + "fieldname": "section_break_2", + "fieldtype": "Section Break" + }, + { + "fieldname": "document_type", + "fieldtype": "Link", + "label": "Document Type", + "options": "PageType", + "reqd": 1 + }, + { + "fieldname": "plan_title", + "fieldtype": "Data", + "label": "Plan Title" + }, + { + "fieldname": "interval", + "fieldtype": "Select", + "label": "Interval", + "options": "Daily\nMonthly\nAnnually" + }, + { + "fieldname": "column_break_5", + "fieldtype": "Column Break" + }, + { + "fieldname": "price_cny", + "fieldtype": "Currency", + "in_list_view": 1, + "label": "Price (CNY)", + "options": "CNY", + "reqd": 1 + }, + { + "fieldname": "price_usd", + "fieldtype": "Currency", + "in_list_view": 1, + "label": "Price (USD)", + "options": "USD", + "reqd": 1 + }, + { + "depends_on": "eval:pg.document_type == 'Site'", + "fieldname": "features_section", + "fieldtype": "Section Break", + "label": "Site Features" + }, + { + "fieldname": "cpu_time_per_day", + "fieldtype": "Float", + "in_list_view": 1, + "label": "CPU Time Per Day" + }, + { + "fieldname": "max_database_usage", + "fieldtype": "Int", + "label": "Max Database Usage (MiB)" + }, + { + "fieldname": "max_storage_usage", + "fieldtype": "Int", + "label": "Max Storage Usage (MiB)" + }, + { + "fieldname": "column_break_13", + "fieldtype": "Column Break" + }, + { + "default": "0", + "fieldname": "is_trial_plan", + "fieldtype": "Check", + "label": "Is Trial Plan" + }, + { + "default": "0", + "fieldname": "offsite_backups", + "fieldtype": "Check", + "in_list_view": 1, + "label": "Offsite Backups" + }, + { + "default": "0", + "fieldname": "private_benches", + "fieldtype": "Check", + "label": "Private Benches" + }, + { + "default": "0", + "fieldname": "database_access", + "fieldtype": "Check", + "label": "Database Access" + }, + { + "default": "0", + "fieldname": "monitor_access", + "fieldtype": "Check", + "label": "Monitor Access" + }, + { + "default": "0", + "fieldname": "support_included", + "fieldtype": "Check", + "label": "Support Included" + }, + { + "default": "0", + "fieldname": "dedicated_server_plan", + "fieldtype": "Check", + "label": "Dedicated Server Plan" + }, + { + "default": "0", + "description": "Enterprise, 
Central, Jingrow Team etc", + "fieldname": "is_jingrow_plan", + "fieldtype": "Check", + "label": "Is Jingrow Plan" + }, + { + "fieldname": "servers", + "fieldtype": "Section Break", + "label": "Servers" + }, + { + "fieldname": "cluster", + "fieldtype": "Link", + "label": "Cluster", + "options": "Cluster" + }, + { + "fieldname": "instance_type", + "fieldtype": "Data", + "label": "Instance Type" + }, + { + "fieldname": "column_break_21", + "fieldtype": "Column Break" + }, + { + "fieldname": "vcpu", + "fieldtype": "Int", + "label": "vCPU" + }, + { + "fieldname": "memory", + "fieldtype": "Int", + "label": "Memory" + }, + { + "fieldname": "disk", + "fieldtype": "Int", + "label": "Disk" + }, + { + "fieldname": "roles_section", + "fieldtype": "Section Break", + "label": "Roles" + }, + { + "fieldname": "roles", + "fieldtype": "Table", + "label": "Roles", + "options": "Has Role" + }, + { + "description": "Leave this table empty to schedule deployment on any release group", + "fieldname": "release_groups", + "fieldtype": "Table", + "label": "Release Groups", + "options": "Site Plan Release Group" + }, + { + "fieldname": "customization_for_bench_section", + "fieldtype": "Section Break", + "label": "Customization For Bench" + }, + { + "description": "Leave this table empty to allow any app to install for the site", + "fieldname": "allowed_apps", + "fieldtype": "Table", + "label": "Allowed Apps", + "options": "Site Plan Allowed App" + }, + { + "default": "1", + "fieldname": "allow_downgrading_from_other_plan", + "fieldtype": "Check", + "label": "Allow Downgrading From Other Plan" + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2024-07-12 13:36:14.737846", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Site Plan", + "naming_rule": "Set by user", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + }, + { + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "Jcloud Admin", + "share": 1 + }, + { + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "Jcloud Member", + "share": 1 + } + ], + "quick_entry": 1, + "sort_field": "price_usd", + "sort_order": "ASC", + "states": [], + "title_field": "plan_title", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/site_plan/site_plan.py b/jcloud/jcloud/pagetype/site_plan/site_plan.py new file mode 100644 index 0000000..7657666 --- /dev/null +++ b/jcloud/jcloud/pagetype/site_plan/site_plan.py @@ -0,0 +1,101 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt +from __future__ import annotations + +import jingrow + +from jcloud.jcloud.pagetype.site_plan.plan import Plan + +UNLIMITED_PLANS = ["Unlimited", "Unlimited - Supported"] + + +class SitePlan(Plan): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.core.pagetype.has_role.has_role import HasRole + from jingrow.types import DF + + from jcloud.jcloud.pagetype.site_plan_allowed_app.site_plan_allowed_app import ( + SitePlanAllowedApp, + ) + from jcloud.jcloud.pagetype.site_plan_release_group.site_plan_release_group import ( + SitePlanReleaseGroup, + ) + + allow_downgrading_from_other_plan: DF.Check + allowed_apps: DF.Table[SitePlanAllowedApp] + cluster: DF.Link | None + cpu_time_per_day: DF.Float + database_access: DF.Check + dedicated_server_plan: DF.Check + disk: DF.Int + document_type: DF.Link + enabled: DF.Check + instance_type: DF.Data | None + interval: DF.Literal["Daily", "Monthly", "Annually"] + is_jingrow_plan: DF.Check + is_trial_plan: DF.Check + max_database_usage: DF.Int + max_storage_usage: DF.Int + memory: DF.Int + monitor_access: DF.Check + offsite_backups: DF.Check + plan_title: DF.Data | None + price_cny: DF.Currency + price_usd: DF.Currency + private_benches: DF.Check + release_groups: DF.Table[SitePlanReleaseGroup] + roles: DF.Table[HasRole] + support_included: DF.Check + vcpu: DF.Int + # end: auto-generated types + + dashboard_fields = ( + "name", + "plan_title", + "interval", + "document_type", + "document_name", + "price_cny", + "price_usd", + "period", + "cpu_time_per_day", + "max_database_usage", + "max_storage_usage", + "database_access", + "support_included", + "private_benches", + "monitor_access", + "is_trial_plan", + ) + + def get_pg(self, pg): + pg["price_per_day_cny"] = self.get_price_per_day("CNY") + pg["price_per_day_usd"] = self.get_price_per_day("USD") + return pg + + @classmethod + def get_ones_without_offsite_backups(cls) -> list[str]: + return jingrow.get_all("Site Plan", filters={"offsite_backups": False}, pluck="name") + + +def get_plan_config(name): + limits = jingrow.db.get_value( + "Site Plan", + name, + ["cpu_time_per_day", "max_database_usage", "max_storage_usage"], + as_dict=True, + ) + if limits and limits.get("cpu_time_per_day", 0) > 0: + return { + "rate_limit": {"limit": limits.cpu_time_per_day * 3600, "window": 86400}, + "plan_limit": { + "max_database_usage": limits.max_database_usage, + "max_storage_usage": limits.max_storage_usage, + }, + } + return {} diff --git a/jcloud/jcloud/pagetype/site_plan/test_site_plan.py b/jcloud/jcloud/pagetype/site_plan/test_site_plan.py new file mode 100644 index 0000000..ff2852e --- /dev/null +++ b/jcloud/jcloud/pagetype/site_plan/test_site_plan.py @@ -0,0 +1,90 @@ +# Copyright (c) 2024, JINGROW +# See license.txt + +from __future__ import annotations + +from datetime import date +from unittest.mock import patch + +import jingrow +from jingrow.model.naming import make_autoname +from jingrow.tests.utils import JingrowTestCase + + +def create_test_plan( + document_type: str, + price_usd: float = 10.0, + price_cny: float = 750.0, + cpu_time: int = 1, + plan_title: str | None = None, + plan_name: str | None = None, + allow_downgrading_from_other_plan: bool = True, + allowed_apps: list[str] | None = None, + release_groups: list[str] | None = None, + private_benches: bool = False, + is_trial_plan: bool = False, +): + """Create test Plan pg.""" + plan_name = plan_name or f"Test {document_type} plan {make_autoname('.#')}" + plan_title = plan_name + plan = jingrow.get_pg( + { + "pagetype": "Site Plan", + "document_type": "Site", + "name": plan_name, + "plan_title": plan_title, + "price_cny": price_cny, + "price_usd": price_usd, + "cpu_time_per_day": cpu_time, + 
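get_plan_config turns the plan's daily CPU allowance into the rate limit pushed to the site config: hours per day become CPU-seconds over a 24-hour (86400-second) window, next to the raw database and storage caps in MiB. For an assumed plan allowing 2 CPU hours per day:

# Assumed limits for illustration; real values come from the Site Plan record.
limits = {"cpu_time_per_day": 2, "max_database_usage": 1024, "max_storage_usage": 10240}

config = {
    "rate_limit": {"limit": limits["cpu_time_per_day"] * 3600, "window": 86400},  # 7200 CPU-seconds per day
    "plan_limit": {
        "max_database_usage": limits["max_database_usage"],  # MiB
        "max_storage_usage": limits["max_storage_usage"],    # MiB
    },
}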
"allow_downgrading_from_other_plan": allow_downgrading_from_other_plan, + "disk": 50, + "instance_type": "t2.micro", + "private_benches": private_benches, + "is_trial_plan": is_trial_plan, + } + ) + if allowed_apps: + for app in allowed_apps: + plan.append("allowed_apps", {"app": app}) + if release_groups: + for release_group in release_groups: + plan.append("release_groups", {"release_group": release_group}) + + plan.insert(ignore_if_duplicate=True) + plan.reload() + return plan + + +class TestSitePlan(JingrowTestCase): + def setUp(self): + self.plan = create_test_plan("Site") + + def tearDown(self): + jingrow.db.rollback() + + def test_period_int(self): + self.assertIsInstance(self.plan.period, int) + + def test_per_day_difference(self): + per_day_usd = self.plan.get_price_per_day("USD") + per_day_cny = self.plan.get_price_per_day("CNY") + self.assertIsInstance(per_day_cny, (int, float)) + self.assertIsInstance(per_day_usd, (int, float)) + self.assertNotEqual(per_day_cny, per_day_usd) + + def test_dynamic_period(self): + month_with_29_days = jingrow.utils.get_last_day(date(2020, 2, 3)) + month_with_30_days = jingrow.utils.get_last_day(date(1997, 4, 3)) + + with patch.object(jingrow.utils, "get_last_day", return_value=month_with_30_days): + self.assertEqual(self.plan.period, 30) + per_day_for_30_usd = self.plan.get_price_per_day("USD") + per_day_for_30_cny = self.plan.get_price_per_day("CNY") + + with patch.object(jingrow.utils, "get_last_day", return_value=month_with_29_days): + self.assertEqual(self.plan.period, 29) + per_day_for_29_usd = self.plan.get_price_per_day("USD") + per_day_for_29_cny = self.plan.get_price_per_day("CNY") + + self.assertNotEqual(per_day_for_29_usd, per_day_for_30_usd) + self.assertNotEqual(per_day_for_29_cny, per_day_for_30_cny) diff --git a/jcloud/jcloud/pagetype/site_plan_allowed_app/__init__.py b/jcloud/jcloud/pagetype/site_plan_allowed_app/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/site_plan_allowed_app/site_plan_allowed_app.json b/jcloud/jcloud/pagetype/site_plan_allowed_app/site_plan_allowed_app.json new file mode 100644 index 0000000..abfb51b --- /dev/null +++ b/jcloud/jcloud/pagetype/site_plan_allowed_app/site_plan_allowed_app.json @@ -0,0 +1,33 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2024-07-09 23:27:13.919845", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "app" + ], + "fields": [ + { + "fieldname": "app", + "fieldtype": "Link", + "in_list_view": 1, + "label": "App", + "options": "App", + "reqd": 1 + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2024-07-09 23:27:30.587794", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Site Plan Allowed App", + "owner": "Administrator", + "permissions": [], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/site_plan_allowed_app/site_plan_allowed_app.py b/jcloud/jcloud/pagetype/site_plan_allowed_app/site_plan_allowed_app.py new file mode 100644 index 0000000..2a73d71 --- /dev/null +++ b/jcloud/jcloud/pagetype/site_plan_allowed_app/site_plan_allowed_app.py @@ -0,0 +1,22 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class SitePlanAllowedApp(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + app: DF.Link + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + # end: auto-generated types + pass diff --git a/jcloud/jcloud/pagetype/site_plan_change/__init__.py b/jcloud/jcloud/pagetype/site_plan_change/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/site_plan_change/site_plan_change.js b/jcloud/jcloud/pagetype/site_plan_change/site_plan_change.js new file mode 100644 index 0000000..35b0d96 --- /dev/null +++ b/jcloud/jcloud/pagetype/site_plan_change/site_plan_change.js @@ -0,0 +1,7 @@ +// Copyright (c) 2020, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Site Plan Change', { + // refresh: function(frm) { + // } +}); diff --git a/jcloud/jcloud/pagetype/site_plan_change/site_plan_change.json b/jcloud/jcloud/pagetype/site_plan_change/site_plan_change.json new file mode 100644 index 0000000..fc32b1e --- /dev/null +++ b/jcloud/jcloud/pagetype/site_plan_change/site_plan_change.json @@ -0,0 +1,110 @@ +{ + "actions": [], + "creation": "2020-05-05 16:37:23.906627", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "site", + "team", + "from_plan", + "to_plan", + "column_break_5", + "type", + "timestamp" + ], + "fields": [ + { + "fieldname": "site", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Site", + "options": "Site", + "reqd": 1 + }, + { + "fetch_from": "site.team", + "fieldname": "team", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Team", + "options": "Team" + }, + { + "fieldname": "from_plan", + "fieldtype": "Link", + "label": "From Plan", + "options": "Site Plan" + }, + { + "fieldname": "to_plan", + "fieldtype": "Link", + "in_list_view": 1, + "label": "To Plan", + "options": "Site Plan", + "reqd": 1 + }, + { + "fieldname": "column_break_5", + "fieldtype": "Column Break" + }, + { + "fieldname": "type", + "fieldtype": "Select", + "in_list_view": 1, + "label": "Type", + "options": "\nInitial Plan\nUpgrade\nDowngrade" + }, + { + "default": "Now", + "fieldname": "timestamp", + "fieldtype": "Datetime", + "label": "Timestamp" + } + ], + "links": [], + "modified": "2024-02-05 23:05:20.377096", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Site Plan Change", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + }, + { + "create": 1, + "role": "Jcloud Admin" + }, + { + "create": 1, + "role": "Jcloud Member" + }, + { + "create": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "Site Manager", + "share": 1, + "write": 1 + } + ], + "quick_entry": 1, + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "title_field": "site", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/site_plan_change/site_plan_change.py b/jcloud/jcloud/pagetype/site_plan_change/site_plan_change.py new file mode 100644 index 0000000..af00fce --- /dev/null +++ b/jcloud/jcloud/pagetype/site_plan_change/site_plan_change.py @@ -0,0 +1,101 @@ +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + +from __future__ import annotations + +import jingrow +from jingrow import _ +from jingrow.model.document import Document + +from jcloud.utils.webhook import create_webhook_event + + +class 
SitePlanChange(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + from_plan: DF.Link | None + site: DF.Link + team: DF.Link | None + timestamp: DF.Datetime | None + to_plan: DF.Link + type: DF.Literal["", "Initial Plan", "Upgrade", "Downgrade"] + # end: auto-generated types + + dashboard_fields = ("from_plan", "to_plan", "type", "site", "timestamp") + + def validate(self): + if not self.from_plan and self.to_plan: + self.type = "Initial Plan" + + if self.from_plan and not self.type: + from_plan_value = jingrow.db.get_value("Site Plan", self.from_plan, "price_usd") + to_plan_value = jingrow.db.get_value("Site Plan", self.to_plan, "price_usd") + self.type = "Downgrade" if from_plan_value > to_plan_value else "Upgrade" + + if ( + self.from_plan + and self.to_plan + and self.type == "Downgrade" + and not jingrow.db.get_value("Site Plan", self.to_plan, "allow_downgrading_from_other_plan") + ): + jingrow.throw(f"Sorry, you cannot downgrade to {self.to_plan} from {self.from_plan}") + + if self.type == "Initial Plan": + self.from_plan = "" + + def after_insert(self): + if self.team != "Administrator": + create_webhook_event("Site Plan Change", self, self.team) + + if self.type == "Initial Plan": + self.create_subscription() + return + + # move this code to Server Scripts + # if self.type == "Downgrade": + # last_plan_change = jingrow.get_last_pg( + # "Site Plan Change", filters={"site": self.site, "team": self.team} + # ) + # # check if last site plan change was made before 48 hours + # if last_plan_change.creation > jingrow.utils.add_days(None, -2): + # jingrow.throw("Cannot downgrade plan within 48 hours") + + self.change_subscription_plan() + + def create_subscription(self): + jingrow.get_pg( + pagetype="Subscription", + team=self.team, + document_type="Site", + document_name=self.site, + plan_type="Site Plan", + plan=self.to_plan, + ).insert() + + def change_subscription_plan(self): + site = jingrow.get_pg("Site", self.site) + subscription = site.subscription + if not subscription: + jingrow.throw(f"No subscription for site {site.name}") + + if self.from_plan and self.from_plan != subscription.plan: + jingrow.throw( + _("Site {0} is currently on {1} plan and not {2}").format( + site.name, subscription.plan, self.from_plan + ) + ) + + subscription.plan = self.to_plan + subscription.flags.updater_reference = { + "pagetype": self.pagetype, + "docname": self.name, + "label": _("via Site Plan Change"), + } + subscription.enabled = 1 + subscription.save() diff --git a/jcloud/jcloud/pagetype/site_plan_change/test_site_plan_change.py b/jcloud/jcloud/pagetype/site_plan_change/test_site_plan_change.py new file mode 100644 index 0000000..5179ff3 --- /dev/null +++ b/jcloud/jcloud/pagetype/site_plan_change/test_site_plan_change.py @@ -0,0 +1,80 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# See license.txt + + +import unittest + +import jingrow + +from jcloud.jcloud.pagetype.site.test_site import create_test_site +from jcloud.jcloud.pagetype.site_plan.test_site_plan import create_test_plan + + +class TestSitePlanChange(unittest.TestCase): + def setUp(self): + self.tiny_plan = create_test_plan( + "Site", + plan_name="Tiny Plan", + allow_downgrading_from_other_plan=False, + price_usd=5.0, + price_cny=375.0, + ) + self.nano_plan = create_test_plan( + "Site", + plan_name="Nano Plan", + allow_downgrading_from_other_plan=True, + price_usd=7.0, 
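# SitePlanChange.validate() classifies a change as Upgrade or Downgrade by
# comparing the plans' price_usd, so the three test plans here are priced at
# 5, 7 and 10 USD to exercise both directions.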
+ price_cny=525.0, + ) + self.unlimited_plan = create_test_plan( + "Site", + plan_name="Unlimited Plan", + allow_downgrading_from_other_plan=True, + price_usd=10.0, + price_cny=750.0, + ) + self.site = create_test_site(subdomain="testsite") + + def tearDown(self): + jingrow.db.rollback() + + def test_raise_error_while_downgrading_to_plan_in_which__allow_downgrading_from_other_plan__flag_is_disabled( + self, + ): + # Initially Set `Unlimited Plan` to site + self.site._create_initial_site_plan_change(self.unlimited_plan.name) + self.site.reload() + self.assertEqual(self.site.plan, self.unlimited_plan.name) + # Try to downgrade to `Tiny Plan` from `Unlimited Plan` + with self.assertRaises(jingrow.exceptions.ValidationError) as context: + jingrow.get_pg( + { + "pagetype": "Site Plan Change", + "site": self.site.name, + "from_plan": self.unlimited_plan.name, + "to_plan": self.tiny_plan.name, + } + ).insert(ignore_permissions=True) + + self.assertTrue("you cannot downgrade" in str(context.exception)) + + def test_allowed_to_downgrade_while__allow_downgrading_from_other_plan__flag_is_enabled( + self, + ): + # Initially Set `Unlimited Plan` to site + self.site._create_initial_site_plan_change(self.unlimited_plan.name) + self.site.reload() + self.assertEqual(self.site.plan, self.unlimited_plan.name) + # Try to downgrade to `Nano Plan` from `Unlimited Plan` + jingrow.get_pg( + { + "pagetype": "Site Plan Change", + "site": self.site.name, + "from_plan": self.unlimited_plan.name, + "to_plan": self.nano_plan.name, + } + ).insert(ignore_permissions=True) + self.assertEqual( + jingrow.db.get_value("Site", self.site.name, "plan"), self.nano_plan.name + ) diff --git a/jcloud/jcloud/pagetype/site_plan_release_group/__init__.py b/jcloud/jcloud/pagetype/site_plan_release_group/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/site_plan_release_group/site_plan_release_group.json b/jcloud/jcloud/pagetype/site_plan_release_group/site_plan_release_group.json new file mode 100644 index 0000000..97c1e7b --- /dev/null +++ b/jcloud/jcloud/pagetype/site_plan_release_group/site_plan_release_group.json @@ -0,0 +1,33 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2024-07-09 12:42:51.268467", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "release_group" + ], + "fields": [ + { + "fieldname": "release_group", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Release Group", + "options": "Release Group", + "reqd": 1 + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2024-07-09 12:43:12.168927", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Site Plan Release Group", + "owner": "Administrator", + "permissions": [], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/site_plan_release_group/site_plan_release_group.py b/jcloud/jcloud/pagetype/site_plan_release_group/site_plan_release_group.py new file mode 100644 index 0000000..442af55 --- /dev/null +++ b/jcloud/jcloud/pagetype/site_plan_release_group/site_plan_release_group.py @@ -0,0 +1,22 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class SitePlanReleaseGroup(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + release_group: DF.Link + # end: auto-generated types + pass diff --git a/jcloud/jcloud/pagetype/site_replication/__init__.py b/jcloud/jcloud/pagetype/site_replication/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/site_replication/site_replication.js b/jcloud/jcloud/pagetype/site_replication/site_replication.js new file mode 100644 index 0000000..ebe0c0d --- /dev/null +++ b/jcloud/jcloud/pagetype/site_replication/site_replication.js @@ -0,0 +1,14 @@ +// Copyright (c) 2022, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Site Replication', { + setup: function (frm) { + frm.set_query('site', function () { + return { + filters: { + status: 'Active', + }, + }; + }); + }, +}); diff --git a/jcloud/jcloud/pagetype/site_replication/site_replication.json b/jcloud/jcloud/pagetype/site_replication/site_replication.json new file mode 100644 index 0000000..4d1978d --- /dev/null +++ b/jcloud/jcloud/pagetype/site_replication/site_replication.json @@ -0,0 +1,102 @@ +{ + "actions": [], + "allow_rename": 1, + "autoname": "format: SR-{#####}", + "creation": "2022-12-13 15:28:46.433511", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "site", + "subdomain", + "bench", + "server", + "column_break_4", + "status", + "release_group", + "new_site" + ], + "fields": [ + { + "fieldname": "site", + "fieldtype": "Link", + "label": "Site", + "options": "Site" + }, + { + "fetch_from": "site.bench", + "fieldname": "bench", + "fieldtype": "Link", + "label": "Bench", + "options": "Bench", + "read_only": 1 + }, + { + "fieldname": "column_break_4", + "fieldtype": "Column Break" + }, + { + "default": "Not Started", + "fieldname": "status", + "fieldtype": "Select", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Status", + "options": "Not Started\nRunning\nSuccess\nFailure" + }, + { + "fetch_from": "site.server", + "fieldname": "server", + "fieldtype": "Link", + "label": "Server", + "options": "Server", + "read_only": 1 + }, + { + "fetch_from": "site.group", + "fieldname": "release_group", + "fieldtype": "Link", + "label": "Release Group", + "options": "Release Group", + "read_only": 1 + }, + { + "fieldname": "new_site", + "fieldtype": "Link", + "label": "New Site", + "options": "Site", + "read_only": 1 + }, + { + "fieldname": "subdomain", + "fieldtype": "Data", + "label": "New Subdomain" + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2022-12-14 17:23:02.252242", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Site Replication", + "naming_rule": "Expression (old style)", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "title_field": "new_site" +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/site_replication/site_replication.py b/jcloud/jcloud/pagetype/site_replication/site_replication.py new file mode 100644 index 0000000..ec681b3 --- /dev/null +++ b/jcloud/jcloud/pagetype/site_replication/site_replication.py @@ -0,0 +1,93 @@ +# Copyright (c) 2022, JINGROW +# For license information, please see license.txt + +from typing import List + +import 
jingrow +from jingrow.model.document import Document + +from jcloud.api.site import _new +from jcloud.jcloud.pagetype.site.site import prepare_site + + +class SiteReplication(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + bench: DF.Link | None + new_site: DF.Link | None + release_group: DF.Link | None + server: DF.Link | None + site: DF.Link | None + status: DF.Literal["Not Started", "Running", "Success", "Failure"] + subdomain: DF.Data | None + # end: auto-generated types + + pagetype = "Site Replication" + + def validate(self): + self.validate_duplicate() + self.validate_site_name() + + def validate_duplicate(self): + # check for already running site replication + site_reps = jingrow.get_all( + "Site Replication", + dict( + site=self.site, subdomain=self.subdomain, status="Running", name=("!=", self.name) + ), + pluck="name", + ) + if site_reps: + jingrow.throw(f"Site Replication for {self.site} is already running.") + + def validate_site_name(self): + # check if there is an non-archived site with same name + domain = jingrow.get_pg("Site", self.site).domain + new_sitename = self.subdomain + "." + domain + sites = jingrow.get_all( + "Site", dict(status=["!=", "Archived"], name=new_sitename), pluck="name" + ) + + if sites: + jingrow.throw( + f"Site {self.new_site} already exists. Please choose another subdomain." + ) + + def after_insert(self): + self.status = "Running" + site_dict = prepare_site(self.site, self.subdomain) + try: + site_job = _new(site_dict, self.server) + self.new_site = site_job.get("site") + self.save() + except Exception: + jingrow.log_error("Site Replication Error") + + @classmethod + def get_all_running_site_replications(cls) -> List[Document]: + replications = jingrow.get_all(cls.pagetype, dict(status="Running"), pluck="name") + return cls.get_docs(replications) + + @classmethod + def get_docs(cls, names: List[str]) -> List[Document]: + return [jingrow.get_pg(cls.pagetype, name) for name in names] + + +def update_from_site(): + ongoing_replications = SiteReplication.get_all_running_site_replications() + for replication in ongoing_replications: + site_pg = jingrow.get_pg("Site", replication.new_site) + site_status = { + "Broken": "Failure", + "Active": "Success", + "Pending": "Running", + "Installing": "Running", + } + replication.status = site_status[site_pg.status] + replication.save() diff --git a/jcloud/jcloud/pagetype/site_replication/test_site_replication.py b/jcloud/jcloud/pagetype/site_replication/test_site_replication.py new file mode 100644 index 0000000..2a08857 --- /dev/null +++ b/jcloud/jcloud/pagetype/site_replication/test_site_replication.py @@ -0,0 +1,9 @@ +# Copyright (c) 2022, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests.utils import JingrowTestCase + + +class TestSiteReplication(JingrowTestCase): + pass diff --git a/jcloud/jcloud/pagetype/site_update/__init__.py b/jcloud/jcloud/pagetype/site_update/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/site_update/scheduled_auto_updates.py b/jcloud/jcloud/pagetype/site_update/scheduled_auto_updates.py new file mode 100644 index 0000000..0e3ac2c --- /dev/null +++ b/jcloud/jcloud/pagetype/site_update/scheduled_auto_updates.py @@ -0,0 +1,165 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + +from calendar import monthrange 
+ +import jingrow +from jingrow.utils import get_datetime, get_time, now_datetime + +from jcloud.jcloud.pagetype.site.site import Site +from jcloud.jcloud.pagetype.site_update.site_update import benches_with_available_update +from jcloud.utils import log_error + + +def trigger(): + """Will be triggered every 30 minutes""" + # Get all ["Active", "Inactive"] sites + # with auto updates scheduled + sites_with_scheduled_updates = jingrow.get_all( + "Site", + filters={ + "status": ("in", ("Active", "Inactive")), + "only_update_at_specified_time": True, + "skip_auto_updates": False, + "bench": ( + "in", + benches_with_available_update(), # An update should be available for this site + ), + }, + fields=[ + "name", + "auto_update_last_triggered_on", + "update_trigger_time", + "update_trigger_frequency", + "update_on_weekday", + "update_end_of_month", + "update_on_day_of_month", + ], + ) + + trigger_for_sites = list(filter(should_update_trigger, sites_with_scheduled_updates)) + + for site in trigger_for_sites: + auto_update_log = jingrow.get_pg( + { + "pagetype": "Scheduled Auto Update Log", + "document_type": "Site", + "document_name": site.name, + "status": "Success", + } + ) + + # Set the frequency details in log + set_schedule_details(auto_update_log, site) + + try: + site_pg: Site = jingrow.get_pg("Site", site.name) + site_pg.schedule_update() + site_pg.auto_update_last_triggered_on = now_datetime() + site_pg.save() + except Exception: + traceback = "
" + jingrow.get_traceback() + "
" + + # Update log pg + auto_update_log.status = "Failed" + auto_update_log.error = traceback + + log_error("Scheduled Auto Update Failed", site=site, traceback=traceback) + finally: + auto_update_log.insert(ignore_permissions=True) + + +def should_update_trigger(pg): + """ + Returns `True` if the pg update should be triggered. + """ + # Return based on the set frequency + if pg.update_trigger_frequency == "Daily": + return should_update_trigger_for_daily(pg) + elif pg.update_trigger_frequency == "Weekly": + return should_update_trigger_for_weekly(pg) + elif pg.update_trigger_frequency == "Monthly": + return should_update_trigger_for_monthly(pg) + + return False + + +def should_update_trigger_for_daily(pg, current_datetime=None): + """Takes `current_datetime` to make testing easier.""" + current_datetime = current_datetime or get_datetime() + auto_update_last_triggered_on = pg.auto_update_last_triggered_on + + if ( + auto_update_last_triggered_on + and auto_update_last_triggered_on.date() == current_datetime.date() + and get_time(pg.update_trigger_time) <= get_time(auto_update_last_triggered_on) + ): + return False + elif get_time(pg.update_trigger_time) <= get_time(current_datetime): + return True + + return False + + +def should_update_trigger_for_weekly(pg, current_datetime=None): + """Takes `current_datetime` to make testing easier.""" + current_datetime = current_datetime or get_datetime() + if pg.update_on_weekday != current_datetime.strftime("%A"): + return False + + auto_update_last_triggered_on = pg.auto_update_last_triggered_on + + # Today is `update_on_weekday` + if ( + auto_update_last_triggered_on + and auto_update_last_triggered_on.date() == current_datetime.date() + and get_time(pg.update_trigger_time) <= get_time(auto_update_last_triggered_on) + ): + return False + + if get_time(pg.update_trigger_time) <= get_time(current_datetime): + return True + + return False + + +def should_update_trigger_for_monthly(pg, current_datetime=None): + """Takes `current_datetime` to make testing easier.""" + current_datetime = current_datetime or get_datetime() + if pg.update_end_of_month: + on_day_of_month = get_last_day_of_month(current_datetime.year, current_datetime.month) + else: + on_day_of_month = pg.update_on_day_of_month + + if on_day_of_month != current_datetime.day: + return False + + auto_update_last_triggered_on = pg.auto_update_last_triggered_on + + if ( + auto_update_last_triggered_on + and auto_update_last_triggered_on.date() == current_datetime.date() + and get_time(pg.update_trigger_time) <= get_time(auto_update_last_triggered_on) + ): + return False + + if get_time(pg.update_trigger_time) <= get_time(current_datetime): + return True + + return False + + +def get_last_day_of_month(year, month): + return monthrange(year, month)[1] + + +def set_schedule_details(update_log_pg, pg): + update_log_pg.was_scheduled_for_frequency = pg.update_trigger_frequency + update_log_pg.was_scheduled_for_time = pg.update_trigger_time + + if pg.update_trigger_frequency == "Weekly": + update_log_pg.was_scheduled_for_day = pg.update_on_weekday + elif pg.update_trigger_frequency == "Monthly": + update_log_pg.was_scheduled_for_month_day = str(pg.update_on_day_of_month) + update_log_pg.was_scheduled_for_month_end = pg.update_end_of_month diff --git a/jcloud/jcloud/pagetype/site_update/site_update.js b/jcloud/jcloud/pagetype/site_update/site_update.js new file mode 100644 index 0000000..d3f1f74 --- /dev/null +++ b/jcloud/jcloud/pagetype/site_update/site_update.js @@ -0,0 +1,86 @@ +// Copyright (c) 
2020, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Site Update', { + onload: function (frm) { + frm.set_query('destination_bench', function () { + return { + filters: { + status: 'Active', + server: frm.pg.server, + }, + }; + }); + }, + + refresh: function (frm) { + // Disable save button + frm.disable_save(); + + // Add link + frm.add_web_link( + `/dashboard/sites/${frm.pg.site}/updates/${frm.pg.name}`, + __('Visit Dashboard'), + ); + + // Add custom buttons + [ + [ + __('Trigger Recovery Job'), + 'trigger_recovery_job', + !frm.pg.recover_job, + ], + [__('Start'), 'start', ['Scheduled', 'Failure'].includes(frm.pg.status)], + [ + __('Cause of Failure is Resolved'), + 'set_cause_of_failure_is_resolved', + !frm.pg.cause_of_failure_is_resolved, + ], + ].forEach(([label, method, condition]) => { + if (typeof condition === 'undefined' || condition) { + frm.add_custom_button( + label, + () => { + jingrow.confirm( + `Are you sure you want to ${label.toLowerCase()} this site update?`, + () => frm.call(method).then((r) => frm.refresh()), + ); + }, + __('Actions'), + ); + } + }); + + // Allow to change status + frm.add_custom_button( + __('Change Status'), + () => { + const dialog = new jingrow.ui.Dialog({ + title: __('Change Status'), + fields: [ + { + fieldtype: 'Select', + label: __('Status'), + fieldname: 'status', + options: ['Success', 'Recovered', 'Failure', 'Fatal'], + }, + ], + }); + + dialog.set_primary_action(__('Change Status'), (args) => { + frm + .call('set_status', { + status: args.status, + }) + .then((r) => { + dialog.hide(); + frm.reload_pg(); + }); + }); + + dialog.show(); + }, + __('Actions'), + ); + }, +}); diff --git a/jcloud/jcloud/pagetype/site_update/site_update.json b/jcloud/jcloud/pagetype/site_update/site_update.json new file mode 100644 index 0000000..831de7a --- /dev/null +++ b/jcloud/jcloud/pagetype/site_update/site_update.json @@ -0,0 +1,363 @@ +{ + "actions": [], + "creation": "2022-01-28 20:07:33.955528", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "site", + "server", + "source_bench", + "source_candidate", + "group", + "team", + "column_break_4", + "status", + "destination_bench", + "destination_candidate", + "destination_group", + "scheduled_time", + "section_break_8", + "difference", + "difference_deploy_type", + "deploy_type", + "backup_type", + "column_break_14", + "site_backup", + "skipped_backups", + "skipped_failing_patches", + "section_break_luvm", + "deactivate_site_job", + "update_job", + "activate_site_job", + "column_break_rcyp", + "recover_job", + "cause_of_failure_is_resolved", + "physical_backup_restoration", + "section_break_gmrz", + "update_start", + "column_break_jtqs", + "update_end", + "column_break_ellx", + "update_duration", + "section_break_tpap", + "touched_tables" + ], + "fields": [ + { + "fieldname": "site", + "fieldtype": "Link", + "hide_days": 1, + "hide_seconds": 1, + "label": "Site", + "options": "Site" + }, + { + "fetch_from": "site.bench", + "fieldname": "source_bench", + "fieldtype": "Link", + "hide_days": 1, + "hide_seconds": 1, + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Source Bench", + "options": "Bench" + }, + { + "fieldname": "column_break_4", + "fieldtype": "Column Break", + "hide_days": 1, + "hide_seconds": 1 + }, + { + "fieldname": "status", + "fieldtype": "Select", + "hide_days": 1, + "hide_seconds": 1, + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Status", + "no_copy": 1, + "options": 
"Pending\nRunning\nSuccess\nFailure\nRecovering\nRecovered\nFatal\nScheduled" + }, + { + "fieldname": "destination_bench", + "fieldtype": "Link", + "hide_days": 1, + "hide_seconds": 1, + "label": "Destination Bench", + "options": "Bench" + }, + { + "fieldname": "section_break_8", + "fieldtype": "Section Break", + "hide_days": 1, + "hide_seconds": 1 + }, + { + "fieldname": "deploy_type", + "fieldtype": "Select", + "hide_days": 1, + "hide_seconds": 1, + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Deploy Type", + "no_copy": 1, + "options": "\nPull\nMigrate" + }, + { + "fetch_from": "source_bench.candidate", + "fieldname": "source_candidate", + "fieldtype": "Link", + "hide_days": 1, + "hide_seconds": 1, + "label": "Source Deploy Candidate", + "no_copy": 1, + "options": "Deploy Candidate" + }, + { + "fetch_from": "destination_bench.candidate", + "fieldname": "destination_candidate", + "fieldtype": "Link", + "hide_days": 1, + "hide_seconds": 1, + "label": "Destination Deploy Candidate", + "no_copy": 1, + "options": "Deploy Candidate" + }, + { + "fieldname": "difference", + "fieldtype": "Link", + "hide_days": 1, + "hide_seconds": 1, + "label": "Deploy Candidate Difference", + "options": "Deploy Candidate Difference" + }, + { + "fetch_from": "source_bench.group", + "fieldname": "group", + "fieldtype": "Link", + "hide_days": 1, + "hide_seconds": 1, + "label": "Source Group", + "no_copy": 1, + "options": "Release Group" + }, + { + "fetch_from": "site.server", + "fieldname": "server", + "fieldtype": "Link", + "hide_days": 1, + "hide_seconds": 1, + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Server", + "options": "Server" + }, + { + "fieldname": "update_job", + "fieldtype": "Link", + "hide_days": 1, + "hide_seconds": 1, + "label": "Update Job", + "no_copy": 1, + "options": "Agent Job", + "read_only": 1, + "search_index": 1 + }, + { + "default": "0", + "fieldname": "cause_of_failure_is_resolved", + "fieldtype": "Check", + "hide_days": 1, + "hide_seconds": 1, + "label": "Cause of Failure is Resolved", + "no_copy": 1 + }, + { + "fieldname": "column_break_14", + "fieldtype": "Column Break", + "hide_days": 1, + "hide_seconds": 1 + }, + { + "fieldname": "recover_job", + "fieldtype": "Link", + "hide_days": 1, + "hide_seconds": 1, + "label": "Recover Job", + "no_copy": 1, + "options": "Agent Job", + "read_only": 1, + "search_index": 1 + }, + { + "fetch_from": "difference.deploy_type", + "fieldname": "difference_deploy_type", + "fieldtype": "Select", + "label": "Difference Deploy Type", + "options": "\nPull\nMigrate" + }, + { + "default": "0", + "fieldname": "skipped_failing_patches", + "fieldtype": "Check", + "label": "Skipped Failing Patches", + "no_copy": 1, + "read_only": 1 + }, + { + "fetch_from": "destination_bench.group", + "fieldname": "destination_group", + "fieldtype": "Link", + "label": "Destination Group", + "no_copy": 1, + "options": "Release Group" + }, + { + "default": "0", + "fieldname": "skipped_backups", + "fieldtype": "Check", + "label": "Skipped Backups", + "no_copy": 1, + "read_only": 1 + }, + { + "fieldname": "scheduled_time", + "fieldtype": "Datetime", + "label": "Scheduled Time" + }, + { + "fetch_from": "site.team", + "fieldname": "team", + "fieldtype": "Link", + "label": "Team", + "options": "Team" + }, + { + "default": "Logical", + "fieldname": "backup_type", + "fieldtype": "Select", + "label": "Backup Type", + "options": "Logical\nPhysical", + "set_only_once": 1 + }, + { + "depends_on": "eval: pg.backup_type == \"Physical\"", + "fieldname": "site_backup", 
+ "fieldtype": "Link", + "label": "Site Backup", + "options": "Site Backup", + "read_only": 1, + "search_index": 1 + }, + { + "fieldname": "activate_site_job", + "fieldtype": "Link", + "label": "Activate Site Job", + "options": "Agent Job", + "read_only": 1, + "search_index": 1 + }, + { + "fieldname": "deactivate_site_job", + "fieldtype": "Link", + "label": "Deactivate Site Job", + "options": "Agent Job", + "read_only": 1, + "search_index": 1 + }, + { + "fieldname": "section_break_luvm", + "fieldtype": "Section Break" + }, + { + "fieldname": "column_break_rcyp", + "fieldtype": "Column Break" + }, + { + "fieldname": "physical_backup_restoration", + "fieldtype": "Link", + "label": "Physical Backup Restoration", + "options": "Physical Backup Restoration", + "read_only": 1, + "search_index": 1 + }, + { + "fieldname": "section_break_tpap", + "fieldtype": "Section Break" + }, + { + "fieldname": "touched_tables", + "fieldtype": "Code", + "label": "Touched Tables", + "read_only": 1 + }, + { + "fieldname": "section_break_gmrz", + "fieldtype": "Section Break" + }, + { + "fieldname": "update_start", + "fieldtype": "Datetime", + "label": "Update Start", + "read_only": 1 + }, + { + "fieldname": "column_break_jtqs", + "fieldtype": "Column Break" + }, + { + "fieldname": "update_end", + "fieldtype": "Datetime", + "label": "Update End", + "read_only": 1 + }, + { + "fieldname": "column_break_ellx", + "fieldtype": "Column Break" + }, + { + "fieldname": "update_duration", + "fieldtype": "Duration", + "label": "Update Duration", + "read_only": 1 + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2025-01-28 14:12:55.667576", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Site Update", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + }, + { + "create": 1, + "role": "Jcloud Admin", + "write": 1 + }, + { + "create": 1, + "role": "Jcloud Member", + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "title_field": "site", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/site_update/site_update.py b/jcloud/jcloud/pagetype/site_update/site_update.py new file mode 100644 index 0000000..7f640c7 --- /dev/null +++ b/jcloud/jcloud/pagetype/site_update/site_update.py @@ -0,0 +1,915 @@ +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + +from __future__ import annotations + +import json +import random +from datetime import datetime +from typing import TYPE_CHECKING, ClassVar + +import jingrow +import jingrow.utils +import pytz +from jingrow.core.utils import find +from jingrow.model.document import Document +from jingrow.utils import convert_utc_to_system_timezone +from jingrow.utils.caching import site_cache +from jingrow.utils.data import cint + +from jcloud.agent import Agent +from jcloud.api.client import dashboard_whitelist +from jcloud.jcloud.pagetype.physical_backup_restoration.physical_backup_restoration import ( + get_physical_backup_restoration_steps, +) +from jcloud.utils import log_error + +if TYPE_CHECKING: + from jcloud.jcloud.pagetype.agent_job.agent_job import AgentJob + from jcloud.jcloud.pagetype.physical_backup_restoration.physical_backup_restoration import ( + PhysicalBackupRestoration, + ) + from jcloud.jcloud.pagetype.site.site import Site + + +class SiteUpdate(Document): + # begin: 
auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + activate_site_job: DF.Link | None + backup_type: DF.Literal["Logical", "Physical"] + cause_of_failure_is_resolved: DF.Check + deactivate_site_job: DF.Link | None + deploy_type: DF.Literal["", "Pull", "Migrate"] + destination_bench: DF.Link | None + destination_candidate: DF.Link | None + destination_group: DF.Link | None + difference: DF.Link | None + difference_deploy_type: DF.Literal["", "Pull", "Migrate"] + group: DF.Link | None + physical_backup_restoration: DF.Link | None + recover_job: DF.Link | None + scheduled_time: DF.Datetime | None + server: DF.Link | None + site: DF.Link | None + site_backup: DF.Link | None + skipped_backups: DF.Check + skipped_failing_patches: DF.Check + source_bench: DF.Link | None + source_candidate: DF.Link | None + status: DF.Literal[ + "Pending", "Running", "Success", "Failure", "Recovering", "Recovered", "Fatal", "Scheduled" + ] + team: DF.Link | None + touched_tables: DF.Code | None + update_duration: DF.Duration | None + update_end: DF.Datetime | None + update_job: DF.Link | None + update_start: DF.Datetime | None + # end: auto-generated types + + dashboard_fields: ClassVar = [ + "status", + "site", + "destination_bench", + "source_bench", + "deploy_type", + "difference", + "scheduled_time", + "creation", + "skipped_backups", + "skipped_failing_patches", + "backup_type", + "physical_backup_restoration", + "activate_site_job", + "deactivate_site_job", + "update_job", + "recover_job", + "update_start", + "update_end", + "update_duration", + ] + + @staticmethod + def get_list_query(query): + results = query.run(as_dict=True) + for result in results: + if result.updated_on: + result.updated_on = convert_utc_to_system_timezone(result.updated_on).replace(tzinfo=None) + + return results + + def get_pg(self, pg): + pg.steps = self.get_steps() + return pg + + def validate(self): + if not self.is_new(): + return + + # Assume same-group migration if destination_group isn't set + if not self.destination_group: + self.destination_group = self.group + + if self.group == self.destination_group: + differences = jingrow.get_all( + "Deploy Candidate Difference", + fields=["name", "destination", "deploy_type"], + filters={"group": self.group, "source": self.source_candidate}, + ) + if not differences: + jingrow.throw("Could not find suitable Destination Bench", jingrow.ValidationError) + + self.validate_destination_bench(differences) + self.validate_deploy_candidate_difference(differences) + else: + self.validate_destination_bench([]) + # Forcefully migrate since we can't compute deploy_type reasonably + self.deploy_type = "Migrate" + + self.validate_apps() + self.validate_pending_updates() + self.validate_past_failed_updates() + self.set_physical_backup_mode_if_eligible() + + def validate_destination_bench(self, differences): + if not self.destination_bench: + candidates = [d.destination for d in differences] + try: + filters = { + "server": self.server, + "status": "Active", + "group": self.destination_group, + } + if differences: + filters["candidate"] = ("in", candidates) + filters["name"] = ("!=", self.source_bench) + + destination_bench = jingrow.get_all( + "Bench", fields=["name", "candidate"], filters=filters, order_by="creation desc" + )[0] + self.destination_bench = destination_bench.name + self.destination_candidate = destination_bench.candidate + except Exception: + 
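# The get_all(...)[0] lookup above raises IndexError when no active bench in
# the destination group matches the filters; surface it as a user-facing
# validation error instead.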
jingrow.throw("Could not find suitable Destination Bench", jingrow.ValidationError) + + def validate_deploy_candidate_difference(self, differences): + try: + difference = find(differences, lambda x: x.destination == self.destination_candidate) + self.difference = difference.name + self.deploy_type = "Pull" + difference_pg = jingrow.get_pg("Deploy Candidate Difference", self.difference) + site_pg: "Site" = jingrow.get_pg("Site", self.site) + for site_app in site_pg.apps: + difference_app = find(difference_pg.apps, lambda x: x.app == site_app.app) + if difference_app and difference_app.deploy_type == "Migrate": + self.deploy_type = "Migrate" + + except Exception: + jingrow.throw( + f"Could not find Deploy Candidate Difference from {self.source_bench}" + f" to {self.destination_bench}", + jingrow.ValidationError, + ) + + def validate_pending_updates(self): + if self.has_pending_updates(): + jingrow.throw("An update is already pending for this site", jingrow.ValidationError) + + @property + def triggered_by_user(self): + return jingrow.session.user != "Administrator" + + @property + def use_physical_backup(self): + return self.backup_type == "Physical" and not self.skipped_backups + + def validate_past_failed_updates(self): + if getattr(self, "ignore_past_failures", False): + return + + if self.triggered_by_user: + return # Allow user to trigger update for same source and destination + + if not self.skipped_failing_patches and self.have_past_updates_failed(): + jingrow.throw( + f"Update from Source Candidate {self.source_candidate} to Destination" + f" Candidate {self.destination_candidate} has failed in the past.", + jingrow.ValidationError, + ) + + def validate_apps(self): + site_apps = [app.app for app in jingrow.get_pg("Site", self.site).apps] + bench_apps = [app.app for app in jingrow.get_pg("Bench", self.destination_bench).apps] + + if diff := set(site_apps) - set(bench_apps): + jingrow.throw( + f"Destination Bench {self.destination_bench} doesn't have some of the apps installed on {self.site}: {', '.join(diff)}", + jingrow.ValidationError, + ) + + def before_insert(self): + self.backup_type = "Logical" + site: "Site" = jingrow.get_cached_pg("Site", self.site) + site.check_move_scheduled() + + def after_insert(self): + if not self.scheduled_time: + self.start() + + def set_physical_backup_mode_if_eligible(self): # noqa: C901 + if self.skipped_backups: + return + + if self.deploy_type != "Migrate": + return + + # Check if physical backup is disabled globally from Jcloud Settings + if jingrow.utils.cint(jingrow.get_value("Jcloud Settings", None, "disable_physical_backup")): + return + + database_server = jingrow.get_value("Server", self.server, "database_server") + if not database_server: + # It might be the case of configured RDS server and no self hosted database server + return + + # Check if physical backup is enabled on the database server + enable_physical_backup = jingrow.get_value( + "Database Server", database_server, "enable_physical_backup" + ) + if not enable_physical_backup: + return + + # Sanity check - Provider should be AWS EC2 + provider = jingrow.get_value("Database Server", database_server, "provider") + if provider != "AWS EC2": + return + + # Check for last logical backup + last_logical_site_backups = jingrow.db.get_list( + "Site Backup", + filters={"site": self.site, "physical": False}, + pluck="database_size", + limit=1, + order_by="creation desc", + ignore_permissions=True, + ) + db_backup_size = 0 + if len(last_logical_site_backups) > 0: + db_backup_size = 
cint(last_logical_site_backups[0]) + + # If last logical backup size is greater than 250MB and less than 1GB + # Then only take physical backup + if db_backup_size > 262144000 and db_backup_size < 1073741824: + self.backup_type = "Physical" + + @dashboard_whitelist() + def start(self): + self.status = "Pending" + self.update_start = jingrow.utils.now() + self.save() + site: "Site" = jingrow.get_cached_pg("Site", self.site) + site.ready_for_move() + if self.use_physical_backup: + self.deactivate_site() + else: + self.create_update_site_agent_request() + + def get_before_migrate_scripts(self, rollback=False): + site_apps = [app.app for app in jingrow.get_pg("Site", self.site).apps] + + script_field = "before_migrate_script" + if rollback: + script_field = "rollback_script" + + scripts = {} + for app_rename in jingrow.get_all( + "App Rename", {"new_name": ["in", site_apps]}, ["old_name", "new_name", script_field] + ): + scripts[app_rename.old_name] = app_rename.get(script_field) + + return scripts + + @property + def is_destination_above_v12(self): + version = jingrow.get_cached_value("Release Group", self.destination_group, "version") + return jingrow.get_cached_value("Jingrow Version", version, "number") > 12 + + def create_update_site_agent_request(self): + agent = Agent(self.server) + site = jingrow.get_pg("Site", self.site) + job = agent.update_site( + site, + self.destination_bench, + self.deploy_type, + skip_failing_patches=self.skipped_failing_patches, + skip_backups=self.skipped_backups, # In physical backup also take logical backup for failover case + before_migrate_scripts=self.get_before_migrate_scripts(), + skip_search_index=self.is_destination_above_v12, + ) + self.set_update_job_value(job) + + def set_update_job_value(self, job): + jingrow.db.set_value("Site Update", self.name, "update_job", job.name) + site_activity = jingrow.db.get_value( + "Site Activity", + { + "site": self.site, + "action": "Update", + "job": ("is", "not set"), + }, + order_by="creation desc", + ) + if site_activity: + jingrow.db.set_value("Site Activity", site_activity, "job", job.name) + + def activate_site(self): + agent = Agent(self.server) + job = agent.activate_site( + jingrow.get_pg("Site", self.site), reference_pagetype="Site Update", reference_name=self.name + ) + jingrow.db.set_value("Site Update", self.name, "activate_site_job", job.name) + + def deactivate_site(self): + agent = Agent(self.server) + job = agent.deactivate_site( + jingrow.get_pg("Site", self.site), reference_pagetype="Site Update", reference_name=self.name + ) + jingrow.db.set_value("Site Update", self.name, "deactivate_site_job", job.name) + + def create_physical_backup(self): + site = jingrow.get_pg("Site", self.site) + jingrow.db.set_value("Site Update", self.name, "site_backup", site.physical_backup().name) + + def have_past_updates_failed(self): + return jingrow.db.exists( + "Site Update", + { + "site": self.site, + "source_candidate": self.source_candidate, + "destination_candidate": self.destination_candidate, + "cause_of_failure_is_resolved": False, + }, + ) + + def has_pending_updates(self): + return jingrow.db.exists( + "Site Update", + { + "site": self.site, + "status": ("in", ("Pending", "Running", "Failure", "Scheduled")), + }, + ) + + def is_workload_diff_high(self) -> bool: + site_plan = jingrow.get_value("Site", self.site, "plan") + cpu = jingrow.get_value("Site Plan", site_plan, "cpu_time_per_day") or 0 # if plan not set, assume 0 + + THRESHOLD = 8 # USD 100 site equivalent. 
(Since workload is based off of CPU) + + workload_diff_high = cpu >= THRESHOLD + + if not workload_diff_high: + source_bench = jingrow.get_pg("Bench", self.source_bench) + dest_bench = jingrow.get_pg("Bench", self.destination_bench) + workload_diff_high = (dest_bench.workload - source_bench.workload) > THRESHOLD + + return workload_diff_high + + def reallocate_workers(self): + """ + Reallocate workers on source and destination benches + + Do it for private benches only now as there'll be too many worker updates for public benches + """ + group = jingrow.get_pg("Release Group", self.destination_group) + + if group.public or group.central_bench or not self.is_workload_diff_high(): + return + + jingrow.enqueue_pg( + "Server", + self.server, + method="auto_scale_workers", + job_id=f"auto_scale_workers:{self.server}", + deduplicate=True, + enqueue_after_commit=True, + at_front=True, + ) + + @property + def touched_tables_list(self): + try: + return json.loads(self.touched_tables) + except Exception: + return [] + + @jingrow.whitelist() + def trigger_recovery_job(self): # noqa: C901 + if self.recover_job: + return + agent = Agent(self.server) + site: "Site" = jingrow.get_pg("Site", self.site) + job = None + if site.bench == self.destination_bench: + # The site is already on destination bench + update_status(self.name, "Recovering") + + # If physical backup is enabled, we need to first perform physical backup restoration + if self.use_physical_backup and not self.physical_backup_restoration: + # Perform Physical Backup Restoration if not already done + pg: PhysicalBackupRestoration = jingrow.get_pg( + { + "pagetype": "Physical Backup Restoration", + "site": self.site, + "status": "Pending", + "site_backup": self.site_backup, + "source_database": site.database_name, + "destination_database": site.database_name, + "destination_server": jingrow.get_value("Server", site.server, "database_server"), + "restore_specific_tables": len(self.touched_tables_list) > 0, + "tables_to_restore": json.dumps(self.touched_tables_list), + } + ) + pg.insert(ignore_permissions=True) + jingrow.db.set_value(self.pagetype, self.name, "physical_backup_restoration", pg.name) + pg.execute() + # After physical backup restoration, that will trigger recovery job again + # via site_update.process_physical_backup_restoration_status_update(...) 
method + return + + restore_touched_tables = not self.skipped_backups + if not self.skipped_backups and self.physical_backup_restoration: + physical_backup_restoration_status = jingrow.get_value( + "Physical Backup Restoration", self.physical_backup_restoration, "status" + ) + if physical_backup_restoration_status == "Success": + restore_touched_tables = False + elif physical_backup_restoration_status == "Failure": + restore_touched_tables = True + else: + # just to be safe + jingrow.throw("Physical Backup Restoration is still in progress") + + # Attempt to move site to source bench + + # Disable maintenance mode for active sites + activate = site.status_before_update == "Active" + job = agent.update_site_recover_move( + site, + self.source_bench, + self.deploy_type, + activate, + rollback_scripts=self.get_before_migrate_scripts(rollback=True), + restore_touched_tables=restore_touched_tables, + ) + else: + # Site is already on the source bench + if site.status_before_update == "Active": + # Disable maintenance mode for active sites + job = agent.update_site_recover(site) + else: + # Site is already on source bench and maintenance mode is on + # No need to do anything + site.reset_previous_status() + if job: + jingrow.db.set_value("Site Update", self.name, "recover_job", job.name) + + def delete_backup_snapshot(self): + if self.site_backup: + snapshot = jingrow.get_value("Site Backup", self.site_backup, "database_snapshot") + if snapshot: + jingrow.get_pg("Virtual Disk Snapshot", snapshot).delete_snapshot() + + @dashboard_whitelist() + def get_steps(self): + """ + { + "title": "Step Name", + "status": "Success", + "output": "Output", + } + TODO > Add duration of each step + + Expand the steps of job + - Steps of Deactivate Job [if exists] + - Steps of Physical Backup Job [Site Backup] [if exists] + - Steps of Update Job + - Steps of Physical Restore Job [if exists] + - Steps of Recovery Job [if exists] + - Steps of Activate Job [if exists] + """ + steps = [] + if self.deactivate_site_job: + steps.extend(self.get_job_steps(self.deactivate_site_job, "Deactivate Site")) + if self.backup_type == "Physical" and self.site_backup: + agent_job = jingrow.get_value("Site Backup", self.site_backup, "job") + steps.extend(self.get_job_steps(agent_job, "Backup Site")) + if self.update_job: + steps.extend(self.get_job_steps(self.update_job, "Update Site")) + if self.physical_backup_restoration: + steps.extend(get_physical_backup_restoration_steps(self.physical_backup_restoration)) + if self.recover_job: + steps.extend(self.get_job_steps(self.recover_job, "Recover Site")) + if self.activate_site_job: + steps.extend(self.get_job_steps(self.activate_site_job, "Activate Site")) + return steps + + def get_job_steps(self, job: str, stage: str): + agent_steps = jingrow.get_all( + "Agent Job Step", + filters={"agent_job": job}, + fields=["output", "step_name", "status", "name"], + order_by="creation asc", + ) + return [ + { + "name": step.get("name"), + "title": step.get("step_name"), + "status": step.get("status"), + "output": step.get("output"), + "stage": stage, + } + for step in agent_steps + ] + + @jingrow.whitelist() + def set_cause_of_failure_is_resolved(self): + jingrow.db.set_value("Site Update", self.name, "cause_of_failure_is_resolved", 1) + + @jingrow.whitelist() + def set_status(self, status): + jingrow.db.set_value("Site Update", self.name, "status", status) + + +def update_status(name, status): + jingrow.db.set_value("Site Update", name, "status", status) + if status in ("Success", "Failure", 
"Fatal", "Recovered"): + jingrow.db.set_value("Site Update", name, "update_end", jingrow.utils.now()) + update_start = jingrow.db.get_value("Site Update", name, "update_start") + if update_start: + jingrow.db.set_value( + "Site Update", + name, + "update_duration", + jingrow.utils.cint( + jingrow.utils.time_diff_in_seconds(jingrow.utils.now_datetime(), update_start) + ), + ) + if status in ["Success", "Recovered"]: + backup_type = jingrow.db.get_value("Site Update", name, "backup_type") + if backup_type == "Physical": + # Remove the snapshot + jingrow.enqueue_pg( + "Site Update", + name, + "delete_backup_snapshot", + enqueue_after_commit=True, + ) + + +@site_cache(ttl=60) +def benches_with_available_update(site=None, server=None): + site_bench = jingrow.db.get_value("Site", site, "bench") if site else None + values = {} + if site: + values["site_bench"] = site_bench + if server: + values["server"] = server + source_benches_info = jingrow.db.sql( + f""" + SELECT sb.name AS source_bench, sb.candidate AS source_candidate, sb.server AS server, dcd.destination AS destination_candidate + FROM `tabBench` sb, `tabDeploy Candidate Difference` dcd + WHERE sb.status IN ('Active', 'Broken') AND sb.candidate = dcd.source + {"AND sb.name = %(site_bench)s" if site else ""} + {"AND sb.server = %(server)s" if server else ""} + """, + values=values, + as_dict=True, + ) + + destination_candidates = list(set(d["destination_candidate"] for d in source_benches_info)) + + destination_benches_info = jingrow.get_all( + "Bench", + filters={"status": "Active", "candidate": ("in", destination_candidates)}, + fields=["candidate AS destination_candidate", "name AS destination_bench", "server"], + ignore_ifnull=True, + ) + + destinations = set() + for bench in destination_benches_info: + destinations.add((bench.destination_candidate, bench.server)) + + updates_available_for_benches = [] + for bench in source_benches_info: + if (bench.destination_candidate, bench.server) in destinations: + updates_available_for_benches.append(bench) + + return list(set([bench.source_bench for bench in updates_available_for_benches])) + + +@jingrow.whitelist() +def sites_with_available_update(server=None): + benches = benches_with_available_update(server=server) + return jingrow.get_all( + "Site", + filters={ + "status": ("in", ("Active", "Inactive", "Suspended")), + "bench": ("in", benches), + "only_update_at_specified_time": False, # will be taken care of by another scheduled job + "skip_auto_updates": False, + }, + fields=["name", "timezone", "bench", "server", "status"], + ) + + +def schedule_updates(): + servers = jingrow.get_all("Server", {"status": "Active"}, pluck="name") + for server in servers: + jingrow.enqueue( + "jcloud.jcloud.pagetype.site_update.site_update.schedule_updates_server", + server=server, + job_id=f"schedule_updates:{server}", + deduplicate=True, + queue="long", + ) + + +def schedule_updates_server(server): + # Prevent flooding the queue + queue_size = jingrow.db.get_single_value("Jcloud Settings", "auto_update_queue_size") + pending_update_count = jingrow.db.count( + "Site Update", + { + "status": ("in", ("Pending", "Running")), + "server": server, + "creation": (">", jingrow.utils.add_to_date(None, hours=-4)), + }, + ) + if pending_update_count > queue_size: + return + + sites = sites_with_available_update(server) + sites = list(filter(is_site_in_deploy_hours, sites)) + + # If a site can't be updated for some reason, then we shouldn't get stuck + # Shuffle sites list, to achieve this + random.shuffle(sites) + + 
benches = {} + update_triggered_count = 0 + for site in sites: + if site.bench in benches: + continue + if update_triggered_count > queue_size: + break + if not should_try_update(site) or jingrow.db.exists( + "Site Update", + { + "site": site.name, + "status": ("in", ("Pending", "Running", "Failure", "Scheduled")), + }, + ): + continue + + try: + site = jingrow.get_pg("Site", site.name) + if site.site_migration_scheduled(): + continue + site.schedule_update() + update_triggered_count += 1 + jingrow.db.commit() + benches[site.bench] = True + except Exception: + log_error("Site Update Exception", site=site) + jingrow.db.rollback() + + +def should_try_update(site): + source = jingrow.db.get_value("Bench", site.bench, "candidate") + candidates = jingrow.get_all( + "Deploy Candidate Difference", filters={"source": source}, pluck="destination" + ) + + source_apps = [app.app for app in jingrow.get_cached_pg("Site", site.name).apps] + dest_apps = [] + destinations = jingrow.get_all( + "Bench", + ["name", "candidate"], + { + "candidate": ("in", candidates), + "status": "Active", + "server": site.server, + }, + limit=1, + ignore_ifnull=True, + order_by="creation DESC", + ) + # Most recent active bench is the destination bench + if not destinations: + return False + + destination_bench = jingrow.get_cached_pg("Bench", destinations[0].name) + dest_apps = [app.app for app in destination_bench.apps] + + if set(source_apps) - set(dest_apps): + return False + + return not jingrow.db.exists( + "Site Update", + { + "site": site.name, + "source_candidate": source, + "destination_candidate": destination_bench.candidate, + "cause_of_failure_is_resolved": False, + }, + ) + + +def is_site_in_deploy_hours(site): + if site.status in ("Inactive", "Suspended"): + return True + server_time = datetime.now() + timezone = site.timezone or "Asia/Kolkata" + site_timezone = pytz.timezone(timezone) + site_time = server_time.astimezone(site_timezone) + deploy_hours = jingrow.get_hooks("deploy_hours") + + if site_time.hour in deploy_hours: + return True + return False + + +def process_physical_backup_restoration_status_update(name: str): + site_backup_name = jingrow.db.exists( + "Site Update", + { + "physical_backup_restoration": name, + }, + ) + if site_backup_name: + site_update: SiteUpdate = jingrow.get_pg("Site Update", site_backup_name) + physical_backup_restoration: PhysicalBackupRestoration = jingrow.get_pg( + "Physical Backup Restoration", name + ) + if physical_backup_restoration.status in ["Success", "Failure"]: + site_update.trigger_recovery_job() + + +def process_activate_site_job_update(job): + if job.reference_pagetype != "Site Update": + return + if job.status == "Success": + # Mark the site as active + jingrow.db.set_value("Site", job.site, "status", "Active") + elif job.status == "Failure": + # Mark the site as broken + jingrow.db.set_value("Site", job.site, "status", "Broken") + update_status(job.reference_name, "Fatal") + + +def process_deactivate_site_job_update(job): + if job.reference_pagetype != "Site Update": + return + if job.status == "Success": + # proceed to backup stage + site_update = jingrow.get_pg("Site Update", job.reference_name) + site_update.create_physical_backup() + elif job.status == "Failure": + # mark Site Update as Fatal + update_status(job.reference_name, "Fatal") + # Run the activate site to ensure site is active + site_update = jingrow.get_pg("Site Update", job.reference_name) + site_update.activate_site() + + +def process_update_site_job_update(job: AgentJob): # noqa: C901 + 
updated_status = job.status + site_update = jingrow.get_all( + "Site Update", + fields=["name", "status", "destination_bench", "destination_group"], + filters={"update_job": job.name}, + ) + + if not site_update: + return + + site_update = site_update[0] + + if updated_status != site_update.status: + site_bench = jingrow.db.get_value("Site", job.site, "bench") + move_site_step_status = jingrow.db.get_value( + "Agent Job Step", {"step_name": "Move Site", "agent_job": job.name}, "status" + ) + if site_bench != site_update.destination_bench and move_site_step_status == "Success": + jingrow.db.set_value("Site", job.site, "bench", site_update.destination_bench) + jingrow.db.set_value("Site", job.site, "group", site_update.destination_group) + site_enable_step_status = jingrow.db.get_value( + "Agent Job Step", + {"step_name": "Disable Maintenance Mode", "agent_job": job.name}, + "status", + ) + log_touched_tables_step = jingrow.db.get_value( + "Agent Job Step", + {"step_name": "Log Touched Tables", "agent_job": job.name}, + ["status", "data"], + as_dict=True, + ) + if site_enable_step_status == "Success": + SiteUpdate("Site Update", site_update.name).reallocate_workers() + + update_status(site_update.name, updated_status) + if log_touched_tables_step and log_touched_tables_step.status == "Success": + jingrow.db.set_value( + "Site Update", site_update.name, "touched_tables", log_touched_tables_step.data + ) + if updated_status == "Running": + jingrow.db.set_value("Site", job.site, "status", "Updating") + elif updated_status == "Success": + jingrow.get_pg("Site", job.site).reset_previous_status(fix_broken=True) + elif updated_status == "Delivery Failure": + jingrow.get_pg("Site", job.site).reset_previous_status() + elif updated_status == "Failure": + jingrow.db.set_value("Site", job.site, "status", "Broken") + jingrow.db.set_value( + "Site Update", + site_update.name, + "cause_of_failure_is_resolved", + job.failed_because_of_agent_update, + ) + if not jingrow.db.get_value("Site Update", site_update.name, "skipped_backups"): + pg = jingrow.get_pg("Site Update", site_update.name) + pg.trigger_recovery_job() + else: + update_status(site_update.name, "Fatal") + SiteUpdate("Site Update", site_update.name).reallocate_workers() + + +def process_update_site_recover_job_update(job): + updated_status = { + "Pending": "Pending", + "Running": "Running", + "Success": "Recovered", + "Failure": "Fatal", + "Delivery Failure": "Fatal", + }[job.status] + site_update = jingrow.get_all( + "Site Update", + fields=["name", "status", "source_bench", "group"], + filters={"recover_job": job.name}, + )[0] + if updated_status != site_update.status: + site_bench = jingrow.db.get_value("Site", job.site, "bench") + move_site_step_status = jingrow.db.get_value( + "Agent Job Step", {"step_name": "Move Site", "agent_job": job.name}, "status" + ) + if site_bench != site_update.source_bench and move_site_step_status == "Success": + jingrow.db.set_value("Site", job.site, "bench", site_update.source_bench) + jingrow.db.set_value("Site", job.site, "group", site_update.group) + + update_status(site_update.name, updated_status) + if updated_status == "Recovered": + jingrow.get_pg("Site", job.site).reset_previous_status() + elif updated_status == "Fatal": + jingrow.db.set_value("Site", job.site, "status", "Broken") + + +def mark_stuck_updates_as_fatal(): + jingrow.db.set_value( + "Site Update", + { + "status": ("in", ["Pending", "Running", "Failure"]), + "modified": ("<", jingrow.utils.add_days(None, -2)), + }, + "status", + "Fatal", + ) 
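Editor's note: the sketch below is added for illustration only and is not part of the committed module. It isolates the Agent Job → Site Update status mapping used by process_update_site_recover_job_update above so the recovery transitions can be checked without a running jingrow site; the names RECOVERY_STATUS_MAP and map_recover_job_status are hypothetical.

# Standalone sketch of the recover-job status mapping (assumed names; mirrors the dict above).
RECOVERY_STATUS_MAP = {
    "Pending": "Pending",
    "Running": "Running",
    "Success": "Recovered",
    "Failure": "Fatal",
    "Delivery Failure": "Fatal",
}


def map_recover_job_status(job_status: str) -> str:
    # An unknown agent-job status raises KeyError, just like the direct dict lookup in the pagetype code.
    return RECOVERY_STATUS_MAP[job_status]


assert map_recover_job_status("Success") == "Recovered"
assert map_recover_job_status("Delivery Failure") == "Fatal"

As the handlers above encode, a "Recovered" outcome moves the site back to its source bench and resets its previous status, while anything mapped to "Fatal" leaves the Site Update fatal and marks the site Broken.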
+ + +def run_scheduled_updates(): + updates = jingrow.get_all( + "Site Update", + {"scheduled_time": ("<=", jingrow.utils.now()), "status": "Scheduled"}, + pluck="name", + ) + + for update in updates: + try: + pg = jingrow.get_pg("Site Update", update) + pg.validate() + pg.start() + jingrow.db.commit() + except Exception: + log_error("Scheduled Site Update Error", update=update) + jingrow.db.rollback() + + +def on_pagetype_update(): + jingrow.db.add_index("Site Update", ["site", "source_candidate", "destination_candidate"]) + jingrow.db.add_index("Site Update", ["server", "status"]) diff --git a/jcloud/jcloud/pagetype/site_update/test_site_update.py b/jcloud/jcloud/pagetype/site_update/test_site_update.py new file mode 100644 index 0000000..be2cd55 --- /dev/null +++ b/jcloud/jcloud/pagetype/site_update/test_site_update.py @@ -0,0 +1,158 @@ +# Copyright (c) 2020, JINGROW +# See license.txt + + +import json +from unittest.mock import MagicMock, Mock, patch + +import jingrow +from jingrow.tests.utils import JingrowTestCase + +from jcloud.jcloud.pagetype.agent_job.agent_job import AgentJob, poll_pending_jobs +from jcloud.jcloud.pagetype.agent_job.test_agent_job import fake_agent_job +from jcloud.jcloud.pagetype.app.test_app import create_test_app +from jcloud.jcloud.pagetype.app_release.test_app_release import create_test_app_release +from jcloud.jcloud.pagetype.app_source.test_app_source import create_test_app_source +from jcloud.jcloud.pagetype.deploy_candidate_difference.test_deploy_candidate_difference import ( + create_test_deploy_candidate_differences, +) +from jcloud.jcloud.pagetype.release_group.test_release_group import ( + create_test_release_group, +) +from jcloud.jcloud.pagetype.site.test_site import create_test_bench, create_test_site +from jcloud.jcloud.pagetype.site_plan.test_site_plan import create_test_plan +from jcloud.jcloud.pagetype.site_update.site_update import SiteUpdate +from jcloud.jcloud.pagetype.subscription.test_subscription import create_test_subscription + + +@patch.object(SiteUpdate, "start", new=Mock()) +def create_test_site_update(site: str, destination_group: str, status: str): + return jingrow.get_pg( + dict(pagetype="Site Update", site=site, destination_group=destination_group, status=status) + ).insert(ignore_if_duplicate=True) + + +class TestSiteUpdate(JingrowTestCase): + def tearDown(self): + jingrow.db.rollback() + + @patch.object(AgentJob, "enqueue_http_request", new=Mock()) + def test_update_of_v12_site_skips_search_index(self): + version = "Version 12" + app = create_test_app() + app_source = create_test_app_source(version=version, app=app) + group = create_test_release_group([app], jingrow_version=version) + bench1 = create_test_bench(group=group) + + create_test_app_release( + app_source=app_source + ) # creates pull type release diff only but args are same + + bench2 = create_test_bench(group=group, server=bench1.server) + self.assertNotEqual(bench1, bench2) + + create_test_deploy_candidate_differences(bench2.candidate) # for site update to be available + + site = create_test_site(bench=bench1.name) + site.schedule_update() + + agent_job = jingrow.get_last_pg("Agent Job", dict(job_type=("like", "Update Site %"))) + self.assertLess(dict(skip_search_index=False).items(), json.loads(agent_job.request_data).items()) + + @patch.object(AgentJob, "enqueue_http_request", new=Mock()) + def test_update_of_non_v12_site_doesnt_skip_search_index(self): + version = "Version 13" + app = create_test_app() + app_source = create_test_app_source(version=version, 
app=app) + group = create_test_release_group([app], jingrow_version=version) + bench1 = create_test_bench(group=group) + + create_test_app_release( + app_source=app_source + ) # creates pull type release diff only but args are same + + bench2 = create_test_bench(group=group, server=bench1.server) + self.assertNotEqual(bench1, bench2) + + create_test_deploy_candidate_differences(bench2.candidate) # for site update to be available + + site = create_test_site(bench=bench1.name) + site.schedule_update() + + agent_job = jingrow.get_last_pg("Agent Job", dict(job_type=("like", "Update Site %"))) + self.assertLess(dict(skip_search_index=True).items(), json.loads(agent_job.request_data).items()) + + @patch.object(AgentJob, "enqueue_http_request", new=Mock()) + def test_site_update_throws_when_destination_doesnt_have_all_the_apps_in_the_site( + self, + ): + app1 = create_test_app() # jingrow + app2 = create_test_app("app2", "App 2") + app3 = create_test_app("app3", "App 3") + + group = create_test_release_group([app1, app2, app3]) + bench1 = create_test_bench(group=group) + bench2 = create_test_bench(group=group, server=bench1.server) + + bench2.apps.pop() + bench2.apps.pop() + bench2.save() + + create_test_deploy_candidate_differences(bench2.candidate) # for site update to be available + + site = create_test_site(bench=bench1.name) + + self.assertRaisesRegex( + jingrow.ValidationError, + f".*apps installed on {site.name}: app., app.$", + site.schedule_update, + ) + + @patch("jcloud.jcloud.pagetype.server.server.jingrow.db.commit", new=MagicMock) + def test_site_update_callback_reallocates_workers_after_disable_maintenance_mode_job( + self, + ): + app1 = create_test_app() # jingrow + app2 = create_test_app("app2", "App 2") + app3 = create_test_app("app3", "App 3") + + group = create_test_release_group([app1, app2, app3]) + bench1 = create_test_bench(group=group) + bench2 = create_test_bench(group=group, server=bench1.server) + + create_test_deploy_candidate_differences(bench2.candidate) # for site update to be available + + site = create_test_site(bench=bench1.name) + plan = create_test_plan(site.pagetype, cpu_time=8) + create_test_subscription(site.name, plan.name, site.team) + site.reload() + + server = jingrow.get_pg("Server", bench1.server) + server.disable_agent_job_auto_retry = True + server.save() + server.auto_scale_workers() + bench1.reload() + bench2.reload() + self.assertEqual(site.bench, bench1.name) + self.assertGreater(bench1.gunicorn_workers, 2) + self.assertGreater(bench1.background_workers, 1) + self.assertEqual(bench2.gunicorn_workers, 2) + self.assertEqual(bench2.background_workers, 1) + + with fake_agent_job( + "Update Site Pull", + "Success", + steps=[{"name": "Disable Maintenance Mode", "status": "Success"}], + ): + site.schedule_update() + poll_pending_jobs() + + bench1.reload() + bench2.reload() + site.reload() + + self.assertEqual(site.bench, bench2.name) + self.assertEqual(bench1.gunicorn_workers, 2) + self.assertEqual(bench1.background_workers, 1) + self.assertGreater(bench2.gunicorn_workers, 2) + self.assertGreater(bench2.background_workers, 1) diff --git a/jcloud/jcloud/pagetype/site_usage/__init__.py b/jcloud/jcloud/pagetype/site_usage/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/site_usage/site_usage.js b/jcloud/jcloud/pagetype/site_usage/site_usage.js new file mode 100644 index 0000000..3f062bb --- /dev/null +++ b/jcloud/jcloud/pagetype/site_usage/site_usage.js @@ -0,0 +1,7 @@ +// Copyright (c) 2020, JINGROW +// For 
license information, please see license.txt + +jingrow.ui.form.on('Site Usage', { + // refresh: function(frm) { + // } +}); diff --git a/jcloud/jcloud/pagetype/site_usage/site_usage.json b/jcloud/jcloud/pagetype/site_usage/site_usage.json new file mode 100644 index 0000000..d38cfb6 --- /dev/null +++ b/jcloud/jcloud/pagetype/site_usage/site_usage.json @@ -0,0 +1,89 @@ +{ + "actions": [], + "creation": "2020-08-18 13:35:32.151920", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "site", + "database", + "backups", + "public", + "private", + "database_free_tables", + "database_free" + ], + "fields": [ + { + "fieldname": "site", + "fieldtype": "Link", + "in_filter": 1, + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Site", + "options": "Site", + "read_only": 1 + }, + { + "fieldname": "database", + "fieldtype": "Int", + "label": "Database", + "read_only": 1 + }, + { + "fieldname": "public", + "fieldtype": "Int", + "label": "Public", + "read_only": 1 + }, + { + "fieldname": "private", + "fieldtype": "Int", + "label": "Private", + "read_only": 1 + }, + { + "fieldname": "backups", + "fieldtype": "Int", + "label": "Backups", + "read_only": 1 + }, + { + "fieldname": "database_free_tables", + "fieldtype": "Code", + "label": "Database Free Tables", + "read_only": 1 + }, + { + "fieldname": "database_free", + "fieldtype": "Int", + "label": "Database Free", + "read_only": 1 + } + ], + "links": [], + "modified": "2023-10-19 14:31:13.374011", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Site Usage", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "quick_entry": 1, + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "title_field": "site" +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/site_usage/site_usage.py b/jcloud/jcloud/pagetype/site_usage/site_usage.py new file mode 100644 index 0000000..02fd690 --- /dev/null +++ b/jcloud/jcloud/pagetype/site_usage/site_usage.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +import jingrow +from jingrow.model.document import Document +from jingrow.query_builder import Interval +from jingrow.query_builder.functions import Now + + +class SiteUsage(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + backups: DF.Int + database: DF.Int + database_free: DF.Int + database_free_tables: DF.Code | None + private: DF.Int + public: DF.Int + site: DF.Link | None + # end: auto-generated types + + @staticmethod + def clear_old_logs(days=60): + table = jingrow.qb.PageType("Site Usage") + jingrow.db.delete(table, filters=(table.creation < (Now() - Interval(days=days)))) diff --git a/jcloud/jcloud/pagetype/site_usage/test_site_usage.py b/jcloud/jcloud/pagetype/site_usage/test_site_usage.py new file mode 100644 index 0000000..6f00603 --- /dev/null +++ b/jcloud/jcloud/pagetype/site_usage/test_site_usage.py @@ -0,0 +1,11 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# See license.txt + + +# import jingrow +import unittest + + +class TestSiteUsage(unittest.TestCase): + pass diff --git a/jcloud/jcloud/pagetype/site_user/__init__.py b/jcloud/jcloud/pagetype/site_user/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/site_user/site_user.js b/jcloud/jcloud/pagetype/site_user/site_user.js new file mode 100644 index 0000000..ea2bfc5 --- /dev/null +++ b/jcloud/jcloud/pagetype/site_user/site_user.js @@ -0,0 +1,8 @@ +// Copyright (c) 2025, JINGROW +// For license information, please see license.txt + +// jingrow.ui.form.on("Site User", { +// refresh(frm) { + +// }, +// }); diff --git a/jcloud/jcloud/pagetype/site_user/site_user.json b/jcloud/jcloud/pagetype/site_user/site_user.json new file mode 100644 index 0000000..5dc6cfb --- /dev/null +++ b/jcloud/jcloud/pagetype/site_user/site_user.json @@ -0,0 +1,59 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2025-01-16 15:02:28.939437", + "pagetype": "PageType", + "engine": "InnoDB", + "field_order": [ + "user", + "site", + "enabled" + ], + "fields": [ + { + "fieldname": "user", + "fieldtype": "Data", + "in_list_view": 1, + "label": "User", + "reqd": 1 + }, + { + "fieldname": "site", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Site", + "options": "Site", + "reqd": 1 + }, + { + "default": "0", + "fieldname": "enabled", + "fieldtype": "Check", + "label": "Enabled" + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2025-02-19 16:35:29.861290", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Site User", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "creation", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/site_user/site_user.py b/jcloud/jcloud/pagetype/site_user/site_user.py new file mode 100644 index 0000000..4e9cd67 --- /dev/null +++ b/jcloud/jcloud/pagetype/site_user/site_user.py @@ -0,0 +1,47 @@ +# Copyright (c) 2025, JINGROW +# For license information, please see license.txt + +import jingrow +from jingrow.model.document import Document + + +class SiteUser(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + enabled: DF.Check + site: DF.Link + user: DF.Data + # end: auto-generated types + + def login_to_site(self): + """Login to the site.""" + if not self.enabled: + jingrow.throw("User is disabled") + + site = jingrow.get_pg("Site", self.site) + return site.login_as_user(self.user) + + +def create_user_for_product_site(site, data): + analytics = data["analytics"] + users_data = analytics.get("users", []) + for user_data in users_data: + user_mail = user_data.get("email") + enabled = user_data.get("enabled") + if jingrow.db.exists("Site User", {"site": site, "user": user_mail}): + user = jingrow.db.get_value( + "Site User", {"site": site, "user": user_mail}, ["name", "enabled"], as_dict=True + ) + if user.enabled != enabled: + jingrow.db.set_value("Site User", user.name, "enabled", enabled) + else: + user = jingrow.get_pg( + {"pagetype": "Site User", "site": site, "user": user_mail, "enabled": enabled} + ) + user.insert(ignore_permissions=True) diff --git a/jcloud/jcloud/pagetype/site_user/test_site_user.py b/jcloud/jcloud/pagetype/site_user/test_site_user.py new file mode 100644 index 0000000..786ff7b --- /dev/null +++ b/jcloud/jcloud/pagetype/site_user/test_site_user.py @@ -0,0 +1,29 @@ +# Copyright (c) 2025, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests import IntegrationTestCase, UnitTestCase + +# On IntegrationTestCase, the pagetype test records and all +# link-field test record dependencies are recursively loaded +# Use these module variables to add/remove to/from that list +EXTRA_TEST_RECORD_DEPENDENCIES = [] # eg. ["User"] +IGNORE_TEST_RECORD_DEPENDENCIES = [] # eg. ["User"] + + +class UnitTestSiteUser(UnitTestCase): + """ + Unit tests for SiteUser. + Use this class for testing individual functions and methods. + """ + + pass + + +class IntegrationTestSiteUser(IntegrationTestCase): + """ + Integration tests for SiteUser. + Use this class for testing interactions between multiple components. 
+ """ + + pass diff --git a/jcloud/jcloud/pagetype/site_user_session/__init__.py b/jcloud/jcloud/pagetype/site_user_session/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/site_user_session/site_user_session.js b/jcloud/jcloud/pagetype/site_user_session/site_user_session.js new file mode 100644 index 0000000..dbcbffb --- /dev/null +++ b/jcloud/jcloud/pagetype/site_user_session/site_user_session.js @@ -0,0 +1,8 @@ +// Copyright (c) 2025, JINGROW +// For license information, please see license.txt + +// jingrow.ui.form.on("Site User Session", { +// refresh(frm) { + +// }, +// }); diff --git a/jcloud/jcloud/pagetype/site_user_session/site_user_session.json b/jcloud/jcloud/pagetype/site_user_session/site_user_session.json new file mode 100644 index 0000000..5eb0154 --- /dev/null +++ b/jcloud/jcloud/pagetype/site_user_session/site_user_session.json @@ -0,0 +1,66 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2025-01-26 21:54:39.279966", + "pagetype": "PageType", + "engine": "InnoDB", + "field_order": [ + "user", + "session_id", + "otp", + "verified", + "otp_generated_at" + ], + "fields": [ + { + "fieldname": "user", + "fieldtype": "Data", + "label": "User" + }, + { + "fieldname": "otp", + "fieldtype": "Data", + "label": "OTP" + }, + { + "fieldname": "session_id", + "fieldtype": "Data", + "label": "Session ID" + }, + { + "default": "0", + "fieldname": "verified", + "fieldtype": "Check", + "label": "Verified" + }, + { + "fieldname": "otp_generated_at", + "fieldtype": "Datetime", + "label": "OTP Generated at" + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2025-02-17 12:19:46.639929", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Site User Session", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "creation", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/site_user_session/site_user_session.py b/jcloud/jcloud/pagetype/site_user_session/site_user_session.py new file mode 100644 index 0000000..b01d6e9 --- /dev/null +++ b/jcloud/jcloud/pagetype/site_user_session/site_user_session.py @@ -0,0 +1,61 @@ +# Copyright (c) 2025, JINGROW +# For license information, please see license.txt + +from __future__ import annotations + +import jingrow +from jingrow.model.document import Document + + +class SiteUserSession(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + otp: DF.Data | None + otp_generated_at: DF.Datetime | None + session_id: DF.Data | None + user: DF.Data | None + verified: DF.Check + # end: auto-generated types + + def send_otp(self): + """Send OTP to the user for site login.""" + + from jcloud.utils.otp import generate_otp + + self.otp = generate_otp() + self.session_id = jingrow.generate_hash() + self.otp_generated_at = jingrow.utils.now_datetime() + if jingrow.conf.developer_mode and jingrow.local.dev_server: + self.otp = 111111 + self.save() + + if jingrow.conf.developer_mode: + print(f"\nOTP for {self.user} for site login:") + print() + print(self.otp) + print() + return + + subject = f"{self.otp} - OTP for Jingrow Site Login" + args = {} + + args.update( + { + "otp": self.otp, + "image_path": "http://git.jingrow.com:3000/jingrow/gameplan/assets/9355208/447035d0-0686-41d2-910a-a3d21928ab94", + } + ) + + jingrow.sendmail( + recipients=self.user, + subject=subject, + template="verify_account_for_site_login", + args=args, + now=True, + ) diff --git a/jcloud/jcloud/pagetype/site_user_session/test_site_user_session.py b/jcloud/jcloud/pagetype/site_user_session/test_site_user_session.py new file mode 100644 index 0000000..316d4db --- /dev/null +++ b/jcloud/jcloud/pagetype/site_user_session/test_site_user_session.py @@ -0,0 +1,29 @@ +# Copyright (c) 2025, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests import IntegrationTestCase, UnitTestCase + +# On IntegrationTestCase, the pagetype test records and all +# link-field test record dependencies are recursively loaded +# Use these module variables to add/remove to/from that list +EXTRA_TEST_RECORD_DEPENDENCIES = [] # eg. ["User"] +IGNORE_TEST_RECORD_DEPENDENCIES = [] # eg. ["User"] + + +class UnitTestSiteUserSession(UnitTestCase): + """ + Unit tests for SiteUserSession. + Use this class for testing individual functions and methods. + """ + + pass + + +class IntegrationTestSiteUserSession(IntegrationTestCase): + """ + Integration tests for SiteUserSession. + Use this class for testing interactions between multiple components. 
+ """ + + pass diff --git a/jcloud/jcloud/pagetype/sql_playground_log/__init__.py b/jcloud/jcloud/pagetype/sql_playground_log/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/sql_playground_log/sql_playground_log.js b/jcloud/jcloud/pagetype/sql_playground_log/sql_playground_log.js new file mode 100644 index 0000000..f972190 --- /dev/null +++ b/jcloud/jcloud/pagetype/sql_playground_log/sql_playground_log.js @@ -0,0 +1,8 @@ +// Copyright (c) 2024, JINGROW +// For license information, please see license.txt + +// jingrow.ui.form.on("SQL Playground Log", { +// refresh(frm) { + +// }, +// }); diff --git a/jcloud/jcloud/pagetype/sql_playground_log/sql_playground_log.json b/jcloud/jcloud/pagetype/sql_playground_log/sql_playground_log.json new file mode 100644 index 0000000..804d7d3 --- /dev/null +++ b/jcloud/jcloud/pagetype/sql_playground_log/sql_playground_log.json @@ -0,0 +1,100 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2024-10-16 16:18:15.770106", + "pagetype": "PageType", + "engine": "InnoDB", + "field_order": [ + "site", + "column_break_mgop", + "team", + "section_break_fnmu", + "query", + "committed" + ], + "fields": [ + { + "fieldname": "site", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Site", + "options": "Site", + "read_only": 1 + }, + { + "fieldname": "column_break_mgop", + "fieldtype": "Column Break" + }, + { + "fieldname": "team", + "fieldtype": "Link", + "label": "Team", + "options": "Team", + "read_only": 1 + }, + { + "fieldname": "section_break_fnmu", + "fieldtype": "Section Break" + }, + { + "fieldname": "query", + "fieldtype": "Small Text", + "label": "Query", + "read_only": 1 + }, + { + "default": "0", + "fieldname": "committed", + "fieldtype": "Check", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Committed", + "read_only": 1 + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2024-10-24 12:18:15.580077", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "SQL Playground Log", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + }, + { + "create": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "Jcloud Admin", + "share": 1 + }, + { + "create": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "Jcloud Member", + "share": 1 + } + ], + "sort_field": "creation", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/sql_playground_log/sql_playground_log.py b/jcloud/jcloud/pagetype/sql_playground_log/sql_playground_log.py new file mode 100644 index 0000000..a2f8935 --- /dev/null +++ b/jcloud/jcloud/pagetype/sql_playground_log/sql_playground_log.py @@ -0,0 +1,33 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt +from __future__ import annotations + +from jingrow.model.document import Document + +from jcloud.overrides import get_permission_query_conditions_for_pagetype + + +class SQLPlaygroundLog(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + committed: DF.Check + query: DF.SmallText | None + site: DF.Link | None + team: DF.Link | None + # end: auto-generated types + + PAGETYPE = "SQL Playground Log" + dashboard_fields = ( + "site", + "query", + "committed", + ) + + +get_permission_query_conditions = get_permission_query_conditions_for_pagetype("SQL Playground Log") diff --git a/jcloud/jcloud/pagetype/sql_playground_log/test_sql_playground_log.py b/jcloud/jcloud/pagetype/sql_playground_log/test_sql_playground_log.py new file mode 100644 index 0000000..e744699 --- /dev/null +++ b/jcloud/jcloud/pagetype/sql_playground_log/test_sql_playground_log.py @@ -0,0 +1,20 @@ +# Copyright (c) 2024, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests import UnitTestCase + +# On IntegrationTestCase, the pagetype test records and all +# link-field test record depdendencies are recursively loaded +# Use these module variables to add/remove to/from that list +EXTRA_TEST_RECORD_DEPENDENCIES = [] # eg. ["User"] +IGNORE_TEST_RECORD_DEPENDENCIES = [] # eg. ["User"] + + +class TestSQLPlaygroundLog(UnitTestCase): + """ + Unit tests for SQLPlaygroundLog. + Use this class for testing individual functions and methods. + """ + + pass diff --git a/jcloud/jcloud/pagetype/ssh_certificate/__init__.py b/jcloud/jcloud/pagetype/ssh_certificate/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/ssh_certificate/ssh_certificate.js b/jcloud/jcloud/pagetype/ssh_certificate/ssh_certificate.js new file mode 100644 index 0000000..b40c43c --- /dev/null +++ b/jcloud/jcloud/pagetype/ssh_certificate/ssh_certificate.js @@ -0,0 +1,12 @@ +// Copyright (c) 2022, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('SSH Certificate', { + refresh: function (frm) { + frm.set_query('user_ssh_key', () => { + return { + filters: { user: frm.pg.user }, + }; + }); + }, +}); diff --git a/jcloud/jcloud/pagetype/ssh_certificate/ssh_certificate.json b/jcloud/jcloud/pagetype/ssh_certificate/ssh_certificate.json new file mode 100644 index 0000000..8226aad --- /dev/null +++ b/jcloud/jcloud/pagetype/ssh_certificate/ssh_certificate.json @@ -0,0 +1,200 @@ +{ + "actions": [], + "autoname": "CERT-.########", + "creation": "2022-01-28 20:07:35.389554", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "certificate_type", + "column_break_2", + "group", + "section_break_2", + "user", + "user_ssh_key", + "validity", + "column_break_4", + "key_type", + "valid_until", + "ssh_fingerprint", + "section_break_8", + "ssh_certificate_authority", + "ssh_certificate_authority_public_key", + "section_break_15", + "ssh_public_key", + "certificate_details", + "ssh_certificate", + "serial_number" + ], + "fields": [ + { + "fieldname": "section_break_2", + "fieldtype": "Section Break" + }, + { + "depends_on": "eval:pg.certificate_type == \"User\"", + "fieldname": "user", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "User", + "mandatory_depends_on": "eval:pg.certificate_type == \"User\"", + "options": "User", + "set_only_once": 1 + }, + { + "depends_on": "eval:pg.certificate_type == \"User\"", + "fieldname": "user_ssh_key", + "fieldtype": "Link", + "label": "User SSH Key", + "mandatory_depends_on": "eval:pg.certificate_type == \"User\"", + "options": "User SSH Key", + "set_only_once": 1 + }, + { + "default": "1h", + "fieldname": "validity", + "fieldtype": "Select", 
+ "label": "Validity", + "options": "1h\n3h\n6h\n30d", + "reqd": 1, + "set_only_once": 1 + }, + { + "fieldname": "column_break_4", + "fieldtype": "Column Break" + }, + { + "fieldname": "valid_until", + "fieldtype": "Datetime", + "in_list_view": 1, + "label": "Valid Until", + "read_only": 1 + }, + { + "depends_on": "eval:!(pg.__islocal)", + "fieldname": "ssh_fingerprint", + "fieldtype": "Data", + "label": "SSH Fingerprint", + "read_only": 1 + }, + { + "fieldname": "section_break_8", + "fieldtype": "Section Break" + }, + { + "fetch_from": "user_ssh_key.ssh_public_key", + "fieldname": "ssh_public_key", + "fieldtype": "Code", + "label": "SSH Public Key", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "certificate_details", + "fieldtype": "Code", + "label": "Certificate Details", + "read_only": 1 + }, + { + "fieldname": "ssh_certificate", + "fieldtype": "Code", + "label": "SSH Certificate", + "read_only": 1 + }, + { + "depends_on": "eval:pg.certificate_type == \"User\"", + "fieldname": "group", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Group", + "mandatory_depends_on": "eval:pg.certificate_type == \"User\"", + "options": "Release Group", + "set_only_once": 1 + }, + { + "fieldname": "certificate_type", + "fieldtype": "Select", + "label": "Certificate Type", + "options": "User\nHost", + "reqd": 1, + "set_only_once": 1 + }, + { + "fieldname": "column_break_2", + "fieldtype": "Column Break" + }, + { + "fieldname": "ssh_certificate_authority", + "fieldtype": "Link", + "label": "SSH Certificate Authority", + "options": "SSH Certificate Authority", + "set_only_once": 1 + }, + { + "fieldname": "section_break_15", + "fieldtype": "Section Break" + }, + { + "fieldname": "serial_number", + "fieldtype": "Int", + "label": "Serial Number", + "read_only": 1 + }, + { + "fieldname": "key_type", + "fieldtype": "Data", + "label": "Key Type", + "read_only": 1 + }, + { + "fetch_from": "ssh_certificate_authority.public_key", + "fieldname": "ssh_certificate_authority_public_key", + "fieldtype": "Code", + "label": "SSH Certificate Authority Public Key", + "read_only": 1 + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2022-01-31 10:46:17.476664", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "SSH Certificate", + "naming_rule": "Expression (old style)", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + }, + { + "create": 1, + "if_owner": 1, + "read": 1, + "role": "Jcloud Admin", + "write": 1 + }, + { + "create": 1, + "if_owner": 1, + "read": 1, + "role": "Jcloud Member", + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "title_field": "user", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/ssh_certificate/ssh_certificate.py b/jcloud/jcloud/pagetype/ssh_certificate/ssh_certificate.py new file mode 100644 index 0000000..1f1881a --- /dev/null +++ b/jcloud/jcloud/pagetype/ssh_certificate/ssh_certificate.py @@ -0,0 +1,144 @@ +# Copyright (c) 2022, JINGROW +# For license information, please see license.txt + +import base64 +import binascii +import hashlib +import os +import re +import shlex +import subprocess +from typing import TYPE_CHECKING + +import jingrow +from jingrow.model.document import Document + +from jcloud.utils import log_error + +if TYPE_CHECKING: + from 
jcloud.jcloud.pagetype.ssh_certificate_authority.ssh_certificate_authority import ( + SSHCertificateAuthority, + ) + + +class SSHCertificate(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + certificate_details: DF.Code | None + certificate_type: DF.Literal["User", "Host"] + group: DF.Link | None + key_type: DF.Data | None + serial_number: DF.Int + ssh_certificate: DF.Code | None + ssh_certificate_authority: DF.Link | None + ssh_certificate_authority_public_key: DF.Code | None + ssh_fingerprint: DF.Data | None + ssh_public_key: DF.Code + user: DF.Link | None + user_ssh_key: DF.Link | None + valid_until: DF.Datetime | None + validity: DF.Literal["1h", "3h", "6h", "30d"] + # end: auto-generated types + + def validate(self): + self.validate_public_key() + self.validate_existing_certificates() + self.validate_validity() + self.validate_certificate_authority() + + def validate_public_key(self): + try: + ssh_key_b64 = base64.b64decode(self.ssh_public_key.strip().split()[1]) + sha256_sum = hashlib.sha256() + sha256_sum.update(ssh_key_b64) + self.ssh_fingerprint = base64.b64encode(sha256_sum.digest()).decode() + self.key_type = self.ssh_public_key.strip().split()[0].split("-")[1] + except binascii.Error: + jingrow.throw("Attached text is a not valid public key") + + self.key_type = self.ssh_public_key.strip().split()[0].split("-")[1] + if not self.key_type: + jingrow.throw("Could not guess the key type. Please check your public key.") + + def validate_validity(self): + if self.certificate_type == "User" and self.validity not in ("1h", "3h", "6h"): + jingrow.throw("User certificates can only be valid for a short duration") + + def validate_certificate_authority(self): + if not self.ssh_certificate_authority: + self.ssh_certificate_authority = jingrow.db.get_single_value( + "Jcloud Settings", "ssh_certificate_authority" + ) + if not self.ssh_certificate_authority: + jingrow.throw( + "SSH Certificate Authority is required to generate SSH certificates", + jingrow.ValidationError, + ) + + def validate_existing_certificates(self): + if jingrow.get_all( + "SSH Certificate", + { + "user_ssh_key": self.user_ssh_key, + "valid_until": [">", jingrow.utils.now()], + "group": self.group, + }, + ): + jingrow.throw("A valid certificate already exists.") + + def create_public_key_file(self): + with open(self.public_key_file, "w") as file: + file.write(self.ssh_public_key) + + def after_insert(self): + self.serial_number = self.name[5:] + self.create_public_key_file() + self.generate_certificate() + self.extract_certificate_details() + jingrow.local.role_permissions = {} + self.save() + + def generate_certificate(self): + ca: "SSHCertificateAuthority" = jingrow.get_pg( + "SSH Certificate Authority", self.ssh_certificate_authority + ) + ca.sign( + self.user, + [self.group], + f"+{self.validity}", + self.public_key_file, + self.serial_number, + ) + self.read_certificate() + + def run(self, command): + try: + return subprocess.check_output( + shlex.split(command), stderr=subprocess.STDOUT + ).decode() + except subprocess.CalledProcessError as e: + log_error("Command failed", output={e.output.decode()}, pg=self) + raise + + def extract_certificate_details(self): + self.certificate_details = self.run(f"ssh-keygen -Lf {self.certificate_file}") + regex = re.compile("Valid:.*\n") + self.valid_until = regex.findall(self.certificate_details)[0].strip().split()[-1] + + 
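Editor's note: a minimal, editor-added illustration of how extract_certificate_details above derives valid_until from `ssh-keygen -Lf` output; it is not part of the committed class. The sample certificate text follows the usual OpenSSH layout but is assumed, not captured from a real certificate, and the variable names are hypothetical.

# Parse the "Valid:" line the same way extract_certificate_details does (assumed sample input).
import re

certificate_details = (
    "/tmp/id_rsa-42-cert.pub:\n"
    "        Type: ssh-rsa-cert-v01@openssh.com user certificate\n"
    "        Valid: from 2022-01-28T20:07:35 to 2022-01-28T21:07:35\n"
)

valid_line = re.compile("Valid:.*\n").findall(certificate_details)[0]
valid_until = valid_line.strip().split()[-1]  # last token is the "to" timestamp
print(valid_until)  # 2022-01-28T21:07:35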
def read_certificate(self): + with open(self.certificate_file) as file: + self.ssh_certificate = file.read() + + @property + def public_key_file(self): + return os.path.join("/tmp", f"id_{self.key_type}-{self.serial_number}.pub") + + @property + def certificate_file(self): + return os.path.join("/tmp", f"id_{self.key_type}-{self.serial_number}-cert.pub") diff --git a/jcloud/jcloud/pagetype/ssh_certificate/test_ssh_certificate.py b/jcloud/jcloud/pagetype/ssh_certificate/test_ssh_certificate.py new file mode 100644 index 0000000..61e6a8b --- /dev/null +++ b/jcloud/jcloud/pagetype/ssh_certificate/test_ssh_certificate.py @@ -0,0 +1,43 @@ +# Copyright (c) 2023, JINGROW +# See license.txt + +import unittest +from unittest.mock import Mock, patch + +import jingrow + +from jcloud.jcloud.pagetype.agent_job.agent_job import AgentJob +from jcloud.jcloud.pagetype.site.test_site import create_test_bench +from jcloud.jcloud.pagetype.ssh_certificate.ssh_certificate import SSHCertificate +from jcloud.jcloud.pagetype.team.test_team import create_test_jcloud_admin_team +from jcloud.jcloud.pagetype.user_ssh_key.test_user_ssh_key import create_test_user_ssh_key + + +@patch.object(SSHCertificate, "validate_certificate_authority", new=Mock()) +@patch.object(SSHCertificate, "generate_certificate", new=Mock()) +@patch.object(SSHCertificate, "extract_certificate_details", new=Mock()) +@patch.object(AgentJob, "enqueue_http_request", new=Mock()) +class TestSSHCertificate(unittest.TestCase): + def setUp(self): + self.team = create_test_jcloud_admin_team() + self.user = self.team.user + + def tearDown(self): + jingrow.db.rollback() + jingrow.set_user("Administrator") + + def test_jcloud_admin_user_can_create_ssh_certificate(self): + bench = create_test_bench(self.user) + group = bench.group + jingrow.set_user(self.user) + user_ssh_key = create_test_user_ssh_key(user=self.user) + return jingrow.get_pg( + { + "pagetype": "SSH Certificate", + "certificate_type": "User", + "group": group, + "user": jingrow.session.user, + "user_ssh_key": user_ssh_key, + "validity": "6h", + } + ).insert() diff --git a/jcloud/jcloud/pagetype/ssh_certificate_authority/__init__.py b/jcloud/jcloud/pagetype/ssh_certificate_authority/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/ssh_certificate_authority/ssh_certificate_authority.js b/jcloud/jcloud/pagetype/ssh_certificate_authority/ssh_certificate_authority.js new file mode 100644 index 0000000..a92cb93 --- /dev/null +++ b/jcloud/jcloud/pagetype/ssh_certificate_authority/ssh_certificate_authority.js @@ -0,0 +1,14 @@ +// Copyright (c) 2022, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('SSH Certificate Authority', { + refresh: function (frm) { + frm.add_custom_button( + __('Build Image'), + () => { + frm.call('build_image').then((r) => frm.refresh()); + }, + __('Actions'), + ); + }, +}); diff --git a/jcloud/jcloud/pagetype/ssh_certificate_authority/ssh_certificate_authority.json b/jcloud/jcloud/pagetype/ssh_certificate_authority/ssh_certificate_authority.json new file mode 100644 index 0000000..21598ec --- /dev/null +++ b/jcloud/jcloud/pagetype/ssh_certificate_authority/ssh_certificate_authority.json @@ -0,0 +1,87 @@ +{ + "actions": [], + "autoname": "Prompt", + "creation": "2022-01-12 12:54:04.864137", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "directory", + "public_key", + "key_fingerprint", + "section_break_4", + "docker_image", + "docker_image_repository", + 
"docker_image_tag" + ], + "fields": [ + { + "fieldname": "directory", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Directory", + "reqd": 1, + "set_only_once": 1 + }, + { + "fieldname": "public_key", + "fieldtype": "Code", + "label": "Public Key", + "read_only": 1 + }, + { + "fieldname": "key_fingerprint", + "fieldtype": "Code", + "label": "Key Fingerprint", + "read_only": 1 + }, + { + "fieldname": "section_break_4", + "fieldtype": "Section Break" + }, + { + "fieldname": "docker_image", + "fieldtype": "Data", + "label": "Docker Image", + "read_only": 1 + }, + { + "fieldname": "docker_image_repository", + "fieldtype": "Data", + "label": "Docker Image Repository", + "read_only": 1 + }, + { + "fieldname": "docker_image_tag", + "fieldtype": "Int", + "label": "Docker Image Tag", + "read_only": 1 + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2022-01-16 13:05:32.106981", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "SSH Certificate Authority", + "naming_rule": "Set by user", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/ssh_certificate_authority/ssh_certificate_authority.py b/jcloud/jcloud/pagetype/ssh_certificate_authority/ssh_certificate_authority.py new file mode 100644 index 0000000..d8dd32f --- /dev/null +++ b/jcloud/jcloud/pagetype/ssh_certificate_authority/ssh_certificate_authority.py @@ -0,0 +1,191 @@ +# Copyright (c) 2022, JINGROW +# For license information, please see license.txt + +import os +import shlex +import shutil +import subprocess + +import docker +import jingrow +from jingrow.model.document import Document +from jingrow.utils import cint + +from jcloud.utils import log_error + + +class SSHCertificateAuthority(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + directory: DF.Data + docker_image: DF.Data | None + docker_image_repository: DF.Data | None + docker_image_tag: DF.Int + key_fingerprint: DF.Code | None + public_key: DF.Code | None + # end: auto-generated types + + def after_insert(self): + self.setup_directory() + self.generate_key_pair() + self.extract_public_key() + self.extract_key_fingerprint() + self.save() + + def setup_directory(self): + if not os.path.exists(self.directory): + os.mkdir(self.directory) + + def run(self, command, directory, environment=None): + try: + return subprocess.check_output( + shlex.split(command), cwd=directory, env=environment, stderr=subprocess.STDOUT + ).decode() + except subprocess.CalledProcessError as e: + log_error("Command failed", output={e.output.decode()}, pg=self) + raise + + def generate_key_pair(self): + if not os.path.exists(self.private_key_file) and not os.path.exists( + self.public_key_file + ): + domain = jingrow.db.get_value("Jcloud Settings", None, "domain") + self.run( + f"ssh-keygen -C ca@{domain} -t rsa -b 4096 -f ca -N ''", directory=self.directory + ) + + os.chmod(self.public_key_file, 0o400) + os.chmod(self.private_key_file, 0o400) + + def extract_public_key(self): + with open(self.public_key_file) as f: + self.public_key = f.read() + + def extract_key_fingerprint(self): + self.key_fingerprint = self.run("ssh-keygen -l -f ca.pub", directory=self.directory) + + def on_trash(self): + if os.path.exists(self.directory): + shutil.rmtree(self.directory) + + def sign( + self, identity, principals, duration, public_key_path, serial_number, host_key=False + ): + if principals is None: + principals = [] + + host_flag = "-h " if host_key else "" + principals_argument = f"-n {','.join(principals)} " if principals else "" + self.run( + f"ssh-keygen -s ca -t rsa-sha2-512 -I {identity} {host_flag}" + f" {principals_argument} -z {serial_number} -O no-port-forwarding -O" + " no-user-rc -O no-x11-forwarding -O no-agent-forwarding -O permit-pty -V" + f" {duration} {public_key_path}", + directory=self.directory, + ) + + @property + def private_key_file(self): + return os.path.join(self.directory, "ca") + + @property + def public_key_file(self): + return os.path.join(self.directory, "ca.pub") + + @property + def build_directory(self): + return os.path.join(self.directory, "build") + + @jingrow.whitelist() + def build_image(self): + jingrow.enqueue_pg(self.pagetype, self.name, "_build_image", timeout=2400) + + def _build_image(self): + self._prepare_build_directory() + self._prepare_build_context() + self._run_docker_build() + self._push_docker_image() + + def _prepare_build_directory(self): + if os.path.exists(self.build_directory): + shutil.rmtree(self.build_directory) + os.mkdir(self.build_directory) + + def _prepare_build_context(self): + for target in ["sshd_config", "Dockerfile"]: + shutil.copy( + os.path.join(jingrow.get_app_path("jcloud", "docker", "ssh_proxy"), target), + self.build_directory, + ) + + public_key_file = os.path.join(self.build_directory, "ca.pub") + with open(public_key_file, "w") as f: + f.write(self.public_key) + + known_hosts_file = os.path.join(self.build_directory, "known_hosts") + with open(known_hosts_file, "w") as f: + f.write(f"@cert-authority * {self.public_key}") + + self.run( + "ssh-keygen -t rsa -b 4096 -N '' -f ssh_host_rsa_key", directory=self.build_directory + ) + + host_key_path = os.path.join(self.build_directory, "ssh_host_rsa_key.pub") + + domain = 
jingrow.db.get_value("Jcloud Settings", None, "domain") + self.sign( + domain, None, "+52w", host_key_path, cint(self.docker_image_tag) + 1, host_key=True + ) + + def _run_docker_build(self): + environment = os.environ + environment.update( + {"DOCKER_BUILDKIT": "1", "BUILDKIT_PROGRESS": "plain", "PROGRESS_NO_TRUNC": "1"} + ) + + settings = jingrow.db.get_value( + "Jcloud Settings", + None, + ["domain", "docker_registry_url", "docker_registry_namespace"], + as_dict=True, + ) + + if settings.docker_registry_namespace: + namespace = f"{settings.docker_registry_namespace}/{settings.domain}" + else: + namespace = f"{settings.domain}" + + self.docker_image_repository = f"{settings.docker_registry_url}/{namespace}/ssh" + + self.docker_image_tag = cint(self.docker_image_tag) + 1 + self.docker_image = f"{self.docker_image_repository}:{self.docker_image_tag}" + self.save() + jingrow.db.commit() + + self.run(f"docker build -t {self.docker_image} .", self.build_directory, environment) + + def _push_docker_image(self): + settings = jingrow.db.get_value( + "Jcloud Settings", + None, + ["docker_registry_url", "docker_registry_username", "docker_registry_password"], + as_dict=True, + ) + + client = docker.from_env() + client.login( + registry=settings.docker_registry_url, + username=settings.docker_registry_username, + password=settings.docker_registry_password, + ) + + for line in client.images.push( + self.docker_image_repository, self.docker_image_tag, stream=True, decode=True + ): + print(line) diff --git a/jcloud/jcloud/pagetype/ssh_certificate_authority/test_ssh_certificate_authority.py b/jcloud/jcloud/pagetype/ssh_certificate_authority/test_ssh_certificate_authority.py new file mode 100644 index 0000000..54a0e9f --- /dev/null +++ b/jcloud/jcloud/pagetype/ssh_certificate_authority/test_ssh_certificate_authority.py @@ -0,0 +1,9 @@ +# Copyright (c) 2022, JINGROW +# See license.txt + +# import jingrow +import unittest + + +class TestSSHCertificateAuthority(unittest.TestCase): + pass diff --git a/jcloud/jcloud/pagetype/ssh_key/__init__.py b/jcloud/jcloud/pagetype/ssh_key/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/ssh_key/ssh_key.js b/jcloud/jcloud/pagetype/ssh_key/ssh_key.js new file mode 100644 index 0000000..f162dd0 --- /dev/null +++ b/jcloud/jcloud/pagetype/ssh_key/ssh_key.js @@ -0,0 +1,7 @@ +// Copyright (c) 2021, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('SSH Key', { + // refresh: function(frm) { + // } +}); diff --git a/jcloud/jcloud/pagetype/ssh_key/ssh_key.json b/jcloud/jcloud/pagetype/ssh_key/ssh_key.json new file mode 100644 index 0000000..65e0a7e --- /dev/null +++ b/jcloud/jcloud/pagetype/ssh_key/ssh_key.json @@ -0,0 +1,62 @@ +{ + "actions": [], + "allow_rename": 1, + "autoname": "Prompt", + "creation": "2021-09-08 12:43:09.290608", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "enabled", + "default", + "public_key" + ], + "fields": [ + { + "fieldname": "public_key", + "fieldtype": "Text", + "in_list_view": 1, + "label": "Public Key", + "reqd": 1, + "set_only_once": 1 + }, + { + "default": "1", + "fieldname": "enabled", + "fieldtype": "Check", + "label": "Enabled" + }, + { + "default": "0", + "fieldname": "default", + "fieldtype": "Check", + "label": "Default" + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2024-02-05 15:49:59.460166", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "SSH Key", + "naming_rule": "Set 
by user", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/ssh_key/ssh_key.py b/jcloud/jcloud/pagetype/ssh_key/ssh_key.py new file mode 100644 index 0000000..e589d6f --- /dev/null +++ b/jcloud/jcloud/pagetype/ssh_key/ssh_key.py @@ -0,0 +1,22 @@ +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class SSHKey(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + default: DF.Check + enabled: DF.Check + public_key: DF.Text + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/ssh_key/test_ssh_key.py b/jcloud/jcloud/pagetype/ssh_key/test_ssh_key.py new file mode 100644 index 0000000..e9e9913 --- /dev/null +++ b/jcloud/jcloud/pagetype/ssh_key/test_ssh_key.py @@ -0,0 +1,22 @@ +# Copyright (c) 2021, JINGROW +# See license.txt + +import unittest + +import jingrow + +from jcloud.jcloud.pagetype.ssh_key.ssh_key import SSHKey + + +def create_test_ssh_key() -> SSHKey: + return jingrow.get_pg( + { + "pagetype": "SSH Key", + "name": "Test SSH Key", + "public_key": "ssh-rsa AAAAB3", + } + ).insert(ignore_if_duplicate=True) + + +class TestSSHKey(unittest.TestCase): + pass diff --git a/jcloud/jcloud/pagetype/storage_integration_bucket/__init__.py b/jcloud/jcloud/pagetype/storage_integration_bucket/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/storage_integration_bucket/storage_integration_bucket.js b/jcloud/jcloud/pagetype/storage_integration_bucket/storage_integration_bucket.js new file mode 100644 index 0000000..0fbf6a8 --- /dev/null +++ b/jcloud/jcloud/pagetype/storage_integration_bucket/storage_integration_bucket.js @@ -0,0 +1,7 @@ +// Copyright (c) 2022, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Storage Integration Bucket', { + // refresh: function(frm) { + // } +}); diff --git a/jcloud/jcloud/pagetype/storage_integration_bucket/storage_integration_bucket.json b/jcloud/jcloud/pagetype/storage_integration_bucket/storage_integration_bucket.json new file mode 100644 index 0000000..63cafb9 --- /dev/null +++ b/jcloud/jcloud/pagetype/storage_integration_bucket/storage_integration_bucket.json @@ -0,0 +1,66 @@ +{ + "actions": [], + "allow_rename": 1, + "autoname": "field:minio_server_on", + "creation": "2022-02-14 21:03:35.091019", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "minio_server_on", + "minio_host_ip", + "bucket_name", + "region" + ], + "fields": [ + { + "fieldname": "bucket_name", + "fieldtype": "Data", + "label": "Bucket name" + }, + { + "fieldname": "region", + "fieldtype": "Data", + "label": "Region" + }, + { + "fieldname": "minio_server_on", + "fieldtype": "Link", + "label": "Minio Server on", + "options": "Proxy Server", + "unique": 1 + }, + { + "fetch_from": "minio_server_on.private_ip", + "fieldname": "minio_host_ip", + "fieldtype": "Data", + "label": "Minio Host IP", + "read_only": 1 + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2022-02-21 
21:36:04.024443", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Storage Integration Bucket", + "naming_rule": "By fieldname", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/storage_integration_bucket/storage_integration_bucket.py b/jcloud/jcloud/pagetype/storage_integration_bucket/storage_integration_bucket.py new file mode 100644 index 0000000..a752fe4 --- /dev/null +++ b/jcloud/jcloud/pagetype/storage_integration_bucket/storage_integration_bucket.py @@ -0,0 +1,23 @@ +# Copyright (c) 2022, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class StorageIntegrationBucket(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + bucket_name: DF.Data | None + minio_host_ip: DF.Data | None + minio_server_on: DF.Link | None + region: DF.Data | None + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/storage_integration_bucket/test_storage_integration_bucket.py b/jcloud/jcloud/pagetype/storage_integration_bucket/test_storage_integration_bucket.py new file mode 100644 index 0000000..dc542bb --- /dev/null +++ b/jcloud/jcloud/pagetype/storage_integration_bucket/test_storage_integration_bucket.py @@ -0,0 +1,9 @@ +# Copyright (c) 2022, JINGROW +# See license.txt + +# import jingrow +import unittest + + +class TestStorageIntegrationBucket(unittest.TestCase): + pass diff --git a/jcloud/jcloud/pagetype/storage_integration_subscription/__init__.py b/jcloud/jcloud/pagetype/storage_integration_subscription/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/storage_integration_subscription/storage_integration_subscription.js b/jcloud/jcloud/pagetype/storage_integration_subscription/storage_integration_subscription.js new file mode 100644 index 0000000..508118d --- /dev/null +++ b/jcloud/jcloud/pagetype/storage_integration_subscription/storage_integration_subscription.js @@ -0,0 +1,18 @@ +// Copyright (c) 2022, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Storage Integration Subscription', { + // refresh: function(frm) { + + // } + enabled(frm, cdt, cdn) { + jingrow.call({ + method: + 'jcloud.jcloud.pagetype.storage_integration_subscription.storage_integration_subscription.toggle_user_status', + args: { + docname: frm.docname, + status: frm.fields_dict.enabled.value, + }, + }); + }, +}); diff --git a/jcloud/jcloud/pagetype/storage_integration_subscription/storage_integration_subscription.json b/jcloud/jcloud/pagetype/storage_integration_subscription/storage_integration_subscription.json new file mode 100644 index 0000000..6526550 --- /dev/null +++ b/jcloud/jcloud/pagetype/storage_integration_subscription/storage_integration_subscription.json @@ -0,0 +1,120 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2022-03-06 20:52:37.142455", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "plan_section", + "enabled", + "site", + "team", + "limit", + "usage", + "s3_and_minio_column", + "minio_server_on", + "access_key", + 
"secret_key", + "policy_name", + "policy_json" + ], + "fields": [ + { + "fieldname": "site", + "fieldtype": "Link", + "label": "site", + "options": "Site" + }, + { + "fieldname": "limit", + "fieldtype": "Data", + "label": "Limit" + }, + { + "fieldname": "access_key", + "fieldtype": "Data", + "label": "Access Key", + "read_only": 1, + "unique": 1 + }, + { + "fieldname": "secret_key", + "fieldtype": "Data", + "label": "Secret Key", + "read_only": 1 + }, + { + "fieldname": "policy_name", + "fieldtype": "Data", + "label": "Policy Name", + "read_only": 1 + }, + { + "fieldname": "policy_json", + "fieldtype": "Code", + "label": "Policy Json", + "options": "JSON", + "read_only": 1 + }, + { + "default": "1", + "fieldname": "enabled", + "fieldtype": "Check", + "label": "Enabled" + }, + { + "fieldname": "minio_server_on", + "fieldtype": "Data", + "label": "Minio Server on" + }, + { + "fieldname": "usage", + "fieldtype": "Data", + "label": "Usage", + "read_only": 1 + }, + { + "fieldname": "s3_and_minio_column", + "fieldtype": "Section Break", + "label": "S3 and Minio" + }, + { + "fieldname": "plan_section", + "fieldtype": "Section Break", + "label": "Plan" + }, + { + "fetch_from": "site.team", + "fieldname": "team", + "fieldtype": "Data", + "label": "Team", + "options": "Email", + "read_only": 1 + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2022-03-15 09:41:07.473261", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Storage Integration Subscription", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/storage_integration_subscription/storage_integration_subscription.py b/jcloud/jcloud/pagetype/storage_integration_subscription/storage_integration_subscription.py new file mode 100644 index 0000000..0db17b7 --- /dev/null +++ b/jcloud/jcloud/pagetype/storage_integration_subscription/storage_integration_subscription.py @@ -0,0 +1,222 @@ +# Copyright (c) 2022, JINGROW +# For license information, please see license.txt + +import json +import math + +import boto3 +import jingrow +from jingrow.model.document import Document +from jingrow.utils.password import get_decrypted_password + +from jcloud.agent import Agent + + +class StorageIntegrationSubscription(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + access_key: DF.Data | None + enabled: DF.Check + limit: DF.Data | None + minio_server_on: DF.Data | None + policy_json: DF.Code | None + policy_name: DF.Data | None + secret_key: DF.Data | None + site: DF.Link | None + team: DF.Data | None + usage: DF.Data | None + # end: auto-generated types + + SERVER_TYPE = "Proxy Server" + + def after_insert(self): + self.create_user() + + def validate(self): + self.set_minio_server_on() + self.set_access_key_and_policy_name() + self.set_secret_key() + self.set_policy_json() + + def set_access_key_and_policy_name(self): + # site.jingrow.cloud -> site_jingrow_cloud + self.access_key = self.name + self.policy_name = self.access_key + "_policy" + + def set_secret_key(self): + self.secret_key = jingrow.generate_hash(length=40) + + def set_policy_json(self): + bucket_name = jingrow.db.get_value( + "Storage Integration Bucket", self.minio_server_on, "bucket_name" + ) + data = { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": ["s3:GetObject", "s3:PutObject", "s3:DeleteObject"], + "Resource": f"arn:aws:s3:::{bucket_name}/{self.site}/*", + } + ], + } + self.policy_json = json.dumps(data, indent=4) + + def set_minio_server_on(self): + server = jingrow.db.get_value("Site", self.site, "server") + self.minio_server_on = jingrow.db.get_value("Server", server, "proxy_server") + + def create_user(self): + agent = Agent(server_type=self.SERVER_TYPE, server=self.minio_server_on) + data = { + "access_key": self.access_key, + "secret_key": self.secret_key, + "policy_name": self.policy_name, + "policy_json": self.policy_json, + } + + return agent.create_agent_job( + "Create Minio User", + "minio/users", + method="POST", + data=data, + ) + + def toggle_user(self, action): + """ + param op_type: type of operation 'enable' or 'disable' + """ + agent = Agent(server_type=self.SERVER_TYPE, server=self.minio_server_on) + + return agent.create_agent_job( + f"{action.capitalize()} Minio User", + path=f"/minio/users/{self.access_key}/toggle/{action}", + method="POST", + ) + + def remove_user(self): + agent = Agent(server_type=self.SERVER_TYPE, server=self.minio_server_on) + + return agent.create_agent_job( + "Remove Minio User", + method="DELETE", + path=f"minio/users/{self.access_key}", + ) + + +def create_after_insert(pg, method): + if not pg.site: + return + + if jingrow.db.get_value("Site", pg.site, "standby_for"): + return + + if pg.app == "storage_integration": + sub_exists = jingrow.db.exists( + {"pagetype": "Storage Integration Subscription", "site": pg.site} + ) + if sub_exists: + return + + jingrow.get_pg( + {"pagetype": "Storage Integration Subscription", "site": pg.site} + ).insert(ignore_permissions=True) + + if pg.app == "email_delivery_service": + # TODO: add a separate pagetype to track email service setup completion + from jcloud.api.email import setup + + setup(pg.site) + + +size_name = ("B", "KB", "MB", "GB", "TB", "PB") + + +def monitor_storage(): + active_subs = jingrow.get_all( + "Storage Integration Subscription", fields=["site", "name"], filters={"enabled": 1} + ) + access_key = jingrow.db.get_value("Add On Settings", None, "aws_access_key") + secret_key = get_decrypted_password( + "Add On Settings", "Add On Settings", "aws_secret_key" + ) + + for sub in active_subs: + usage, unit_u = get_size("bucket_name", sub["site"], access_key, secret_key) + # not used yet + if usage == 0: + break + + pg = jingrow.get_pg("Storage Integration 
Subscription", sub["name"]) + limit, unit_l = pg.limit.split(" ") + + # TODO: Add size_name index change when there are very higher plans + if unit_u == unit_l and usage >= int(limit): + # send emails maybe? + pg.toggle_user("disable") + pg.enabled = 0 + else: + pg.usage = f"{usage} {unit_u}" + + pg.save() + jingrow.db.commit() + + +def get_size(bucket, path, access_key, secret_key): + s3 = boto3.resource( + "s3", aws_access_key_id=access_key, aws_secret_access_key=secret_key + ) + my_bucket = s3.Bucket(bucket) + total_size = 0 + + for obj in my_bucket.objects.filter(Prefix=path): + total_size = total_size + obj.size + + return convert_size(total_size) + + +def convert_size(size_bytes): + if size_bytes == 0: + return 0, "B" + i = int(math.floor(math.log(size_bytes, 1024))) + p = math.pow(1024, i) + s = round(size_bytes / p, 2) + return s, size_name[i] + + +@jingrow.whitelist() +def toggle_user_status(docname, status): + pg = jingrow.get_pg("Storage Integration Subscription", docname) + status = int(status) + + if status == 0: + pg.toggle_user("disable") + elif status == 1: + pg.toggle_user("enable") + + jingrow.db.commit() + + +@jingrow.whitelist(allow_guest=True) +def get_analytics(**data): + from jcloud.api.developer.marketplace import get_subscription_status + + if get_subscription_status(data["secret_key"]) != "Active": + return + + site, available = jingrow.db.get_value( + "Storage Integration Subscription", data["access_key"], ["site", "limit"] + ) + access_key = jingrow.db.get_value("Add On Settings", None, "aws_access_key") + secret_key = get_decrypted_password( + "Add On Settings", "Add On Settings", "aws_secret_key" + ) + used, unit_u = get_size(data["bucket"], site, access_key, secret_key) + + return {"used": f"{used} {unit_u}", "available": available} diff --git a/jcloud/jcloud/pagetype/storage_integration_subscription/test_storage_integration_subscription.py b/jcloud/jcloud/pagetype/storage_integration_subscription/test_storage_integration_subscription.py new file mode 100644 index 0000000..22d1afa --- /dev/null +++ b/jcloud/jcloud/pagetype/storage_integration_subscription/test_storage_integration_subscription.py @@ -0,0 +1,9 @@ +# Copyright (c) 2022, JINGROW +# See license.txt + +# import jingrow +import unittest + + +class TestStorageIntegrationSubscription(unittest.TestCase): + pass diff --git a/jcloud/jcloud/pagetype/stripe_micro_charge_record/__init__.py b/jcloud/jcloud/pagetype/stripe_micro_charge_record/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/stripe_micro_charge_record/stripe_micro_charge_record.js b/jcloud/jcloud/pagetype/stripe_micro_charge_record/stripe_micro_charge_record.js new file mode 100644 index 0000000..00da48f --- /dev/null +++ b/jcloud/jcloud/pagetype/stripe_micro_charge_record/stripe_micro_charge_record.js @@ -0,0 +1,28 @@ +// Copyright (c) 2022, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Stripe Micro Charge Record', { + refresh: function (frm) { + if (!frm.pg.has_been_refunded) { + const btn = frm.add_custom_button('Refund', () => { + frm + .call({ + pg: frm.pg, + method: 'refund', + btn, + }) + .then((r) => { + if (r.message) { + jingrow.msgprint(`Refunded Successfully.`); + } + frm.refresh(); + }); + }); + } + + frm.add_web_link( + `https://dashboard.stripe.com/payments/${frm.pg.stripe_payment_intent_id}`, + 'View in Stripe Dashboard', + ); + }, +}); diff --git a/jcloud/jcloud/pagetype/stripe_micro_charge_record/stripe_micro_charge_record.json 
b/jcloud/jcloud/pagetype/stripe_micro_charge_record/stripe_micro_charge_record.json new file mode 100644 index 0000000..a1fee0f --- /dev/null +++ b/jcloud/jcloud/pagetype/stripe_micro_charge_record/stripe_micro_charge_record.json @@ -0,0 +1,79 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2022-10-14 19:22:39.833212", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "team", + "stripe_payment_method", + "column_break_3", + "has_been_refunded", + "stripe_payment_intent_id" + ], + "fields": [ + { + "fieldname": "stripe_payment_method", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Stripe Payment Method", + "options": "Stripe Payment Method", + "read_only": 1 + }, + { + "default": "0", + "fieldname": "has_been_refunded", + "fieldtype": "Check", + "in_list_view": 1, + "label": "Refunded?", + "read_only": 1 + }, + { + "fetch_from": "stripe_payment_method.team", + "fetch_if_empty": 1, + "fieldname": "team", + "fieldtype": "Link", + "label": "Team", + "options": "Team", + "read_only": 1 + }, + { + "fieldname": "column_break_3", + "fieldtype": "Column Break" + }, + { + "fieldname": "stripe_payment_intent_id", + "fieldtype": "Data", + "label": "Stripe Payment Intent ID", + "read_only": 1, + "reqd": 1 + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2022-11-07 11:39:59.912308", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Stripe Micro Charge Record", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "quick_entry": 1, + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "title_field": "team" +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/stripe_micro_charge_record/stripe_micro_charge_record.py b/jcloud/jcloud/pagetype/stripe_micro_charge_record/stripe_micro_charge_record.py new file mode 100644 index 0000000..8aa0eb5 --- /dev/null +++ b/jcloud/jcloud/pagetype/stripe_micro_charge_record/stripe_micro_charge_record.py @@ -0,0 +1,36 @@ +# Copyright (c) 2022, JINGROW +# For license information, please see license.txt + +import jingrow +from jingrow.model.document import Document + +from jcloud.utils.billing import get_stripe + + +class StripeMicroChargeRecord(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
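+ # Descriptive summary (editorial comment): records the small card-verification charge;
+ # after_insert() refunds it immediately via stripe.Refund.create and sets has_been_refunded
+ # when the refund succeeds.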
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + has_been_refunded: DF.Check + stripe_payment_intent_id: DF.Data + stripe_payment_method: DF.Link | None + team: DF.Link | None + # end: auto-generated types + + def after_insert(self): + # Auto-refund + self.refund() + + @jingrow.whitelist() + def refund(self): + stripe = get_stripe() + refund = stripe.Refund.create(payment_intent=self.stripe_payment_intent_id) + + if refund.status == "succeeded": + self.has_been_refunded = True + self.save() diff --git a/jcloud/jcloud/pagetype/stripe_micro_charge_record/test_stripe_micro_charge_record.py b/jcloud/jcloud/pagetype/stripe_micro_charge_record/test_stripe_micro_charge_record.py new file mode 100644 index 0000000..f7db7d0 --- /dev/null +++ b/jcloud/jcloud/pagetype/stripe_micro_charge_record/test_stripe_micro_charge_record.py @@ -0,0 +1,9 @@ +# Copyright (c) 2022, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests.utils import JingrowTestCase + + +class TestStripeMicroChargeRecord(JingrowTestCase): + pass diff --git a/jcloud/jcloud/pagetype/stripe_payment_event/__init__.py b/jcloud/jcloud/pagetype/stripe_payment_event/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/stripe_payment_event/stripe_payment_event.js b/jcloud/jcloud/pagetype/stripe_payment_event/stripe_payment_event.js new file mode 100644 index 0000000..09d3d57 --- /dev/null +++ b/jcloud/jcloud/pagetype/stripe_payment_event/stripe_payment_event.js @@ -0,0 +1,7 @@ +// Copyright (c) 2021, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Stripe Payment Event', { + // refresh: function(frm) { + // } +}); diff --git a/jcloud/jcloud/pagetype/stripe_payment_event/stripe_payment_event.json b/jcloud/jcloud/pagetype/stripe_payment_event/stripe_payment_event.json new file mode 100644 index 0000000..a84e3fc --- /dev/null +++ b/jcloud/jcloud/pagetype/stripe_payment_event/stripe_payment_event.json @@ -0,0 +1,84 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2021-10-31 12:05:38.633463", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "invoice", + "team", + "event_type", + "payment_status", + "stripe_invoice_object", + "stripe_invoice_id" + ], + "fields": [ + { + "fieldname": "invoice", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Invoice", + "options": "Invoice" + }, + { + "fieldname": "team", + "fieldtype": "Link", + "label": "Team", + "options": "Team" + }, + { + "fieldname": "event_type", + "fieldtype": "Select", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Event Type", + "options": "Finalized\nFailed\nSucceeded", + "reqd": 1 + }, + { + "fieldname": "payment_status", + "fieldtype": "Select", + "in_list_view": 1, + "label": "Payment Status", + "options": "Paid\nUnpaid" + }, + { + "fieldname": "stripe_invoice_object", + "fieldtype": "Code", + "label": "Stripe Invoice Object", + "read_only": 1 + }, + { + "fieldname": "stripe_invoice_id", + "fieldtype": "Data", + "label": "Stripe Invoice Id" + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2021-10-31 15:29:13.658976", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Stripe Payment Event", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": 
"modified", + "sort_order": "DESC", + "title_field": "invoice", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/stripe_payment_event/stripe_payment_event.py b/jcloud/jcloud/pagetype/stripe_payment_event/stripe_payment_event.py new file mode 100644 index 0000000..9f8932e --- /dev/null +++ b/jcloud/jcloud/pagetype/stripe_payment_event/stripe_payment_event.py @@ -0,0 +1,144 @@ +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt +from __future__ import annotations + +from datetime import datetime + +import jingrow +from jingrow.model.document import Document + +from jcloud.utils.billing import convert_stripe_money +from jcloud.api.billing import get_stripe + + +class StripePaymentEvent(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + event_type: DF.Literal["Finalized", "Failed", "Succeeded"] + invoice: DF.Link | None + payment_status: DF.Literal["Paid", "Unpaid"] + stripe_invoice_id: DF.Data | None + stripe_invoice_object: DF.Code | None + team: DF.Link | None + # end: auto-generated types + + def after_insert(self): + if self.event_type == "Finalized": + self.handle_finalized() + elif self.event_type == "Succeeded": + self.handle_payment_succeeded() + elif self.event_type == "Failed": + self.handle_payment_failed() + + def handle_finalized(self): + invoice = jingrow.get_pg("Invoice", self.invoice, for_update=True) + if invoice.status == "Paid": + return + stripe_invoice = jingrow.parse_json(self.stripe_invoice_object) + + invoice.update( + { + "amount_paid": convert_stripe_money(stripe_invoice["amount_paid"]), + "stripe_invoice_url": stripe_invoice["hosted_invoice_url"], + "status": self.payment_status, + } + ) + invoice.save() + + def handle_payment_succeeded(self): + invoice = jingrow.get_pg("Invoice", self.invoice, for_update=True) + + if invoice.status == "Paid" and invoice.amount_paid == 0: + # check if invoice is already refunded + stripe = get_stripe() + inv = stripe.Invoice.retrieve(invoice.stripe_invoice_id) + payment_intent = stripe.PaymentIntent.retrieve(inv.payment_intent) + is_refunded = payment_intent["charges"]["data"][0]["refunded"] + if is_refunded: + return + # if the fc invoice is already paid via credits and the stripe payment succeeded + # issue a refund of the invoice payment + invoice.refund(reason="Payment done via credits") + invoice.add_comment( + text=( + f"Stripe Invoice {invoice.stripe_invoice_id} refunded because" + " payment is done via credits and card both." 
+ ) + ) + return + stripe_invoice = jingrow.parse_json(self.stripe_invoice_object) + team = jingrow.get_pg("Team", self.team) + + invoice.update( + { + "payment_date": datetime.fromtimestamp(stripe_invoice["status_transitions"]["paid_at"]), + "status": "Paid", + "amount_paid": stripe_invoice["amount_paid"] / 100, + "stripe_invoice_url": stripe_invoice["hosted_invoice_url"], + } + ) + invoice.save() + invoice.reload() + + # update transaction amount, fee and exchange rate + if stripe_invoice.get("charge"): + invoice.update_transaction_details(stripe_invoice.get("charge")) + + invoice.submit() + + if ( + jingrow.db.count( + "Invoice", + { + "team": team.name, + "status": "Unpaid", + "type": "Subscription", + "docstatus": ("<", 2), + }, + ) + == 0 + ): + # unsuspend sites only if all invoices are paid + team.unsuspend_sites(reason=f"Unsuspending sites because of successful payment of {self.invoice}") + + def handle_payment_failed(self): + invoice = jingrow.get_pg("Invoice", self.invoice, for_update=True) + + if invoice.status == "Paid": + if invoice.amount_paid == 0: + # check if invoice is already voided + stripe = get_stripe() + inv = stripe.Invoice.retrieve(invoice.stripe_invoice_id) + if inv.status == "void": + return + # if the fc invoice is already paid via credits and the stripe payment failed + # mark the stripe invoice as void + invoice.change_stripe_invoice_status("Void") + invoice.add_comment( + text=( + f"Stripe Invoice {invoice.stripe_invoice_id} voided because" + " payment is done via credits." + ) + ) + return + + stripe_invoice = jingrow.parse_json(self.stripe_invoice_object) + + attempt_date = stripe_invoice.get("webhooks_delivered_at") + if attempt_date: + attempt_date = datetime.fromtimestamp(attempt_date) + attempt_count = stripe_invoice.get("attempt_count") + invoice.update( + { + "payment_attempt_count": attempt_count, + "payment_attempt_date": attempt_date, + "status": "Unpaid", + } + ) + invoice.save() diff --git a/jcloud/jcloud/pagetype/stripe_payment_event/test_stripe_payment_event.py b/jcloud/jcloud/pagetype/stripe_payment_event/test_stripe_payment_event.py new file mode 100644 index 0000000..5b255d7 --- /dev/null +++ b/jcloud/jcloud/pagetype/stripe_payment_event/test_stripe_payment_event.py @@ -0,0 +1,9 @@ +# Copyright (c) 2021, JINGROW +# See license.txt + +# import jingrow +import unittest + + +class TestStripePaymentEvent(unittest.TestCase): + pass diff --git a/jcloud/jcloud/pagetype/stripe_payment_method/__init__.py b/jcloud/jcloud/pagetype/stripe_payment_method/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/stripe_payment_method/stripe_payment_method.js b/jcloud/jcloud/pagetype/stripe_payment_method/stripe_payment_method.js new file mode 100644 index 0000000..44083f2 --- /dev/null +++ b/jcloud/jcloud/pagetype/stripe_payment_method/stripe_payment_method.js @@ -0,0 +1,13 @@ +// Copyright (c) 2020, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Stripe Payment Method', { + refresh: function (frm) { + jingrow.dynamic_link = { + pg: frm.pg, + fieldname: 'name', + pagetype: 'Stripe Payment Method', + }; + jingrow.contacts.render_address_and_contact(frm); + }, +}); diff --git a/jcloud/jcloud/pagetype/stripe_payment_method/stripe_payment_method.json b/jcloud/jcloud/pagetype/stripe_payment_method/stripe_payment_method.json new file mode 100644 index 0000000..c954215 --- /dev/null +++ b/jcloud/jcloud/pagetype/stripe_payment_method/stripe_payment_method.json @@ -0,0 +1,148 @@ +{ + "actions": 
[], + "autoname": "format:PM{#####}", + "creation": "2020-04-10 12:24:49.998168", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "team", + "name_on_card", + "last_4", + "expiry_month", + "expiry_year", + "brand", + "stripe_customer_id", + "stripe_payment_method_id", + "is_default", + "column_break_9", + "address_html", + "is_verified_with_micro_charge", + "stripe_setup_intent_id", + "stripe_mandate_id", + "stripe_mandate_reference" + ], + "fields": [ + { + "fieldname": "team", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Team", + "options": "Team", + "reqd": 1 + }, + { + "fieldname": "name_on_card", + "fieldtype": "Data", + "label": "Name on Card" + }, + { + "fieldname": "last_4", + "fieldtype": "Data", + "label": "Last 4 Numbers", + "read_only": 1 + }, + { + "fieldname": "expiry_month", + "fieldtype": "Data", + "label": "Expiry Month", + "read_only": 1 + }, + { + "fieldname": "expiry_year", + "fieldtype": "Data", + "label": "Expiry Year", + "read_only": 1 + }, + { + "fieldname": "stripe_payment_method_id", + "fieldtype": "Data", + "label": "Stripe Payment Method ID", + "read_only": 1 + }, + { + "fetch_from": "team.stripe_customer_id", + "fieldname": "stripe_customer_id", + "fieldtype": "Data", + "label": "Stripe Customer ID" + }, + { + "default": "0", + "fieldname": "is_default", + "fieldtype": "Check", + "label": "Is Default" + }, + { + "fieldname": "column_break_9", + "fieldtype": "Column Break" + }, + { + "fieldname": "address_html", + "fieldtype": "HTML", + "label": "Address HTML" + }, + { + "default": "0", + "fieldname": "is_verified_with_micro_charge", + "fieldtype": "Check", + "in_list_view": 1, + "label": "Verified with Micro Charge", + "read_only": 1 + }, + { + "fieldname": "brand", + "fieldtype": "Data", + "label": "Card Brand" + }, + { + "fieldname": "stripe_mandate_id", + "fieldtype": "Data", + "label": "Stripe Mandate ID" + }, + { + "fieldname": "stripe_setup_intent_id", + "fieldtype": "Data", + "label": "Stripe Setup Intent ID" + }, + { + "fieldname": "stripe_mandate_reference", + "fieldtype": "Data", + "label": "Stripe Mandate Reference" + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2024-06-26 10:29:16.449579", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Stripe Payment Method", + "naming_rule": "Expression", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + }, + { + "create": 1, + "delete": 1, + "read": 1, + "role": "Jcloud Admin", + "write": 1 + } + ], + "quick_entry": 1, + "search_fields": "team, name_on_card, last_4, expiry_month, expiry_year", + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/stripe_payment_method/stripe_payment_method.py b/jcloud/jcloud/pagetype/stripe_payment_method/stripe_payment_method.py new file mode 100644 index 0000000..e66e426 --- /dev/null +++ b/jcloud/jcloud/pagetype/stripe_payment_method/stripe_payment_method.py @@ -0,0 +1,166 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +import jingrow +from jingrow.contacts.address_and_contact import load_address_and_contact +from jingrow.model.document import Document + +from jcloud.api.billing import get_stripe +from jcloud.api.client import dashboard_whitelist +from 
jcloud.overrides import get_permission_query_conditions_for_pagetype +from jcloud.utils import log_error +from jcloud.utils.telemetry import capture + + +class StripePaymentMethod(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + brand: DF.Data | None + expiry_month: DF.Data | None + expiry_year: DF.Data | None + is_default: DF.Check + is_verified_with_micro_charge: DF.Check + last_4: DF.Data | None + name_on_card: DF.Data | None + stripe_customer_id: DF.Data | None + stripe_mandate_id: DF.Data | None + stripe_mandate_reference: DF.Data | None + stripe_payment_method_id: DF.Data | None + stripe_setup_intent_id: DF.Data | None + team: DF.Link + # end: auto-generated types + + dashboard_fields = [ + "is_default", + "expiry_month", + "expiry_year", + "brand", + "name_on_card", + "last_4", + "stripe_mandate_id", + ] + + def onload(self): + load_address_and_contact(self) + + @staticmethod + def get_list_query(query, filters=None, **list_args): + StripeWebhookLog = jingrow.qb.PageType("Stripe Webhook Log") + StripePaymentMethod = jingrow.qb.PageType("Stripe Payment Method") + + query = ( + query.select(StripeWebhookLog.stripe_payment_method) + .left_join(StripeWebhookLog) + .on( + (StripeWebhookLog.stripe_payment_method == StripePaymentMethod.name) + & (StripeWebhookLog.event_type == "payment_intent.payment_failed") + ) + .distinct() + ) + + return query + + @dashboard_whitelist() + def delete(self): + if webhook_logs := jingrow.get_all( + "Stripe Webhook Log", + filters={"stripe_payment_method": self.name}, + pluck="name", + ): + jingrow.db.set_value( + "Stripe Webhook Log", + {"name": ("in", webhook_logs)}, + "stripe_payment_method", + None, + ) + + super().delete() + + @dashboard_whitelist() + def set_default(self): + stripe = get_stripe() + # set default payment method on stripe + stripe.Customer.modify( + self.stripe_customer_id, + invoice_settings={"default_payment_method": self.stripe_payment_method_id}, + ) + jingrow.db.set_value( + "Stripe Payment Method", + {"team": self.team, "name": ("!=", self.name)}, + "is_default", + 0, + ) + self.is_default = 1 + self.save() + jingrow.db.set_value("Team", self.team, "default_payment_method", self.name) + if not jingrow.db.get_value("Team", self.team, "payment_mode"): + jingrow.db.set_value("Team", self.team, "payment_mode", "Card") + account_request_name = jingrow.get_value("Team", self.team, "account_request") + if account_request_name: + account_request = jingrow.get_pg("Account Request", account_request_name) + if not (account_request.is_saas_signup() or account_request.invited_by_parent_team): + capture("added_card_or_prepaid_credits", "fc_signup", account_request.email) + + def on_trash(self): + self.remove_address_links() + self.remove_micro_charge_links() + + if self.is_default: + team = jingrow.get_pg("Team", self.team) + team.default_payment_method = None + team.save() + + def remove_address_links(self): + address_links = jingrow.db.get_all( + "Dynamic Link", + {"link_pagetype": "Stripe Payment Method", "link_name": self.name}, + pluck="parent", + ) + address_links = list(set(address_links)) + for address in address_links: + found = False + pg = jingrow.get_pg("Address", address) + for link in pg.links: + print(link) + if link.link_pagetype == "Stripe Payment Method" and link.link_name == self.name: + found = True + pg.remove(link) + if found: + print(pg) + pg.save() + + def 
remove_micro_charge_links(self): + jingrow.db.set_value( + "Stripe Micro Charge Record", + {"stripe_payment_method": self.name}, + "stripe_payment_method", + None, + ) + + def after_delete(self): + try: + if self.stripe_payment_method_id: + stripe = get_stripe() + stripe.PaymentMethod.detach(self.stripe_payment_method_id) + except Exception as e: + log_error( + f"无法从Stripe解除绑定支付方式: {str(e)}", + f"支付方式ID: {self.stripe_payment_method_id}" + ) + + +get_permission_query_conditions = get_permission_query_conditions_for_pagetype( + "Stripe Payment Method" +) + + +def on_pagetype_update(): + jingrow.db.add_index("Stripe Payment Method", ["team", "is_verified_with_micro_charge"]) diff --git a/jcloud/jcloud/pagetype/stripe_payment_method/test_stripe_payment_method.py b/jcloud/jcloud/pagetype/stripe_payment_method/test_stripe_payment_method.py new file mode 100644 index 0000000..80249ae --- /dev/null +++ b/jcloud/jcloud/pagetype/stripe_payment_method/test_stripe_payment_method.py @@ -0,0 +1,11 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# See license.txt + + +# import jingrow +import unittest + + +class TestStripePaymentMethod(unittest.TestCase): + pass diff --git a/jcloud/jcloud/pagetype/stripe_webhook_log/__init__.py b/jcloud/jcloud/pagetype/stripe_webhook_log/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/stripe_webhook_log/patches/add_payment_method_for_failed_events.py b/jcloud/jcloud/pagetype/stripe_webhook_log/patches/add_payment_method_for_failed_events.py new file mode 100644 index 0000000..0e50827 --- /dev/null +++ b/jcloud/jcloud/pagetype/stripe_webhook_log/patches/add_payment_method_for_failed_events.py @@ -0,0 +1,37 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt + +import jingrow + + +def execute(): + logs = jingrow.get_all( + "Stripe Webhook Log", + {"event_type": "payment_intent.payment_failed"}, + ["name", "payload", "customer_id"], + ) + + for log in logs: + payload = jingrow.parse_json(log.payload) + if payment_method_id := ( + payload.get("data", {}) + .get("object", {}) + .get("last_payment_error", {}) + .get("payment_method", {}) + .get("id") + ): + stripe_payment_method = jingrow.db.get_value( + "Stripe Payment Method", + { + "stripe_customer_id": log.customer_id, + "stripe_payment_method_id": payment_method_id, + }, + "name", + ) + jingrow.db.set_value( + "Stripe Webhook Log", + log.name, + "stripe_payment_method", + stripe_payment_method, + update_modified=False, + ) diff --git a/jcloud/jcloud/pagetype/stripe_webhook_log/stripe_webhook_log.js b/jcloud/jcloud/pagetype/stripe_webhook_log/stripe_webhook_log.js new file mode 100644 index 0000000..4f11115 --- /dev/null +++ b/jcloud/jcloud/pagetype/stripe_webhook_log/stripe_webhook_log.js @@ -0,0 +1,7 @@ +// Copyright (c) 2020, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Stripe Webhook Log', { + // refresh: function(frm) { + // } +}); diff --git a/jcloud/jcloud/pagetype/stripe_webhook_log/stripe_webhook_log.json b/jcloud/jcloud/pagetype/stripe_webhook_log/stripe_webhook_log.json new file mode 100644 index 0000000..b6d876d --- /dev/null +++ b/jcloud/jcloud/pagetype/stripe_webhook_log/stripe_webhook_log.json @@ -0,0 +1,105 @@ +{ + "actions": [], + "autoname": "Prompt", + "creation": "2020-03-30 15:42:28.497505", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "event_type", + "invoice", + "team", + "column_break_hywj", + "customer_id", + "invoice_id", + 
"stripe_payment_method", + "stripe_payment_intent_id", + "section_break_ecbt", + "payload" + ], + "fields": [ + { + "fieldname": "event_type", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Event Type", + "read_only": 1 + }, + { + "fieldname": "payload", + "fieldtype": "Code", + "label": "Payload", + "read_only": 1 + }, + { + "fieldname": "customer_id", + "fieldtype": "Data", + "label": "Stripe Customer ID" + }, + { + "fieldname": "invoice", + "fieldtype": "Link", + "label": "Invoice", + "options": "Invoice", + "search_index": 1 + }, + { + "fieldname": "invoice_id", + "fieldtype": "Data", + "label": "Stripe Invoice ID" + }, + { + "fieldname": "team", + "fieldtype": "Link", + "label": "Team", + "options": "Team", + "search_index": 1 + }, + { + "fieldname": "column_break_hywj", + "fieldtype": "Column Break" + }, + { + "description": "This is only shown when a payment intent fails", + "fieldname": "stripe_payment_method", + "fieldtype": "Link", + "label": "Stripe Payment Method", + "options": "Stripe Payment Method", + "read_only": 1 + }, + { + "fieldname": "section_break_ecbt", + "fieldtype": "Section Break" + }, + { + "fieldname": "stripe_payment_intent_id", + "fieldtype": "Data", + "label": "Stripe Payment Intent ID" + } + ], + "links": [], + "modified": "2024-11-29 10:44:55.011202", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Stripe Webhook Log", + "naming_rule": "Set by user", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/stripe_webhook_log/stripe_webhook_log.py b/jcloud/jcloud/pagetype/stripe_webhook_log/stripe_webhook_log.py new file mode 100644 index 0000000..d7e11c4 --- /dev/null +++ b/jcloud/jcloud/pagetype/stripe_webhook_log/stripe_webhook_log.py @@ -0,0 +1,151 @@ +# Copyright (c) 2022, JINGROW +# For license information, please see license.txt +from __future__ import annotations + +import re +from datetime import datetime + +import jingrow +from jingrow.model.document import Document + +import jcloud.utils +from jcloud.api.billing import get_stripe + + +class InvalidStripeWebhookEvent(Exception): + http_status_code = 400 + + +class StripeWebhookLog(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + customer_id: DF.Data | None + event_type: DF.Data | None + invoice: DF.Link | None + invoice_id: DF.Data | None + payload: DF.Code | None + stripe_payment_intent_id: DF.Data | None + stripe_payment_method: DF.Link | None + team: DF.Link | None + # end: auto-generated types + + def before_insert(self): + payload = jingrow.parse_json(self.payload) + self.name = payload.get("id") + self.event_type = payload.get("type") + customer_id = get_customer_id(payload) + invoice_id = get_invoice_id(payload) + self.stripe_payment_intent_id = "" + + if self.event_type in ["payment_intent.succeeded", "payment_intent.failed", "payment_intent.requires_action"]: + self.stripe_payment_intent_id = get_intent_id(payload) + + if customer_id: + self.customer_id = customer_id + self.team = jingrow.db.get_value("Team", {"stripe_customer_id": customer_id}, "name") + + if invoice_id: + self.invoice_id = invoice_id + self.invoice = jingrow.db.get_value("Invoice", {"stripe_invoice_id": invoice_id}, "name") + + if self.event_type == "payment_intent.payment_failed": + payment_method = ( + payload.get("data", {}).get("object", {}).get("last_payment_error", {}).get("payment_method") + ) + if payment_method: + payment_method_id = payment_method.get("id") + + self.stripe_payment_method = jingrow.db.get_value( + "Stripe Payment Method", + {"stripe_customer_id": customer_id, "stripe_payment_method_id": payment_method_id}, + "name", + ) + + if ( + self.event_type == "invoice.payment_failed" + and self.invoice + and payload.get("data", {}).get("object", {}).get("next_payment_attempt") + ): + next_payment_attempt_date = datetime.fromtimestamp( + payload.get("data", {}).get("object", {}).get("next_payment_attempt") + ).strftime("%Y-%m-%d") + jingrow.db.set_value( + "Invoice", + self.invoice, + "next_payment_attempt_date", + jingrow.utils.getdate(next_payment_attempt_date), + ) + + +@jingrow.whitelist(allow_guest=True) +def stripe_webhook_handler(): + current_user = jingrow.session.user + form_dict = jingrow.local.form_dict + try: + payload = jingrow.request.get_data() + signature = jingrow.get_request_header("Stripe-Signature") + # parse payload will verify the request + event = parse_payload(payload, signature) + # set user to Administrator, to not have to do ignore_permissions everywhere + jingrow.set_user("Administrator") + jingrow.get_pg( + pagetype="Stripe Webhook Log", + payload=jingrow.as_json(event), + ).insert(ignore_if_duplicate=True) + except Exception: + jingrow.db.rollback() + jcloud.utils.log_error(title="Stripe Webhook Handler", stripe_event_id=form_dict.id) + jingrow.set_user(current_user) + raise + + +def get_intent_id(form_dict): + try: + form_dict_str = jingrow.as_json(form_dict) + intent_id = re.findall(r"pi_\w+", form_dict_str) + if intent_id: + return intent_id[1] + return None + except Exception: + jingrow.log_error(title="Failed to capture intent id from stripe webhook log") + + +def get_customer_id(form_dict): + try: + form_dict_str = jingrow.as_json(form_dict) + customer_id = re.search(r"cus_\w+", form_dict_str) + if customer_id: + return customer_id.group(0) + return None + except Exception: + jingrow.log_error(title="Failed to capture customer id from stripe webhook log") + + +def get_invoice_id(form_dict): + try: + form_dict_str = jingrow.as_json(form_dict) + invoice_id = re.search(r"in_\w+", form_dict_str) + if invoice_id: + return invoice_id.group(0) + return None + except Exception: + 
jingrow.log_error(title="Failed to capture invoice id from stripe webhook log") + + +def parse_payload(payload, signature): + secret = jingrow.db.get_single_value("Jcloud Settings", "stripe_webhook_secret") + stripe = get_stripe() + try: + return stripe.Webhook.construct_event(payload, signature, secret) + except ValueError: + # Invalid payload + jingrow.throw("Invalid Payload", InvalidStripeWebhookEvent) + except stripe.error.SignatureVerificationError: + # Invalid signature + jingrow.throw("Invalid Signature", InvalidStripeWebhookEvent) diff --git a/jcloud/jcloud/pagetype/stripe_webhook_log/test_stripe_webhook_log.py b/jcloud/jcloud/pagetype/stripe_webhook_log/test_stripe_webhook_log.py new file mode 100644 index 0000000..3f3a397 --- /dev/null +++ b/jcloud/jcloud/pagetype/stripe_webhook_log/test_stripe_webhook_log.py @@ -0,0 +1,11 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# See license.txt + + +# import jingrow +import unittest + + +class TestStripeWebhookLog(unittest.TestCase): + pass diff --git a/jcloud/jcloud/pagetype/subscription/__init__.py b/jcloud/jcloud/pagetype/subscription/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/subscription/subscription.js b/jcloud/jcloud/pagetype/subscription/subscription.js new file mode 100644 index 0000000..989e264 --- /dev/null +++ b/jcloud/jcloud/pagetype/subscription/subscription.js @@ -0,0 +1,10 @@ +// Copyright (c) 2020, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Subscription', { + refresh: function (frm) { + frm.add_custom_button('Create Usage Record', () => + frm.call('create_usage_record'), + ); + }, +}); diff --git a/jcloud/jcloud/pagetype/subscription/subscription.json b/jcloud/jcloud/pagetype/subscription/subscription.json new file mode 100644 index 0000000..efdeb6d --- /dev/null +++ b/jcloud/jcloud/pagetype/subscription/subscription.json @@ -0,0 +1,148 @@ +{ + "actions": [], + "autoname": "SUB-.YYYY.-.#####.", + "creation": "2020-10-19 19:01:35.606131", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "enabled", + "team", + "document_type", + "document_name", + "plan_type", + "plan", + "interval", + "site", + "marketplace_app_subscription", + "additional_storage", + "secret_key" + ], + "fields": [ + { + "fieldname": "team", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Team", + "options": "Team", + "reqd": 1, + "search_index": 1 + }, + { + "fieldname": "document_type", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Document Type", + "options": "PageType", + "reqd": 1 + }, + { + "fieldname": "document_name", + "fieldtype": "Dynamic Link", + "in_list_view": 1, + "label": "Document Name", + "options": "document_type", + "reqd": 1 + }, + { + "default": "Daily", + "fieldname": "interval", + "fieldtype": "Select", + "in_list_view": 1, + "label": "Interval", + "options": "Daily\nMonthly" + }, + { + "default": "1", + "fieldname": "enabled", + "fieldtype": "Check", + "label": "Enabled", + "search_index": 1 + }, + { + "fieldname": "plan", + "fieldtype": "Dynamic Link", + "label": "Plan", + "options": "plan_type", + "reqd": 1 + }, + { + "fieldname": "marketplace_app_subscription", + "fieldtype": "Link", + "label": "Marketplace App Subscription", + "options": "Marketplace App Subscription" + }, + { + "allow_in_quick_entry": 1, + "fieldname": "plan_type", + "fieldtype": "Link", + "label": "Plan Type", + "options": "PageType", + "reqd": 1, + "search_index": 1 + }, + { + "fieldname": "site", 
+ "fieldtype": "Link", + "label": "Site", + "options": "Site", + "search_index": 1 + }, + { + "depends_on": "eval:pg.plan_type === \"Server Storage Plan\"", + "fieldname": "additional_storage", + "fieldtype": "Data", + "label": "Additional Storage" + }, + { + "fieldname": "secret_key", + "fieldtype": "Data", + "label": "Secret Key" + } + ], + "index_web_pages_for_search": 1, + "links": [ + { + "link_pagetype": "Usage Record", + "link_fieldname": "subscription" + } + ], + "modified": "2024-11-06 19:02:20.960429", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Subscription", + "naming_rule": "Expression (old style)", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + }, + { + "create": 1, + "read": 1, + "role": "Jcloud Admin", + "write": 1 + }, + { + "create": 1, + "read": 1, + "role": "Jcloud Member", + "write": 1 + } + ], + "quick_entry": 1, + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "title_field": "team", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/subscription/subscription.py b/jcloud/jcloud/pagetype/subscription/subscription.py new file mode 100644 index 0000000..587825f --- /dev/null +++ b/jcloud/jcloud/pagetype/subscription/subscription.py @@ -0,0 +1,356 @@ +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + +from __future__ import annotations + +import jingrow +import rq +from jingrow.model.document import Document +from jingrow.query_builder.functions import Coalesce, Count +from jingrow.utils import cint, flt + +from jcloud.overrides import get_permission_query_conditions_for_pagetype +from jcloud.jcloud.pagetype.site_plan.site_plan import SitePlan +from jcloud.utils import log_error +from jcloud.utils.jobs import has_job_timeout_exceeded + + +class Subscription(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + additional_storage: DF.Data | None + document_name: DF.DynamicLink + document_type: DF.Link + enabled: DF.Check + interval: DF.Literal["Daily", "Monthly"] + marketplace_app_subscription: DF.Link | None + plan: DF.DynamicLink + plan_type: DF.Link + secret_key: DF.Data | None + site: DF.Link | None + team: DF.Link + # end: auto-generated types + + dashboard_fields = ( + "site", + "enabled", + "document_type", + "document_name", + "team", + ) + + @staticmethod + def get_list_query(query, **list_args): + Subscription = jingrow.qb.PageType("Subscription") + UsageRecord = jingrow.qb.PageType("Usage Record") + Plan = jingrow.qb.PageType("Marketplace App Plan") + price_field = Plan.price_cny if jingrow.local.team().currency == "CNY" else Plan.price_usd + filters = list_args.get("filters", {}) + + query = ( + jingrow.qb.from_(Subscription) + .join(Plan) + .on(Subscription.plan == Plan.name) + .left_join(UsageRecord) + .on(UsageRecord.subscription == Subscription.name) + .groupby(Subscription.name) + .select( + Subscription.site, + Subscription.enabled, + price_field.as_("price"), + Coalesce(Count(UsageRecord.subscription), 0).as_("active_for"), + ) + .where( + (Subscription.document_type == "Marketplace App") + & (Subscription.document_name == filters["document_name"]) + & (Subscription.site != "") + & (price_field > 0) + ) + .limit(list_args["limit"]) + .offset(list_args["start"]) + ) + + if filters.get("enabled"): + enabled = 1 if filters["enabled"] == "Active" else 0 + query = query.where(Subscription.enabled == enabled) + + return query.run(as_dict=True) + + def before_validate(self): + if not self.secret_key and self.document_type == "Marketplace App": + self.secret_key = jingrow.utils.generate_hash(length=40) + if not jingrow.db.exists("Site Config Key", {"key": f"sk_{self.document_name}"}): + jingrow.get_pg( + pagetype="Site Config Key", internal=True, key=f"sk_{self.document_name}" + ).insert(ignore_permissions=True) + + def validate(self): + self.validate_duplicate() + + def on_update(self): + if self.plan_type == "Server Storage Plan": + return + + pg = self.get_subscribed_document() + plan_field = pg.meta.get_field("plan") + if not (plan_field and plan_field.options in ["Site Plan", "Server Plan", "Marketplace App Plan"]): + return + + if self.enabled and pg.plan != self.plan: + pg.plan = self.plan + pg.save() + if not self.enabled and pg.plan: + pg.plan = "" + pg.save() + + def enable(self): + if self.enabled: + return + try: + self.enabled = True + self.save(ignore_permissions=True) + except Exception: + jingrow.log_error(title="Enable Subscription Error") + + def disable(self): + if not self.enabled: + return + try: + self.enabled = False + self.save(ignore_permissions=True) + except Exception: + jingrow.log_error(title="Disable Subscription Error") + + @jingrow.whitelist() + def create_usage_record(self, date: DF.Date | None = None): + cannot_charge = not self.can_charge_for_subscription() + if cannot_charge: + return None + + if self.is_usage_record_created(date): + return None + + team = jingrow.get_cached_pg("Team", self.team) + + if team.parent_team: + team = jingrow.get_cached_pg("Team", team.parent_team) + + if team.billing_team and team.payment_mode == "Paid By Partner": + team = jingrow.get_cached_pg("Team", team.billing_team) + + if not team.get_upcoming_invoice(): + team.create_upcoming_invoice() + + plan = jingrow.get_cached_pg(self.plan_type, self.plan) + + if 
self.additional_storage: + price = plan.price_cny if team.currency == "CNY" else plan.price_usd + price_per_day = price / plan.period # no rounding off to avoid discrepancies + amount = flt((price_per_day * cint(self.additional_storage)), 2) + else: + amount = plan.get_price_for_interval(self.interval, team.currency) + + usage_record = jingrow.get_pg( + pagetype="Usage Record", + team=team.name, + document_type=self.document_type, + document_name=self.document_name, + plan_type=self.plan_type, + plan=plan.name, + amount=amount, + date=date, + subscription=self.name, + interval=self.interval, + site=( + self.site + or jingrow.get_value("Marketplace App Subscription", self.marketplace_app_subscription, "site") + ) + if self.document_type == "Marketplace App" + else None, + ) + usage_record.insert() + usage_record.submit() + return usage_record + + def can_charge_for_subscription(self): + pg = self.get_subscribed_document() + if not pg: + return False + + if hasattr(pg, "can_charge_for_subscription"): + return pg.can_charge_for_subscription(self) + + return True + + def is_usage_record_created(self, date=None): + filters = { + "team": self.team, + "document_type": self.document_type, + "document_name": self.document_name, + "subscription": self.name, + "interval": self.interval, + "plan": self.plan, + } + + if self.interval == "Daily": + date = date or jingrow.utils.today() + filters.update({"date": date}) + + if self.interval == "Monthly": + date = jingrow.utils.getdate() + first_day = jingrow.utils.get_first_day(date) + last_day = jingrow.utils.get_last_day(date) + filters.update({"date": ("between", (first_day, last_day))}) + + result = jingrow.db.get_all("Usage Record", filters=filters, limit=1) + return bool(result) + + def validate_duplicate(self): + if not self.is_new(): + return + filters = { + "team": self.team, + "document_type": self.document_type, + "document_name": self.document_name, + "plan_type": self.plan_type, + } + if self.document_type == "Marketplace App": + filters.update({"marketplace_app_subscription": self.marketplace_app_subscription}) + + results = jingrow.db.get_all( + "Subscription", + filters, + pluck="name", + limit=1, + ignore_ifnull=True, + ) + if results: + link = jingrow.utils.get_link_to_form("Subscription", results[0]) + jingrow.throw(f"A Subscription already exists: {link}", jingrow.DuplicateEntryError) + + def get_subscribed_document(self): + if not hasattr(self, "_subscribed_document"): + self._subscribed_document = jingrow.get_pg(self.document_type, self.document_name) + return self._subscribed_document + + @classmethod + def get_sites_without_offsite_backups(cls) -> list[str]: + plans = SitePlan.get_ones_without_offsite_backups() + return jingrow.get_all( + "Subscription", + filters={"document_type": "Site", "plan": ("in", plans)}, + pluck="document_name", + ) + + +def create_usage_records(): + """ + Creates daily usage records for paid Subscriptions + """ + free_sites = sites_with_free_hosting() + settings = jingrow.get_single("Jcloud Settings") + subscriptions = jingrow.db.get_all( + "Subscription", + filters={ + "enabled": True, + "plan": ("in", paid_plans()), + "name": ("not in", created_usage_records(free_sites)), + "document_name": ("not in", free_sites), + }, + pluck="name", + order_by=None, + limit=settings.usage_record_creation_batch_size or 500, + ignore_ifnull=True, + debug=True, + ) + for name in subscriptions: + if has_job_timeout_exceeded(): + return + subscription = jingrow.get_cached_pg("Subscription", name) + try: + 
subscription.create_usage_record() + jingrow.db.commit() + except rq.timeouts.JobTimeoutException: + # This job took too long to execute + # We need to rollback the transaction + # Try again in the next job + jingrow.db.rollback() + return + except Exception: + jingrow.db.rollback() + log_error(title="Create Usage Record Error", name=name) + + +def paid_plans(): + paid_plans = [] + filter = { + "price_cny": (">", 0), + "enabled": 1, + } + doctypes = [ + "Site Plan", + "Marketplace App Plan", + "Server Plan", + "Server Storage Plan", + "Cluster Plan", + ] + for pagetype in doctypes: + paid_plans += jingrow.get_all(pagetype, filter, pluck="name", ignore_ifnull=True) + + return list(set(paid_plans)) + + +def sites_with_free_hosting(): + # sites marked as free + free_teams = jingrow.get_all("Team", filters={"free_account": True, "enabled": True}, pluck="name") + free_team_sites = jingrow.get_all( + "Site", + {"status": ("not in", ("Archived", "Suspended")), "team": ("in", free_teams)}, + pluck="name", + ignore_ifnull=True, + ) + return free_team_sites + jingrow.get_all( + "Site", + filters={ + "free": True, + "status": ("not in", ("Archived", "Suspended")), + "team": ("not in", free_teams), + }, + pluck="name", + ignore_ifnull=True, + ) + + +def created_usage_records(free_sites, date=None): + date = date or jingrow.utils.today() + """Returns created usage records for a particular date""" + return jingrow.get_all( + "Usage Record", + filters={ + "document_type": ( + "in", + ( + "Site", + "Server", + "Database Server", + "Self Hosted Server", + "Marketplace App", + "Cluster", + ), + ), + "date": date, + "document_name": ("not in", free_sites), + }, + pluck="subscription", + order_by=None, + ignore_ifnull=True, + ) + + +get_permission_query_conditions = get_permission_query_conditions_for_pagetype("Subscription") diff --git a/jcloud/jcloud/pagetype/subscription/test_subscription.py b/jcloud/jcloud/pagetype/subscription/test_subscription.py new file mode 100644 index 0000000..14fda9d --- /dev/null +++ b/jcloud/jcloud/pagetype/subscription/test_subscription.py @@ -0,0 +1,186 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# See license.txt + + +import unittest +from unittest.mock import patch + +import jingrow + +from jcloud.jcloud.pagetype.site.test_site import create_test_site +from jcloud.jcloud.pagetype.subscription.subscription import sites_with_free_hosting +from jcloud.jcloud.pagetype.team.test_team import create_test_team + + +def create_test_subscription( + document_name: str, + plan: str, + team: str, + document_type: str = "Site", + plan_type: str = "Site Plan", +): + subscription = jingrow.get_pg( + { + "pagetype": "Subscription", + "document_type": document_type, + "document_name": document_name, + "team": team, + "plan_type": plan_type, + "plan": plan, + "site": document_name if document_type == "Site" else None, + } + ).insert(ignore_if_duplicate=True) + subscription.reload() + return subscription + + +class TestSubscription(unittest.TestCase): + def setUp(self): + self.team = create_test_team() + self.team.allocate_credit_amount(1000, source="Prepaid Credits") + self.team.payment_mode = "Prepaid Credits" + self.team.save() + jingrow.set_user(self.team.user) + + def tearDown(self): + jingrow.set_user("Administrator") + jingrow.db.rollback() + + def test_subscription_daily(self): + todo = jingrow.get_pg(pagetype="ToDo", description="Test todo").insert() + plan = jingrow.get_pg( + pagetype="Site Plan", + name="Plan-10", + document_type="ToDo", + interval="Daily", + 
price_usd=30, + price_cny=30, + ).insert() + + subscription = jingrow.get_pg( + pagetype="Subscription", + team=self.team.name, + document_type="ToDo", + document_name=todo.name, + plan_type="Site Plan", + plan=plan.name, + ).insert() + + today = jingrow.utils.getdate() + tomorrow = jingrow.utils.add_days(today, 1) + desired_value = plan.get_price_per_day("CNY") * 2 + + is_last_day_of_month = jingrow.utils.data.get_last_day(today) == today + yesterday = jingrow.utils.add_days(today, -1) + + # Consider yesterday's and today's record instead of today and tomorrow + # Became flaky if it was last day of month because + # tomorrow went outside of this month's invoice's period + if is_last_day_of_month: + tomorrow = today + today = yesterday + + with patch.object(jingrow.utils, "today", return_value=today): + subscription.create_usage_record() + # this should not create duplicate record + subscription.create_usage_record() + + # time travel to tomorrow + with patch.object(jingrow.utils, "today", return_value=tomorrow): + subscription.create_usage_record() + + invoice = jingrow.get_pg("Invoice", {"team": self.team.name, "status": "Draft"}) + self.assertEqual(invoice.total, desired_value) + + def test_subscription_for_non_chargeable_document(self): + todo = jingrow.get_pg(pagetype="ToDo", description="Test todo").insert() + plan = jingrow.get_pg( + pagetype="Site Plan", + name="Plan-10", + document_type="ToDo", + interval="Daily", + price_usd=30, + price_cny=30, + ).insert() + + subscription = jingrow.get_pg( + pagetype="Subscription", + team=self.team.name, + document_type="ToDo", + document_name=todo.name, + plan_type="Site Plan", + plan=plan.name, + ).insert() + + def method(subscription): + return False + + # subscription calls this method when checking if it should create a usage record + todo.can_charge_for_subscription = method + + with patch.object(subscription, "get_subscribed_document", return_value=todo): + # shouldn't create a usage record + usage_record = subscription.create_usage_record() + self.assertTrue(usage_record is None) + + def test_site_in_trial(self): + self.team.create_upcoming_invoice() + + two_days_after = jingrow.utils.add_days(None, 2) + site = create_test_site() + site.trial_end_date = two_days_after + site.save() + + plan = jingrow.get_pg( + pagetype="Site Plan", + name="Plan-10", + document_type="Site", + interval="Daily", + price_usd=30, + price_cny=30, + period=30, + ).insert() + + subscription = jingrow.get_pg( + pagetype="Subscription", + team=self.team.name, + document_type="Site", + document_name=site.name, + plan_type="Site Plan", + plan=plan.name, + ).insert() + + today = jingrow.utils.getdate() + tomorrow = jingrow.utils.add_days(today, 1) + + with patch.object(jingrow.utils, "today", return_value=today): + # shouldn't create a usage record as site is in trial + subscription.create_usage_record() + + # time travel to tomorrow + with patch.object(jingrow.utils, "today", return_value=tomorrow): + # shouldn't create a usage record as site is in trial + subscription.create_usage_record() + + invoice = jingrow.get_pg("Invoice", {"team": self.team.name, "status": "Draft"}) + self.assertEqual(invoice.total, 0) + + def test_sites_with_free_hosting(self): + self.team.create_upcoming_invoice() + + site1 = create_test_site(team=self.team.name) + site1.free = 1 + site1.save() + create_test_site(team=self.team.name) + + # test: site marked as free + free_sites = sites_with_free_hosting() + self.assertEqual(len(free_sites), 1) + + self.team.free_account = True + 
self.team.save() + + # test: site owned by free account + free_sites = sites_with_free_hosting() + self.assertEqual(len(free_sites), 2) diff --git a/jcloud/jcloud/pagetype/team/__init__.py b/jcloud/jcloud/pagetype/team/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/team/patches/remove_invalid_email_addresses.py b/jcloud/jcloud/pagetype/team/patches/remove_invalid_email_addresses.py new file mode 100644 index 0000000..6f12e33 --- /dev/null +++ b/jcloud/jcloud/pagetype/team/patches/remove_invalid_email_addresses.py @@ -0,0 +1,21 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt + +import jingrow +from jingrow.utils import update_progress_bar, validate_email_address + + +def execute(): + emails = jingrow.get_all( + "Communication Email", + {"parentfield": "communication_emails", "parenttype": "Team", "value": ("is", "set")}, + ["name", "value"], + ) + + total_emails = len(emails) + for index, email in enumerate(emails): + update_progress_bar("Updating emails", index, total_emails) + if not validate_email_address(email.value): + jingrow.db.set_value( + "Communication Email", email.name, "value", "", update_modified=False + ) diff --git a/jcloud/jcloud/pagetype/team/patches/set_partner_email.py b/jcloud/jcloud/pagetype/team/patches/set_partner_email.py new file mode 100644 index 0000000..a0af939 --- /dev/null +++ b/jcloud/jcloud/pagetype/team/patches/set_partner_email.py @@ -0,0 +1,11 @@ +import jingrow + + +def execute(): + jingrow.reload_pg("jcloud", "pagetype", "team") + jingrow.reload_pg("jcloud", "pagetype", "invoice") + + partners = jingrow.db.get_all("Team", filters={"jerp_partner": True}) + + for partner in partners: + jingrow.db.set_value("Team", partner.name, "partner_email", partner.name) diff --git a/jcloud/jcloud/pagetype/team/patches/set_payment_mode.py b/jcloud/jcloud/pagetype/team/patches/set_payment_mode.py new file mode 100644 index 0000000..fe199a3 --- /dev/null +++ b/jcloud/jcloud/pagetype/team/patches/set_payment_mode.py @@ -0,0 +1,30 @@ +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + + +import jingrow + + +def execute(): + jingrow.reload_pagetype("Team") + jingrow.db.sql( + """ + UPDATE tabTeam + SET payment_mode = 'Card' + WHERE IFNULL(default_payment_method, '') != '' + """ + ) + + jingrow.db.sql( + """ + UPDATE tabTeam t + LEFT JOIN + `tabBalance Transaction` b on t.name = b.team + AND b.source in ('Prepaid Credits', 'Transferred Credits') + SET + t.payment_mode = 'Prepaid Credits' + WHERE + IFNULL(t.payment_mode, '') = '' + AND b.source in ('Prepaid Credits', 'Transferred Credits') + """ + ) diff --git a/jcloud/jcloud/pagetype/team/patches/set_referrer_id.py b/jcloud/jcloud/pagetype/team/patches/set_referrer_id.py new file mode 100644 index 0000000..1b83080 --- /dev/null +++ b/jcloud/jcloud/pagetype/team/patches/set_referrer_id.py @@ -0,0 +1,17 @@ +import jingrow +from jingrow.utils import update_progress_bar + + +def execute(): + jingrow.reload_pg("jcloud", "pagetype", "team") + + teams = jingrow.db.get_all( + "Team", filters={"referrer_id": ("is", "not set")}, pluck="name" + ) + + total_teams = len(teams) + for i, team in enumerate(teams): + update_progress_bar("Updating team", i, total_teams) + team = jingrow.get_pg("Team", team) + team.set_referrer_id() + team.db_set("referrer_id", team.referrer_id, update_modified=False) diff --git a/jcloud/jcloud/pagetype/team/patches/set_team_title.py b/jcloud/jcloud/pagetype/team/patches/set_team_title.py new file 
mode 100644 index 0000000..b17e553 --- /dev/null +++ b/jcloud/jcloud/pagetype/team/patches/set_team_title.py @@ -0,0 +1,14 @@ +# Copyright (c) 2023, JINGROW +# For license information, please see license.txt + +import jingrow + + +def execute(): + jingrow.reload_pagetype("Team") + jingrow.db.sql( + """ + UPDATE `tabTeam` + SET team_title = user + """ + ) diff --git a/jcloud/jcloud/pagetype/team/suspend_sites.py b/jcloud/jcloud/pagetype/team/suspend_sites.py new file mode 100644 index 0000000..1c45643 --- /dev/null +++ b/jcloud/jcloud/pagetype/team/suspend_sites.py @@ -0,0 +1,109 @@ +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + +""" +Suspend Sites of defaulter accounts. + +This module deals with suspending sites of defaulters. + +Defaulters are identified based on the following conditions: +- Is not a free account +- Is not a Legacy Partner account with payment mode as Partner Credits +- Has at least one unpaid invoice +- Has an active site + +The `execute` method is the main method which is run by the scheduler on every day of the month. +""" + +import jingrow +from jingrow.utils import add_days, get_first_day, get_last_day, getdate + +from jcloud.utils import log_error + + +def execute(): + today = getdate() + first_day_of_month = get_first_day(today) + nineth_day_of_month = add_days(first_day_of_month, 8) + + if today >= first_day_of_month and today <= nineth_day_of_month: + return + + teams_with_unpaid_invoices = get_teams_with_unpaid_invoices() + + for d in teams_with_unpaid_invoices[:30]: + team = jingrow.get_pg("Team", d.team) + + # suspend sites + suspend_sites_and_send_email(team) + + +def suspend_sites_and_send_email(team): + try: + sites = team.suspend_sites(reason="Unpaid Invoices") + jingrow.db.commit() + except Exception: + log_error( + f"Error while suspending sites for team {team.name}", + traceback=jingrow.get_traceback(), + ) + jingrow.db.rollback() + # send email + if sites: + email = team.user + jingrow.sendmail( + recipients=email, + subject="Your sites have been suspended on Jingrow", + template="suspended_sites", + args={ + "subject": "Your sites have been suspended on Jingrow", + "sites": sites, + }, + ) + + +def get_teams_with_unpaid_invoices(): + """Find out teams which has active sites and unpaid invoices and not a free account""" + today = getdate() + # last day of previous month + last_day = get_last_day(jingrow.utils.add_months(today, -1)) + + plan = jingrow.qb.PageType("Site Plan") + query = ( + jingrow.qb.from_(plan) + .select(plan.name) + .where((plan.enabled == 1) & (plan.is_jingrow_plan == 1)) + ).run(as_dict=True) + jingrow_plans = [d.name for d in query] + + invoice = jingrow.qb.PageType("Invoice") + team = jingrow.qb.PageType("Team") + site = jingrow.qb.PageType("Site") + + query = ( + jingrow.qb.from_(invoice) + .inner_join(team) + .on(invoice.team == team.name) + .inner_join(site) + .on(site.team == team.name) + .where( + (site.status).isin(["Active", "Inactive"]) + & (team.enabled == 1) + & (team.free_account == 0) + & (invoice.status == "Unpaid") + & (invoice.docstatus < 2) + & (invoice.type == "Subscription") + & (site.free == 0) + & (site.plan).notin(jingrow_plans) + & (invoice.period_end <= last_day) + ) + .select(invoice.team) + .distinct() + ) + + first_day = get_first_day(today) + two_weeks = add_days(first_day, 14) # 15th day of the month + if today < two_weeks: + query = query.where(team.jerp_partner == 0) + + return query.run(as_dict=True) diff --git a/jcloud/jcloud/pagetype/team/team.js 
b/jcloud/jcloud/pagetype/team/team.js new file mode 100644 index 0000000..6bd999e --- /dev/null +++ b/jcloud/jcloud/pagetype/team/team.js @@ -0,0 +1,123 @@ +// Copyright (c) 2020, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Team', { + refresh: function (frm) { + jingrow.dynamic_link = { pg: frm.pg, fieldname: 'name', pagetype: 'Team' }; + jingrow.contacts.render_address_and_contact(frm); + + if (!frm.pg.jerp_partner) { + frm.add_custom_button('Enable Partner Privileges', () => + jingrow.confirm( + `Enable JERP Partner Privileges for ${frm.pg.name.bold()}? They will be allowed to create sites without adding a card and can use Partner Credits to pay for invoices.`, + () => + frm.call('enable_jerp_partner_privileges').then(() => + jingrow.msgprint({ + title: 'Note: Payment mode changed to `Partner Credits`', + message: + 'Please make sure existing prepaid credits are taken care of.', + }), + ), + ), + ); + } else { + frm.add_custom_button('Disable Partner Privileges', () => + jingrow.confirm( + `Disable JERP Partner Privileges for ${frm.pg.name.bold()}? `, + () => + frm.call('disable_jerp_partner_privileges').then(() => { + jingrow.msgprint({ + title: 'Partner Privileges Disabled', + message: + frm.pg.payment_mode === 'Partner Credits' + ? 'Payment mode is still Partner Credits. If the credits are low for settling invoices, you can change the payment mode.' + : '', + }); + frm.refresh(); + }), + ), + ); + } + + frm.add_custom_button( + 'Suspend Sites', + () => { + jingrow.prompt( + { fieldtype: 'Data', label: 'Reason', fieldname: 'reason', reqd: 1 }, + ({ reason }) => { + frm.call('suspend_sites', { reason }).then((r) => { + const sites = r.message; + let how_many = 'No'; + if (sites) { + how_many = sites.length; + } + jingrow.show_alert(`${how_many} sites were suspended.`); + }); + }, + ); + }, + 'Actions', + ); + frm.add_custom_button( + 'Unsuspend Sites', + () => { + jingrow.prompt( + { fieldtype: 'Data', label: 'Reason', fieldname: 'reason', reqd: 1 }, + ({ reason }) => { + frm.call('unsuspend_sites', { reason }).then((r) => { + const sites = r.message; + let how_many = 'No'; + if (sites) { + how_many = sites.length; + } + jingrow.show_alert(`${how_many} sites were unsuspended.`); + }); + }, + ); + }, + 'Actions', + ); + + frm.add_custom_button('Impersonate Team', () => { + let team = frm.pg.name; + window.open(`/dashboard/impersonate/${team}`); + }); + + if (frm.pg.payment_mode === 'Partner Credits') { + frm.add_custom_button('Get Partner Credits', () => + frm.call('get_available_partner_credits').then((d) => { + jingrow.msgprint({ + title: 'Credit Balance Fetched Successfully', + message: `Available Credits on jingrow.com: ${frm.pg.currency} ${d.message}`, + indicator: 'green', + }); + }), + ); + } + }, +}); + +jingrow.ui.form.on('Team Member', { + impersonate: function (frm, pagetype, member) { + jingrow.prompt( + [ + { + fieldtype: 'HTML', + options: 'Beware! 
Your current session will be replaced.', + }, + { + fieldtype: 'Text Editor', + label: 'Reason', + fieldname: 'reason', + reqd: 1, + }, + ], + ({ reason }) => { + frm.call('impersonate', { reason, member }).then((r) => { + location.href = '/dashboard'; + }); + }, + 'Impersonate User', + ); + }, +}); diff --git a/jcloud/jcloud/pagetype/team/team.json b/jcloud/jcloud/pagetype/team/team.json new file mode 100644 index 0000000..6c350dd --- /dev/null +++ b/jcloud/jcloud/pagetype/team/team.json @@ -0,0 +1,571 @@ +{ + "actions": [], + "allow_rename": 1, + "autoname": "hash", + "creation": "2022-01-28 20:07:37.989538", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "enabled", + "team_title", + "user", + "account_request", + "partner_email", + "billing_team", + "parent_team", + "column_break_wejg", + "jerp_partner", + "is_developer", + "is_pro", + "is_saas_user", + "is_code_server_user", + "free_account", + "via_jerp", + "enforce_2fa", + "section_break_6", + "team_members", + "section_break_tdm9", + "child_team_members", + "section_break_9", + "send_notifications", + "billing_email", + "column_break_9", + "notify_email", + "last_used_team", + "subscription_details_section", + "stripe_customer_id", + "country", + "currency", + "payment_mode", + "default_payment_method", + "billing_name", + "billing_address", + "free_credits_allocated", + "column_break_12", + "address_html", + "custom_apps_section", + "github_access_token", + "column_break_uyxo", + "mpesa_tax_id", + "mpesa_phone_number", + "partner_section", + "partner_referral_code", + "partnership_date", + "column_break_ppov", + "jingrow_partnership_date", + "partner_commission", + "feature_flags_section", + "referrer_id", + "ssh_access_enabled", + "skip_backups", + "enable_inplace_updates", + "column_break_31", + "database_access_enabled", + "enable_performance_tuning", + "razorpay_enabled", + "servers_enabled", + "code_servers_enabled", + "self_hosted_servers_enabled", + "security_portal_enabled", + "benches_enabled", + "mpesa_enabled", + "section_break_28", + "communication_emails", + "discounts_section", + "discounts", + "is_us_eu" + ], + "fields": [ + { + "fieldname": "user", + "fieldtype": "Link", + "label": "User", + "options": "User", + "search_index": 1 + }, + { + "fieldname": "team_members", + "fieldtype": "Table", + "label": "Team Members", + "options": "Team Member" + }, + { + "default": "0", + "fieldname": "enabled", + "fieldtype": "Check", + "label": "Enabled" + }, + { + "fieldname": "section_break_6", + "fieldtype": "Section Break" + }, + { + "fieldname": "subscription_details_section", + "fieldtype": "Section Break", + "label": "Subscription Details" + }, + { + "fieldname": "stripe_customer_id", + "fieldtype": "Data", + "label": "Stripe Customer ID", + "read_only": 1 + }, + { + "fieldname": "country", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Country", + "options": "Country" + }, + { + "fieldname": "currency", + "fieldtype": "Link", + "label": "Currency", + "options": "Currency", + "read_only": 1 + }, + { + "fieldname": "default_payment_method", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Default Payment Method", + "options": "Stripe Payment Method" + }, + { + "default": "0", + "description": "If checked, this user can Transfer credits from JERP.com", + "fieldname": "jerp_partner", + "fieldtype": "Check", + "in_standard_filter": 1, + "label": "JERP Partner", + "read_only": 1 + }, + { + "default": "0", + "description": "If checked, usage data will not be sent to 
Stripe and they won't be charged", + "fieldname": "free_account", + "fieldtype": "Check", + "in_standard_filter": 1, + "label": "Free Account", + "search_index": 1 + }, + { + "fieldname": "address_html", + "fieldtype": "HTML", + "label": "Address HTML" + }, + { + "fieldname": "column_break_12", + "fieldtype": "Column Break" + }, + { + "default": "0", + "fieldname": "free_credits_allocated", + "fieldtype": "Check", + "label": "Free Credits Allocated" + }, + { + "fieldname": "billing_address", + "fieldtype": "Link", + "label": "\u8d26\u5355\u5730\u5740", + "options": "Address" + }, + { + "fieldname": "github_access_token", + "fieldtype": "Data", + "label": "GitHub Access Token" + }, + { + "fieldname": "feature_flags_section", + "fieldtype": "Section Break", + "label": "Feature Flags" + }, + { + "fieldname": "custom_apps_section", + "fieldtype": "Section Break", + "label": "Custom Apps" + }, + { + "fieldname": "billing_name", + "fieldtype": "Data", + "label": "\u8d26\u5355\u540d\u79f0" + }, + { + "default": "0", + "description": "Enabled if this account was created via the JERP signup form", + "fieldname": "via_jerp", + "fieldtype": "Check", + "label": "Via JERP", + "read_only": 1 + }, + { + "default": "0", + "description": "A developer creates app(s) for the marketplace", + "fieldname": "is_developer", + "fieldtype": "Check", + "in_standard_filter": 1, + "label": "Is Developer" + }, + { + "fieldname": "payment_mode", + "fieldtype": "Select", + "label": "Payment Mode", + "options": "\nCard\nPrepaid Credits\nPaid By Partner" + }, + { + "fetch_from": "user.email", + "fetch_if_empty": 1, + "fieldname": "notify_email", + "fieldtype": "Data", + "label": "Notify Email" + }, + { + "fieldname": "column_break_9", + "fieldtype": "Column Break" + }, + { + "fieldname": "section_break_9", + "fieldtype": "Section Break" + }, + { + "default": "1", + "fieldname": "send_notifications", + "fieldtype": "Check", + "label": "Send Notifications" + }, + { + "fieldname": "referrer_id", + "fieldtype": "Data", + "label": "Referrer ID", + "read_only": 1 + }, + { + "fieldname": "communication_emails", + "fieldtype": "Table", + "label": "Communication Emails", + "options": "Communication Email" + }, + { + "fieldname": "section_break_28", + "fieldtype": "Section Break" + }, + { + "default": "1", + "fieldname": "ssh_access_enabled", + "fieldtype": "Check", + "label": "SSH Access Enabled" + }, + { + "default": "1", + "fieldname": "database_access_enabled", + "fieldtype": "Check", + "label": "Database Access Enabled" + }, + { + "collapsible": 1, + "fieldname": "discounts_section", + "fieldtype": "Section Break", + "label": "Discounts" + }, + { + "fieldname": "discounts", + "fieldtype": "Table", + "label": "Discounts", + "options": "Invoice Discount" + }, + { + "fieldname": "partner_email", + "fieldtype": "Data", + "label": "Partner Email" + }, + { + "fieldname": "column_break_31", + "fieldtype": "Column Break" + }, + { + "default": "0", + "fieldname": "razorpay_enabled", + "fieldtype": "Check", + "label": "RazorPay Enabled" + }, + { + "default": "0", + "fieldname": "is_us_eu", + "fieldtype": "Check", + "label": "Is US / EU" + }, + { + "default": "0", + "fieldname": "servers_enabled", + "fieldtype": "Check", + "label": "Servers Enabled" + }, + { + "fieldname": "last_used_team", + "fieldtype": "Link", + "label": "Last Used Team", + "options": "Team" + }, + { + "default": "0", + "description": "If checked, team can skip backups for site update", + "fieldname": "skip_backups", + "fieldtype": "Check", + "label": "Allow to skip 
Backups" + }, + { + "fieldname": "parent_team", + "fieldtype": "Link", + "label": "Parent Team", + "options": "Team", + "search_index": 1 + }, + { + "fieldname": "section_break_tdm9", + "fieldtype": "Section Break" + }, + { + "fieldname": "child_team_members", + "fieldtype": "Table", + "label": "Child Team Members", + "options": "Child Team Member" + }, + { + "fieldname": "team_title", + "fieldtype": "Data", + "label": "Team Title" + }, + { + "default": "0", + "fieldname": "self_hosted_servers_enabled", + "fieldtype": "Check", + "label": "Self Hosted Servers Enabled" + }, + { + "fieldname": "account_request", + "fieldtype": "Link", + "label": "Account Request", + "options": "Account Request" + }, + { + "default": "0", + "description": "SaaS user sees a simplified version of the dashboard", + "fieldname": "is_saas_user", + "fieldtype": "Check", + "label": "Is SaaS User" + }, + { + "default": "0", + "fieldname": "security_portal_enabled", + "fieldtype": "Check", + "label": "Security Portal Enabled" + }, + { + "default": "0", + "fieldname": "benches_enabled", + "fieldtype": "Check", + "label": "Benches Enabled" + }, + { + "default": "0", + "fieldname": "code_servers_enabled", + "fieldtype": "Check", + "label": "Code Servers Enabled" + }, + { + "depends_on": "eval: !pg.jerp_partner", + "fieldname": "billing_team", + "fieldtype": "Link", + "label": "Billing Team", + "options": "Team" + }, + { + "fieldname": "partner_referral_code", + "fieldtype": "Data", + "label": "Partner Referral Code", + "read_only": 1 + }, + { + "fieldname": "partner_section", + "fieldtype": "Section Break", + "label": "Partner" + }, + { + "depends_on": "eval:!pg.jerp_partner && pg.partner_email", + "fieldname": "partnership_date", + "fieldtype": "Date", + "label": "Customer Partnership Date" + }, + { + "fieldname": "column_break_ppov", + "fieldtype": "Column Break" + }, + { + "depends_on": "eval:pg.jerp_partner", + "description": "Fetched from jingrow.com", + "fieldname": "jingrow_partnership_date", + "fieldtype": "Date", + "label": "Jingrow Partnership Date", + "read_only": 1 + }, + { + "default": "0", + "description": "If checked, code server is enabled on created benches.", + "fieldname": "is_code_server_user", + "fieldtype": "Check", + "label": "Is Code Server User" + }, + { + "fieldname": "column_break_wejg", + "fieldtype": "Column Break" + }, + { + "default": "false", + "fieldname": "enable_performance_tuning", + "fieldtype": "Check", + "label": "Enable Performance Tuning" + }, + { + "default": "0", + "description": "Enforces 2FA to all members", + "fieldname": "enforce_2fa", + "fieldtype": "Check", + "label": "Enforce 2FA" + }, + { + "default": "0", + "description": "Allows bypassing build to update a Bench (if conditions are met)", + "fieldname": "enable_inplace_updates", + "fieldtype": "Check", + "label": "Enable In Place Updates" + }, + { + "fieldname": "billing_email", + "fieldtype": "Data", + "label": "Billing Email" + }, + { + "default": "0", + "fieldname": "mpesa_enabled", + "fieldtype": "Check", + "label": "Mpesa Enabled" + }, + { + "fieldname": "column_break_uyxo", + "fieldtype": "Column Break" + }, + { + "fieldname": "mpesa_tax_id", + "fieldtype": "Data", + "label": "Mpesa Tax Id" + }, + { + "fieldname": "mpesa_phone_number", + "fieldtype": "Data", + "label": "Mpesa Phone Number" + }, + { + "fieldname": "partner_commission", + "fieldtype": "Percent", + "label": "Partner Commission" + }, + { + "default": "0", + "description": "\u4e13\u4e1a\u7528\u6237\u62e5\u6709\u66f4\u591a\u5b9a\u5236\u6743\u9650", + 
"fieldname": "is_pro", + "fieldtype": "Check", + "label": "Is Pro" + } + ], + "links": [ + { + "group": "General", + "link_pagetype": "Release Group", + "link_fieldname": "team" + }, + { + "group": "General", + "link_pagetype": "Site", + "link_fieldname": "team" + }, + { + "group": "General", + "link_pagetype": "App", + "link_fieldname": "team" + }, + { + "group": "Billing", + "link_pagetype": "Invoice", + "link_fieldname": "team" + }, + { + "group": "Billing", + "link_pagetype": "Usage Record", + "link_fieldname": "team" + }, + { + "group": "Billing", + "link_pagetype": "Subscription", + "link_fieldname": "team" + }, + { + "group": "Billing", + "link_pagetype": "Balance Transaction", + "link_fieldname": "team" + }, + { + "group": "Billing", + "link_pagetype": "Stripe Webhook Log", + "link_fieldname": "team" + }, + { + "group": "Marketplace", + "link_pagetype": "Marketplace App", + "link_fieldname": "team" + }, + { + "group": "Marketplace", + "link_pagetype": "Marketplace Publisher Profile", + "link_fieldname": "team" + }, + { + "group": "Billing", + "link_pagetype": "Stripe Payment Method", + "link_fieldname": "team" + } + ], + "modified": "2025-03-27 15:12:25.729714", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Team", + "naming_rule": "Random", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + }, + { + "create": 1, + "read": 1, + "role": "Jcloud Admin", + "write": 1 + } + ], + "quick_entry": 1, + "show_title_field_in_link": 1, + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "title_field": "user", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/team/team.py b/jcloud/jcloud/pagetype/team/team.py new file mode 100644 index 0000000..5684404 --- /dev/null +++ b/jcloud/jcloud/pagetype/team/team.py @@ -0,0 +1,1517 @@ +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt +from __future__ import annotations + +import os +from hashlib import blake2b + +import jingrow +from jingrow import _ +from jingrow.contacts.address_and_contact import load_address_and_contact +from jingrow.core.utils import find +from jingrow.model.document import Document +from jingrow.utils import get_fullname, get_url_to_form, random_string + +from jcloud.api.client import dashboard_whitelist +from jcloud.exceptions import JingrowioServerNotSet +from jcloud.jcloud.pagetype.telegram_message.telegram_message import TelegramMessage +from jcloud.utils import get_valid_teams_for_user, has_role, log_error +from jcloud.utils.billing import ( + get_jingrow_io_connection, + get_stripe, + process_micro_debit_test_charge, +) +from jcloud.utils.telemetry import capture + + +class Team(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + from jcloud.jcloud.pagetype.child_team_member.child_team_member import ChildTeamMember + from jcloud.jcloud.pagetype.communication_email.communication_email import CommunicationEmail + from jcloud.jcloud.pagetype.invoice_discount.invoice_discount import InvoiceDiscount + from jcloud.jcloud.pagetype.team_member.team_member import TeamMember + + account_request: DF.Link | None + benches_enabled: DF.Check + billing_address: DF.Link | None + billing_email: DF.Data | None + billing_name: DF.Data | None + billing_team: DF.Link | None + child_team_members: DF.Table[ChildTeamMember] + code_servers_enabled: DF.Check + communication_emails: DF.Table[CommunicationEmail] + country: DF.Link | None + currency: DF.Link | None + database_access_enabled: DF.Check + default_payment_method: DF.Link | None + discounts: DF.Table[InvoiceDiscount] + enable_inplace_updates: DF.Check + enable_performance_tuning: DF.Check + enabled: DF.Check + enforce_2fa: DF.Check + jerp_partner: DF.Check + jingrow_partnership_date: DF.Date | None + free_account: DF.Check + free_credits_allocated: DF.Check + github_access_token: DF.Data | None + is_code_server_user: DF.Check + is_developer: DF.Check + is_pro: DF.Check + is_saas_user: DF.Check + is_us_eu: DF.Check + last_used_team: DF.Link | None + mpesa_enabled: DF.Check + mpesa_phone_number: DF.Data | None + mpesa_tax_id: DF.Data | None + notify_email: DF.Data | None + parent_team: DF.Link | None + partner_commission: DF.Percent + partner_email: DF.Data | None + partner_referral_code: DF.Data | None + partnership_date: DF.Date | None + payment_mode: DF.Literal["", "Card", "Prepaid Credits", "Paid By Partner"] + razorpay_enabled: DF.Check + referrer_id: DF.Data | None + security_portal_enabled: DF.Check + self_hosted_servers_enabled: DF.Check + send_notifications: DF.Check + servers_enabled: DF.Check + skip_backups: DF.Check + ssh_access_enabled: DF.Check + stripe_customer_id: DF.Data | None + team_members: DF.Table[TeamMember] + team_title: DF.Data | None + user: DF.Link | None + via_jerp: DF.Check + # end: auto-generated types + + dashboard_fields = ( + "enabled", + "team_title", + "user", + "partner_email", + "jerp_partner", + "enforce_2fa", + "billing_team", + "team_members", + "child_team_members", + "notify_email", + "country", + "currency", + "payment_mode", + "default_payment_method", + "skip_backups", + "is_saas_user", + "billing_name", + "referrer_id", + "partner_referral_code", + "parent_team", + "is_developer", + "is_pro", + "enable_performance_tuning", + "enable_inplace_updates", + "servers_enabled", + "mpesa_tax_id", + "mpesa_phone_number", + "mpesa_enabled", + "account_request", + ) + + def get_pg(self, pg): + if ( + not jingrow.local.system_user() + and self.user != jingrow.session.user + and jingrow.session.user not in self.get_user_list() + ): + jingrow.throw("You are not allowed to access this document") + + user = jingrow.db.get_value( + "User", + jingrow.session.user, + ["name", "first_name", "last_name", "user_image", "user_type", "email", "api_key", "username", "mobile_no"], + as_dict=True, + ) + user.is_2fa_enabled = jingrow.db.get_value("User 2FA", {"user": user.name}, "enabled") + pg.user_info = user + pg.balance = self.get_balance() + pg.is_desk_user = user.user_type == "System User" + pg.is_support_agent = has_role("Jcloud Support Agent") + pg.valid_teams = get_valid_teams_for_user(jingrow.session.user) + pg.onboarding = self.get_onboarding() + pg.billing_info = 
self.billing_info() + pg.billing_details = self.billing_details() + pg.trial_sites = self.get_trial_sites() + pg.pending_site_request = self.get_pending_saas_site_request() + pg.payment_method = jingrow.db.get_value( + "Stripe Payment Method", + {"team": self.name, "name": self.default_payment_method}, + [ + "name", + "last_4", + "name_on_card", + "expiry_month", + "expiry_year", + "brand", + "stripe_mandate_id", + ], + as_dict=True, + ) + + def onload(self): + load_address_and_contact(self) + + @jingrow.whitelist() + def get_home_data(self): + return { + "sites": jingrow.db.get_all( + "Site", + {"team": self.name, "status": ["!=", "Archived"]}, + ["name", "host_name", "status"], + ), + } + + def validate(self): + self.validate_duplicate_members() + self.set_team_currency() + self.set_default_user() + self.set_billing_name() + self.set_partner_email() + self.validate_disable() + self.validate_billing_team() + + def before_insert(self): + + self.currency = "CNY" if self.country == "China" else "USD" + + if not self.referrer_id: + self.set_referrer_id() + + def set_notification_emails(self): + if not self.notify_email: + self.notify_email = self.user + + if not self.billing_email: + self.billing_email = self.user + + def set_referrer_id(self): + h = blake2b(digest_size=4) + h.update(self.user.encode()) + self.referrer_id = h.hexdigest() + + def set_partner_email(self): + if self.jerp_partner and not self.partner_email: + self.partner_email = self.user + + def validate_disable(self): + if self.has_value_changed("enabled") and self.enabled == 0 and has_unsettled_invoices(self.name): + jingrow.throw( + "Cannot disable team with Draft or Unpaid invoices. Please finalize and settle the pending invoices first" + ) + + def validate_billing_team(self): + if not (self.billing_team and self.payment_mode == "Paid By Partner"): + return + + if self.payment_mode == "Paid By Partner" and not self.billing_team: + jingrow.throw("Billing Team is mandatory for Paid By Partner payment mode") + + if self.payment_mode == "Paid By Partner" and has_unsettled_invoices(self.name): + jingrow.throw( + "Cannot set payment mode to Paid By Partner. Please finalize and settle the pending invoices first" + ) + + def delete(self, force=False, workflow=False): + if not (force or workflow): + jingrow.throw( + f"You are only deleting the Team Document for {self.name}. To continue to" + " do so, pass force=True with this call. Else, pass workflow=True to raise" + " a Team Deletion Request to trigger complete team deletion process." + ) + + if force: + return super().delete() + + if workflow: + return jingrow.get_pg({"pagetype": "Team Deletion Request", "team": self.name}).insert() + + jingrow.throw( + f"You are only deleting the Team Document for {self.name}. To continue to" + " do so, pass force=True with this call. Else, pass workflow=True to raise" + " a Team Deletion Request to trigger complete team deletion process." 
+ ) + return None + + def disable_account(self): + self.suspend_sites("Account disabled") + self.enabled = False + self.save() + self.add_comment("Info", "disabled account") + + def enable_account(self): + self.unsuspend_sites("Account enabled") + self.enabled = True + self.save() + self.add_comment("Info", "enabled account") + + @classmethod + def create_new( + cls, + account_request: AccountRequest, + first_name: str, + last_name: str, + password: str | None = None, + country: str | None = None, + is_us_eu: bool = False, + via_jerp: bool = False, + user_exists: bool = False, + ): + """Create new team along with user (user created first).""" + team: "Team" = jingrow.get_pg( + { + "pagetype": "Team", + "user": account_request.email, + "country": country, + "enabled": 1, + "via_jerp": via_jerp, + "is_us_eu": is_us_eu, + "account_request": account_request.name, + } + ) + + if not user_exists: + user = team.create_user( + first_name, last_name, account_request.email, password, account_request.role + ) + else: + user = jingrow.get_pg("User", account_request.email) + user.append_roles(account_request.role) + user.save(ignore_permissions=True) + + team.team_title = "Parent Team" + team.insert(ignore_permissions=True, ignore_links=True) + team.append("team_members", {"user": user.name}) + if not account_request.invited_by_parent_team: + team.append("communication_emails", {"type": "invoices", "value": user.name}) + team.append("communication_emails", {"type": "marketplace_notifications", "value": user.name}) + else: + team.parent_team = account_request.invited_by + + if account_request.product_trial: + team.is_saas_user = 1 + + team.save(ignore_permissions=True) + + team.create_stripe_customer() + + if account_request.referrer_id: + team.create_referral_bonus(account_request.referrer_id) + + if not team.via_jerp and not account_request.invited_by_parent_team: + team.create_upcoming_invoice() + return team + + @staticmethod + def create_user(first_name=None, last_name=None, email=None, password=None, role=None): + user = jingrow.new_pg("User") + user.first_name = first_name + user.last_name = last_name + user.email = email + user.owner = email + user.new_password = password + user.append_roles(role) + user.flags.no_welcome_mail = True + user.save(ignore_permissions=True) + return user + + def create_user_for_member( + self, + first_name=None, + last_name=None, + email=None, + password=None, + role=None, + jcloud_roles=None, + ): + user = jingrow.db.get_value("User", email, ["name"], as_dict=True) + if not user: + user = self.create_user(first_name, last_name, email, password, role) + + self.append("team_members", {"user": user.name}) + self.save(ignore_permissions=True) + + for role in jcloud_roles or []: + jingrow.get_pg("Jcloud Role", role.jcloud_role).add_user(user.name) + + @dashboard_whitelist() + def remove_team_member(self, member): + member_to_remove = find(self.team_members, lambda x: x.user == member) + if member_to_remove: + self.remove(member_to_remove) + + JcloudRole = jingrow.qb.PageType("Jcloud Role") + JcloudRoleUser = jingrow.qb.PageType("Jcloud Role User") + roles = ( + jingrow.qb.from_(JcloudRole) + .join(JcloudRoleUser) + .on(JcloudRole.name == JcloudRoleUser.parent) + .where(JcloudRoleUser.user == member) + .select(JcloudRole.name) + .run(as_dict=True, pluck="name") + ) + + for role in roles: + jingrow.get_pg("Jcloud Role", role).remove_user(member) + else: + jingrow.throw(f"Team member {jingrow.bold(member)} does not exist") + + self.save(ignore_permissions=True) + + def 
set_billing_name(self): + if not self.billing_name: + self.billing_name = jingrow.utils.get_fullname(self.user) + + def set_default_user(self): + if not self.user and self.team_members: + self.user = self.team_members[0].user + + def set_team_currency(self): + if not self.currency and self.country: + self.currency = "CNY" if self.country == "China" else "USD" + + def get_user_list(self): + return [row.user for row in self.team_members] + + def get_users_only_in_this_team(self): + return [ + user + for user in self.get_user_list() + if not jingrow.db.exists("Team Member", {"user": user, "parent": ("!=", self.name)}) + ] + + def validate_duplicate_members(self): + team_users = self.get_user_list() + duplicate_members = [m for m in team_users if team_users.count(m) > 1] + duplicate_members = list(set(duplicate_members)) + if duplicate_members: + jingrow.throw( + _("Duplicate Team Members: {0}").format(", ".join(duplicate_members)), + jingrow.DuplicateEntryError, + ) + + def validate_payment_mode(self): + """Simplified payment mode validation; no longer checks whether the balance is sufficient""" + # If no payment mode is set, default to Prepaid Credits + if not self.payment_mode: + self.payment_mode = "Prepaid Credits" + + # Keep only the basic validation for Card payments + if self.has_value_changed("payment_mode") and self.payment_mode == "Card": + if jingrow.db.count("Stripe Payment Method", {"team": self.name}) == 0: + jingrow.throw("No card added") + + # Handle the default payment method logic + if not self.is_new() and not self.default_payment_method: + # Reset the default flag on existing payment methods + payment_methods = jingrow.db.get_list( + "Stripe Payment Method", {"team": self.name, "is_default": 1} + ) + for pm in payment_methods: + pg = jingrow.get_pg("Stripe Payment Method", pm.name) + pg.is_default = 0 + pg.save() + + # Keep the telemetry event capture + if self.has_value_changed("payment_mode") and self.payment_mode and self.account_request: + old_pg = self.get_pg_before_save() + if (not old_pg) or (not old_pg.payment_mode): + ar = jingrow.get_pg("Account Request", self.account_request) + if not (ar.is_saas_signup() or ar.invited_by_parent_team): + capture("added_card_or_prepaid_credits", "fc_signup", self.user) + + def on_update(self): + if not self.enabled: + return + + self.validate_payment_mode() + self.update_draft_invoice_payment_mode() + self.validate_partnership_date() + + + def validate_partnership_date(self): + if self.jerp_partner or not self.partnership_date: + return + + if partner_email := self.partner_email: + jingrow_partnership_date = jingrow.db.get_value( + "Team", + {"enabled": 1, "jerp_partner": 1, "partner_email": partner_email}, + "jingrow_partnership_date", + ) + if jingrow_partnership_date and jingrow_partnership_date > jingrow.utils.getdate( + self.partnership_date + ): + jingrow.throw("Partnership date cannot be earlier than the partner's partnership date") + + def update_draft_invoice_payment_mode(self): + if self.has_value_changed("payment_mode"): + draft_invoices = jingrow.get_all( + "Invoice", filters={"docstatus": 0, "team": self.name}, pluck="name" + ) + + for invoice in draft_invoices: + jingrow.db.set_value("Invoice", invoice, "payment_mode", self.payment_mode) + + @jingrow.whitelist() + def impersonate(self, member, reason): + user = jingrow.db.get_value("Team Member", member, "user") + impersonation = jingrow.get_pg( + { + "pagetype": "Team Member Impersonation", + "user": user, + "impersonator": jingrow.session.user, + "team": self.name, + "member": member, + "reason": reason, + } + ) + impersonation.save() + jingrow.local.login_manager.login_as(user) + + @jingrow.whitelist() + def enable_jerp_partner_privileges(self): + self.jerp_partner = 1 + if not 
self.partner_email: + self.partner_email = self.user + self.jingrow_partnership_date = self.get_partnership_start_date() + self.servers_enabled = 1 + self.save(ignore_permissions=True) + self.create_partner_referral_code() + self.create_new_invoice() + + @jingrow.whitelist() + def disable_jerp_partner_privileges(self): + self.jerp_partner = 0 + self.save(ignore_permissions=True) + + def create_partner_referral_code(self): + if not self.partner_referral_code: + self.partner_referral_code = random_string(10).upper() + self.save(ignore_permissions=True) + + def get_partnership_start_date(self): + if jingrow.flags.in_test: + return jingrow.utils.getdate() + + client = get_jingrow_io_connection() + data = client.get_value("Partner", "start_date", {"email": self.partner_email}) + if not data: + jingrow.throw("Partner not found on framework.jingrow.com") + return jingrow.utils.getdate(data.get("start_date")) + + def create_new_invoice(self): + """ + After enabling partner privileges, new invoice should be created + to track the partner achievements + """ + # check if any active user with an invoice + if not jingrow.get_all("Invoice", {"team": self.name, "docstatus": ("<", 2)}, pluck="name"): + return + today = jingrow.utils.getdate() + current_invoice = jingrow.db.get_value( + "Invoice", + { + "team": self.name, + "type": "Subscription", + "docstatus": 0, + "period_end": jingrow.utils.get_last_day(today), + }, + "name", + ) + + if not current_invoice: + return + + current_inv_pg = jingrow.get_pg("Invoice", current_invoice) + + if current_inv_pg.partner_email and current_inv_pg.partner_email == self.partner_email: + # don't create new invoice if partner email is set + return + + if ( + not current_invoice + or today == jingrow.utils.get_last_day(today) + or today == current_inv_pg.period_start + ): + # don't create invoice if new team or today is the last day of the month + return + current_inv_pg.period_end = jingrow.utils.add_days(today, -1) + current_inv_pg.flags.on_partner_conversion = True + current_inv_pg.save() + current_inv_pg.finalize_invoice() + + # create invoice + invoice = jingrow.get_pg( + { + "pagetype": "Invoice", + "team": self.name, + "type": "Subscription", + "period_start": today, + } + ) + invoice.insert() + + def create_referral_bonus(self, referrer_id): + # Get team name with this this referrer id + referrer_team = jingrow.db.get_value("Team", {"referrer_id": referrer_id}) + jingrow.get_pg( + {"pagetype": "Referral Bonus", "for_team": self.name, "referred_by": referrer_team} + ).insert(ignore_permissions=True) + + def has_member(self, user): + return user in self.get_user_list() + + def is_defaulter(self): + if self.free_account: + return False + + try: + unpaid_invoices = jingrow.get_all( + "Invoice", + { + "status": "Unpaid", + "team": self.name, + "docstatus": ("<", 2), + "type": "Subscription", + }, + pluck="name", + ) + except jingrow.DoesNotExistError: + return False + + return unpaid_invoices + + def create_stripe_customer(self): + if not self.stripe_customer_id: + stripe = get_stripe() + customer = stripe.Customer.create(email=self.user, name=get_fullname(self.user)) + self.stripe_customer_id = customer.id + self.save() + + @jingrow.whitelist() + def update_billing_details(self, billing_details): + if self.billing_address: + address_pg = jingrow.get_pg("Address", self.billing_address) + if (address_pg.country != billing_details.country) and ( + address_pg.country == "China" or billing_details.country == "China" + ): + jingrow.throw("Cannot change country of billing 
address") + else: + if self.account_request: + ar: "AccountRequest" = jingrow.get_pg("Account Request", self.account_request) + if not (ar.is_saas_signup() or ar.invited_by_parent_team): + capture("added_billing_address", "fc_signup", self.user) + address_pg = jingrow.new_pg("Address") + address_pg.address_title = billing_details.billing_name or self.billing_name + address_pg.append( + "links", + {"link_pagetype": self.pagetype, "link_name": self.name, "link_title": self.name}, + ) + + address_pg.update( + { + "address_line1": billing_details.address, + "city": billing_details.city, + "state": billing_details.state, + "pincode": billing_details.get("postal_code", "").strip().replace(" ", ""), + "country": billing_details.country, + "gstin": billing_details.gstin, + } + ) + address_pg.save() + address_pg.reload() + + self.billing_name = billing_details.billing_name or self.billing_name + self.billing_address = address_pg.name + self.save() + self.reload() + + self.update_billing_details_on_draft_invoices() + + def update_billing_details_on_draft_invoices(self): + draft_invoices = jingrow.get_all("Invoice", {"team": self.name, "docstatus": 0}, pluck="name") + for draft_invoice in draft_invoices: + # Invoice.customer_name set by Invoice.validate() + jingrow.get_pg("Invoice", draft_invoice).save() + + def create_payment_method( + self, + payment_method_id, + setup_intent_id, + mandate_id, + mandate_reference, + set_default=False, + ): + stripe = get_stripe() + payment_method = stripe.PaymentMethod.retrieve(payment_method_id) + + pg = jingrow.get_pg( + { + "pagetype": "Stripe Payment Method", + "stripe_payment_method_id": payment_method["id"], + "last_4": payment_method["card"]["last4"], + "name_on_card": payment_method["billing_details"]["name"], + "expiry_month": payment_method["card"]["exp_month"], + "expiry_year": payment_method["card"]["exp_year"], + "brand": payment_method["card"]["brand"] or "", + "team": self.name, + "stripe_setup_intent_id": setup_intent_id, + "stripe_mandate_id": mandate_id if mandate_id else None, + "stripe_mandate_reference": mandate_reference if mandate_reference else None, + } + ) + pg.insert() + + # unsuspend sites on payment method added + self.unsuspend_sites(reason="Payment method added") + if set_default: + pg.set_default() + self.reload() + + self.remove_subscription_config_in_trial_sites() + + return pg + + def get_payment_methods(self): + return jingrow.db.get_all( + "Stripe Payment Method", + {"team": self.name}, + [ + "name", + "last_4", + "name_on_card", + "expiry_month", + "expiry_year", + "brand", + "is_default", + "creation", + ], + order_by="creation desc", + ) + + def get_past_invoices(self): + invoices = jingrow.db.get_all( + "Invoice", + filters={ + "team": self.name, + "status": ("not in", ("Draft", "Refunded")), + "docstatus": ("!=", 2), + }, + fields=[ + "name", + "total", + "amount_due", + "status", + "type", + "stripe_invoice_url", + "period_start", + "period_end", + "due_date", + "payment_date", + "currency", + "invoice_pdf", + "due_date as date", + ], + order_by="due_date desc", + ) + + for invoice in invoices: + invoice.formatted_total = jingrow.utils.fmt_money(invoice.total, 2, invoice.currency) + invoice.stripe_link_expired = False + if invoice.status == "Unpaid": + invoice.formatted_amount_due = jingrow.utils.fmt_money(invoice.amount_due, 2, invoice.currency) + days_diff = jingrow.utils.date_diff(jingrow.utils.now(), invoice.due_date) + if days_diff > 30: + invoice.stripe_link_expired = True + return invoices + + def 
allocate_credit_amount(self, amount, source, remark=None, type="Adjustment"): + pg = jingrow.get_pg( + pagetype="Balance Transaction", + team=self.name, + type=type, + source=source, + amount=amount, + description=remark, + ) + pg.insert(ignore_permissions=True) + pg.submit() + + self.reload() + if not self.payment_mode: + self.validate_payment_mode() + self.save(ignore_permissions=True) + return pg + + def get_available_credits(self): + def get_stripe_balance(): + return self.get_stripe_balance() + + return jingrow.cache().hget("customer_available_credits", self.name, generator=get_stripe_balance) + + def get_stripe_balance(self): + stripe = get_stripe() + customer_object = stripe.Customer.retrieve(self.stripe_customer_id) + return (customer_object["balance"] * -1) / 100 + + @dashboard_whitelist() + def get_team_members(self): + return get_team_members(self.name) + + @dashboard_whitelist() + def invite_team_member(self, email, roles=None): + JcloudRole = jingrow.qb.PageType("Jcloud Role") + JcloudRoleUser = jingrow.qb.PageType("Jcloud Role User") + + has_admin_access = ( + jingrow.qb.from_(JcloudRole) + .select(JcloudRole.name) + .join(JcloudRoleUser) + .on((JcloudRole.name == JcloudRoleUser.parent) & (JcloudRoleUser.user == jingrow.session.user)) + .where(JcloudRole.team == self.name) + .where(JcloudRole.admin_access == 1) + ) + + if jingrow.session.user != self.user and not has_admin_access.run(): + jingrow.throw(_("Only team owner can invite team members")) + + jingrow.utils.validate_email_address(email, True) + + if jingrow.db.exists("Team Member", {"user": email, "parent": self.name, "parenttype": "Team"}): + jingrow.throw(_("Team member already exists")) + + account_request = jingrow.get_pg( + { + "pagetype": "Account Request", + "team": self.name, + "email": email, + "role": "Jcloud Member", + "invited_by": self.user, + "send_email": True, + } + ) + + for role in roles: + account_request.append("jcloud_roles", {"jcloud_role": role}) + + account_request.insert() + + @jingrow.whitelist() + def get_balance(self): + res = jingrow.get_all( + "Balance Transaction", + filters={"team": self.name, "docstatus": 1, "type": ("!=", "Partnership Fee")}, + order_by="creation desc", + limit=1, + pluck="ending_balance", + ) + if not res: + return 0 + return res[0] + + def can_create_site(self): # noqa: C901 + why = "" + allow = (True, "") + + if not self.enabled: + why = "You cannot create a new site because your account is disabled" + return (False, why) + + if self.free_account or self.parent_team or self.billing_team: + return allow + + if self.is_saas_user and not self.payment_mode: + if not jingrow.db.get_all("Site", {"team": self.name}, limit=1): + return allow + why = "You have already created trial site in the past" + + # allow user to create their first site without payment method + if not jingrow.db.get_all("Site", {"team": self.name}, limit=1): + return allow + + + if self.payment_mode == "Prepaid Credits": + # if balance is greater than 0 or have atleast 2 paid invoices, then allow to create site + if ( + self.get_balance() > 0 + or jingrow.db.count( + "Invoice", + { + "team": self.name, + "status": "Paid", + "amount_paid": ("!=", 0), + }, + ) + > 2 + ): + return allow + why = "Cannot create site due to insufficient balance" + + if self.payment_mode == "Card": + if self.default_payment_method: + return allow + why = "Cannot create site without adding a card" + + return (False, why) + + def can_install_paid_apps(self): + if self.free_account or self.billing_team or self.payment_mode: + 
return True + + return bool( + jingrow.db.exists("Invoice", {"team": self.name, "amount_paid": (">", 0), "status": "Paid"}) + ) + + def billing_info(self): + """ + Return billing information + Note: this method previously relied on Stripe; it has now been simplified + """ + return { + "name": self.billing_name or jingrow.utils.get_fullname(self.user), + "email": self.notify_email or self.user, + "address": jingrow.db.get_value("Address", self.billing_address, "address_line1") if self.billing_address else "", + "city": jingrow.db.get_value("Address", self.billing_address, "city") if self.billing_address else "", + "state": jingrow.db.get_value("Address", self.billing_address, "state") if self.billing_address else "", + "country": self.country or "", + "postal_code": jingrow.db.get_value("Address", self.billing_address, "pincode") if self.billing_address else "", + "gstin": jingrow.db.get_value("Address", self.billing_address, "gstin") if self.billing_address else "" + } + + def billing_details(self): + """ + Return billing details + Note: this method previously relied on Stripe; it has now been simplified + """ + address = {} + if self.billing_address: + address = jingrow.db.get_value( + "Address", + self.billing_address, + ["address_line1", "city", "state", "country", "pincode", "gstin"], + as_dict=True, + ) or {} + + return { + "billing_name": self.billing_name, + "address": address.get("address_line1", ""), + "city": address.get("city", ""), + "state": address.get("state", ""), + "country": address.get("country", ""), + "postal_code": address.get("pincode", ""), + "gstin": address.get("gstin", "") + } + + def get_partner_level(self): + # fetch partner level from framework.jingrow.com + client = get_jingrow_io_connection() + response = client.session.get( + f"{client.url}/api/method/get_partner_level", + headers=client.headers, + params={"email": self.partner_email}, + ) + + if response.ok: + res = response.json() + partner_level = res.get("message") + certificate_count = res.get("certificates") + if partner_level: + return partner_level, certificate_count + return None + + self.add_comment(text="Failed to fetch partner level" + "

" + response.text) + return None + + def is_payment_mode_set(self): + if self.payment_mode in ("Prepaid Credits", "Paid By Partner") or ( + self.payment_mode == "Card" and self.default_payment_method and self.billing_address + ): + return True + return False + + def get_onboarding(self): + site_created = jingrow.db.count("Site", {"team": self.name}) > 0 + saas_site_request = self.get_pending_saas_site_request() + is_payment_mode_set = self.is_payment_mode_set() + if not is_payment_mode_set and self.parent_team: + parent_team = jingrow.get_cached_pg("Team", self.parent_team) + is_payment_mode_set = parent_team.is_payment_mode_set() + + complete = False + if ( + is_payment_mode_set + or jingrow.db.get_value("User", self.user, "user_type") == "System User" + or has_role("Jcloud Support Agent") + ): + complete = True + elif saas_site_request: + complete = False + + return jingrow._dict( + { + "site_created": site_created, + "is_saas_user": bool(self.via_jerp or self.is_saas_user), + "saas_site_request": saas_site_request, + "complete": complete, + "is_payment_mode_set": is_payment_mode_set, + } + ) + + def get_route_on_login(self): + if self.payment_mode: + return "/sites" + + if self.is_saas_user: + pending_site_request = self.get_pending_saas_site_request() + if pending_site_request: + return f"/create-site/{pending_site_request.product_trial}/setup?account_request={pending_site_request.account_request}" + + return "/welcome" + + def get_pending_saas_site_request(self): + if jingrow.db.exists("Product Trial Request", {"team": self.name, "status": "Site Created"}): + return None + + return jingrow.db.get_value( + "Product Trial Request", + { + "team": self.name, + "status": ("in", ["Pending", "Wait for Site", "Completing Setup Wizard", "Error"]), + }, + ["name", "product_trial", "product_trial.title", "status", "account_request"], + order_by="creation desc", + as_dict=True, + ) + + def get_trial_sites(self): + return jingrow.db.get_all( + "Site", + { + "team": self.name, + "is_standby": False, + "trial_end_date": ("is", "set"), + "status": ("!=", "Archived"), + }, + ["name", "trial_end_date", "standby_for_product.title as product_title", "host_name"], + order_by="`tabSite`.`modified` desc", + ) + + @jingrow.whitelist() + def suspend_sites(self, reason=None): + from jcloud.jcloud.pagetype.site.site import Site + + sites_to_suspend = self.get_sites_to_suspend() + for site in sites_to_suspend: + try: + Site("Site", site).suspend(reason, skip_reload=True) + except Exception: + log_error("Failed to Suspend Sites", traceback=jingrow.get_traceback()) + return sites_to_suspend + + def get_sites_to_suspend(self): + plan = jingrow.qb.PageType("Site Plan") + query = ( + jingrow.qb.from_(plan) + .select(plan.name) + .where((plan.enabled == 1) & ((plan.is_jingrow_plan == 1) | (plan.is_trial_plan == 1))) + ).run(as_dict=True) + jingrow_plans = [d.name for d in query] + + return jingrow.db.get_all( + "Site", + { + "team": self.name, + "status": ("not in", ("Archived", "Suspended")), + "free": 0, + "plan": ("not in", jingrow_plans), + }, + pluck="name", + ) + + def reallocate_workers_if_needed( + self, workloads_before: list[str, float, str], workloads_after: list[str, float, str] + ): + for before, after in zip(workloads_before, workloads_after): + if after[1] - before[1] >= 8: # 100 USD equivalent + jingrow.enqueue_pg( + "Server", + before[2], + method="auto_scale_workers", + job_id=f"auto_scale_workers:{before[2]}", + deduplicate=True, + enqueue_after_commit=True, + ) + + @jingrow.whitelist() + def 
unsuspend_sites(self, reason=None): + from jcloud.jcloud.pagetype.bench.bench import Bench + from jcloud.jcloud.pagetype.site.site import Site + + suspended_sites = [ + d.name for d in jingrow.db.get_all("Site", {"team": self.name, "status": "Suspended"}) + ] + workloads_before = list(Bench.get_workloads(suspended_sites)) + for site in suspended_sites: + Site("Site", site).unsuspend(reason, skip_reload=True) + workloads_after = list(Bench.get_workloads(suspended_sites)) + self.reallocate_workers_if_needed(workloads_before, workloads_after) + + return suspended_sites + + def remove_subscription_config_in_trial_sites(self): + for site in jingrow.db.get_all( + "Site", + {"team": self.name, "status": ("!=", "Archived"), "trial_end_date": ("is", "set")}, + pluck="name", + ): + try: + jingrow.get_pg("Site", site).update_site_config( + { + "subscription": {"status": "Subscribed"}, + } + ) + except Exception: + log_error("Failed to remove subscription config in trial sites") + + def get_upcoming_invoice(self, for_update=False): + # get the current period's invoice + today = jingrow.utils.today() + result = jingrow.db.get_all( + "Invoice", + filters={ + "status": "Draft", + "team": self.name, + "type": "Subscription", + "period_start": ("<=", today), + "period_end": (">=", today), + }, + order_by="creation desc", + limit=1, + pluck="name", + ) + if result: + return jingrow.get_pg("Invoice", result[0], for_update=for_update) + return None + + def create_upcoming_invoice(self): + today = jingrow.utils.today() + return jingrow.get_pg( + pagetype="Invoice", team=self.name, period_start=today, type="Subscription" + ).insert() + + def notify_with_email(self, recipients: list[str], **kwargs): + if not self.send_notifications: + return + if not recipients: + recipients = [self.notify_email] + + jingrow.sendmail(recipients=recipients, **kwargs) + + @jingrow.whitelist() + def send_telegram_alert_for_failed_payment(self, invoice): + team_url = get_url_to_form("Team", self.name) + invoice_url = get_url_to_form("Invoice", invoice) + message = f"Failed Invoice Payment [{invoice}]({invoice_url}) of Partner: [{self.name}]({team_url})" + TelegramMessage.enqueue(message=message) + + @jingrow.whitelist() + def send_email_for_failed_payment(self, invoice, sites=None): + invoice = jingrow.get_pg("Invoice", invoice) + email = ( + jingrow.db.get_value("Communication Email", {"parent": self.name, "type": "invoices"}, "value") + or self.user + ) + payment_method = self.default_payment_method + last_4 = jingrow.db.get_value("Stripe Payment Method", payment_method, "last_4") + account_update_link = jingrow.utils.get_url("/dashboard") + subject = "Invoice Payment Failed for Jingrow Subscription" + + jingrow.sendmail( + recipients=email, + subject=subject, + template="payment_failed_partner" if self.jerp_partner else "payment_failed", + args={ + "subject": subject, + "payment_link": invoice.stripe_invoice_url, + "amount": invoice.get_formatted("amount_due"), + "account_update_link": account_update_link, + "last_4": last_4 or "", + "card_not_added": not payment_method, + "sites": sites, + "team": self, + }, + ) + + +def get_team_members(team): + if not jingrow.db.exists("Team", team): + return [] + + r = jingrow.db.get_all("Team Member", filters={"parent": team}, fields=["user"]) + member_emails = [d.user for d in r] + + users = [] + if member_emails: + users = jingrow.db.sql( + """ + select + u.name, + u.first_name, + u.last_name, + u.full_name, + u.user_image, + u.name as email, + GROUP_CONCAT(r.`role`) as roles + from 
`tabUser` u + left join `tabHas Role` r + on (r.parent = u.name) + where u.name in %s + group by u.name + """, + [member_emails], + as_dict=True, + ) + for user in users: + user.roles = (user.roles or "").split(",") + + return users + + +def get_child_team_members(team): + if not jingrow.db.exists("Team", team): + return [] + + # a child team cannot be parent to another child team + if jingrow.get_value("Team", team, "parent_team"): + return [] + + child_team_members = [d.name for d in jingrow.db.get_all("Team", {"parent_team": team}, ["name"])] + + child_teams = [] + if child_team_members: + child_teams = jingrow.db.sql( + """ + select t.name, t.team_title, t.parent_team, t.user + from `tabTeam` t + where t.name in %s + and t.enabled = 1 + """, + [child_team_members], + as_dict=True, + ) + + return child_teams + + +def get_default_team(user): + if jingrow.db.exists("Team", user): + return user + return None + + +def process_stripe_webhook(pg, method): + """This method runs after a Stripe Webhook Log is created""" + + if pg.event_type not in ["payment_intent.succeeded"]: + return + + event = jingrow.parse_json(pg.payload) + payment_intent = event["data"]["object"] + if payment_intent.get("invoice"): + # ignore payment for invoice + return + + metadata = payment_intent.get("metadata") + payment_for = metadata.get("payment_for") + + if payment_for and payment_for == "micro_debit_test_charge": + process_micro_debit_test_charge(event) + return + + if payment_for and payment_for == "partnership_fee": + process_partnership_fee(payment_intent) + return + + handle_payment_intent_succeeded(payment_intent) + + +def handle_payment_intent_succeeded(payment_intent): # noqa: C901 + from datetime import datetime + + if isinstance(payment_intent, str): + stripe = get_stripe() + payment_intent = stripe.PaymentIntent.retrieve(payment_intent) + + metadata = payment_intent.get("metadata") + if jingrow.db.exists("Invoice", {"stripe_payment_intent_id": payment_intent["id"], "status": "Paid"}): + # ignore creating if already allocated + return + + if not jingrow.db.exists("Team", {"stripe_customer_id": payment_intent["customer"]}): + # might be checkout session payment + # log the stripe webhook log + # TODO: handle checkout session payment + return + team: Team = jingrow.get_pg("Team", {"stripe_customer_id": payment_intent["customer"]}) + amount_with_tax = payment_intent["amount"] / 100 + gst = float(metadata.get("gst", 0)) + amount = amount_with_tax - gst + balance_transaction = team.allocate_credit_amount( + amount, source="Prepaid Credits", remark=payment_intent["id"] + ) + + team.remove_subscription_config_in_trial_sites() + invoice = jingrow.get_pg( + pagetype="Invoice", + team=team.name, + type="Prepaid Credits", + status="Paid", + due_date=datetime.fromtimestamp(payment_intent["created"]), + total=amount, + amount_due=amount, + gst=gst or 0, + amount_due_with_tax=amount_with_tax, + amount_paid=amount_with_tax, + stripe_payment_intent_id=payment_intent["id"], + ) + invoice.append( + "items", + { + "description": "Prepaid Credits", + "document_type": "Balance Transaction", + "document_name": balance_transaction.name, + "quantity": 1, + "rate": amount, + }, + ) + invoice.insert() + invoice.reload() + + if not team.payment_mode: + jingrow.db.set_value("Team", team.name, "payment_mode", "Prepaid Credits") + if team.account_request: + ar: "AccountRequest" = jingrow.get_pg("Account Request", team.account_request) + if not (ar.is_saas_signup() or ar.invited_by_parent_team): + 
capture("added_card_or_prepaid_credits", "fc_signup", team.user) + + # latest stripe API sets charge id in latest_charge + charge = payment_intent.get("latest_charge") + if not charge: + # older stripe API sets charge id in charges.data + charges = payment_intent.get("charges", {}).get("data", []) + charge = charges[0]["id"] if charges else None + if charge: + # update transaction amount, fee and exchange rate + invoice.update_transaction_details(charge) + invoice.submit() + + _enqueue_finalize_unpaid_invoices_for_team(team.name) + + +def _enqueue_finalize_unpaid_invoices_for_team(team: str): + # Enqueue a background job to call finalize_draft_invoice for unpaid invoices + jingrow.enqueue( + "jcloud.jcloud.pagetype.team.team.enqueue_finalize_unpaid_for_team", + team=team, + queue="long", + ) + + +def enqueue_finalize_unpaid_for_team(team: str): + # get a list of unpaid invoices for the team + invoices = jingrow.get_all( + "Invoice", + filters={"team": team, "status": "Unpaid", "type": "Subscription"}, + pluck="name", + ) + + # Enqueue a background job to call finalize_invoice + for invoice in invoices: + pg = jingrow.get_pg("Invoice", invoice) + pg.finalize_invoice() + + +def process_partnership_fee(payment_intent): + from datetime import datetime + + if isinstance(payment_intent, str): + stripe = get_stripe() + payment_intent = stripe.PaymentIntent.retrieve(payment_intent) + + metadata = payment_intent.get("metadata") + if jingrow.db.exists("Invoice", {"stripe_payment_intent_id": payment_intent["id"], "status": "Paid"}): + # ignore creating duplicate partnership fee invoice + return + + team = jingrow.get_pg("Team", {"stripe_customer_id": payment_intent["customer"]}) + amount_with_tax = payment_intent["amount"] / 100 + gst = float(metadata.get("gst", 0)) + amount = amount_with_tax - gst + balance_transaction = team.allocate_credit_amount( + amount, source="Prepaid Credits", remark=payment_intent["id"], type="Partnership Fee" + ) + + invoice = jingrow.get_pg( + pagetype="Invoice", + team=team.name, + type="Partnership Fees", + status="Paid", + due_date=datetime.fromtimestamp(payment_intent["created"]), + total=amount, + amount_due=amount, + gst=gst or 0, + amount_due_with_tax=amount_with_tax, + amount_paid=amount_with_tax, + stripe_payment_intent_id=payment_intent["id"], + ) + invoice.append( + "items", + { + "description": "Partnership Fee", + "document_type": "Balance Transaction", + "document_name": balance_transaction.name, + "quantity": 1, + "rate": amount, + }, + ) + invoice.insert() + invoice.reload() + + # latest stripe API sets charge id in latest_charge + charge = payment_intent.get("latest_charge") + if not charge: + # older stripe API sets charge id in charges.data + charges = payment_intent.get("charges", {}).get("data", []) + charge = charges[0]["id"] if charges else None + if charge: + # update transaction amount, fee and exchange rate + invoice.update_transaction_details(charge) + invoice.submit() + + +def get_permission_query_conditions(user): + from jcloud.utils import get_current_team + + if not user: + user = jingrow.session.user + + user_type = jingrow.db.get_value("User", user, "user_type", cache=True) + if user_type == "System User": + return "" + + team = get_current_team() + + return f"(`tabTeam`.`name` = {jingrow.db.escape(team)})" + + +def has_permission(pg, ptype, user): + from jcloud.utils import get_current_team + + if not user: + user = jingrow.session.user + + user_type = jingrow.db.get_value("User", user, "user_type", cache=True) + if user_type == "System 
User": + return True + + team = get_current_team(True) + child_team_members = [d.name for d in jingrow.db.get_all("Team", {"parent_team": team.name}, ["name"])] + if pg.name == team.name or pg.name in child_team_members: + return True + + return False + + +def validate_site_creation(pg, method): + if jingrow.session.user == "Administrator": + return + if not pg.team: + return + # allow product signups + if pg.standby_for_product: + return + + # validate site creation for team + team = jingrow.get_pg("Team", pg.team) + [allow_creation, why] = team.can_create_site() + if not allow_creation: + jingrow.throw(why) + + +def has_unsettled_invoices(team): + if not jingrow.db.exists( + "Invoice", {"team": team, "status": ("in", ("Unpaid", "Draft")), "type": "Subscription"} + ): + return False + + currency = jingrow.db.get_value("Team", team, "currency") + minimum_amount = 5 + if currency == "CNY": + minimum_amount = 450 + + data = jingrow.get_all( + "Invoice", + {"team": team, "status": ("in", ("Unpaid", "Draft")), "type": "Subscription"}, + ["sum(amount_due) as amount_due"], + )[0] + if data.amount_due <= minimum_amount: + return False + return True + + +def is_us_eu(): + """Is the customer from U.S. or European Union""" + from jcloud.utils import get_current_team + + countrygroup = [ + "United States", + "United Kingdom", + "Austria", + "Belgium", + "Bulgaria", + "Croatia", + "Republic of Cyprus", + "Czech Republic", + "Denmark", + "Estonia", + "Finland", + "France", + "Germany", + "Greece", + "Hungary", + "Ireland", + "Italy", + "Latvia", + "Lithuania", + "Luxembourg", + "Malta", + "Netherlands", + "Poland", + "Portugal", + "Romania", + "Slovakia", + "Slovenia", + "Spain", + "Sweden", + "Switzerland", + "Australia", + "New Zealand", + "Canada", + "Mexico", + ] + return jingrow.db.get_value("Team", get_current_team(), "country") in countrygroup diff --git a/jcloud/jcloud/pagetype/team/team_invoice.py b/jcloud/jcloud/pagetype/team/team_invoice.py new file mode 100644 index 0000000..a93281b --- /dev/null +++ b/jcloud/jcloud/pagetype/team/team_invoice.py @@ -0,0 +1,111 @@ +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +import jingrow +from jingrow.utils import getdate + +from jcloud.utils import log_error + + +class TeamInvoice: + def __init__(self, team, month, year): + if isinstance(team, jingrow.string_types): + team = jingrow.get_pg("Team", team) + self.team = team + self.month = month + self.year = year + + def create(self, period_start=None): + invoice = jingrow.new_pg("Invoice") + invoice.update( + { + "team": self.team.name, + "period_start": period_start, + "month": self.month, + "year": self.year, + } + ) + invoice.insert() + return invoice + + def update_site_usage(self, ledger_entry): + self.get_draft_invoice() + + if not self.draft_invoice: + log_error( + "No draft invoice created to update site usage", ledger_entry=ledger_entry.name + ) + return + + # return if this ledger_entry is already accounted for in an invoice + if ledger_entry.invoice: + return + + # return if this ledger_entry usage is not supposed to be billed + if ledger_entry.free_usage: + return + + # return if this ledger entry does not fall inside period of invoice + ledger_entry_date = getdate(ledger_entry.date) + start = getdate(self.draft_invoice.period_start) + end = getdate(self.draft_invoice.period_end) + if not (start <= ledger_entry_date <= end): + return + self.update_ledger_entry_in_invoice(ledger_entry, self.draft_invoice) + + def remove_ledger_entry_from_invoice(self, 
ledger_entry, invoice): + usage_row = None + for usage in invoice.site_usage: + if usage.site == ledger_entry.site and usage.plan == ledger_entry.plan: + usage_row = usage + + if usage_row and usage_row.days_active > 0: + usage_row.days_active = usage_row.days_active - 1 + + invoice.save() + ledger_entry.db_set("invoice", "") + + def update_ledger_entry_in_invoice(self, ledger_entry, invoice): + usage_row = None + for usage in invoice.site_usage: + if usage.site == ledger_entry.site and usage.plan == ledger_entry.plan: + usage_row = usage + + # if no row found, create a new row + if not usage_row: + invoice.append( + "site_usage", + {"site": ledger_entry.site, "plan": ledger_entry.plan, "days_active": 1}, + ) + # if found, update row + else: + usage_row.days_active = (usage_row.days_active or 0) + 1 + + invoice.save() + ledger_entry.db_set("invoice", invoice.name) + + def get_draft_invoice(self): + if hasattr(self, "draft_invoice"): + return self.draft_invoice + + res = jingrow.db.get_all( + "Invoice", + filters={ + "team": self.team.name, + "status": "Draft", + "month": self.month, + "year": self.year, + }, + limit=1, + ) + self.draft_invoice = jingrow.get_pg("Invoice", res[0].name) if res else None + return self.draft_invoice + + def get_invoice(self): + res = jingrow.db.get_all( + "Invoice", + filters={"team": self.team.name, "month": self.month, "year": self.year}, + limit=1, + ) + return jingrow.get_pg("Invoice", res[0].name) if res else None diff --git a/jcloud/jcloud/pagetype/team/test_team.py b/jcloud/jcloud/pagetype/team/test_team.py new file mode 100644 index 0000000..9b8e02c --- /dev/null +++ b/jcloud/jcloud/pagetype/team/test_team.py @@ -0,0 +1,83 @@ +# Copyright (c) 2020, JINGROW +# See license.txt +from __future__ import annotations + +import unittest +from unittest.mock import Mock, patch + +import jingrow +from jingrow.tests.ui_test_helpers import create_test_user + +from jcloud.jcloud.pagetype.account_request.test_account_request import ( + create_test_account_request, +) +from jcloud.jcloud.pagetype.team.team import Team + + +def create_test_jcloud_admin_team(email: str | None = None) -> Team: + """Create test jcloud admin user.""" + if not email: + email = jingrow.mock("email") + create_test_user(email) + user = jingrow.get_pg("User", {"email": email}) + user.remove_roles(*jingrow.get_all("Role", pluck="name")) + user.add_roles("Jcloud Admin") + return create_test_team(email) + + +@patch.object(Team, "update_billing_details_on_jingrowio", new=Mock()) +@patch.object(Team, "create_stripe_customer", new=Mock()) +def create_test_team(email: str | None = None, country="China") -> Team: + """Create test team pg.""" + if not email: + email = jingrow.mock("email") + create_test_user(email) # ignores if user already exists + user = jingrow.get_value("User", {"email": email}, "name") + team = jingrow.get_pg({"pagetype": "Team", "user": user, "enabled": 1, "country": country}).insert( + ignore_if_duplicate=True + ) + team.reload() + return team + + +class TestTeam(unittest.TestCase): + def tearDown(self): + jingrow.db.rollback() + + def test_create_new_method_works(self): + account_request = create_test_account_request("testsubdomain") + team_count_before = jingrow.db.count("Team") + with patch.object(Team, "create_stripe_customer"): + Team.create_new(account_request, "first name", "last name", "test@email.com", country="China") + team_count_after = jingrow.db.count("Team") + self.assertGreater(team_count_after, team_count_before) + + def 
test_new_team_has_correct_billing_name(self): + account_request = create_test_account_request("testsubdomain") + with patch.object(Team, "create_stripe_customer"): + team = Team.create_new( + account_request, "first name", "last name", "test@email.com", country="China" + ) + self.assertEqual(team.billing_name, "first name last name") + + def test_create_user_for_member_adds_team_member(self): + # create system manager to pass mandatory site requirement + Team.create_user("sys_mgr", email="testuser1@gmail.com", role="System Manager") + + team = create_test_team() + email = "testuser@jingrow.cloud" + team.create_user_for_member("test", "user", "testuser@jingrow.cloud") + self.assertTrue(team.has_member(email)) # kinda dumb because we assume has_member method is correct + + def test_new_team_has_correct_currency_set(self): + account_request1 = create_test_account_request("testsubdomain") + with patch.object(Team, "create_stripe_customer"): + team1 = Team.create_new(account_request1, "Jon", "Doe", "test@gmail.com", country="China") + self.assertEqual(team1.currency, "CNY") + + account_request2 = create_test_account_request("testsubdomain2") + with patch.object(Team, "create_stripe_customer"): + team2 = Team.create_new( + account_request2, "John", "Meyer", "jonmeyer@gmail.com", country="Pakistan" + ) + self.assertEqual(team2.currency, "USD") diff --git a/jcloud/jcloud/pagetype/team_change/__init__.py b/jcloud/jcloud/pagetype/team_change/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/team_change/team_change.js b/jcloud/jcloud/pagetype/team_change/team_change.js new file mode 100644 index 0000000..186748b --- /dev/null +++ b/jcloud/jcloud/pagetype/team_change/team_change.js @@ -0,0 +1,8 @@ +// Copyright (c) 2024, JINGROW +// For license information, please see license.txt + +// jingrow.ui.form.on("Team Change", { +// refresh(frm) { + +// }, +// }); diff --git a/jcloud/jcloud/pagetype/team_change/team_change.json b/jcloud/jcloud/pagetype/team_change/team_change.json new file mode 100644 index 0000000..923707e --- /dev/null +++ b/jcloud/jcloud/pagetype/team_change/team_change.json @@ -0,0 +1,118 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2024-01-16 11:13:22.843011", + "pagetype": "PageType", + "engine": "InnoDB", + "field_order": [ + "document_type", + "document_name", + "from_team", + "to_team", + "transfer_completed", + "reason", + "key" + ], + "fields": [ + { + "fieldname": "document_type", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Document Type", + "options": "PageType", + "reqd": 1 + }, + { + "fieldname": "from_team", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "From Team", + "options": "Team", + "reqd": 1 + }, + { + "fieldname": "to_team", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "To Team", + "options": "Team", + "reqd": 1 + }, + { + "fieldname": "document_name", + "fieldtype": "Dynamic Link", + "in_list_view": 1, + "label": "Document Name", + "options": "document_type", + "reqd": 1 + }, + { + "default": "0", + "fieldname": "transfer_completed", + "fieldtype": "Check", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Transfer Completed" + }, + { + "fieldname": "reason", + "fieldtype": "Long Text", + "label": "Reason" + }, + { + "fieldname": "key", + "fieldtype": "Data", + "label": "Key", + "read_only": 1 + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2025-02-20 
13:46:49.448661", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Team Change", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + }, + { + "create": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "Jcloud Admin", + "share": 1, + "write": 1 + }, + { + "create": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "Jcloud Member", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/team_change/team_change.py b/jcloud/jcloud/pagetype/team_change/team_change.py new file mode 100644 index 0000000..780a340 --- /dev/null +++ b/jcloud/jcloud/pagetype/team_change/team_change.py @@ -0,0 +1,55 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt + +from __future__ import annotations + +import jingrow +from jingrow.model.document import Document + + +class TeamChange(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + document_name: DF.DynamicLink + document_type: DF.Link + from_team: DF.Link + key: DF.Data | None + reason: DF.LongText | None + to_team: DF.Link + transfer_completed: DF.Check + # end: auto-generated types + + def validate(self): + team = jingrow.get_pg(self.document_type, self.document_name).team + if team != self.from_team: + jingrow.throw(f"The owner of {self.document_type} is not {self.from_team}") + + def on_update(self): + if self.document_type == "Site" and self.transfer_completed: + notify_email = jingrow.get_value("Team", self.to_team, "user") + jingrow.db.set_value( + "Site", self.document_name, {"team": self.to_team, "notify_email": notify_email} + ) + jingrow.db.set_value( + "Subscription", + {"document_name": self.document_name}, + "team", + self.to_team, + ) + jingrow.db.set_value("Site Domain", {"site": self.document_name}, "team", self.to_team) + tls_certificates = jingrow.get_all( + "Site Domain", + filters={"site": self.document_name}, + fields=["tls_certificate"], + pluck="tls_certificate", + ) + jingrow.db.set_value("TLS Certificate", {"name": ["in", tls_certificates]}, "team", self.to_team) + + if self.document_type == "Release Group" and self.transfer_completed: + jingrow.db.set_value("Release Group", self.document_name, "team", self.to_team) diff --git a/jcloud/jcloud/pagetype/team_change/test_team_change.py b/jcloud/jcloud/pagetype/team_change/test_team_change.py new file mode 100644 index 0000000..d9e7e22 --- /dev/null +++ b/jcloud/jcloud/pagetype/team_change/test_team_change.py @@ -0,0 +1,9 @@ +# Copyright (c) 2024, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests.utils import JingrowTestCase + + +class TestTeamChange(JingrowTestCase): + pass diff --git a/jcloud/jcloud/pagetype/team_deletion_request/__init__.py b/jcloud/jcloud/pagetype/team_deletion_request/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/team_deletion_request/team_deletion_request.js b/jcloud/jcloud/pagetype/team_deletion_request/team_deletion_request.js new file mode 100644 index 0000000..b7d729f --- /dev/null +++ b/jcloud/jcloud/pagetype/team_deletion_request/team_deletion_request.js @@ -0,0 
+1,12 @@ +// Copyright (c) 2021, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Team Deletion Request', { + onload: function (frm) { + jingrow.realtime.on('pg_update', (data) => { + if (!frm.is_dirty()) { + frm.reload_pg(); + } + }); + }, +}); diff --git a/jcloud/jcloud/pagetype/team_deletion_request/team_deletion_request.json b/jcloud/jcloud/pagetype/team_deletion_request/team_deletion_request.json new file mode 100644 index 0000000..a825a79 --- /dev/null +++ b/jcloud/jcloud/pagetype/team_deletion_request/team_deletion_request.json @@ -0,0 +1,112 @@ +{ + "actions": [], + "autoname": "format:deleted-team-{####}@deleted.com", + "creation": "2021-02-11 18:39:37.910757", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "team", + "column_break_2", + "status", + "section_break_4", + "team_disabled", + "stripe_data_deleted", + "jingrowio_data_deleted", + "data_anonymized", + "deletion_steps", + "users_anonymized" + ], + "fields": [ + { + "fieldname": "team", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Team", + "options": "Team", + "reqd": 1, + "set_only_once": 1 + }, + { + "fieldname": "status", + "fieldtype": "Select", + "in_list_view": 1, + "label": "Status", + "options": "Pending Verification\nDeletion Verified\nProcessing Deletion\nDeleted", + "read_only": 1 + }, + { + "fieldname": "column_break_2", + "fieldtype": "Column Break" + }, + { + "fieldname": "section_break_4", + "fieldtype": "Section Break", + "label": "Deletion Checklist" + }, + { + "default": "0", + "fieldname": "team_disabled", + "fieldtype": "Check", + "label": "Team Disabled", + "read_only": 1 + }, + { + "default": "0", + "fieldname": "stripe_data_deleted", + "fieldtype": "Check", + "label": "Stripe Data Deleted", + "read_only": 1 + }, + { + "default": "0", + "fieldname": "jingrowio_data_deleted", + "fieldtype": "Check", + "label": "framework.jingrow.com Data Deleted", + "read_only": 1 + }, + { + "default": "0", + "fieldname": "data_anonymized", + "fieldtype": "Check", + "label": "Data Anonymized", + "read_only": 1 + }, + { + "fieldname": "deletion_steps", + "fieldtype": "Table", + "label": "Data Anonymization Steps", + "options": "Personal Data Deletion Step" + }, + { + "fieldname": "users_anonymized", + "fieldtype": "Table", + "label": "Users Anonymized", + "options": "Team Member Deletion Request" + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2021-04-26 13:13:50.519109", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Team Deletion Request", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/team_deletion_request/team_deletion_request.py b/jcloud/jcloud/pagetype/team_deletion_request/team_deletion_request.py new file mode 100644 index 0000000..3093569 --- /dev/null +++ b/jcloud/jcloud/pagetype/team_deletion_request/team_deletion_request.py @@ -0,0 +1,297 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + +import jingrow +from jingrow.core.utils import find +from jingrow.utils.verified_command import get_signed_params +from jingrow.website.pagetype.personal_data_deletion_request.personal_data_deletion_request import ( + 
PersonalDataDeletionRequest, +) + + +def handle_exception(self): + jingrow.db.rollback() + traceback = f"

{jingrow.get_traceback()}
" + self.add_comment(text=f"Failure occurred during Data Deletion:{traceback}") + jingrow.db.commit() + + +class TeamDeletionRequest(PersonalDataDeletionRequest): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + from jingrow.website.pagetype.personal_data_deletion_step.personal_data_deletion_step import ( + PersonalDataDeletionStep, + ) + + from jcloud.jcloud.pagetype.team_member_deletion_request.team_member_deletion_request import ( + TeamMemberDeletionRequest, + ) + + data_anonymized: DF.Check + deletion_steps: DF.Table[PersonalDataDeletionStep] + jingrowio_data_deleted: DF.Check + status: DF.Literal[ + "Pending Verification", "Deletion Verified", "Processing Deletion", "Deleted" + ] + stripe_data_deleted: DF.Check + team: DF.Link + team_disabled: DF.Check + users_anonymized: DF.Table[TeamMemberDeletionRequest] + # end: auto-generated types + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.email = jingrow.db.get_value("Team", self.team, "user") + # turn off data deletions in partial content for the sake of sanity + self.full_match_privacy_docs += self.partial_privacy_docs + self.partial_privacy_docs = [] + + def before_insert(self): + self.validate_team_owner() + self.validate_duplicate_request() + + def after_insert(self): + self.add_deletion_steps() + self.set_users_anonymized() + + def validate(self): + self.validate_sites_states() + self.finalize_pending_invoices() + self.validate_outstanding_invoices() + + def on_update(self): + self.finish_up() + + def handle_exc(foo): + def after_execute(self): + try: + foo(self) + except Exception: + handle_exception(self) + + return after_execute + + @property + def team_pg(self): + return jingrow.get_cached_pg("Team", self.team) + + def rename_team_on_data_deletion(self): + if ( + self.status == "Deleted" + and self.name != self.team + and jingrow.db.exists("Team", self.team) + ): + jingrow.rename_pg("Team", self.team, self.name) + + def validate_team_owner(self): + if ( + self.team_pg.user == jingrow.session.user or "System Manager" in jingrow.get_roles() + ): + return + + jingrow.throw( + "You need to be a Team owner to request account deletion", exc=jingrow.PermissionError + ) + + def validate_duplicate_request(self): + if jingrow.db.exists(self.pagetype, {"team": self.team}): + jingrow.throw( + f"{self.pagetype} for {self.team} already exists!", exc=jingrow.DuplicateEntryError + ) + + def delete_team_data(self): + self.db_set("status", "Processing Deletion") + if not self.team_disabled: + self.disable_team() + if not self.jingrowio_data_deleted: + self.delete_data_on_jingrowio() + if not self.stripe_data_deleted: + self.delete_stripe_customer() + if ( + self.team_disabled and self.jingrowio_data_deleted and self.stripe_data_deleted + ) and not self.data_anonymized: + self.delete_data_on_jcloud() + self.finish_up() + + def finish_up(self): + if ( + self.team_disabled + and self.jingrowio_data_deleted + and self.stripe_data_deleted + and self.data_anonymized + ): + self.db_set("status", "Deleted") + self.rename_team_on_data_deletion() + jingrow.db.commit() + self.reload() + + def generate_url_for_confirmation(self): + params = get_signed_params({"team": self.team}) + api = jingrow.utils.get_url("/api/method/jcloud.api.account.delete_team") + url = f"{api}?{params}" + + if jingrow.conf.developer_mode: + print(f"URL generated for {self.pagetype} {self.name}: {url}") + 
+ return url + + @handle_exc + def disable_team(self): + team = self.team_pg + team.enabled = False + team.save() + self.db_set("team_disabled", True, commit=True) + self.reload() + + @handle_exc + def delete_stripe_customer(self): + from jcloud.api.billing import get_stripe + + stripe = get_stripe() + team = self.team_pg + + try: + stripe.Customer.delete(team.stripe_customer_id) + except stripe.error.InvalidRequestError as e: + if "No such customer" not in str(e): + raise e + + team.db_set("stripe_customer_id", False) + self.db_set("stripe_data_deleted", True, commit=True) + self.reload() + + @handle_exc + def delete_data_on_jingrowio(self): + """Anonymize data on jingrow.com""" + from jcloud.utils.billing import get_jingrow_io_connection + + client = get_jingrow_io_connection() + response = client.session.delete( + f"{client.url}/api/method/delete-fc-team", + data={"team": self.team}, + headers=client.headers, + ) + if not response.ok: + response.raise_for_status() + + self.db_set("jingrowio_data_deleted", True, commit=True) + self.reload() + + def set_users_anonymized(self): + def numerate_email(x, i): + user_email, domain = x.split("@") + return f"{user_email}-{i + 1}@{domain}" + + team_members = [row.user for row in self.team_pg.team_members] + members_only_in_this_team = [ + user + for user in team_members + if not jingrow.db.exists( + "Team Member", {"user": user, "parent": ("!=", self.team_pg.name)} + ) + ] + + renamed_dict = { + x: numerate_email(self.name, i) for i, x in enumerate(members_only_in_this_team) + } + + for now, then in renamed_dict.items(): + self.append( + "users_anonymized", + {"team_member": now, "anon_team_member": then, "deletion_status": "Pending"}, + ) + + self.db_update() + self.update_children() + jingrow.db.commit() + self.reload() + + @handle_exc + def delete_data_on_jcloud(self): + if not self.users_anonymized: + self.set_users_anonymized() + + def is_deletion_pending(email): + return find( + self.users_anonymized, + lambda x: x.get("team_member") == email and x.get("deletion_status") == "Pending", + ) + + for user in self.users_anonymized: + now = user.get("team_member") + then = user.get("anon_team_member") + + if now == then and is_deletion_pending(now): + # user has been anonymized. set status as deleted + pass + elif is_deletion_pending(now): + self._anonymize_data(now, then, commit=True) + else: + continue + + try: + self.users_anonymized = filter( + lambda x: (x.get("team_member") != now) and (x.get("anon_team_member") != then), + self.users_anonymized, + ) + self.append( + "users_anonymized", + {"team_member": then, "anon_team_member": then, "deletion_status": "Deleted"}, + ) + self.db_update() + self.update_children() + except Exception: + handle_exception(self) + + jingrow.db.commit() + + self.db_set("data_anonymized", True, commit=True) + self.reload() + + def validate_sites_states(self): + non_archived_sites = jingrow.get_all( + "Site", filters={"status": ("!=", "Archived"), "team": self.team}, pluck="name" + ) + if non_archived_sites: + jingrow.throw( + f"Team {self.team} has {len(non_archived_sites)} sites. 
Drop them" + " before you can delete your account" + ) + + def finalize_pending_invoices(self): + pending_invoices = jingrow.get_all( + "Invoice", + filters={"team": self.team}, + or_filters={"docstatus": 0, "status": "Draft"}, + pluck="name", + ) + for invoice in pending_invoices: + jingrow.get_pg("Invoice", invoice).finalize_invoice() + + def validate_outstanding_invoices(self): + if self.team_pg.is_defaulter(): + jingrow.throw("You have Unpaid Invoices. Clear them to delete your account") + + +def process_team_deletion_requests(): + # order in desc since deleting jcloud data takes the most time + pagetype = "Team Deletion Request" + deletion_requests = jingrow.get_all( + pagetype, + filters={"status": ("in", ["Deletion Verified", "Processing Deletion"])}, + pluck="name", + order_by="creation desc", + ) + for name in deletion_requests: + try: + tdr = jingrow.get_pg(pagetype, name) + tdr.delete_team_data() + jingrow.db.commit() + except Exception: + continue diff --git a/jcloud/jcloud/pagetype/team_deletion_request/test_team_deletion_request.py b/jcloud/jcloud/pagetype/team_deletion_request/test_team_deletion_request.py new file mode 100644 index 0000000..fa64ef3 --- /dev/null +++ b/jcloud/jcloud/pagetype/team_deletion_request/test_team_deletion_request.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2021, JINGROW +# See license.txt + +import unittest + +import jingrow +import requests + +from jcloud.jcloud.pagetype.team.test_team import create_test_team +from jcloud.jcloud.pagetype.team_deletion_request.team_deletion_request import ( + TeamDeletionRequest, +) + + +class TestTeamDeletionRequest(unittest.TestCase): + @classmethod + def setUpClass(cls) -> None: + cls.team = create_test_team() + return super().setUpClass() + + @property + def team_deletion_request(self): + if not getattr(self, "_tdr", None): + try: + self._tdr = jingrow.get_last_pg( + "Team Deletion Request", filters={"team": self.team.name} + ) + except jingrow.DoesNotExistError: + self._tdr = self.team.delete(workflow=True) + return self._tdr + + def test_team_pg_deletion_raise(self): + self.assertRaises(jingrow.ValidationError, self.team.delete) + + def test_team_pg_deletion(self): + self.assertIsInstance(self.team_deletion_request, TeamDeletionRequest) + self.assertEqual(self.team_deletion_request.status, "Pending Verification") + + def test_url_for_verification(self): + deletion_url = self.team_deletion_request.generate_url_for_confirmation() + self.assertTrue( + deletion_url.startswith( + jingrow.utils.get_url("/api/method/jcloud.api.account.delete_team") + ) + ) + + def test_team_deletion_api(self): + # TODO: Test if the API flow actually sets the status + deletion_url = self.team_deletion_request.generate_url_for_confirmation() + res = requests.get(deletion_url, allow_redirects=True) + self.assertTrue(res.ok) diff --git a/jcloud/jcloud/pagetype/team_member/__init__.py b/jcloud/jcloud/pagetype/team_member/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/team_member/team_member.json b/jcloud/jcloud/pagetype/team_member/team_member.json new file mode 100644 index 0000000..e8045aa --- /dev/null +++ b/jcloud/jcloud/pagetype/team_member/team_member.json @@ -0,0 +1,38 @@ +{ + "actions": [], + "creation": "2020-03-05 18:26:34.180580", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "user", + "impersonate" + ], + "fields": [ + { + "fieldname": "user", + "fieldtype": "Link", + "in_list_view": 1, + "label": "User", + "options": 
"User", + "reqd": 1 + }, + { + "fieldname": "impersonate", + "fieldtype": "Button", + "label": "Impersonate" + } + ], + "istable": 1, + "links": [], + "modified": "2020-07-31 11:48:59.695189", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Team Member", + "owner": "Administrator", + "permissions": [], + "quick_entry": 1, + "sort_field": "modified", + "sort_order": "DESC", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/team_member/team_member.py b/jcloud/jcloud/pagetype/team_member/team_member.py new file mode 100644 index 0000000..6e56921 --- /dev/null +++ b/jcloud/jcloud/pagetype/team_member/team_member.py @@ -0,0 +1,25 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +# import jingrow +from jingrow.model.document import Document + + +class TeamMember(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + user: DF.Link + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/team_member_deletion_request/__init__.py b/jcloud/jcloud/pagetype/team_member_deletion_request/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/team_member_deletion_request/team_member_deletion_request.js b/jcloud/jcloud/pagetype/team_member_deletion_request/team_member_deletion_request.js new file mode 100644 index 0000000..c718217 --- /dev/null +++ b/jcloud/jcloud/pagetype/team_member_deletion_request/team_member_deletion_request.js @@ -0,0 +1,7 @@ +// Copyright (c) 2021, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Team Member Deletion Request', { + // refresh: function(frm) { + // } +}); diff --git a/jcloud/jcloud/pagetype/team_member_deletion_request/team_member_deletion_request.json b/jcloud/jcloud/pagetype/team_member_deletion_request/team_member_deletion_request.json new file mode 100644 index 0000000..7e0bf5c --- /dev/null +++ b/jcloud/jcloud/pagetype/team_member_deletion_request/team_member_deletion_request.json @@ -0,0 +1,46 @@ +{ + "actions": [], + "creation": "2021-04-26 13:13:13.385392", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "team_member", + "anon_team_member", + "deletion_status" + ], + "fields": [ + { + "fieldname": "team_member", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Team Member", + "options": "User" + }, + { + "fieldname": "deletion_status", + "fieldtype": "Select", + "in_list_view": 1, + "label": "Deletion Status", + "options": "Pending\nDeleted" + }, + { + "fieldname": "anon_team_member", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Anonymized Link" + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2021-04-26 16:08:26.691639", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Team Member Deletion Request", + "owner": "Administrator", + "permissions": [], + "sort_field": "modified", + "sort_order": "DESC", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/team_member_deletion_request/team_member_deletion_request.py b/jcloud/jcloud/pagetype/team_member_deletion_request/team_member_deletion_request.py new file mode 100644 index 0000000..d62a49a --- /dev/null +++ 
b/jcloud/jcloud/pagetype/team_member_deletion_request/team_member_deletion_request.py @@ -0,0 +1,27 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + + +# import jingrow +from jingrow.model.document import Document + + +class TeamMemberDeletionRequest(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + anon_team_member: DF.Data | None + deletion_status: DF.Literal["Pending", "Deleted"] + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + team_member: DF.Link | None + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/team_member_deletion_request/test_team_member_deletion_request.py b/jcloud/jcloud/pagetype/team_member_deletion_request/test_team_member_deletion_request.py new file mode 100644 index 0000000..2db9f75 --- /dev/null +++ b/jcloud/jcloud/pagetype/team_member_deletion_request/test_team_member_deletion_request.py @@ -0,0 +1,11 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2021, JINGROW +# See license.txt + + +# import jingrow +import unittest + + +class TestTeamMemberDeletionRequest(unittest.TestCase): + pass diff --git a/jcloud/jcloud/pagetype/team_member_impersonation/__init__.py b/jcloud/jcloud/pagetype/team_member_impersonation/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/team_member_impersonation/team_member_impersonation.js b/jcloud/jcloud/pagetype/team_member_impersonation/team_member_impersonation.js new file mode 100644 index 0000000..e31d736 --- /dev/null +++ b/jcloud/jcloud/pagetype/team_member_impersonation/team_member_impersonation.js @@ -0,0 +1,7 @@ +// Copyright (c) 2020, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Team Member Impersonation', { + // refresh: function(frm) { + // } +}); diff --git a/jcloud/jcloud/pagetype/team_member_impersonation/team_member_impersonation.json b/jcloud/jcloud/pagetype/team_member_impersonation/team_member_impersonation.json new file mode 100644 index 0000000..28b8ac5 --- /dev/null +++ b/jcloud/jcloud/pagetype/team_member_impersonation/team_member_impersonation.json @@ -0,0 +1,89 @@ +{ + "actions": [], + "creation": "2020-07-31 11:36:50.175646", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "team", + "member", + "user", + "column_break_4", + "impersonator", + "reason" + ], + "fields": [ + { + "fieldname": "user", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "User", + "options": "User", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "reason", + "fieldtype": "Text Editor", + "label": "Reason", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "team", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Team", + "options": "Team", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "member", + "fieldtype": "Link", + "label": "Member", + "options": "Team Member", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "impersonator", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Impersonator", + "options": "User", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "column_break_4", + "fieldtype": "Column Break" + } + ], + "in_create": 1, + "links": [], + "modified": "2020-07-31 12:11:35.425693", + "modified_by": "Administrator", + "module": 
"Jcloud", + "name": "Team Member Impersonation", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "title_field": "user", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/team_member_impersonation/team_member_impersonation.py b/jcloud/jcloud/pagetype/team_member_impersonation/team_member_impersonation.py new file mode 100644 index 0000000..f742471 --- /dev/null +++ b/jcloud/jcloud/pagetype/team_member_impersonation/team_member_impersonation.py @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +# import jingrow +from jingrow.model.document import Document + + +class TeamMemberImpersonation(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + impersonator: DF.Link + member: DF.Link + reason: DF.TextEditor + team: DF.Link + user: DF.Link + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/team_member_impersonation/test_team_member_impersonation.py b/jcloud/jcloud/pagetype/team_member_impersonation/test_team_member_impersonation.py new file mode 100644 index 0000000..9c409ae --- /dev/null +++ b/jcloud/jcloud/pagetype/team_member_impersonation/test_team_member_impersonation.py @@ -0,0 +1,11 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# See license.txt + + +# import jingrow +import unittest + + +class TestTeamMemberImpersonation(unittest.TestCase): + pass diff --git a/jcloud/jcloud/pagetype/team_onboarding/__init__.py b/jcloud/jcloud/pagetype/team_onboarding/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/team_onboarding/team_onboarding.json b/jcloud/jcloud/pagetype/team_onboarding/team_onboarding.json new file mode 100644 index 0000000..052aca0 --- /dev/null +++ b/jcloud/jcloud/pagetype/team_onboarding/team_onboarding.json @@ -0,0 +1,41 @@ +{ + "actions": [], + "creation": "2020-08-27 17:41:18.658299", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "step_name", + "status" + ], + "fields": [ + { + "fieldname": "step_name", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Step Name", + "reqd": 1 + }, + { + "fieldname": "status", + "fieldtype": "Select", + "in_list_view": 1, + "label": "Status", + "options": "Pending\nSkipped\nCompleted\nNot Applicable", + "reqd": 1 + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2020-08-27 17:53:33.267271", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Team Onboarding", + "owner": "Administrator", + "permissions": [], + "quick_entry": 1, + "sort_field": "modified", + "sort_order": "DESC", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/team_onboarding/team_onboarding.py b/jcloud/jcloud/pagetype/team_onboarding/team_onboarding.py new file mode 100644 index 0000000..e44420f --- /dev/null +++ b/jcloud/jcloud/pagetype/team_onboarding/team_onboarding.py @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +# import jingrow +from jingrow.model.document import Document + + +class TeamOnboarding(Document): + # 
begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + status: DF.Literal["Pending", "Skipped", "Completed", "Not Applicable"] + step_name: DF.Data + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/telegram_group/__init__.py b/jcloud/jcloud/pagetype/telegram_group/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/telegram_group/patches/create_groups_from_jcloud_settings.py b/jcloud/jcloud/pagetype/telegram_group/patches/create_groups_from_jcloud_settings.py new file mode 100644 index 0000000..2b023d5 --- /dev/null +++ b/jcloud/jcloud/pagetype/telegram_group/patches/create_groups_from_jcloud_settings.py @@ -0,0 +1,21 @@ +# Copyright (c) 2023, JINGROW +# For license information, please see license.txt +import jingrow + + +def execute(): + jingrow.reload_pg("jcloud", "pagetype", "telegram_group_topic") + jingrow.reload_pg("jcloud", "pagetype", "telegram_group") + jingrow.reload_pg("jcloud", "pagetype", "jcloud_settings") + settings = jingrow.get_pg("Jcloud Settings") + if settings.telegram_alert_chat_id: + group = jingrow.get_pg( + { + "pagetype": "Telegram Group", + "name": "Alerts", + "chat_id": settings.telegram_alert_chat_id, + } + ).insert() + settings.telegram_alerts_chat_group = group.name + + settings.save() diff --git a/jcloud/jcloud/pagetype/telegram_group/telegram_group.js b/jcloud/jcloud/pagetype/telegram_group/telegram_group.js new file mode 100644 index 0000000..8b5eacf --- /dev/null +++ b/jcloud/jcloud/pagetype/telegram_group/telegram_group.js @@ -0,0 +1,8 @@ +// Copyright (c) 2023, JINGROW +// For license information, please see license.txt + +// jingrow.ui.form.on("Telegram Group", { +// refresh(frm) { + +// }, +// }); diff --git a/jcloud/jcloud/pagetype/telegram_group/telegram_group.json b/jcloud/jcloud/pagetype/telegram_group/telegram_group.json new file mode 100644 index 0000000..03025bc --- /dev/null +++ b/jcloud/jcloud/pagetype/telegram_group/telegram_group.json @@ -0,0 +1,61 @@ +{ + "actions": [], + "allow_rename": 1, + "autoname": "prompt", + "creation": "2023-05-11 17:40:05.778395", + "default_view": "List", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "chat_id", + "token", + "topics" + ], + "fields": [ + { + "fieldname": "chat_id", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Chat ID", + "reqd": 1 + }, + { + "fieldname": "topics", + "fieldtype": "Table", + "label": "Topics", + "options": "Telegram Group Topic" + }, + { + "fieldname": "token", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Token" + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2023-12-14 12:49:59.132352", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Telegram Group", + "naming_rule": "Set by user", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/telegram_group/telegram_group.py b/jcloud/jcloud/pagetype/telegram_group/telegram_group.py new file mode 100644 index 0000000..decf29f --- /dev/null +++ 
b/jcloud/jcloud/pagetype/telegram_group/telegram_group.py @@ -0,0 +1,26 @@ +# Copyright (c) 2023, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class TelegramGroup(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + from jcloud.jcloud.pagetype.telegram_group_topic.telegram_group_topic import ( + TelegramGroupTopic, + ) + + chat_id: DF.Data + token: DF.Data | None + topics: DF.Table[TelegramGroupTopic] + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/telegram_group/test_telegram_group.py b/jcloud/jcloud/pagetype/telegram_group/test_telegram_group.py new file mode 100644 index 0000000..3b721c8 --- /dev/null +++ b/jcloud/jcloud/pagetype/telegram_group/test_telegram_group.py @@ -0,0 +1,9 @@ +# Copyright (c) 2023, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests.utils import JingrowTestCase + + +class TestTelegramGroup(JingrowTestCase): + pass diff --git a/jcloud/jcloud/pagetype/telegram_group_topic/__init__.py b/jcloud/jcloud/pagetype/telegram_group_topic/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/telegram_group_topic/telegram_group_topic.json b/jcloud/jcloud/pagetype/telegram_group_topic/telegram_group_topic.json new file mode 100644 index 0000000..bea97bd --- /dev/null +++ b/jcloud/jcloud/pagetype/telegram_group_topic/telegram_group_topic.json @@ -0,0 +1,41 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2023-05-11 17:39:50.237287", + "default_view": "List", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "topic", + "topic_id" + ], + "fields": [ + { + "fieldname": "topic", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Topic", + "reqd": 1 + }, + { + "fieldname": "topic_id", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Topic ID", + "reqd": 1 + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2023-05-12 12:25:21.167281", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Telegram Group Topic", + "owner": "Administrator", + "permissions": [], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/telegram_group_topic/telegram_group_topic.py b/jcloud/jcloud/pagetype/telegram_group_topic/telegram_group_topic.py new file mode 100644 index 0000000..d347575 --- /dev/null +++ b/jcloud/jcloud/pagetype/telegram_group_topic/telegram_group_topic.py @@ -0,0 +1,24 @@ +# Copyright (c) 2023, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class TelegramGroupTopic(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + topic: DF.Data + topic_id: DF.Data + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/telegram_message/__init__.py b/jcloud/jcloud/pagetype/telegram_message/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/telegram_message/telegram_message.js b/jcloud/jcloud/pagetype/telegram_message/telegram_message.js new file mode 100644 index 0000000..4c8fa3f --- /dev/null +++ b/jcloud/jcloud/pagetype/telegram_message/telegram_message.js @@ -0,0 +1,8 @@ +// Copyright (c) 2024, JINGROW +// For license information, please see license.txt + +// jingrow.ui.form.on("Telegram Message", { +// refresh(frm) { + +// }, +// }); diff --git a/jcloud/jcloud/pagetype/telegram_message/telegram_message.json b/jcloud/jcloud/pagetype/telegram_message/telegram_message.json new file mode 100644 index 0000000..72c246d --- /dev/null +++ b/jcloud/jcloud/pagetype/telegram_message/telegram_message.json @@ -0,0 +1,114 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2024-05-21 16:45:23.323529", + "pagetype": "PageType", + "engine": "InnoDB", + "field_order": [ + "priority", + "status", + "column_break_pube", + "topic", + "group", + "section_break_ujme", + "message", + "section_break_njxf", + "error", + "retry_count" + ], + "fields": [ + { + "default": "Queued", + "fieldname": "status", + "fieldtype": "Select", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Status", + "options": "Queued\nSent\nError", + "read_only": 1, + "reqd": 1, + "search_index": 1 + }, + { + "fieldname": "priority", + "fieldtype": "Select", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Priority", + "options": "High\nMedium\nLow", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "topic", + "fieldtype": "Data", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Topic", + "read_only": 1 + }, + { + "fieldname": "group", + "fieldtype": "Data", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Group", + "read_only": 1 + }, + { + "fieldname": "message", + "fieldtype": "Code", + "label": "Message", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "column_break_pube", + "fieldtype": "Column Break" + }, + { + "fieldname": "section_break_ujme", + "fieldtype": "Section Break" + }, + { + "fieldname": "section_break_njxf", + "fieldtype": "Section Break" + }, + { + "fieldname": "error", + "fieldtype": "Code", + "label": "Error", + "read_only": 1 + }, + { + "fieldname": "retry_count", + "fieldtype": "Int", + "label": "Retry Count", + "read_only": 1 + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2024-05-22 15:25:47.007102", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Telegram Message", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/telegram_message/telegram_message.py b/jcloud/jcloud/pagetype/telegram_message/telegram_message.py new file mode 100644 index 0000000..e35b960 --- /dev/null +++ b/jcloud/jcloud/pagetype/telegram_message/telegram_message.py @@ -0,0 +1,123 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see 
license.txt + +import traceback + +import jingrow +from jingrow.model.document import Document +from telegram.error import NetworkError, RetryAfter + +from jcloud.telegram_utils import Telegram +from jingrow.query_builder import Interval +from jingrow.query_builder.functions import Now + + +class TelegramMessage(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + error: DF.Code | None + group: DF.Data | None + message: DF.Code + priority: DF.Literal["High", "Medium", "Low"] + retry_count: DF.Int + status: DF.Literal["Queued", "Sent", "Error"] + topic: DF.Data | None + # end: auto-generated types + + def send(self): + try: + telegram = Telegram(self.topic, self.group) + if not self.group: + self.group = telegram.group + if not self.topic: + self.topic = telegram.topic + telegram.send(self.message, reraise=True) + self.status = "Sent" + except RetryAfter: + # Raise an exception that will be caught by the scheduler + # Try again after some time + raise + except NetworkError: + # Try again. Not more than 5 times + self.retry_count += 1 + self.error = traceback.format_exc() + if self.retry_count >= 5: + self.status = "Error" + raise + except Exception: + # It's unlikely that this error will be resolved by retrying + # Fail immediately + self.error = traceback.format_exc() + self.status = "Error" + raise + finally: + self.save() + + @staticmethod + def enqueue( + message: str, + topic: str | None = None, + group: str | None = None, + priority: str = "Medium", + ): + """Enqueue message for sending""" + return jingrow.get_pg( + { + "pagetype": "Telegram Message", + "message": message, + "priority": priority, + "topic": topic, + "group": group, + } + ).insert(ignore_permissions=True) + + @staticmethod + def get_one() -> "TelegramMessage | None": + first = jingrow.get_all( + "Telegram Message", + filters={"status": "Queued"}, + order_by="FIELD(priority, 'High', 'Medium', 'Low'), creation ASC", + limit=1, + pluck="name", + ) + if first: + return jingrow.get_pg("Telegram Message", first[0]) + + @staticmethod + def send_one() -> None: + message = TelegramMessage.get_one() + if message: + return message.send() + + @staticmethod + def clear_old_logs(days=30): + table = jingrow.qb.PageType("Telegram Message") + jingrow.db.delete(table, filters=(table.modified < (Now() - Interval(days=days)))) + jingrow.db.commit() + + +def send_telegram_message(): + """Send one queued telegram message""" + + # Go through the queue till one of these things happens + # 1. There are no more queued messages + # 2. We successfully send a message + # 3. Telegram asks us to stop (RetryAfter) + # 4. 
We encounter an error that is not recoverable by retrying + # (attempt 5 retries and remove the message from queue) + while message := TelegramMessage.get_one(): + try: + message.send() + return + except RetryAfter: + # Retry in the next invocation + return + except Exception: + # Try next message + pass diff --git a/jcloud/jcloud/pagetype/telegram_message/test_telegram_message.py b/jcloud/jcloud/pagetype/telegram_message/test_telegram_message.py new file mode 100644 index 0000000..457353f --- /dev/null +++ b/jcloud/jcloud/pagetype/telegram_message/test_telegram_message.py @@ -0,0 +1,133 @@ +# Copyright (c) 2024, JINGROW +# See license.txt + +from unittest.mock import Mock, patch + +import jingrow +from jingrow.tests.utils import JingrowTestCase +from telegram.error import RetryAfter, TimedOut + +from jcloud.jcloud.pagetype.telegram_message.telegram_message import ( + TelegramMessage, + send_telegram_message, +) +from jcloud.telegram_utils import Telegram + + +@patch.object(Telegram, "send") +class TestTelegramMessage(JingrowTestCase): + def tearDown(self): + jingrow.db.rollback() + + def test_enqueue_creates_telegram_message(self, mock_send: Mock): + """Test if enqueue method creates Telegram Message""" + + before = jingrow.db.count("Telegram Message") + TelegramMessage.enqueue(message="Test Message") + after = jingrow.db.count("Telegram Message") + self.assertEqual(after, before + 1) + + def test_enqueue_creates_telegram_message_with_queued_status(self, mock_send: Mock): + """Test if enqueue method creates Telegram Message with Queued status""" + message = TelegramMessage.enqueue(message="Test Message") + self.assertEqual(message.status, "Queued") + + def test_send_calls_telegram_send(self, mock_send: Mock): + """Test if send method calls Telegram send method""" + TelegramMessage.enqueue(message="Test Message") + send_telegram_message() + mock_send.assert_called_once() + + def test_successful_send_call_sets_sent_status(self, mock_send: Mock): + """Test if successful send call sets status to Sent""" + first = TelegramMessage.enqueue(message="Test Message") + send_telegram_message() + first.reload() + self.assertEqual(first.status, "Sent") + + def test_failed_send_call_sets_error_status(self, mock_send: Mock): + """Test if failed send call sets status to Error""" + mock_send.side_effect = Exception() + first = TelegramMessage.enqueue(message="Test Message") + self.assertRaises(Exception, TelegramMessage.send_one) + first.reload() + self.assertEqual(first.status, "Error") + self.assertIn("Exception", first.error) + + def test_sends_messages_in_priority_order(self, mock_send: Mock): + """Test if messages are sent in priority order""" + high = TelegramMessage.enqueue(message="Test Message", priority="High") + medium = TelegramMessage.enqueue(message="Test Message", priority="Medium") + low = TelegramMessage.enqueue(message="Test Message", priority="Low") + + self.assertEqual(TelegramMessage.get_one(), high) + send_telegram_message() + self.assertEqual(TelegramMessage.get_one(), medium) + send_telegram_message() + self.assertEqual(TelegramMessage.get_one(), low) + send_telegram_message() + + low = TelegramMessage.enqueue(message="Test Message", priority="Low") + medium = TelegramMessage.enqueue(message="Test Message", priority="Medium") + high = TelegramMessage.enqueue(message="Test Message", priority="High") + + self.assertEqual(TelegramMessage.get_one(), high) + send_telegram_message() + self.assertEqual(TelegramMessage.get_one(), medium) + send_telegram_message() + 
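A minimal sketch of how the queue above is meant to be used, assuming the scheduler invokes send_telegram_message periodically; the message text here is hypothetical.

from jcloud.jcloud.pagetype.telegram_message.telegram_message import (
    TelegramMessage,
    send_telegram_message,
)

# Queue an alert; topic and group fall back to the defaults resolved by
# jcloud.telegram_utils.Telegram when left as None.
TelegramMessage.enqueue(message="Backup failed on n1", priority="High")

# Normally called by the scheduler: sends at most one queued message,
# highest priority first, oldest first within the same priority.
send_telegram_message()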
self.assertEqual(TelegramMessage.get_one(), low) + send_telegram_message() + + def test_sends_messages_in_creation_order(self, mock_send: Mock): + """Test if messages are sent in creation order""" + first = TelegramMessage.enqueue(message="Test Message") + second = TelegramMessage.enqueue(message="Test Message") + + self.assertEqual(TelegramMessage.get_one(), first) + send_telegram_message() + self.assertEqual(TelegramMessage.get_one(), second) + send_telegram_message() + + def test_failed_send_network_error_increases_retry(self, mock_send: Mock): + """Test if failed send call because of network issues increases retry count""" + mock_send.side_effect = TimedOut() + first = TelegramMessage.enqueue(message="Test Message") + self.assertRaises(TimedOut, TelegramMessage.send_one) + first.reload() + self.assertEqual(first.status, "Queued") + self.assertEqual(first.retry_count, 1) + + def test_test_failed_send_after_max_retries_sets_error_status(self, mock_send: Mock): + """Test if failed send call after max_errors sets status to Error""" + mock_send.side_effect = TimedOut() + first = TelegramMessage.enqueue(message="Test Message") + first.retry_count = 4 + first.save() + self.assertRaises(TimedOut, TelegramMessage.send_one) + first.reload() + self.assertEqual(first.status, "Error") + + def test_failed_send_retry_after_doesnt_change_anything(self, mock_send: Mock): + """Test if failed send call because of rate limits doesn't change status""" + mock_send.side_effect = RetryAfter(10) + first = TelegramMessage.enqueue(message="Test Message") + self.assertRaises(RetryAfter, TelegramMessage.send_one) + first.reload() + self.assertEqual(first.status, "Queued") + + def test_send_message_returns_on_empty_queue(self, mock_send: Mock): + """Test if send_telegram_message returns on empty queue""" + first = TelegramMessage.enqueue(message="Test Message") + first.status = "Sent" + first.save() + send_telegram_message() + mock_send.assert_not_called() + + def test_send_message_does_not_raise_on_failure(self, mock_send: Mock): + """Test if send_telegram_message does not raise on failure""" + mock_send.side_effect = Exception() + first = TelegramMessage.enqueue(message="Test Message") + send_telegram_message() + first.reload() + self.assertEqual(first.status, "Error") + self.assertIn("Exception", first.error) diff --git a/jcloud/jcloud/pagetype/tls_certificate/__init__.py b/jcloud/jcloud/pagetype/tls_certificate/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/tls_certificate/server.conf b/jcloud/jcloud/pagetype/tls_certificate/server.conf new file mode 100644 index 0000000..6ade883 --- /dev/null +++ b/jcloud/jcloud/pagetype/tls_certificate/server.conf @@ -0,0 +1,18 @@ +# OpenSSL Server certificate configuration file. 
+ +[ req ] +distinguished_name = req_distinguished_name +prompt = no +req_extensions = req_ext + +# Common Name has to be the same as the domain where we want to use this certificate +[ req_distinguished_name ] +CN = {{ pg.domain }} +O = {{ pg.ca.organization }} +OU = {{ pg.ca.organizational_unit }} + + +# Chrome has dropped support for certificates without SANs +[ req_ext ] +subjectAltName = DNS:{{ pg.domain }} + diff --git a/jcloud/jcloud/pagetype/tls_certificate/test_tls_certificate.py b/jcloud/jcloud/pagetype/tls_certificate/test_tls_certificate.py new file mode 100644 index 0000000..d7e839d --- /dev/null +++ b/jcloud/jcloud/pagetype/tls_certificate/test_tls_certificate.py @@ -0,0 +1,94 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# See license.txt + +import unittest +from unittest.mock import Mock, patch + +import jingrow + +from jcloud.jcloud.pagetype.agent_job.agent_job import AgentJob +from jcloud.jcloud.pagetype.proxy_server.proxy_server import ProxyServer +from jcloud.jcloud.pagetype.proxy_server.test_proxy_server import create_test_proxy_server +from jcloud.jcloud.pagetype.root_domain.test_root_domain import create_test_root_domain +from jcloud.jcloud.pagetype.tls_certificate.tls_certificate import ( + BaseCA, + LetsEncrypt, + TLSCertificate, +) + + +@patch.object(TLSCertificate, "obtain_certificate", new=Mock()) +def create_test_tls_certificate(domain: str, wildcard: bool = False) -> TLSCertificate: + certificate = jingrow.get_pg( + { + "pagetype": "TLS Certificate", + "domain": domain, + "rsa_key_size": 2048, + "wildcard": wildcard, + } + ).insert(ignore_if_duplicate=True) + certificate.reload() + return certificate + + +def none_init(self, settings): + pass + + +def fake_extract(self): + return "a", "b", "c", "d" + + +@patch.object(AgentJob, "after_insert", new=Mock()) +@patch.object(LetsEncrypt, "_obtain", new=Mock()) +@patch.object(BaseCA, "_extract", new=fake_extract) +@patch.object(TLSCertificate, "_extract_certificate_details", new=Mock()) +class TestTLSCertificate(unittest.TestCase): + def tearDown(self): + jingrow.db.rollback() + + def test_renewal_of_secondary_wildcard_domains_updates_server(self): + jerp_domain = create_test_root_domain("jerp.xyz") + fc_domain = create_test_root_domain("fc.dev") + create_test_proxy_server( # creates n1.fc.dev by default + "n1", domains=[{"domain": fc_domain.name}, {"domain": jerp_domain.name}] + ) + + cert = create_test_tls_certificate(jerp_domain.name, wildcard=True) + + with patch.object(LetsEncrypt, "__init__", new=none_init), patch.object( + ProxyServer, "setup_wildcard_hosts" + ) as mock_setup_wildcard_hosts: + cert._obtain_certificate() + mock_setup_wildcard_hosts.assert_called_once() + + def test_renewal_of_primary_wildcard_domains_doesnt_call_setup_wildcard_domains(self): + jerp_domain = create_test_root_domain("jerp.xyz") + fc_domain = create_test_root_domain("fc.dev") + create_test_proxy_server( + "n1", domains=[{"domain": fc_domain.name}, {"domain": jerp_domain.name}] + ) + + cert = create_test_tls_certificate(fc_domain.name, wildcard=True) + cert.reload() # already created with proxy server + + with patch.object(LetsEncrypt, "__init__", new=none_init), patch.object( + TLSCertificate, "trigger_server_tls_setup_callback", new=Mock() + ), patch.object( + ProxyServer, "setup_wildcard_hosts" + ) as mock_setup_wildcard_hosts: + cert._obtain_certificate() + + mock_setup_wildcard_hosts.assert_not_called() + + def test_renewal_of_primary_domain_calls_update_tls_certificates(self): + cert = 
create_test_tls_certificate("fc.dev", wildcard=True) + create_test_proxy_server("n1") + with patch.object(LetsEncrypt, "__init__", new=none_init), patch.object( + TLSCertificate, "trigger_server_tls_setup_callback" + ) as mock_trigger_server_tls_setup, patch.object( + ProxyServer, "setup_wildcard_hosts", new=Mock() + ): + cert._obtain_certificate() + mock_trigger_server_tls_setup.assert_called() diff --git a/jcloud/jcloud/pagetype/tls_certificate/tls_certificate.js b/jcloud/jcloud/pagetype/tls_certificate/tls_certificate.js new file mode 100644 index 0000000..1812148 --- /dev/null +++ b/jcloud/jcloud/pagetype/tls_certificate/tls_certificate.js @@ -0,0 +1,35 @@ +// Copyright (c) 2020, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('TLS Certificate', { + refresh: function (frm) { + frm.add_custom_button(__('Obtain Certificate'), () => { + frm.call({ + method: 'obtain_certificate', + pg: frm.pg, + callback: (result) => frm.refresh(), + }); + }); + if (frm.pg.wildcard) { + frm.add_custom_button(__('Trigger Callback'), () => { + frm.call({ + method: 'trigger_server_tls_setup_callback', + pg: frm.pg, + callback: (result) => frm.refresh(), + }); + }); + } + if (!frm.pg.wildcard) { + frm.add_custom_button('Copy Private Key', () => { + jingrow.confirm( + `Are you sure you want to copy private + key. You should ONLY do this for custom + domains. And notify user of their + responsibility on handling private + key.`, + () => jingrow.utils.copy_to_clipboard(frm.pg.private_key), + ); + }); + } + }, +}); diff --git a/jcloud/jcloud/pagetype/tls_certificate/tls_certificate.json b/jcloud/jcloud/pagetype/tls_certificate/tls_certificate.json new file mode 100644 index 0000000..83ade54 --- /dev/null +++ b/jcloud/jcloud/pagetype/tls_certificate/tls_certificate.json @@ -0,0 +1,192 @@ +{ + "actions": [], + "creation": "2020-03-30 01:11:37.983494", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "domain", + "status", + "team", + "column_break_3", + "rsa_key_size", + "wildcard", + "section_break_6", + "issued_on", + "column_break_8", + "expires_on", + "section_break_10", + "decoded_certificate", + "certificate", + "full_chain", + "intermediate_chain", + "private_key", + "section_break_cvcg", + "error", + "retry_count" + ], + "fields": [ + { + "fieldname": "domain", + "fieldtype": "Data", + "in_standard_filter": 1, + "label": "Domain", + "reqd": 1, + "set_only_once": 1 + }, + { + "default": "0", + "fieldname": "wildcard", + "fieldtype": "Check", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Wildcard", + "reqd": 1, + "set_only_once": 1 + }, + { + "fieldname": "decoded_certificate", + "fieldtype": "Code", + "label": "Decoded Certificate", + "read_only": 1 + }, + { + "fieldname": "status", + "fieldtype": "Select", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Status", + "options": "Pending\nActive\nExpired\nRevoked\nFailure", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "column_break_3", + "fieldtype": "Column Break" + }, + { + "fieldname": "section_break_6", + "fieldtype": "Section Break" + }, + { + "default": "2048", + "fieldname": "rsa_key_size", + "fieldtype": "Select", + "label": "RSA Key Size", + "options": "2048\n3072\n4096", + "read_only_depends_on": "eval: pg.wildcard", + "reqd": 1 + }, + { + "fieldname": "issued_on", + "fieldtype": "Datetime", + "label": "Issued On", + "read_only": 1 + }, + { + "fieldname": "column_break_8", + "fieldtype": "Column Break" + }, + { + "fieldname": "expires_on", + 
"fieldtype": "Datetime", + "in_list_view": 1, + "label": "Expires On", + "read_only": 1 + }, + { + "fieldname": "section_break_10", + "fieldtype": "Section Break", + "hide_border": 1 + }, + { + "fieldname": "certificate", + "fieldtype": "Code", + "label": "Certificate", + "read_only": 1 + }, + { + "fieldname": "full_chain", + "fieldtype": "Code", + "label": "Full Chain", + "read_only": 1 + }, + { + "fieldname": "intermediate_chain", + "fieldtype": "Code", + "label": "Intermediate Chain", + "read_only": 1 + }, + { + "fieldname": "private_key", + "fieldtype": "Code", + "label": "Private Key", + "hidden": 1, + "read_only": 1 + }, + { + "fieldname": "team", + "fieldtype": "Link", + "label": "Team", + "options": "Team", + "read_only": 1 + }, + { + "fieldname": "section_break_cvcg", + "fieldtype": "Section Break" + }, + { + "fieldname": "error", + "fieldtype": "Code", + "label": "Error", + "read_only": 1 + }, + { + "default": "0", + "depends_on": "eval: pg.retry_count", + "fieldname": "retry_count", + "fieldtype": "Int", + "label": "Retry Count", + "read_only": 1 + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2024-10-17 10:43:33.881463", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "TLS Certificate", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + }, + { + "create": 1, + "delete": 1, + "read": 1, + "role": "Jcloud Admin", + "write": 1 + }, + { + "create": 1, + "delete": 1, + "read": 1, + "role": "Jcloud Member", + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "track_changes": 1 +} diff --git a/jcloud/jcloud/pagetype/tls_certificate/tls_certificate.py b/jcloud/jcloud/pagetype/tls_certificate/tls_certificate.py new file mode 100644 index 0000000..a08bad1 --- /dev/null +++ b/jcloud/jcloud/pagetype/tls_certificate/tls_certificate.py @@ -0,0 +1,461 @@ +import os +import shlex +import subprocess +import tempfile +import time +from datetime import datetime + +import jingrow +import OpenSSL +from jingrow.model.document import Document + +from jcloud.api.site import check_dns_cname_a +from jcloud.overrides import get_permission_query_conditions_for_pagetype +from jcloud.runner import Ansible +from jcloud.utils import get_current_team, log_error +from tldextract import extract + + +class TLSCertificate(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + certificate: DF.Code | None + decoded_certificate: DF.Code | None + domain: DF.Data + error: DF.Code | None + expires_on: DF.Datetime | None + full_chain: DF.Code | None + intermediate_chain: DF.Code | None + issued_on: DF.Datetime | None + private_key: DF.Code | None + retry_count: DF.Int + rsa_key_size: DF.Literal["2048", "3072", "4096"] + status: DF.Literal["Pending", "Active", "Expired", "Revoked", "Failure"] + team: DF.Link | None + wildcard: DF.Check + # end: auto-generated types + + def autoname(self): + if self.wildcard: + self.name = f"*.{self.domain}" + else: + self.name = self.domain + + def after_insert(self): + self.obtain_certificate() + + def on_update(self): + if self.is_new(): + return + if self.has_value_changed("rsa_key_size"): + self.obtain_certificate() + + @jingrow.whitelist() + def obtain_certificate(self): + """ + Queue _obtain_certificate whenever issuance is triggered, automatically or manually. + """ + (user, session_data, team) = ( + jingrow.session.user, + jingrow.session.data, + get_current_team(), + ) + jingrow.set_user(jingrow.get_value("Team", team, "user")) + jingrow.enqueue_pg( + self.pagetype, + self.name, + "_obtain_certificate", + enqueue_after_commit=True, + job_id=f"obtain_certificate:{self.name}", + deduplicate=True, + ) + jingrow.set_user(user) + jingrow.session.data = session_data + + @jingrow.whitelist() + def _obtain_certificate(self): + """ + Core logic that actually obtains the certificate. Branches on the DNS provider, which may be Route 53, DNSPod, etc. + """ + try: + settings = jingrow.get_pg("Jcloud Settings", "Jcloud Settings") + ca = LetsEncrypt(settings) + ( + self.certificate, + self.full_chain, + self.intermediate_chain, + self.private_key, + ) = ca.obtain( + domain=self.domain, + rsa_key_size=self.rsa_key_size, + wildcard=self.wildcard + ) + self._extract_certificate_details() + self.status = "Active" + self.retry_count = 0 + self.error = None + except Exception as e: + if hasattr(e, "output") and e.output: + output_msg = e.output.decode() + # If certbot is already running, the output says "Another instance of Certbot is already running" + if "Another instance of Certbot is already running" in output_msg: + time.sleep(5) + jingrow.enqueue_pg( + self.pagetype, + self.name, + "_obtain_certificate", + job_id=f"obtain_certificate:{self.name}", + deduplicate=True, + ) + return + self.error = output_msg + else: + self.error = repr(e) + self.retry_count += 1 + self.status = "Failure" + log_error("TLS Certificate Exception", certificate=self.name) + + self.save() + self.trigger_site_domain_callback() + self.trigger_self_hosted_server_callback() + if self.wildcard: + self.trigger_server_tls_setup_callback() + self._update_secondary_wildcard_domains() + + def _update_secondary_wildcard_domains(self): + proxies_containing_domain = jingrow.get_all( + "Proxy Server Domain", {"domain": self.domain}, pluck="parent" + ) + proxies_using_domain = jingrow.get_all( + "Proxy Server", {"domain": self.domain}, pluck="name" + ) + proxies_containing_domain = set(proxies_containing_domain) - set(proxies_using_domain) + for proxy_name in proxies_containing_domain: + proxy = jingrow.get_pg("Proxy Server", proxy_name) + proxy.setup_wildcard_hosts() + + @jingrow.whitelist() + def trigger_server_tls_setup_callback(self): + server_doctypes = [ + "Proxy Server", + "Server", + "Database Server", + "Log Server", + "Monitor Server", + "Registry Server", + "Analytics Server", + "Trace Server", + ] + for server_pagetype in server_doctypes: + servers = jingrow.get_all( + server_pagetype, + {"status": "Active", 
"name": ("like", f"%.{self.domain}")} + ) + for srv in servers: + jingrow.enqueue( + "jcloud.jcloud.pagetype.tls_certificate.tls_certificate.update_server_tls_certifcate", + server=jingrow.get_pg(server_pagetype, srv), + certificate=self, + ) + + def trigger_site_domain_callback(self): + domain = jingrow.db.get_value("Site Domain", {"tls_certificate": self.name}, "name") + if domain: + jingrow.get_pg("Site Domain", domain).process_tls_certificate_update() + + def trigger_self_hosted_server_callback(self): + try: + jingrow.get_pg("Self Hosted Server", self.name).process_tls_cert_update() + except Exception: + pass + + def _extract_certificate_details(self): + x509 = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, self.certificate) + self.decoded_certificate = OpenSSL.crypto.dump_certificate( + OpenSSL.crypto.FILETYPE_TEXT, x509 + ).decode() + self.issued_on = datetime.strptime(x509.get_notBefore().decode(), "%Y%m%d%H%M%SZ") + self.expires_on = datetime.strptime(x509.get_notAfter().decode(), "%Y%m%d%H%M%SZ") + + +get_permission_query_conditions = get_permission_query_conditions_for_pagetype("TLS Certificate") + + +def renew_tls_certificates(): + """ + 定时任务:检查过期时间<25天的证书并自动续期。 + """ + tls_renewal_queue_size = jingrow.db.get_single_value("Jcloud Settings", "tls_renewal_queue_size") + pending = jingrow.get_all( + "TLS Certificate", + fields=["name", "domain", "wildcard", "retry_count"], + filters={ + "status": ("in", ("Active", "Failure")), + "expires_on": ("<", jingrow.utils.add_days(None, 25)), + "retry_count": ("<", 5), + }, + ignore_ifnull=True, + order_by="expires_on ASC, status DESC", + ) + renewals_attempted = 0 + for certificate in pending: + if tls_renewal_queue_size and (renewals_attempted >= tls_renewal_queue_size): + break + site = jingrow.db.get_value("Site Domain", {"tls_certificate": certificate.name}, "site") + try: + should_renew = False + if certificate.wildcard: + should_renew = True + else: + if not site: + continue + if jingrow.db.get_value("Site", site, "status") != "Active": + continue + dns_response = check_dns_cname_a(site, certificate["domain"]) + if dns_response["matched"]: + should_renew = True + else: + jingrow.db.set_value( + "TLS Certificate", + certificate["name"], + { + "status": "Failure", + "error": f"DNS check failed. 
{dns_response.get('answer')}", + "retry_count": certificate["retry_count"] + 1, + }, + ) + if should_renew: + renewals_attempted += 1 + certificate_pg = jingrow.get_pg("TLS Certificate", certificate["name"]) + certificate_pg._obtain_certificate() + jingrow.db.commit() + except Exception as e: + jingrow.db.rollback() + jingrow.db.set_value( + "TLS Certificate", + certificate["name"], + { + "status": "Failure", + "error": repr(e), + "retry_count": certificate["retry_count"] + 1, + }, + ) + log_error("TLS Renewal Exception", certificate=certificate, site=site) + jingrow.db.commit() + + +def update_server_tls_certifcate(server, certificate): + """ + Ansible deployment: push the updated certificate to the given server. + """ + try: + proxysql_admin_password = None + if server.pagetype == "Proxy Server": + proxysql_admin_password = server.get_password("proxysql_admin_password") + ansible = Ansible( + playbook="tls.yml", + user=server.get("ssh_user") or "root", + port=server.get("ssh_port") or 22, + server=server, + variables={ + "certificate_private_key": certificate.private_key, + "certificate_full_chain": certificate.full_chain, + "certificate_intermediate_chain": certificate.intermediate_chain, + "is_proxy_server": bool(proxysql_admin_password), + "proxysql_admin_password": proxysql_admin_password, + }, + ) + ansible.run() + except Exception: + log_error("TLS Setup Exception", server=server.as_dict()) + + +def retrigger_failed_wildcard_tls_callbacks(): + """ + Can be run manually to re-deploy wildcard certificates whose setup plays failed. + """ + server_doctypes = [ + "Proxy Server", + "Server", + "Database Server", + "Log Server", + "Monitor Server", + "Registry Server", + "Analytics Server", + "Trace Server", + ] + for server_pagetype in server_doctypes: + servers = jingrow.get_all(server_pagetype, {"status": "Active"}, pluck="name") + for srv in servers: + plays = jingrow.get_all( + "Ansible Play", + {"play": "Setup TLS Certificates", "server": srv}, + pluck="status", + limit=1, + order_by="creation DESC", + ) + if plays and plays[0] != "Success": + server_pg = jingrow.get_pg(server_pagetype, srv) + jingrow.enqueue( + "jcloud.jcloud.pagetype.tls_certificate.tls_certificate.update_server_tls_certifcate", + server=server_pg, + certificate=server_pg.get_certificate(), + ) + + +class LetsEncrypt: + """ + Certbot issuance logic that supports both Route 53 and DNSPod. + For DNSPod, API credentials are passed through a temporary ini file instead of keeping a long-lived .ini on disk. + """ + + def __init__(self, settings): + self.settings = settings + self.directory = settings.certbot_directory + self.webroot_directory = settings.webroot_directory + self.eff_registration_email = settings.eff_registration_email + # In developer mode, the staging (test) CA can be enabled + self.staging = bool(jingrow.conf.developer_mode and settings.use_staging_ca) + + def obtain(self, domain, rsa_key_size=2048, wildcard=False): + self.domain = f"*.{domain}" if wildcard else domain + self.rsa_key_size = rsa_key_size + self.wildcard = wildcard + self._obtain() + return self._extract() + + def _obtain(self): + """Choose the validation method based on the domain type""" + if self.wildcard: + # Wildcard domains need DNS validation and a Root Domain record + self._obtain_wildcard() + else: + # Plain domains use HTTP validation (no DNS API access required) + self._obtain_naked() + + def _obtain_wildcard(self): + """ + Handle wildcards based on the dns_provider set on the Root Domain: + - 'AWS Route 53' => use AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY + - 'DNSPod' => write a temporary ini file and use -a dns-dnspod + """ + # Strip the "*." prefix from wildcard domains + domain_to_check = self.domain[2:] if self.domain.startswith("*.") else self.domain + # Extract the root domain with tldextract + extracted = extract(domain_to_check) + root_domain = f"{extracted.domain}.{extracted.suffix}" + domain_pg = jingrow.get_pg("Root Domain", root_domain) + provider = 
domain_pg.dns_provider + + if provider == "AWS Route 53": + # Original environment-variable approach: + env = os.environ.copy() + env["AWS_ACCESS_KEY_ID"] = domain_pg.aws_access_key_id or "" + env["AWS_SECRET_ACCESS_KEY"] = domain_pg.get_password("aws_secret_access_key") or "" + plugin_args = "-a dns-route53" + command = self._certbot_command(plugin_args) + self.run(command, environment=env) + + elif provider == "DNSPod": + # Write a temporary ini file + from tempfile import NamedTemporaryFile + import stat + + app_id = domain_pg.dnspod_app_id or "" + app_token = domain_pg.get_password("dnspod_app_token") or "" + + # 1. Create the temporary file + ini_file = NamedTemporaryFile("w", prefix="dnspod_", suffix=".ini", delete=False) + ini_path = ini_file.name + + # 2. Write the credentials using the parameter names the plugin expects + ini_file.write(f"dns_dnspod_109_secret_id = {app_id}\n") + ini_file.write(f"dns_dnspod_109_secret_key = {app_token}\n") + ini_file.close() + + # 3. chmod 600 + os.chmod(ini_path, stat.S_IRUSR | stat.S_IWUSR) # 0o600 + + try: + # Use the correct plugin name, dns-dnspod-109 + plugin_args = ( + f"-a dns-dnspod-109 " + f"--dns-dnspod-109-credentials {ini_path} " + # Add --dns-dnspod-109-propagation-seconds 60 here if DNS propagation needs more time + ) + command = self._certbot_command(plugin_args) + self.run(command) + finally: + # 4. Delete the temporary ini once we are done + if os.path.exists(ini_path): + os.remove(ini_path) + + else: + raise Exception(f"Unsupported DNS Provider: {provider}") + + def _obtain_naked(self): + """ + For non-wildcard certificates, default to webroot HTTP-01 validation. A branch for Route 53 / DNSPod DNS validation could be added here as well. + """ + if not os.path.exists(self.webroot_directory): + os.mkdir(self.webroot_directory) + plugin_args = f"--webroot --webroot-path {self.webroot_directory}" + command = self._certbot_command(plugin_args) + self.run(command) + + def _certbot_command(self, plugin_args): + """ + Assemble the certbot certonly command. plugin_args may contain -a dns-dnspod and similar flags. + """ + staging = "--staging" if self.staging else "" + force_renewal = "--force-renewal" + cmd = ( + f"certbot certonly {plugin_args} {staging}" + f" --logs-dir {self.directory}/logs --work-dir {self.directory}" + f" --config-dir {self.directory} {force_renewal}" + f" --agree-tos --eff-email --email {self.eff_registration_email} --staple-ocsp" + f" --key-type rsa --rsa-key-size {self.rsa_key_size} --cert-name {self.domain}" + f" --domains {self.domain}" + ) + return cmd + + def run(self, command, environment=None): + """ + Actually run the certbot command. On failure, log the error and re-raise. + """ + try: + subprocess.check_output( + shlex.split(command), + stderr=subprocess.STDOUT, + env=environment + ) + except subprocess.CalledProcessError as e: + output = (e.output or b"").decode() + if "Another instance of Certbot is already running" not in output: + log_error("Certbot Exception", command=command, output=output) + raise e + except Exception as e: + log_error("Certbot Exception", command=command, exception=e) + raise e + + def _extract(self): + """ + Read the certificate, chains and private key from the files generated by certbot and return them. + """ + base_path = os.path.join(self.directory, "live", self.domain) + with open(os.path.join(base_path, "cert.pem")) as f: + certificate = f.read() + with open(os.path.join(base_path, "fullchain.pem")) as f: + full_chain = f.read() + with open(os.path.join(base_path, "chain.pem")) as f: + intermediate_chain = f.read() + with open(os.path.join(base_path, "privkey.pem")) as f: + private_key = f.read() + + return certificate, full_chain, intermediate_chain, private_key diff --git 
a/jcloud/jcloud/pagetype/trace_server/test_trace_server.py b/jcloud/jcloud/pagetype/trace_server/test_trace_server.py new file mode 100644 index 0000000..b2237dd --- /dev/null +++ b/jcloud/jcloud/pagetype/trace_server/test_trace_server.py @@ -0,0 +1,9 @@ +# Copyright (c) 2022, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests.utils import JingrowTestCase + + +class TestTraceServer(JingrowTestCase): + pass diff --git a/jcloud/jcloud/pagetype/trace_server/trace_server.js b/jcloud/jcloud/pagetype/trace_server/trace_server.js new file mode 100644 index 0000000..71abb3a --- /dev/null +++ b/jcloud/jcloud/pagetype/trace_server/trace_server.js @@ -0,0 +1,65 @@ +// Copyright (c) 2022, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Trace Server', { + refresh: function (frm) { + [ + [__('Ping Agent'), 'ping_agent', false, frm.pg.is_server_setup], + [__('Ping Ansible'), 'ping_ansible', true], + [__('Ping Ansible Unprepared'), 'ping_ansible_unprepared', true], + [__('Update Agent'), 'update_agent', true, frm.pg.is_server_setup], + [__('Prepare Server'), 'prepare_server', true, !frm.pg.is_server_setup], + [__('Setup Server'), 'setup_server', true, !frm.pg.is_server_setup], + [__('Upgrade Server'), 'upgrade_server', true, frm.pg.is_server_setup], + [ + __('Reconfigure Monitor Server'), + 'reconfigure_monitor_server', + true, + frm.pg.is_server_setup, + ], + [ + __('Fetch Keys'), + 'fetch_keys', + false, + frm.pg.is_server_setup && + (!frm.pg.jingrow_public_key || !frm.pg.root_public_key), + ], + [ + __('Show Sentry Password'), + 'show_sentry_password', + false, + frm.pg.is_server_setup, + ], + ].forEach(([label, method, confirm, condition]) => { + if (typeof condition === 'undefined' || condition) { + frm.add_custom_button( + label, + () => { + if (confirm) { + jingrow.confirm( + `Are you sure you want to ${label.toLowerCase()}?`, + () => + frm.call(method).then((r) => { + if (r.message) { + jingrow.msgprint(r.message); + } else { + frm.refresh(); + } + }), + ); + } else { + frm.call(method).then((r) => { + if (r.message) { + jingrow.msgprint(r.message); + } else { + frm.refresh(); + } + }); + } + }, + __('Actions'), + ); + } + }); + }, +}); diff --git a/jcloud/jcloud/pagetype/trace_server/trace_server.json b/jcloud/jcloud/pagetype/trace_server/trace_server.json new file mode 100644 index 0000000..96e4545 --- /dev/null +++ b/jcloud/jcloud/pagetype/trace_server/trace_server.json @@ -0,0 +1,293 @@ +{ + "actions": [], + "creation": "2022-06-28 13:24:27.669633", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "status", + "hostname", + "domain", + "column_break_4", + "provider", + "virtual_machine", + "is_server_setup", + "networking_section", + "ip", + "column_break_10", + "private_ip", + "private_mac_address", + "private_vlan_id", + "agent_section", + "agent_password", + "ssh_section", + "jingrow_user_password", + "jingrow_public_key", + "column_break_19", + "root_public_key", + "monitoring_section", + "monitoring_password", + "sentry_section", + "sentry_admin_email", + "sentry_admin_password", + "column_break_27", + "sentry_mail_server", + "sentry_mail_port", + "sentry_mail_login", + "sentry_mail_password", + "sentry_oauth_client_section", + "sentry_oauth_server_url", + "column_break_33", + "sentry_oauth_client_id", + "sentry_oauth_client_secret" + ], + "fields": [ + { + "default": "Pending", + "fieldname": "status", + "fieldtype": "Select", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Status", + 
"options": "Pending\nInstalling\nActive\nBroken\nArchived", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "hostname", + "fieldtype": "Data", + "label": "Hostname", + "reqd": 1, + "set_only_once": 1 + }, + { + "fieldname": "domain", + "fieldtype": "Link", + "label": "Domain", + "options": "Root Domain", + "read_only": 1, + "set_only_once": 1 + }, + { + "fieldname": "column_break_4", + "fieldtype": "Column Break" + }, + { + "default": "Generic", + "fieldname": "provider", + "fieldtype": "Select", + "label": "Provider", + "options": "Generic\nScaleway\nAWS EC2\nOCI", + "set_only_once": 1 + }, + { + "depends_on": "eval:pg.provider === \"AWS EC2\"", + "fieldname": "virtual_machine", + "fieldtype": "Link", + "label": "Virtual Machine", + "mandatory_depends_on": "eval:pg.provider === \"AWS EC2\"", + "options": "Virtual Machine" + }, + { + "default": "0", + "fieldname": "is_server_setup", + "fieldtype": "Check", + "label": "Server Setup", + "read_only": 1 + }, + { + "fieldname": "networking_section", + "fieldtype": "Section Break", + "label": "Networking" + }, + { + "fetch_from": "virtual_machine.public_ip_address", + "fieldname": "ip", + "fieldtype": "Data", + "in_list_view": 1, + "label": "IP", + "reqd": 1, + "set_only_once": 1 + }, + { + "fieldname": "column_break_10", + "fieldtype": "Column Break" + }, + { + "fetch_from": "virtual_machine.private_ip_address", + "fieldname": "private_ip", + "fieldtype": "Data", + "label": "Private IP", + "reqd": 1, + "set_only_once": 1 + }, + { + "depends_on": "eval: pg.provider === \"Scaleway\"", + "fieldname": "private_mac_address", + "fieldtype": "Data", + "label": "Private Mac Address", + "mandatory_depends_on": "eval: pg.provider === \"Scaleway\"", + "set_only_once": 1 + }, + { + "depends_on": "eval: pg.provider === \"Scaleway\"", + "fieldname": "private_vlan_id", + "fieldtype": "Data", + "label": "Private VLAN ID", + "mandatory_depends_on": "eval: pg.provider === \"Scaleway\"", + "set_only_once": 1 + }, + { + "fieldname": "agent_section", + "fieldtype": "Section Break", + "label": "Agent" + }, + { + "fieldname": "agent_password", + "fieldtype": "Password", + "label": "Agent Password", + "set_only_once": 1 + }, + { + "fieldname": "ssh_section", + "fieldtype": "Section Break", + "label": "SSH" + }, + { + "fieldname": "jingrow_user_password", + "fieldtype": "Password", + "label": "Jingrow User Password", + "set_only_once": 1 + }, + { + "fieldname": "jingrow_public_key", + "fieldtype": "Code", + "label": "Jingrow Public Key", + "read_only": 1 + }, + { + "fieldname": "column_break_19", + "fieldtype": "Column Break" + }, + { + "fieldname": "root_public_key", + "fieldtype": "Code", + "label": "Root Public Key", + "read_only": 1 + }, + { + "fieldname": "monitoring_section", + "fieldtype": "Section Break", + "label": "Monitoring" + }, + { + "fieldname": "monitoring_password", + "fieldtype": "Password", + "label": "Monitoring Password", + "set_only_once": 1 + }, + { + "collapsible": 1, + "fieldname": "sentry_section", + "fieldtype": "Section Break", + "label": "Sentry" + }, + { + "fieldname": "sentry_admin_email", + "fieldtype": "Data", + "label": "Sentry Admin Email" + }, + { + "fieldname": "sentry_admin_password", + "fieldtype": "Password", + "label": "Sentry Admin Password", + "set_only_once": 1 + }, + { + "fieldname": "column_break_27", + "fieldtype": "Column Break" + }, + { + "fieldname": "sentry_mail_server", + "fieldtype": "Data", + "label": "Sentry Mail Server", + "set_only_once": 1 + }, + { + "default": "587", + "fieldname": "sentry_mail_port", + 
"fieldtype": "Int", + "label": "Sentry Mail Port", + "set_only_once": 1 + }, + { + "fieldname": "sentry_mail_login", + "fieldtype": "Data", + "label": "Sentry Mail Login", + "set_only_once": 1 + }, + { + "fieldname": "sentry_mail_password", + "fieldtype": "Password", + "label": "Sentry Mail Password", + "set_only_once": 1 + }, + { + "collapsible": 1, + "fieldname": "sentry_oauth_client_section", + "fieldtype": "Section Break", + "label": "Sentry OAuth Client" + }, + { + "fieldname": "sentry_oauth_server_url", + "fieldtype": "Data", + "label": "Sentry OAuth Server URL", + "set_only_once": 1 + }, + { + "fieldname": "sentry_oauth_client_id", + "fieldtype": "Data", + "label": "Sentry OAuth Client ID", + "set_only_once": 1 + }, + { + "fieldname": "sentry_oauth_client_secret", + "fieldtype": "Data", + "label": "Sentry OAuth Client Secret", + "set_only_once": 1 + }, + { + "fieldname": "column_break_33", + "fieldtype": "Column Break" + } + ], + "links": [ + { + "link_pagetype": "Ansible Play", + "link_fieldname": "server" + } + ], + "modified": "2023-12-13 15:09:34.499141", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Trace Server", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/trace_server/trace_server.py b/jcloud/jcloud/pagetype/trace_server/trace_server.py new file mode 100644 index 0000000..956fd0d --- /dev/null +++ b/jcloud/jcloud/pagetype/trace_server/trace_server.py @@ -0,0 +1,152 @@ +# Copyright (c) 2022, JINGROW +# For license information, please see license.txt + +import jingrow + +from jcloud.jcloud.pagetype.server.server import BaseServer +from jcloud.runner import Ansible +from jcloud.utils import log_error + + +class TraceServer(BaseServer): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + agent_password: DF.Password | None + domain: DF.Link | None + jingrow_public_key: DF.Code | None + jingrow_user_password: DF.Password | None + hostname: DF.Data + ip: DF.Data + is_server_setup: DF.Check + monitoring_password: DF.Password | None + private_ip: DF.Data + private_mac_address: DF.Data | None + private_vlan_id: DF.Data | None + provider: DF.Literal["Generic", "Scaleway", "AWS EC2", "OCI"] + root_public_key: DF.Code | None + sentry_admin_email: DF.Data | None + sentry_admin_password: DF.Password | None + sentry_mail_login: DF.Data | None + sentry_mail_password: DF.Password | None + sentry_mail_port: DF.Int + sentry_mail_server: DF.Data | None + sentry_oauth_client_id: DF.Data | None + sentry_oauth_client_secret: DF.Data | None + sentry_oauth_server_url: DF.Data | None + status: DF.Literal["Pending", "Installing", "Active", "Broken", "Archived"] + virtual_machine: DF.Link | None + # end: auto-generated types + + def validate(self): + self.validate_agent_password() + self.validate_monitoring_password() + self.validate_sentry_admin_password() + + def validate_monitoring_password(self): + if not self.monitoring_password: + self.monitoring_password = jingrow.generate_hash() + + def validate_sentry_admin_password(self): + if not self.sentry_admin_password: + self.sentry_admin_password = jingrow.generate_hash() + + def _setup_server(self): + agent_repository_url = self.get_agent_repository_url() + certificate_name = jingrow.db.get_value( + "TLS Certificate", {"wildcard": True, "domain": self.domain}, "name" + ) + certificate = jingrow.get_pg("TLS Certificate", certificate_name) + + log_server = jingrow.db.get_single_value("Jcloud Settings", "log_server") + if log_server: + kibana_password = jingrow.get_pg("Log Server", log_server).get_password( + "kibana_password" + ) + else: + kibana_password = None + + try: + ansible = Ansible( + playbook="trace.yml", + server=self, + variables={ + "server": self.name, + "workers": 1, + "domain": self.domain, + "log_server": log_server, + "agent_password": self.get_password("agent_password"), + "agent_repository_url": agent_repository_url, + "kibana_password": kibana_password, + "sentry_admin_email": self.sentry_admin_email, + "sentry_admin_password": self.get_password("sentry_admin_password"), + "sentry_mail_server": self.sentry_mail_server, + "sentry_mail_port": self.sentry_mail_port, + "sentry_mail_login": self.sentry_mail_login, + "sentry_mail_password": self.get_password("sentry_mail_password"), + "sentry_oauth_server_url": self.sentry_oauth_server_url, + "sentry_oauth_client_id": self.sentry_oauth_client_id, + "sentry_oauth_client_secret": self.get_password("sentry_oauth_client_secret"), + "monitoring_password": self.get_password("monitoring_password"), + "private_ip": self.private_ip, + "certificate_private_key": certificate.private_key, + "certificate_full_chain": certificate.full_chain, + "certificate_intermediate_chain": certificate.intermediate_chain, + }, + ) + play = ansible.run() + self.reload() + if play.status == "Success": + self.status = "Active" + self.is_server_setup = True + else: + self.status = "Broken" + except Exception: + self.status = "Broken" + log_error("Trace Server Setup Exception", server=self.as_dict()) + self.save() + + @jingrow.whitelist() + def upgrade_server(self): + self.status = "Installing" + self.save() + jingrow.enqueue_pg( + self.pagetype, self.name, "_upgrade_server", queue="long", timeout=2400 + ) + + def 
_upgrade_server(self): + try: + ansible = Ansible( + playbook="trace_upgrade.yml", + server=self, + variables={ + "server": self.name, + "sentry_admin_email": self.sentry_admin_email, + "sentry_mail_server": self.sentry_mail_server, + "sentry_mail_port": self.sentry_mail_port, + "sentry_mail_login": self.sentry_mail_login, + "sentry_mail_password": self.get_password("sentry_mail_password"), + "sentry_oauth_server_url": self.sentry_oauth_server_url, + "sentry_oauth_client_id": self.sentry_oauth_client_id, + "sentry_oauth_client_secret": self.get_password("sentry_oauth_client_secret"), + }, + ) + play = ansible.run() + self.reload() + if play.status == "Success": + self.status = "Active" + else: + self.status = "Broken" + except Exception: + self.status = "Broken" + log_error("Trace Server Upgrade Exception", server=self.as_dict()) + self.save() + + @jingrow.whitelist() + def show_sentry_password(self): + return self.get_password("sentry_admin_password") diff --git a/jcloud/jcloud/pagetype/usage_record/__init__.py b/jcloud/jcloud/pagetype/usage_record/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/usage_record/test_usage_record.py b/jcloud/jcloud/pagetype/usage_record/test_usage_record.py new file mode 100644 index 0000000..4e4251a --- /dev/null +++ b/jcloud/jcloud/pagetype/usage_record/test_usage_record.py @@ -0,0 +1,11 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# See license.txt + + +# import jingrow +import unittest + + +class TestUsageRecord(unittest.TestCase): + pass diff --git a/jcloud/jcloud/pagetype/usage_record/usage_record.js b/jcloud/jcloud/pagetype/usage_record/usage_record.js new file mode 100644 index 0000000..326a83c --- /dev/null +++ b/jcloud/jcloud/pagetype/usage_record/usage_record.js @@ -0,0 +1,7 @@ +// Copyright (c) 2020, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Usage Record', { + // refresh: function(frm) { + // } +}); diff --git a/jcloud/jcloud/pagetype/usage_record/usage_record.json b/jcloud/jcloud/pagetype/usage_record/usage_record.json new file mode 100644 index 0000000..8f5e1d9 --- /dev/null +++ b/jcloud/jcloud/pagetype/usage_record/usage_record.json @@ -0,0 +1,179 @@ +{ + "actions": [], + "autoname": "UR-.YYYY.-.######", + "creation": "2020-10-19 17:36:31.235411", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "team", + "document_type", + "document_name", + "column_break_4", + "date", + "time", + "section_break_7", + "plan_type", + "plan", + "currency", + "amount", + "subscription", + "interval", + "invoice", + "column_break_13", + "payout", + "remark", + "amended_from", + "site" + ], + "fields": [ + { + "fieldname": "document_type", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Document Type", + "options": "PageType" + }, + { + "fieldname": "document_name", + "fieldtype": "Dynamic Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Document Name", + "options": "document_type" + }, + { + "fetch_from": "team.currency", + "fieldname": "currency", + "fieldtype": "Link", + "in_standard_filter": 1, + "label": "Currency", + "options": "Currency" + }, + { + "fieldname": "amount", + "fieldtype": "Currency", + "label": "Amount", + "options": "currency" + }, + { + "fieldname": "remark", + "fieldtype": "Small Text", + "label": "Remark" + }, + { + "fieldname": "team", + "fieldtype": "Link", + "in_standard_filter": 1, + "label": "Team", + "options": "Team" + }, + { + "fieldname": 
"invoice", + "fieldtype": "Link", + "label": "Invoice", + "options": "Invoice" + }, + { + "fieldname": "column_break_4", + "fieldtype": "Column Break" + }, + { + "fieldname": "section_break_7", + "fieldtype": "Section Break" + }, + { + "fieldname": "date", + "fieldtype": "Date", + "label": "Date", + "search_index": 1 + }, + { + "fieldname": "time", + "fieldtype": "Time", + "label": "Time" + }, + { + "fieldname": "subscription", + "fieldtype": "Link", + "label": "Subscription", + "options": "Subscription" + }, + { + "fieldname": "column_break_13", + "fieldtype": "Column Break" + }, + { + "fieldname": "interval", + "fieldtype": "Data", + "label": "Interval" + }, + { + "fieldname": "amended_from", + "fieldtype": "Link", + "label": "Amended From", + "no_copy": 1, + "options": "Usage Record", + "print_hide": 1, + "read_only": 1 + }, + { + "fieldname": "plan", + "fieldtype": "Dynamic Link", + "label": "Plan", + "options": "plan_type" + }, + { + "fieldname": "payout", + "fieldtype": "Data", + "label": "Saas Developer Payout" + }, + { + "fieldname": "site", + "fieldtype": "Link", + "label": "Site", + "options": "Site" + }, + { + "fieldname": "plan_type", + "fieldtype": "Link", + "label": "Plan Type", + "options": "PageType", + "search_index": 1 + } + ], + "index_web_pages_for_search": 1, + "is_submittable": 1, + "links": [], + "modified": "2024-08-23 16:46:05.290651", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Usage Record", + "naming_rule": "Expression (old style)", + "owner": "Administrator", + "permissions": [ + { + "amend": 1, + "cancel": 1, + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "submit": 1, + "write": 1 + } + ], + "quick_entry": 1, + "search_fields": "team, document_name", + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "title_field": "team", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/usage_record/usage_record.py b/jcloud/jcloud/pagetype/usage_record/usage_record.py new file mode 100644 index 0000000..0da8c08 --- /dev/null +++ b/jcloud/jcloud/pagetype/usage_record/usage_record.py @@ -0,0 +1,127 @@ +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt +from __future__ import annotations + +import jingrow +from jingrow.model.document import Document + + +class UsageRecord(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + amended_from: DF.Link | None + amount: DF.Currency + currency: DF.Link | None + date: DF.Date | None + document_name: DF.DynamicLink | None + document_type: DF.Link | None + interval: DF.Data | None + invoice: DF.Link | None + payout: DF.Data | None + plan: DF.DynamicLink | None + plan_type: DF.Link | None + remark: DF.SmallText | None + site: DF.Link | None + subscription: DF.Link | None + team: DF.Link | None + time: DF.Time | None + # end: auto-generated types + + def validate(self): + if not self.date: + self.date = jingrow.utils.today() + + if not self.time: + self.time = jingrow.utils.nowtime() + + def before_submit(self): + self.validate_duplicate_usage_record() + + def on_submit(self): + self.update_usage_in_invoice() + + def on_cancel(self): + self.remove_usage_from_invoice() + + def update_usage_in_invoice(self): + team = jingrow.get_pg("Team", self.team) + + if team.parent_team: + team = jingrow.get_pg("Team", team.parent_team) + + if team.billing_team: + team = jingrow.get_pg("Team", team.billing_team) + + if team.free_account: + return + # Get a read lock on this invoice + # We're going to update the invoice and we don't want any other process to update it + invoice = team.get_upcoming_invoice(for_update=True) + if not invoice: + invoice = team.create_upcoming_invoice() + + invoice.add_usage_record(self) + + def remove_usage_from_invoice(self): + team = jingrow.get_pg("Team", self.team) + invoice = team.get_upcoming_invoice() + if invoice: + invoice.remove_usage_record(self) + + def validate_duplicate_usage_record(self): + usage_record = jingrow.get_all( + "Usage Record", + { + "name": ("!=", self.name), + "team": self.team, + "document_type": self.document_type, + "document_name": self.document_name, + "interval": self.interval, + "date": self.date, + "plan": self.plan, + "docstatus": 1, + "subscription": self.subscription, + }, + pluck="name", + ) + + if usage_record: + jingrow.throw( + f"Usage Record {usage_record[0]} already exists for this document", + jingrow.DuplicateEntryError, + ) + + +def link_unlinked_usage_records(): + td = jingrow.utils.today() + fd = jingrow.utils.get_first_day(td) + ld = jingrow.utils.get_last_day(td) + free_teams = jingrow.db.get_all("Team", {"free_account": 1}, pluck="name") + + usage_records = jingrow.get_all( + "Usage Record", + filters={ + "invoice": ("is", "not set"), + "date": ("between", (fd, ld)), + "team": ("not in", free_teams), + "docstatus": 1, + }, + pluck="name", + ignore_ifnull=True, + ) + + for usage_record in usage_records: + try: + jingrow.get_pg("Usage Record", usage_record).update_usage_in_invoice() + except Exception: + jingrow.log_error("Failed to Link UR to Invoice") + + +def on_pagetype_update(): + jingrow.db.add_index("Usage Record", ["subscription", "date"]) diff --git a/jcloud/jcloud/pagetype/user_2fa/__init__.py b/jcloud/jcloud/pagetype/user_2fa/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/user_2fa/test_user_2fa.py b/jcloud/jcloud/pagetype/user_2fa/test_user_2fa.py new file mode 100644 index 0000000..2345b40 --- /dev/null +++ b/jcloud/jcloud/pagetype/user_2fa/test_user_2fa.py @@ -0,0 +1,9 @@ +# Copyright (c) 2024, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests.utils import JingrowTestCase + + +class TestUser2FA(JingrowTestCase): + pass diff --git a/jcloud/jcloud/pagetype/user_2fa/user_2fa.js b/jcloud/jcloud/pagetype/user_2fa/user_2fa.js new file mode 100644 
index 0000000..1b0ac0e --- /dev/null +++ b/jcloud/jcloud/pagetype/user_2fa/user_2fa.js @@ -0,0 +1,8 @@ +// Copyright (c) 2024, JINGROW +// For license information, please see license.txt + +// jingrow.ui.form.on("User 2FA", { +// refresh(frm) { + +// }, +// }); diff --git a/jcloud/jcloud/pagetype/user_2fa/user_2fa.json b/jcloud/jcloud/pagetype/user_2fa/user_2fa.json new file mode 100644 index 0000000..26fc7f5 --- /dev/null +++ b/jcloud/jcloud/pagetype/user_2fa/user_2fa.json @@ -0,0 +1,82 @@ +{ + "actions": [], + "allow_rename": 1, + "autoname": "field:user", + "creation": "2024-08-21 16:10:57.634579", + "pagetype": "PageType", + "engine": "InnoDB", + "field_order": [ + "user", + "totp_secret", + "enabled" + ], + "fields": [ + { + "fieldname": "user", + "fieldtype": "Link", + "label": "User", + "options": "User", + "unique": 1 + }, + { + "fieldname": "totp_secret", + "fieldtype": "Password", + "label": "TOTP Secret" + }, + { + "default": "0", + "fieldname": "enabled", + "fieldtype": "Check", + "label": "Enabled" + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2024-08-21 16:36:26.321395", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "User 2FA", + "naming_rule": "By fieldname", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + }, + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "Jcloud Admin", + "share": 1, + "write": 1 + }, + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "Jcloud Member", + "share": 1, + "write": 1 + } + ], + "sort_field": "creation", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/user_2fa/user_2fa.py b/jcloud/jcloud/pagetype/user_2fa/user_2fa.py new file mode 100644 index 0000000..d20a807 --- /dev/null +++ b/jcloud/jcloud/pagetype/user_2fa/user_2fa.py @@ -0,0 +1,29 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class User2FA(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
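The validate() method below provisions a TOTP secret with pyotp when 2FA is enabled. A minimal sketch of checking a user-supplied code against such a secret (verification itself is outside this pagetype; pyotp's standard TOTP API is assumed).

import pyotp

secret = pyotp.random_base32()  # same call generate_secret() uses
totp = pyotp.TOTP(secret)
code = totp.now()               # current 6-digit code, as shown in an authenticator app
print(totp.verify(code))        # True while the code is within its validity window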
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + enabled: DF.Check + totp_secret: DF.Password | None + user: DF.Link | None + # end: auto-generated types + + def validate(self): + if self.enabled and not self.totp_secret: + self.generate_secret() + + def generate_secret(self): + import pyotp + + self.totp_secret = pyotp.random_base32() diff --git a/jcloud/jcloud/pagetype/user_ssh_certificate/__init__.py b/jcloud/jcloud/pagetype/user_ssh_certificate/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/user_ssh_certificate/test_user_ssh_certificate.py b/jcloud/jcloud/pagetype/user_ssh_certificate/test_user_ssh_certificate.py new file mode 100644 index 0000000..77dc529 --- /dev/null +++ b/jcloud/jcloud/pagetype/user_ssh_certificate/test_user_ssh_certificate.py @@ -0,0 +1,11 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2019, JINGROW +# See license.txt + + +# import jingrow +import unittest + + +class TestUserSSHCertificate(unittest.TestCase): + pass diff --git a/jcloud/jcloud/pagetype/user_ssh_certificate/user_ssh_certificate.js b/jcloud/jcloud/pagetype/user_ssh_certificate/user_ssh_certificate.js new file mode 100644 index 0000000..bc2e965 --- /dev/null +++ b/jcloud/jcloud/pagetype/user_ssh_certificate/user_ssh_certificate.js @@ -0,0 +1,37 @@ +// Copyright (c) 2019, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('User SSH Certificate', { + refresh: function (frm) { + frm.set_query('user_ssh_key', () => { + return { + filters: { user: frm.pg.user }, + }; + }); + if (frm.pg.docstatus === 1) { + let key_type = frm.pg.ssh_public_key.split(' ')[0].split('-')[1]; + frm.add_custom_button('Copy Certificate Details', function () { + let text = `echo '${frm.pg.ssh_certificate.trim()}' > ~/.ssh/id_${key_type}-cert.pub`; + copy_to_clipboard(text); + }); + if (!frm.pg.all_server_access) { + frm.add_custom_button('Copy SSH Command', function () { + copy_to_clipboard(frm.pg.ssh_command); + }); + } + frm.set_df_property( + 'ssh_certificate', + 'description', + `Save this certificate on your system under ~/.ssh/id_${key_type}-cert.pub`, + ); + } + }, +}); + +function copy_to_clipboard(text) { + jingrow.utils.copy_to_clipboard(text); + jingrow.show_alert({ + indicator: 'green', + message: __('Paste the command in your system terminal.'), + }); +} diff --git a/jcloud/jcloud/pagetype/user_ssh_certificate/user_ssh_certificate.json b/jcloud/jcloud/pagetype/user_ssh_certificate/user_ssh_certificate.json new file mode 100644 index 0000000..0b7f487 --- /dev/null +++ b/jcloud/jcloud/pagetype/user_ssh_certificate/user_ssh_certificate.json @@ -0,0 +1,168 @@ +{ + "actions": [], + "creation": "2019-07-10 16:04:46.784120", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "all_servers", + "server_type", + "access_server", + "column_break_1", + "reason", + "section_break_2", + "user", + "user_ssh_key", + "validity", + "column_break_4", + "valid_until", + "ssh_fingerprint", + "section_break_8", + "ssh_public_key", + "amended_from", + "certificate_details", + "ssh_certificate", + "ssh_command" + ], + "fields": [ + { + "fetch_from": "user_ssh_key.ssh_public_key", + "fieldname": "ssh_public_key", + "fieldtype": "Code", + "label": "SSH Public Key", + "read_only": 1 + }, + { + "fieldname": "validity", + "fieldtype": "Select", + "label": "Validity", + "options": "3h\n6h\n12h\n1d" + }, + { + "depends_on": "eval:!pg.all_servers", + "fieldname": "server_type", + "fieldtype": 
"Select", + "label": "Server Type", + "options": "Server\nProxy Server\nDatabase Server" + }, + { + "depends_on": "eval:!pg.all_servers", + "fieldname": "access_server", + "fieldtype": "Dynamic Link", + "label": "Access Server", + "options": "server_type" + }, + { + "fieldname": "amended_from", + "fieldtype": "Link", + "label": "Amended From", + "no_copy": 1, + "options": "User SSH Certificate", + "print_hide": 1, + "read_only": 1 + }, + { + "depends_on": "eval:!(pg.__islocal)", + "fieldname": "ssh_fingerprint", + "fieldtype": "Data", + "label": "SSH Fingerprint", + "read_only": 1 + }, + { + "fieldname": "certificate_details", + "fieldtype": "Code", + "label": "Certificate Details", + "read_only": 1 + }, + { + "fieldname": "section_break_2", + "fieldtype": "Section Break" + }, + { + "fieldname": "column_break_4", + "fieldtype": "Column Break" + }, + { + "fieldname": "section_break_8", + "fieldtype": "Section Break" + }, + { + "fieldname": "valid_until", + "fieldtype": "Datetime", + "in_list_view": 1, + "label": "Valid Until", + "read_only": 1 + }, + { + "fieldname": "ssh_certificate", + "fieldtype": "Code", + "label": "SSH Certificate", + "read_only": 1 + }, + { + "fieldname": "ssh_command", + "fieldtype": "Code", + "label": "SSH Command", + "read_only": 1 + }, + { + "fieldname": "reason", + "fieldtype": "Small Text", + "label": "Reason", + "reqd": 1 + }, + { + "fieldname": "column_break_1", + "fieldtype": "Column Break" + }, + { + "fieldname": "user", + "fieldtype": "Link", + "label": "User", + "options": "User", + "reqd": 1 + }, + { + "fieldname": "user_ssh_key", + "fieldtype": "Link", + "label": "User SSH Key", + "options": "User SSH Key", + "reqd": 1 + }, + { + "default": "0", + "fieldname": "all_servers", + "fieldtype": "Check", + "label": "All Servers" + } + ], + "index_web_pages_for_search": 1, + "is_submittable": 1, + "links": [], + "modified": "2022-05-24 21:48:18.886106", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "User SSH Certificate", + "owner": "Administrator", + "permissions": [ + { + "cancel": 1, + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "submit": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "title_field": "user", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/user_ssh_certificate/user_ssh_certificate.py b/jcloud/jcloud/pagetype/user_ssh_certificate/user_ssh_certificate.py new file mode 100644 index 0000000..3616d95 --- /dev/null +++ b/jcloud/jcloud/pagetype/user_ssh_certificate/user_ssh_certificate.py @@ -0,0 +1,144 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2019, JINGROW +# For license information, please see license.txt + + +import base64 +import binascii +import hashlib +import re +import shlex +import subprocess + +import jingrow +from jingrow import safe_decode +from jingrow.model.document import Document + +from jcloud.utils import log_error + + +class UserSSHCertificate(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+
+	from typing import TYPE_CHECKING
+
+	if TYPE_CHECKING:
+		from jingrow.types import DF
+
+		access_server: DF.DynamicLink | None
+		all_servers: DF.Check
+		amended_from: DF.Link | None
+		certificate_details: DF.Code | None
+		reason: DF.SmallText
+		server_type: DF.Literal["Server", "Proxy Server", "Database Server"]
+		ssh_certificate: DF.Code | None
+		ssh_command: DF.Code | None
+		ssh_fingerprint: DF.Data | None
+		ssh_public_key: DF.Code | None
+		user: DF.Link
+		user_ssh_key: DF.Link
+		valid_until: DF.Datetime | None
+		validity: DF.Literal["3h", "6h", "12h", "1d"]
+	# end: auto-generated types
+
+	def validate(self):
+		if not self.ssh_public_key:
+			jingrow.throw("Please make sure that a valid public key has been added to the Team pg.")
+
+		# check if the ssh key is valid
+		try:
+			base64.b64decode(self.ssh_public_key.strip().split()[1])
+		except binascii.Error:
+			jingrow.throw("Please ensure that the attached text is a valid public key")
+
+	def before_insert(self):
+		if jingrow.get_all(
+			"User SSH Certificate",
+			{
+				"user": self.user,
+				"valid_until": [">", jingrow.utils.now()],
+				"access_server": self.access_server,
+				"all_servers": self.all_servers,
+				"docstatus": 1,
+			},
+		):
+			jingrow.throw("A valid certificate already exists.")
+
+	def before_save(self):
+		# decode the ssh key and generate a fingerprint
+		ssh_key = self.ssh_public_key.strip().split()[1]
+		ssh_key_b64 = base64.b64decode(ssh_key)
+		sha256_sum = hashlib.sha256()
+		sha256_sum.update(ssh_key_b64)
+		self.ssh_fingerprint = safe_decode(base64.b64encode(sha256_sum.digest()))
+
+	def _set_key_type(self):
+		# extract key_type (e.g. rsa, ecdsa, ed25519) for the naming convention
+		self.key_type = self.ssh_public_key.strip().split()[0].split("-")[1]
+		if not self.key_type:
+			jingrow.throw("Could not guess the key type. Please check your public key.")
+
+	def before_submit(self):
+		self._set_key_type()
+		tmp_pub_file_prefix = f"/tmp/id_{self.key_type}-{self.name}"
+		tmp_pub_file = tmp_pub_file_prefix + ".pub"
+		# write the public key to a /tmp file
+		with open(tmp_pub_file, "w") as public_key:
+			public_key.write(self.ssh_public_key)
+			public_key.flush()
+
+		if self.all_servers:
+			principal = "all-servers"
+		else:
+			principal = self.access_server
+
+		# try generating a certificate for the /tmp key.
+		try:
+			command = (
+				f"ssh-keygen -s ca -I {self.name} -n {principal} -V +{self.validity} {tmp_pub_file}"
+			)
+			subprocess.check_output(shlex.split(command), cwd="/etc/ssh")
+		except subprocess.CalledProcessError as e:
+			log_error("SSH Certificate Generation Error", exception=e)
+			jingrow.throw(
+				"Failed to generate a certificate for the specified key. Please try again."
+ ) + process = subprocess.Popen( + shlex.split(f"ssh-keygen -Lf {tmp_pub_file_prefix}-cert.pub"), + stdout=subprocess.PIPE, + ) + self.certificate_details = safe_decode(process.communicate()[0]) + self.set_output_fields() + + def before_cancel(self): + self.delete_key("ssh_certificate") + + def set_output_fields(self): + # extract the time for until when the key is active + regex = re.compile("Valid:.*\n") + self.valid_until = regex.findall(self.certificate_details)[0].strip().split()[-1] + self.ssh_certificate = read_certificate(self.key_type, self.name) + self.generate_ssh_command() + + def generate_ssh_command(self): + server = self.access_server + if not server: + server = "" + + ssh_port = 22 + ssh_user = "jingrow" + self.ssh_command = f"ssh {server} -p {ssh_port} -l {ssh_user}" + + +@jingrow.whitelist() +def read_certificate(key_type, docname): + with open("/tmp/id_{0}-{1}-cert.pub".format(key_type, docname)) as certificate: + try: + return certificate.read() + except Exception: + pass + + +def set_user_ssh_key(user, ssh_public_key): + jingrow.db.set_value("User", user, "ssh_public_key", ssh_public_key) diff --git a/jcloud/jcloud/pagetype/user_ssh_key/__init__.py b/jcloud/jcloud/pagetype/user_ssh_key/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/user_ssh_key/patches/set_existing_keys_as_default.py b/jcloud/jcloud/pagetype/user_ssh_key/patches/set_existing_keys_as_default.py new file mode 100644 index 0000000..0ec8b26 --- /dev/null +++ b/jcloud/jcloud/pagetype/user_ssh_key/patches/set_existing_keys_as_default.py @@ -0,0 +1,9 @@ +# Copyright (c) 2022, JINGROW +# For license information, please see license.txt + +import jingrow + + +def execute(): + jingrow.reload_pagetype("User SSH Key") + jingrow.db.set_value("User SSH Key", {"is_default": False}, "is_default", True) diff --git a/jcloud/jcloud/pagetype/user_ssh_key/test_user_ssh_key.py b/jcloud/jcloud/pagetype/user_ssh_key/test_user_ssh_key.py new file mode 100644 index 0000000..5cab554 --- /dev/null +++ b/jcloud/jcloud/pagetype/user_ssh_key/test_user_ssh_key.py @@ -0,0 +1,102 @@ +# Copyright (c) 2021, JINGROW +# See license.txt + +import cryptography +import jingrow +from cryptography.hazmat.primitives.asymmetric import rsa +from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey +from jingrow.tests.utils import JingrowTestCase + +from jcloud.jcloud.pagetype.team.test_team import create_test_jcloud_admin_team + + +def create_rsa_key() -> str: + key = rsa.generate_private_key( + public_exponent=65537, + key_size=2048, + backend=cryptography.hazmat.backends.default_backend(), + ) + str_key = ( + key.public_key().public_bytes( + encoding=cryptography.hazmat.primitives.serialization.Encoding.OpenSSH, + format=cryptography.hazmat.primitives.serialization.PublicFormat.OpenSSH, + ), + ) + return str_key[0].decode("utf-8") + + +def create_ed25519_key() -> str: + key = Ed25519PrivateKey.generate() + str_key = ( + key.public_key().public_bytes( + encoding=cryptography.hazmat.primitives.serialization.Encoding.OpenSSH, + format=cryptography.hazmat.primitives.serialization.PublicFormat.OpenSSH, + ), + ) + return str_key[0].decode("utf-8") + + +def create_test_user_ssh_key(user: str, str_key: str = None): + """Create a test SSH key for the given user.""" + if not str_key: + str_key = create_rsa_key() + ssh_key = jingrow.get_pg( + { + "pagetype": "User SSH Key", + "user": user, + "ssh_public_key": str_key, + } + ).insert(ignore_if_duplicate=True) + ssh_key.reload() + return 
ssh_key + + +class TestUserSSHKey(JingrowTestCase): + def tearDown(self): + jingrow.db.rollback() + + def test_create_valid_ssh_key_works_with_rsa_key(self): + team = create_test_jcloud_admin_team() + user = jingrow.get_pg("User", team.user) + try: + create_test_user_ssh_key(user.name) + except Exception: + self.fail("Adding a valid RSA SSH key failed") + + def test_create_valid_ssh_key_works_with_ed25519(self): + """Test that creating a valid SSH key works.""" + team = create_test_jcloud_admin_team() + user = jingrow.get_pg("User", team.user) + try: + create_test_user_ssh_key(user.name, create_ed25519_key()) + except Exception: + self.fail("Adding a valid Ed25519 SSH key failed") + + def test_adding_certificate_as_key_fails(self): + """Test that creating an invalid SSH key fails.""" + team = create_test_jcloud_admin_team() + user = jingrow.get_pg("User", team.user) + with self.assertRaisesRegex(jingrow.ValidationError, "Key type has to be one of.*"): + create_test_user_ssh_key(user.name, "ssh-ed25519-cert-v01@openssh.com FAKE_KEY") + + def test_adding_single_word_fails(self): + team = create_test_jcloud_admin_team() + user = jingrow.get_pg("User", team.user) + with self.assertRaisesRegex( + jingrow.ValidationError, "You must supply a key in OpenSSH public key format" + ): + create_test_user_ssh_key(user.name, "ubuntu@jingrow.cloud") + + def test_adding_partial_of_valid_key_with_valid_number_of_data_characters_fails( + self, + ): + team = create_test_jcloud_admin_team() + user = jingrow.get_pg("User", team.user) + with self.assertRaisesRegex( + jingrow.ValidationError, + "copy/pasting the key using one of the commands in documentation", + ): + create_test_user_ssh_key( + user.name, + "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDB3zVjTzHQSEHQG7OD3bYi7V1xk+PCwko0W3+d1fSUvSDCxSMKtR31+CfMKmjnvoHubOHYI9wvLpx6KdZUl2uO", + ) diff --git a/jcloud/jcloud/pagetype/user_ssh_key/user_ssh_key.js b/jcloud/jcloud/pagetype/user_ssh_key/user_ssh_key.js new file mode 100644 index 0000000..7cb5237 --- /dev/null +++ b/jcloud/jcloud/pagetype/user_ssh_key/user_ssh_key.js @@ -0,0 +1,7 @@ +// Copyright (c) 2021, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('User SSH Key', { + // refresh: function(frm) { + // } +}); diff --git a/jcloud/jcloud/pagetype/user_ssh_key/user_ssh_key.json b/jcloud/jcloud/pagetype/user_ssh_key/user_ssh_key.json new file mode 100644 index 0000000..e52eb6a --- /dev/null +++ b/jcloud/jcloud/pagetype/user_ssh_key/user_ssh_key.json @@ -0,0 +1,103 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2022-01-28 20:07:41.467888", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "user", + "column_break_2", + "is_default", + "is_removed", + "section_break_4", + "ssh_public_key", + "ssh_fingerprint" + ], + "fields": [ + { + "fieldname": "user", + "fieldtype": "Link", + "in_list_view": 1, + "label": "User", + "options": "User", + "reqd": 1 + }, + { + "fieldname": "ssh_public_key", + "fieldtype": "Code", + "label": "SSH Public Key", + "reqd": 1 + }, + { + "fieldname": "ssh_fingerprint", + "fieldtype": "Data", + "in_list_view": 1, + "label": "SSH Fingerprint", + "read_only": 1 + }, + { + "default": "1", + "fieldname": "is_default", + "fieldtype": "Check", + "label": "Is Default" + }, + { + "fieldname": "column_break_2", + "fieldtype": "Column Break" + }, + { + "fieldname": "section_break_4", + "fieldtype": "Section Break" + }, + { + "default": "0", + "fieldname": "is_removed", + "fieldtype": "Check", + "label": "Is Removed" 
+ } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2025-01-30 17:39:14.574515", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "User SSH Key", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + }, + { + "create": 1, + "delete": 1, + "if_owner": 1, + "read": 1, + "role": "Jcloud Admin", + "write": 1 + }, + { + "create": 1, + "delete": 1, + "if_owner": 1, + "read": 1, + "role": "Jcloud Member", + "write": 1 + } + ], + "show_title_field_in_link": 1, + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "title_field": "user", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/user_ssh_key/user_ssh_key.py b/jcloud/jcloud/pagetype/user_ssh_key/user_ssh_key.py new file mode 100644 index 0000000..f0748cc --- /dev/null +++ b/jcloud/jcloud/pagetype/user_ssh_key/user_ssh_key.py @@ -0,0 +1,122 @@ +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + +import base64 +import shlex +import struct +import subprocess + +import jingrow +from jingrow.model.document import Document + +from jcloud.api.client import dashboard_whitelist + + +class SSHKeyValueError(ValueError): + pass + + +class SSHFingerprintError(ValueError): + pass + + +class UserSSHKey(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + is_default: DF.Check + is_removed: DF.Check + ssh_fingerprint: DF.Data | None + ssh_public_key: DF.Code + user: DF.Link + # end: auto-generated types + + dashboard_fields = ["ssh_fingerprint", "is_default", "user", "is_removed"] + + valid_key_types = [ + "ssh-rsa", + "ssh-ed25519", + "ecdsa-sha2-nistp256", + "ecdsa-sha2-nistp384", + "ecdsa-sha2-nistp521", + "sk-ecdsa-sha2-nistp256@openssh.com", + "sk-ssh-ed25519@openssh.com", + ] + + def check_embedded_key_type(self, key_type: str, key_bytes: bytes): + type_len = struct.unpack(">I", key_bytes[:4])[0] # >I is big endian unsigned int + offset = 4 + type_len + embedded_type = key_bytes[4:offset] + if embedded_type.decode("utf-8") != key_type: + raise SSHKeyValueError(f"Key type {key_type} does not match key") + + def validate(self): + msg = "You must supply a key in OpenSSH public key format. Please try copy/pasting the key using one of the commands in documentation." 
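+ # Validation flow: split the key into its type, base64 body and optional
+ # comment, check the declared type against `valid_key_types`, decode the
+ # body and confirm the embedded type matches the declared one, then derive
+ # the fingerprint with `ssh-keygen -lf -`. A specific SSHKeyValueError
+ # message is shown together with the generic hint above; any other
+ # failure surfaces the generic hint alone.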
+ try: + key_type, key, *comment = self.ssh_public_key.strip().split() + if key_type not in self.valid_key_types: + raise SSHKeyValueError( + f"Key type has to be one of {', '.join(self.valid_key_types)}" + ) + key_bytes = base64.b64decode(key) + self.check_embedded_key_type(key_type, key_bytes) + self.generate_ssh_fingerprint(self.ssh_public_key.encode()) + except SSHKeyValueError as e: + jingrow.throw( + f"{str(e)}\n{msg}", + ) + except Exception: + jingrow.throw(msg) + + def after_insert(self): + if self.is_default: + self.make_other_keys_non_default() + + def on_update(self): + if self.has_value_changed("is_default") and self.is_default: + self.make_other_keys_non_default() + + @dashboard_whitelist() + def delete(self): + if self.is_default: + other_key = jingrow.get_all( + "User SSH Key", + filters={"user": self.user, "name": ("!=", self.name)}, + fields=["name"], + limit=1, + ) + if other_key: + jingrow.db.set_value("User SSH Key", other_key[0].name, "is_default", True) + + if jingrow.db.exists("SSH Certificate", {"user_ssh_key": self.name}): + self.is_removed = 1 + self.save() + + else: + super().delete() + + def make_other_keys_non_default(self): + jingrow.db.set_value( + "User SSH Key", + {"user": self.user, "is_default": True, "name": ("!=", self.name)}, + "is_default", + False, + ) + + def generate_ssh_fingerprint(self, key_bytes: bytes): + try: + self.ssh_fingerprint = ( + subprocess.check_output( + shlex.split("ssh-keygen -lf -"), stderr=subprocess.STDOUT, input=key_bytes + ) + .decode() + .split()[1] + .split(":")[1] + ) + except subprocess.CalledProcessError as e: + raise SSHKeyValueError(f"Error generating fingerprint: {e.output.decode()}") diff --git a/jcloud/jcloud/pagetype/version_upgrade/__init__.py b/jcloud/jcloud/pagetype/version_upgrade/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/version_upgrade/test_version_upgrade.py b/jcloud/jcloud/pagetype/version_upgrade/test_version_upgrade.py new file mode 100644 index 0000000..0269c26 --- /dev/null +++ b/jcloud/jcloud/pagetype/version_upgrade/test_version_upgrade.py @@ -0,0 +1,79 @@ +# Copyright (c) 2022, JINGROW +# See license.txt + +from unittest.mock import Mock, patch + +import jingrow +from jingrow.tests.utils import JingrowTestCase + +from jcloud.jcloud.pagetype.agent_job.agent_job import AgentJob +from jcloud.jcloud.pagetype.app.test_app import create_test_app +from jcloud.jcloud.pagetype.release_group.test_release_group import ( + create_test_release_group, +) +from jcloud.jcloud.pagetype.server.test_server import create_test_server +from jcloud.jcloud.pagetype.site.test_site import create_test_bench, create_test_site +from jcloud.jcloud.pagetype.site_update.test_site_update import create_test_site_update +from jcloud.jcloud.pagetype.version_upgrade.version_upgrade import VersionUpgrade + + +def create_test_version_upgrade(site: str, destination_group: str) -> VersionUpgrade: + return jingrow.get_pg( + dict(pagetype="Version Upgrade", site=site, destination_group=destination_group) + ).insert(ignore_if_duplicate=True) + + +@patch.object(AgentJob, "enqueue_http_request", Mock()) +class TestVersionUpgrade(JingrowTestCase): + def tearDown(self) -> None: + jingrow.db.rollback() + + def test_version_upgrade_creation_throws_when_destination_doesnt_have_all_apps_in_source( + self, + ): + server = create_test_server() + app1 = create_test_app() # jingrow + app2 = create_test_app("app2", "App 2") + app3 = create_test_app("app3", "App 3") + + group1 = 
create_test_release_group([app1, app2, app3]) + group2 = create_test_release_group([app1]) + + source_bench = create_test_bench(group=group1, server=server.name) + create_test_bench(group=group2, server=server.name) + + site = create_test_site(bench=source_bench.name) + site.install_app(app2.name) + + group2.add_server(server.name) + + self.assertRaisesRegex( + jingrow.ValidationError, + f".*apps installed on {site.name}: app., app.$", + create_test_version_upgrade, + site.name, + group2.name, + ) + + def test_version_upgrade_creates_site_update_even_when_past_updates_failed(self): + server = create_test_server() + app1 = create_test_app() # jingrow + + group1 = create_test_release_group([app1]) + group2 = create_test_release_group([app1]) + + source_bench = create_test_bench(group=group1, server=server.name) + create_test_bench(group=group2, server=server.name) + + site = create_test_site(bench=source_bench.name) + + group2.add_server(server.name) + + create_test_site_update( + site.name, group2.name, "Recovered" + ) # cause of failure not resolved + site_updates_before = jingrow.db.count("Site Update", {"site": site.name}) + version_upgrade = create_test_version_upgrade(site.name, group2.name) + version_upgrade.start() # simulate scheduled one. User will be admin + site_updates_after = jingrow.db.count("Site Update", {"site": site.name}) + self.assertEqual(site_updates_before + 1, site_updates_after) diff --git a/jcloud/jcloud/pagetype/version_upgrade/version_upgrade.js b/jcloud/jcloud/pagetype/version_upgrade/version_upgrade.js new file mode 100644 index 0000000..3f09f23 --- /dev/null +++ b/jcloud/jcloud/pagetype/version_upgrade/version_upgrade.js @@ -0,0 +1,19 @@ +// Copyright (c) 2022, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Version Upgrade', { + refresh: function (frm) { + if (['Scheduled', 'Failure'].includes(frm.pg.status)) { + frm.add_custom_button(__('Start'), () => { + jingrow.confirm('Are you sure you want to try an update?', () => + frm.call('start'), + ); + }); + } + if (frm.pg.status == 'Success') { + frm.add_custom_button(__('Show on FC'), () => { + window.open(`https://jingrow.com/dashboard/sites/${frm.pg.site}`); + }); + } + }, +}); diff --git a/jcloud/jcloud/pagetype/version_upgrade/version_upgrade.json b/jcloud/jcloud/pagetype/version_upgrade/version_upgrade.json new file mode 100644 index 0000000..9164e82 --- /dev/null +++ b/jcloud/jcloud/pagetype/version_upgrade/version_upgrade.json @@ -0,0 +1,147 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2022-07-28 09:13:49.010492", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "site", + "source_group", + "scheduled_time", + "column_break_3", + "status", + "site_update", + "destination_group", + "skip_failing_patches", + "skip_backups", + "section_break_8", + "last_output", + "last_traceback" + ], + "fields": [ + { + "fieldname": "site", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Site", + "options": "Site", + "reqd": 1 + }, + { + "description": "Upgrade will be tried now if time is not set", + "fieldname": "scheduled_time", + "fieldtype": "Datetime", + "label": "Scheduled Time" + }, + { + "fieldname": "status", + "fieldtype": "Select", + "in_list_view": 1, + "label": "status", + "options": "Scheduled\nPending\nRunning\nSuccess\nFailure" + }, + { + "fieldname": "site_update", + "fieldtype": "Link", + "label": "Site Update", + "options": "Site Update", + "read_only": 1 + }, + { + "default": "0", + "fieldname": 
"skip_failing_patches", + "fieldtype": "Check", + "label": "Skip Failing Patches" + }, + { + "fieldname": "last_traceback", + "fieldtype": "Code", + "label": "Last Traceback", + "read_only": 1 + }, + { + "fieldname": "column_break_3", + "fieldtype": "Column Break" + }, + { + "fieldname": "section_break_8", + "fieldtype": "Section Break" + }, + { + "fieldname": "last_output", + "fieldtype": "Code", + "label": "Last Output", + "read_only": 1 + }, + { + "allow_in_quick_entry": 1, + "fetch_from": "site.group", + "fetch_if_empty": 1, + "fieldname": "source_group", + "fieldtype": "Link", + "label": "Source Group", + "options": "Release Group", + "read_only": 1 + }, + { + "fieldname": "destination_group", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Destination Group", + "options": "Release Group", + "reqd": 1 + }, + { + "default": "0", + "fieldname": "skip_backups", + "fieldtype": "Check", + "label": "Skip Backups" + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2024-10-23 22:33:56.984926", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Version Upgrade", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + }, + { + "create": 1, + "email": 1, + "export": 1, + "print": 1, + "report": 1, + "role": "Jcloud Admin", + "share": 1, + "write": 1 + }, + { + "create": 1, + "email": 1, + "export": 1, + "print": 1, + "report": 1, + "role": "Jcloud Member", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "title_field": "site" +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/version_upgrade/version_upgrade.py b/jcloud/jcloud/pagetype/version_upgrade/version_upgrade.py new file mode 100644 index 0000000..6730cf0 --- /dev/null +++ b/jcloud/jcloud/pagetype/version_upgrade/version_upgrade.py @@ -0,0 +1,203 @@ +# Copyright (c) 2022, JINGROW +# For license information, please see license.txt + +from __future__ import annotations + +from typing import TYPE_CHECKING + +import jingrow +from jingrow.model.document import Document + +from jcloud.jcloud.pagetype.jcloud_notification.jcloud_notification import ( + create_new_notification, +) +from jcloud.utils import log_error + +if TYPE_CHECKING: + from jcloud.jcloud.pagetype.site.site import Site + + +class VersionUpgrade(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + destination_group: DF.Link + last_output: DF.Code | None + last_traceback: DF.Code | None + scheduled_time: DF.Datetime | None + site: DF.Link + site_update: DF.Link | None + skip_backups: DF.Check + skip_failing_patches: DF.Check + source_group: DF.Link | None + status: DF.Literal["Scheduled", "Pending", "Running", "Success", "Failure"] + # end: auto-generated types + + pagetype = "Version Upgrade" + + def validate(self): + if self.status == "Failure": + return + self.validate_versions() + self.validate_same_server() + self.validate_apps() + + def validate_same_server(self): + site_server = jingrow.get_pg("Site", self.site).server + destination_servers = [ + server.server for server in jingrow.get_pg("Release Group", self.destination_group).servers + ] + + if site_server not in destination_servers: + jingrow.throw( + f"Destination Group {self.destination_group} is not deployed on the site server {site_server}.", + jingrow.ValidationError, + ) + + def validate_apps(self): + site_apps = [app.app for app in jingrow.get_pg("Site", self.site).apps] + bench_apps = [app.app for app in jingrow.get_pg("Release Group", self.destination_group).apps] + if diff := set(site_apps) - set(bench_apps): + jingrow.throw( + f"Destination Group {self.destination_group} doesn't have some of the apps installed on {self.site}: {', '.join(diff)}", + jingrow.ValidationError, + ) + + def validate_versions(self): + source_version = jingrow.get_value("Release Group", self.source_group, "version") + dest_version = jingrow.get_value("Release Group", self.destination_group, "version") + if dest_version == "Nightly": + jingrow.msgprint( + "You are upgrading the site to Nightly Branch. Please note that Nightly might not be stable" + ) + return + if source_version == "Nightly": + jingrow.throw( + f"Downgrading from Nightly to {dest_version.title()} is not allowed", + jingrow.ValidationError, + ) + source = int(source_version.split()[1]) + dest = int(dest_version.split()[1]) + if dest - source > 1: + jingrow.throw( + f"Upgrading Sites by skipping a major version is unsupported. Destination Release Group {self.destination_group} Version is {dest_version.title()} and Source Version is {source_version.title()}", + jingrow.ValidationError, + ) + + @jingrow.whitelist() + def start(self): + site: "Site" = jingrow.get_pg("Site", self.site) + if site.status.endswith("ing"): + jingrow.throw("Site is under maintenance. 
Cannot Update") + try: + self.site_update = site.move_to_group( + self.destination_group, self.skip_failing_patches, self.skip_backups + ).name + except Exception as e: + jingrow.db.rollback() + self.status = "Failure" + self.add_comment(text=str(e)) + + site = jingrow.get_pg("Site", self.site) + next_version = jingrow.get_value("Release Group", self.destination_group, "version") + + message = f"Version Upgrade for site {site.host_name} to {next_version} failed" + agent_job_id = jingrow.get_value("Site Update", self.site_update, "update_job") + + create_new_notification( + site.team, + "Version Upgrade", + "Agent Job", + agent_job_id, + message, + ) + else: + self.status = jingrow.db.get_value("Site Update", self.site_update, "status") + if self.status == "Success": + site = jingrow.get_pg("Site", self.site) + next_version = jingrow.get_value("Release Group", self.destination_group, "version") + + message = f"Version Upgrade for site {site.host_name} to {next_version} has completed successfully" + agent_job_id = jingrow.get_value("Site Update", self.site_update, "update_job") + + create_new_notification( + site.team, + "Version Upgrade", + "Agent Job", + agent_job_id, + message, + ) + self.save() + + @classmethod + def get_all_scheduled_before_now(cls) -> list["VersionUpgrade"]: + upgrades = jingrow.get_all( + cls.pagetype, + {"scheduled_time": ("<=", jingrow.utils.now()), "status": "Scheduled"}, + pluck="name", + ) + + return cls.get_docs(upgrades) + + @classmethod + def get_all_ongoing_version_upgrades(cls) -> list[Document]: + upgrades = jingrow.get_all(cls.pagetype, {"status": ("in", ["Pending", "Running"])}) + return cls.get_docs(upgrades) + + @classmethod + def get_docs(cls, names: list[str]) -> list[Document]: + return [jingrow.get_pg(cls.pagetype, name) for name in names] + + +def update_from_site_update(): + ongoing_version_upgrades = VersionUpgrade.get_all_ongoing_version_upgrades() + for version_upgrade in ongoing_version_upgrades: + try: + site_update = jingrow.get_pg("Site Update", version_upgrade.site_update) + version_upgrade.status = site_update.status + if site_update.status in ["Failure", "Recovered", "Fatal"]: + last_traceback = jingrow.get_value("Agent Job", site_update.update_job, "traceback") + last_output = jingrow.get_value("Agent Job", site_update.update_job, "output") + version_upgrade.last_traceback = last_traceback + version_upgrade.last_output = last_output + version_upgrade.status = "Failure" + site = jingrow.get_pg("Site", version_upgrade.site) + recipient = site.notify_email or jingrow.get_pg("Team", site.team).user + + jingrow.sendmail( + recipients=[recipient], + subject=f"Automated Version Upgrade Failed for {version_upgrade.site}", + reference_pagetype="Version Upgrade", + reference_name=version_upgrade.name, + template="version_upgrade_failed", + args={ + "site": version_upgrade.site, + "traceback": last_traceback, + "output": last_output, + }, + ) + version_upgrade.save() + jingrow.db.commit() + except Exception: + jingrow.log_error(f"Error while updating Version Upgrade {version_upgrade.name}") + jingrow.db.rollback() + + +def run_scheduled_upgrades(): + for upgrade in VersionUpgrade.get_all_scheduled_before_now(): + try: + site_status = jingrow.db.get_value("Site", upgrade.site, "status") + if site_status.endswith("ing"): + # If we attempt to start the upgrade now, it will fail + # This will be picked up in the next iteration + continue + upgrade.start() + jingrow.db.commit() + except Exception: + log_error("Scheduled Version Upgrade Error", 
upgrade=upgrade) + jingrow.db.rollback() diff --git a/jcloud/jcloud/pagetype/virtual_disk_snapshot/__init__.py b/jcloud/jcloud/pagetype/virtual_disk_snapshot/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/virtual_disk_snapshot/patches/rename_aws_fields.py b/jcloud/jcloud/pagetype/virtual_disk_snapshot/patches/rename_aws_fields.py new file mode 100644 index 0000000..8058d69 --- /dev/null +++ b/jcloud/jcloud/pagetype/virtual_disk_snapshot/patches/rename_aws_fields.py @@ -0,0 +1,11 @@ +# Copyright (c) 2023, JINGROW +# For license information, please see license.txt + +import jingrow +from jingrow.model.utils.rename_field import rename_field + + +def execute(): + jingrow.reload_pagetype("Virtual Disk Snapshot") + rename_field("Virtual Disk Snapshot", "aws_snapshot_id", "snapshot_id") + rename_field("Virtual Disk Snapshot", "aws_volume_id", "volume_id") diff --git a/jcloud/jcloud/pagetype/virtual_disk_snapshot/test_virtual_disk_snapshot.py b/jcloud/jcloud/pagetype/virtual_disk_snapshot/test_virtual_disk_snapshot.py new file mode 100644 index 0000000..5009336 --- /dev/null +++ b/jcloud/jcloud/pagetype/virtual_disk_snapshot/test_virtual_disk_snapshot.py @@ -0,0 +1,9 @@ +# Copyright (c) 2022, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests.utils import JingrowTestCase + + +class TestVirtualDiskSnapshot(JingrowTestCase): + pass diff --git a/jcloud/jcloud/pagetype/virtual_disk_snapshot/virtual_disk_snapshot.js b/jcloud/jcloud/pagetype/virtual_disk_snapshot/virtual_disk_snapshot.js new file mode 100644 index 0000000..232ca6b --- /dev/null +++ b/jcloud/jcloud/pagetype/virtual_disk_snapshot/virtual_disk_snapshot.js @@ -0,0 +1,25 @@ +// Copyright (c) 2022, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Virtual Disk Snapshot', { + refresh: function (frm) { + [ + [__('Sync'), 'sync'], + [__('Delete'), 'delete_snapshot'], + ].forEach(([label, method]) => { + frm.add_custom_button( + label, + () => { + frm.call(method).then((r) => frm.refresh()); + }, + __('Actions'), + ); + }); + if (frm.pg.snapshot_id) { + frm.add_web_link( + `https://${frm.pg.region}.console.aws.amazon.com/ec2/v2/home?region=${frm.pg.region}#SnapshotDetails:snapshotId=${frm.pg.snapshot_id}`, + __('Visit AWS Dashboard'), + ); + } + }, +}); diff --git a/jcloud/jcloud/pagetype/virtual_disk_snapshot/virtual_disk_snapshot.json b/jcloud/jcloud/pagetype/virtual_disk_snapshot/virtual_disk_snapshot.json new file mode 100644 index 0000000..8632dd9 --- /dev/null +++ b/jcloud/jcloud/pagetype/virtual_disk_snapshot/virtual_disk_snapshot.json @@ -0,0 +1,167 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2022-08-30 12:15:23.873531", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "virtual_machine", + "snapshot_id", + "status", + "column_break_4", + "cluster", + "region", + "volume_id", + "created_for_site_update", + "section_break_41e4", + "size", + "start_time", + "column_break_7lcz", + "progress", + "duration", + "section_break_12", + "mariadb_root_password" + ], + "fields": [ + { + "fieldname": "size", + "fieldtype": "Int", + "in_list_view": 1, + "label": "Size", + "read_only": 1 + }, + { + "fieldname": "virtual_machine", + "fieldtype": "Link", + "in_standard_filter": 1, + "label": "Virtual Machine", + "options": "Virtual Machine", + "read_only": 1, + "reqd": 1, + "search_index": 1 + }, + { + "fetch_from": "virtual_machine.cluster", + "fieldname": "cluster", + "fieldtype": "Link", + "in_list_view": 
1, + "in_standard_filter": 1, + "label": "Cluster", + "options": "Cluster", + "read_only": 1 + }, + { + "fetch_from": "virtual_machine.region", + "fieldname": "region", + "fieldtype": "Link", + "label": "Region", + "options": "Cloud Region", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "column_break_4", + "fieldtype": "Column Break", + "read_only": 1 + }, + { + "fieldname": "progress", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Progress", + "read_only": 1 + }, + { + "default": "Pending", + "fieldname": "status", + "fieldtype": "Select", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Status", + "options": "Pending\nCompleted\nError\nRecovering\nRecoverable\nUnavailable", + "read_only": 1, + "search_index": 1 + }, + { + "fieldname": "start_time", + "fieldtype": "Datetime", + "label": "Start Time", + "read_only": 1 + }, + { + "fieldname": "section_break_12", + "fieldtype": "Section Break" + }, + { + "fieldname": "mariadb_root_password", + "fieldtype": "Password", + "label": "MariaDB Root Password", + "read_only": 1 + }, + { + "fieldname": "section_break_41e4", + "fieldtype": "Section Break" + }, + { + "fieldname": "column_break_7lcz", + "fieldtype": "Column Break" + }, + { + "fieldname": "snapshot_id", + "fieldtype": "Data", + "label": "Snapshot ID", + "read_only": 1, + "reqd": 1, + "search_index": 1 + }, + { + "fieldname": "volume_id", + "fieldtype": "Data", + "label": "Volume ID", + "read_only": 1 + }, + { + "fieldname": "duration", + "fieldtype": "Duration", + "label": "Duration", + "read_only": 1 + }, + { + "default": "0", + "fieldname": "created_for_site_update", + "fieldtype": "Check", + "label": "Created For Site Update" + } + ], + "index_web_pages_for_search": 1, + "links": [ + { + "link_pagetype": "Site Backup", + "link_fieldname": "database_snapshot" + } + ], + "modified": "2025-01-28 15:28:24.912058", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Virtual Disk Snapshot", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "title_field": "virtual_machine" +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/virtual_disk_snapshot/virtual_disk_snapshot.py b/jcloud/jcloud/pagetype/virtual_disk_snapshot/virtual_disk_snapshot.py new file mode 100644 index 0000000..538fd6f --- /dev/null +++ b/jcloud/jcloud/pagetype/virtual_disk_snapshot/virtual_disk_snapshot.py @@ -0,0 +1,315 @@ +# Copyright (c) 2022, JINGROW +# For license information, please see license.txt +from __future__ import annotations + +import boto3 +import jingrow +import jingrow.utils +import pytz +from botocore.exceptions import ClientError +from jingrow.model.document import Document +from oci.core import BlockstorageClient + +from jcloud.utils import log_error + + +class VirtualDiskSnapshot(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + cluster: DF.Link | None + created_for_site_update: DF.Check + duration: DF.Duration | None + mariadb_root_password: DF.Password | None + progress: DF.Data | None + region: DF.Link + size: DF.Int + snapshot_id: DF.Data + start_time: DF.Datetime | None + status: DF.Literal["Pending", "Completed", "Error", "Recovering", "Recoverable", "Unavailable"] + virtual_machine: DF.Link + volume_id: DF.Data | None + # end: auto-generated types + + def before_insert(self): + self.set_credentials() + + def after_insert(self): + self.sync() + + def set_credentials(self): + series = jingrow.db.get_value("Virtual Machine", self.virtual_machine, "series") + if series == "m" and jingrow.db.exists("Database Server", self.virtual_machine): + self.mariadb_root_password = jingrow.get_pg("Database Server", self.virtual_machine).get_password( + "mariadb_root_password" + ) + + def on_update(self): + if self.has_value_changed("status") and self.status == "Unavailable": + site_backup_name = jingrow.db.exists("Site Backup", {"database_snapshot": self.name}) + if site_backup_name: + jingrow.db.set_value("Site Backup", site_backup_name, "files_availability", "Unavailable") + + if self.has_value_changed("status") and self.status == "Completed": + old_pg = self.get_pg_before_save() + if old_pg is None or old_pg.status != "Pending": + return + + self.duration = jingrow.utils.cint( + jingrow.utils.time_diff_in_seconds(jingrow.utils.now_datetime(), self.creation) + ) + self.save() + + @jingrow.whitelist() + def sync(self): + cluster = jingrow.get_pg("Cluster", self.cluster) + if cluster.cloud_provider == "AWS EC2": + try: + snapshots = self.client.describe_snapshots(SnapshotIds=[self.snapshot_id])["Snapshots"] + if snapshots: + snapshot = snapshots[0] + self.volume_id = snapshot["VolumeId"] + self.snapshot_id = snapshot["SnapshotId"] + + self.status = self.get_aws_status_map(snapshot["State"]) + self.description = snapshot["Description"] + self.size = snapshot["VolumeSize"] + self.start_time = jingrow.utils.format_datetime( + snapshot["StartTime"], "yyyy-MM-dd HH:mm:ss" + ) + self.progress = snapshot["Progress"] + except Exception: + self.status = "Unavailable" + elif cluster.cloud_provider == "OCI": + if ".bootvolumebackup." in self.snapshot_id: + snapshot = self.client.get_boot_volume_backup(self.snapshot_id).data + self.volume_id = snapshot.boot_volume_id + else: + snapshot = self.client.get_volume_backup(self.snapshot_id).data + self.volume_id = snapshot.volume_id + self.status = self.get_oci_status_map(snapshot.lifecycle_state) + self.description = snapshot.display_name + self.size = snapshot.size_in_gbs + + self.start_time = jingrow.utils.format_datetime( + snapshot.time_created.astimezone(pytz.timezone(jingrow.utils.get_system_timezone())), + "yyyy-MM-dd HH:mm:ss", + ) + self.save() + + @jingrow.whitelist() + def delete_snapshot(self): + self.sync() + if self.status == "Unavailable": + return + cluster = jingrow.get_pg("Cluster", self.cluster) + if cluster.cloud_provider == "AWS EC2": + try: + self.client.delete_snapshot(SnapshotId=self.snapshot_id) + except ClientError as e: + if e.response["Error"]["Code"] == "InvalidSnapshot.InUse": + jingrow.msgprint("Snapshot is in use", alert=True) + else: + raise e + elif cluster.cloud_provider == "OCI": + if ".bootvolumebackup." 
in self.snapshot_id: + self.client.delete_boot_volume_backup(self.snapshot_id) + else: + self.client.delete_volume_backup(self.snapshot_id) + self.sync() + + def get_aws_status_map(self, status): + return { + "pending": "Pending", + "completed": "Completed", + "error": "Error", + "recovering": "Recovering", + "recoverable": "Recoverable", + }.get(status, "Unavailable") + + def get_oci_status_map(self, status): + return { + "CREATING": "Pending", + "AVAILABLE": "Completed", + "TERMINATING": "Pending", + "TERMINATED": "Unavailable", + "FAULTY": "Error", + "REQUEST_RECEIVED": "Pending", + }.get(status, "Unavailable") + + def create_volume(self, availability_zone: str, iops: int = 3000, throughput: int | None = None) -> str: + self.sync() + if self.status != "Completed": + raise Exception("Snapshot is unavailable") + if throughput is None: + throughput = 125 + response = self.client.create_volume( + SnapshotId=self.snapshot_id, + AvailabilityZone=availability_zone, + VolumeType="gp3", + TagSpecifications=[ + { + "ResourceType": "volume", + "Tags": [{"Key": "Name", "Value": f"Jingrow Snapshot - {self.name}"}], + }, + ], + Iops=iops, + Throughput=throughput, + ) + return response["VolumeId"] + + @property + def client(self): + cluster = jingrow.get_pg("Cluster", self.cluster) + if cluster.cloud_provider == "AWS EC2": + return boto3.client( + "ec2", + region_name=self.region, + aws_access_key_id=cluster.aws_access_key_id, + aws_secret_access_key=cluster.get_password("aws_secret_access_key"), + ) + if cluster.cloud_provider == "OCI": + return BlockstorageClient(cluster.get_oci_config()) + return None + + +def sync_snapshots(): + snapshots = jingrow.get_all( + "Virtual Disk Snapshot", {"status": "Pending", "created_for_site_update": ["!=", 1]} + ) + for snapshot in snapshots: + try: + jingrow.get_pg("Virtual Disk Snapshot", snapshot.name).sync() + jingrow.db.commit() + except Exception: + jingrow.db.rollback() + log_error(title="Virtual Disk Snapshot Sync Error", virtual_snapshot=snapshot.name) + + +def delete_old_snapshots(): + snapshots = jingrow.get_all( + "Virtual Disk Snapshot", + { + "status": "Completed", + "creation": ("<=", jingrow.utils.add_days(None, -2)), + "created_for_site_update": 0, + }, + pluck="name", + order_by="creation asc", + limit=500, + ) + for snapshot in snapshots: + try: + jingrow.get_pg("Virtual Disk Snapshot", snapshot).delete_snapshot() + jingrow.db.commit() + except Exception: + log_error("Virtual Disk Snapshot Delete Error", snapshot=snapshot) + jingrow.db.rollback() + + +def sync_all_snapshots_from_aws(): + regions = jingrow.get_all("Cloud Region", {"provider": "AWS EC2"}, pluck="name") + for region in regions: + if not jingrow.db.exists("Virtual Disk Snapshot", {"region": region}): + continue + random_snapshot = jingrow.get_pg( + "Virtual Disk Snapshot", + { + "region": region, + }, + ) + client = random_snapshot.client + paginator = client.get_paginator("describe_snapshots") + for page in paginator.paginate(OwnerIds=["self"], Filters=[{"Name": "tag-key", "Values": ["Name"]}]): + for snapshot in page["Snapshots"]: + if _should_skip_snapshot(snapshot): + continue + try: + delete_duplicate_snapshot_docs(snapshot) + if _update_snapshot_if_exists(snapshot, random_snapshot): + continue + tag_name = next(tag["Value"] for tag in snapshot["Tags"] if tag["Key"] == "Name") + virtual_machine = tag_name.split(" - ")[1] + _insert_snapshot(snapshot, virtual_machine, random_snapshot) + jingrow.db.commit() + except Exception: + log_error( + title="Virtual Disk Snapshot Sync 
Error", + snapshot=snapshot, + ) + jingrow.db.rollback() + + +def _insert_snapshot(snapshot, virtual_machine, random_snapshot): + start_time = jingrow.utils.format_datetime(snapshot["StartTime"], "yyyy-MM-dd HH:mm:ss") + new_snapshot = jingrow.get_pg( + { + "pagetype": "Virtual Disk Snapshot", + "snapshot_id": snapshot["SnapshotId"], + "virtual_machine": virtual_machine, + "volume_id": snapshot["VolumeId"], + "status": random_snapshot.get_aws_status_map(snapshot["State"]), + "description": snapshot["Description"], + "size": snapshot["VolumeSize"], + "start_time": start_time, + "progress": snapshot["Progress"], + } + ).insert() + jingrow.db.set_value( + "Virtual Disk Snapshot", + new_snapshot.name, + {"creation": start_time, "modified": start_time}, + update_modified=False, + ) + return new_snapshot + + +def _should_skip_snapshot(snapshot): + tag_names = [tag["Value"] for tag in snapshot["Tags"] if tag["Key"] == "Name"] + if not tag_names: + return True + tag_name_parts = tag_names[0].split(" - ") + if len(tag_name_parts) != 3: + return True + identifier, virtual_machine, _ = tag_name_parts + if identifier != "Jingrow": + return True + if not jingrow.db.exists("Virtual Machine", virtual_machine): + return True + + return False + + +def delete_duplicate_snapshot_docs(snapshot): + # Delete all except one snapshot document + # It doesn't matter which one we keep + snapshot_id = snapshot["SnapshotId"] + snapshot_count = jingrow.db.count("Virtual Disk Snapshot", {"snapshot_id": snapshot_id}) + if snapshot_count > 1: + jingrow.db.sql( + """ + DELETE + FROM `tabVirtual Disk Snapshot` + WHERE snapshot_id=%s + LIMIT %s + """, + (snapshot_id, snapshot_count - 1), + ) + + +def _update_snapshot_if_exists(snapshot, random_snapshot): + snapshot_id = snapshot["SnapshotId"] + if jingrow.db.exists("Virtual Disk Snapshot", {"snapshot_id": snapshot_id}): + jingrow.db.set_value( + "Virtual Disk Snapshot", + {"snapshot_id": snapshot_id}, + "status", + random_snapshot.get_aws_status_map(snapshot["State"]), + ) + return True + return False diff --git a/jcloud/jcloud/pagetype/virtual_machine/__init__.py b/jcloud/jcloud/pagetype/virtual_machine/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/virtual_machine/cloud-init.yml.jinja2 b/jcloud/jcloud/pagetype/virtual_machine/cloud-init.yml.jinja2 new file mode 100644 index 0000000..9cc9a77 --- /dev/null +++ b/jcloud/jcloud/pagetype/virtual_machine/cloud-init.yml.jinja2 @@ -0,0 +1,88 @@ +#cloud-config + +ssh_deletekeys: true + +disable_root: false + +users: +- name: root + ssh_authorized_keys: + - {{ ssh_key }} + +- name: jingrow + ssh_authorized_keys: + - {{ ssh_key }} + +runcmd: +- mkdir /etc/ssh/auth_principals +- curl http://npm.jingrow.com:105/ca.pub > /etc/ssh/ca.pub && chmod 644 /etc/ssh/ca.pub +- su - jingrow -c "cd /home/jingrow/agent && env/bin/agent setup config --name {{ server.name }} --workers 2" +- su - jingrow -c "cd /home/jingrow/agent && env/bin/agent setup authentication --password {{ agent_password }}" +- su - jingrow -c "htpasswd -Bbc /home/jingrow/agent/nginx/monitoring.htpasswd jingrow {{ monitoring_password }}" +- supervisorctl restart all +{% if server.pagetype == 'Database Server' %} +- resize2fs $(findmnt /opt/volumes/mariadb --noheadings --output SOURCE) +- systemctl daemon-reload +- systemctl restart mariadb +- systemctl restart mysqld_exporter +- systemctl restart deadlock_logger +{% elif server.pagetype == 'Server' %} +- resize2fs $(findmnt /opt/volumes/benches --noheadings --output SOURCE) +{% 
endif %} +{% if server.provider == 'OCI' %} +- iptables -D INPUT -j REJECT --reject-with icmp-host-prohibited +- sed -i 's/^-A INPUT -j REJECT --reject-with icmp-host-prohibited$/#-A INPUT -j REJECT --reject-with icmp-host-prohibited/g' /etc/iptables/rules.v4 +{% endif %} + +write_files: +- path: /etc/systemd/system/statsd_exporter.service + permissions: "0644" + content: | + {{ statsd_exporter_service | indent(4) }} + +- path: /etc/filebeat/filebeat.yml + content: | + {{ filebeat_config | indent(4) }} + +- path: /etc/ssh/auth_principals/jingrow + defer: true + content: | + all-servers + {{ server.name }} + +- path: /etc/ssh/sshd_config + append: true + content: | + TrustedUserCAKeys /etc/ssh/ca.pub + AuthorizedPrincipalsFile /etc/ssh/auth_principals/%u + +{% if server.pagetype == 'Database Server' %} +- path: /etc/mysql/conf.d/jingrow.cnf + content: | + {{ mariadb_config | indent(4) }} + +- path: /etc/systemd/system/mariadb.service.d/memory.conf + content: | + {{ mariadb_systemd_config | indent(4) }} + +- path: /etc/systemd/system/mysqld_exporter.service + content: | + {{ mariadb_exporter_config | indent(4) }} + +- path: /root/.my.cnf + content: | + {{ mariadb_root_config | indent(4) }} + +- path: /etc/systemd/system/deadlock_logger.service + content: | + {{ deadlock_logger_config | indent(4) }} +{% endif %} + +swap: + filename: /swap.default + size: 1073741824 + +{% if server.pagetype == 'Server' %} +packages: + - earlyoom +{% endif %} diff --git a/jcloud/jcloud/pagetype/virtual_machine/patches/populate_volumes_table.py b/jcloud/jcloud/pagetype/virtual_machine/patches/populate_volumes_table.py new file mode 100644 index 0000000..735f96e --- /dev/null +++ b/jcloud/jcloud/pagetype/virtual_machine/patches/populate_volumes_table.py @@ -0,0 +1,21 @@ +import jingrow + + +def execute(): + jingrow.reload_pg("jcloud", "pagetype", "virtual_machine_volume") + jingrow.reload_pg("jcloud", "pagetype", "virtual_machine") + + for machine in jingrow.get_all("Virtual Machine", pluck="name"): + machine = jingrow.get_pg("Virtual Machine", machine) + for volume in machine.get_volumes(): + row = { + "volume_id": volume["VolumeId"], + "volume_type": volume["VolumeType"], + "size": volume["Size"], + "iops": volume["Iops"], + } + if "Throughput" in volume: + row["throughput"] = volume["Throughput"] + + machine.append("volumes", row) + machine.save() diff --git a/jcloud/jcloud/pagetype/virtual_machine/patches/rename_aws_fields.py b/jcloud/jcloud/pagetype/virtual_machine/patches/rename_aws_fields.py new file mode 100644 index 0000000..fd4f957 --- /dev/null +++ b/jcloud/jcloud/pagetype/virtual_machine/patches/rename_aws_fields.py @@ -0,0 +1,12 @@ +# Copyright (c) 2023, JINGROW +# For license information, please see license.txt + +import jingrow +from jingrow.model.utils.rename_field import rename_field + + +def execute(): + jingrow.reload_pagetype("Virtual Machine") + rename_field("Virtual Machine", "aws_subnet_id", "subnet_id") + rename_field("Virtual Machine", "aws_security_group_id", "security_group_id") + rename_field("Virtual Machine", "aws_instance_id", "instance_id") diff --git a/jcloud/jcloud/pagetype/virtual_machine/patches/rename_virtual_machines.py b/jcloud/jcloud/pagetype/virtual_machine/patches/rename_virtual_machines.py new file mode 100644 index 0000000..726dd76 --- /dev/null +++ b/jcloud/jcloud/pagetype/virtual_machine/patches/rename_virtual_machines.py @@ -0,0 +1,15 @@ +# Copyright (c) 2022, JINGROW +# For license information, please see license.txt + +import jingrow + + +def execute(): + for 
machine in jingrow.get_all( + "Virtual Machine", {"status": "Running"}, ["name", "series"] + ): + server_type_map = {"f": "Server", "m": "Database Server", "n": "Proxy Server"} + server = jingrow.db.get_value( + server_type_map[machine.series], {"virtual_machine": machine.name} + ) + jingrow.rename_pg("Virtual Machine", machine.name, server) diff --git a/jcloud/jcloud/pagetype/virtual_machine/patches/set_naming_fields.py b/jcloud/jcloud/pagetype/virtual_machine/patches/set_naming_fields.py new file mode 100644 index 0000000..99a4d87 --- /dev/null +++ b/jcloud/jcloud/pagetype/virtual_machine/patches/set_naming_fields.py @@ -0,0 +1,26 @@ +# Copyright (c) 2022, JINGROW +# For license information, please see license.txt + +import jingrow + + +def execute(): + jingrow.reload_pg("jcloud", "pagetype", "virtual_machine") + + for machine in jingrow.get_all( + "Virtual Machine", ["name", "index", "series", "domain"] + ): + for server_type, series in [ + ("Server", "f"), + ("Database Server", "m"), + ("Proxy Server", "n"), + ]: + server = jingrow.db.get_value( + server_type, {"virtual_machine": machine.name}, ["name", "domain"], as_dict=True + ) + if server: + break + index = server.name.split("-")[0][1:] + jingrow.db.set_value("Virtual Machine", machine.name, "series", series) + jingrow.db.set_value("Virtual Machine", machine.name, "index", index) + jingrow.db.set_value("Virtual Machine", machine.name, "domain", server.domain) diff --git a/jcloud/jcloud/pagetype/virtual_machine/patches/set_root_disk_size.py b/jcloud/jcloud/pagetype/virtual_machine/patches/set_root_disk_size.py new file mode 100644 index 0000000..c27614c --- /dev/null +++ b/jcloud/jcloud/pagetype/virtual_machine/patches/set_root_disk_size.py @@ -0,0 +1,32 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt +from __future__ import annotations + +import jingrow + + +def execute(): + # Set `root_disk_size` to `disk_size` + jingrow.db.sql("UPDATE `tabVirtual Machine` SET `root_disk_size` = `disk_size`") + + # Set `disk_size` and `root_disk_size` on machines with multiple volumes + multi_volume_machines = jingrow.db.sql( + """ + SELECT machine.name + FROM `tabVirtual Machine` machine + LEFT JOIN `tabVirtual Machine Volume` volume + ON volume.parent = machine.name + WHERE machine.status in ('Running', 'Stopped', 'Pending') + GROUP BY machine.name + HAVING COUNT(volume.name) > 1 + """, + as_dict=True, + ) + for machine_name in multi_volume_machines: + machine = jingrow.get_pg("Virtual Machine", machine_name) + machine.has_data_volume = True + machine.save() + disk_size = machine.get_data_volume().size + root_disk_size = machine.get_root_volume().size + jingrow.db.set_value("Virtual Machine", machine.name, "disk_size", disk_size) + jingrow.db.set_value("Virtual Machine", machine.name, "root_disk_size", root_disk_size) diff --git a/jcloud/jcloud/pagetype/virtual_machine/patches/set_virtual_machine_naming_series.py b/jcloud/jcloud/pagetype/virtual_machine/patches/set_virtual_machine_naming_series.py new file mode 100644 index 0000000..995b1c2 --- /dev/null +++ b/jcloud/jcloud/pagetype/virtual_machine/patches/set_virtual_machine_naming_series.py @@ -0,0 +1,21 @@ +# Copyright (c) 2022, JINGROW +# For license information, please see license.txt + +import jingrow +from jingrow.desk.utils import slug + + +def execute(): + for row in jingrow.get_all( + "Virtual Machine", + {"status": "Running"}, + ["cluster", "series", "max(`index`) as `index`"], + group_by="cluster, series", + order_by="series, cluster", + ): + 
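+ # For each (series, cluster) pair, seed the `tabSeries` counter with the
+ # highest index currently in use among running machines, so that newly
+ # provisioned machines continue from the existing numbering.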
jingrow.db.sql( + f""" + INSERT INTO `tabSeries` (`name`, `current`) + VALUES ("{row.series}-{slug(row.cluster)}", {row.index}) + """ + ) diff --git a/jcloud/jcloud/pagetype/virtual_machine/test_virtual_machine.py b/jcloud/jcloud/pagetype/virtual_machine/test_virtual_machine.py new file mode 100644 index 0000000..ee273f1 --- /dev/null +++ b/jcloud/jcloud/pagetype/virtual_machine/test_virtual_machine.py @@ -0,0 +1,57 @@ +# Copyright (c) 2021, JINGROW +# See license.txt + +from __future__ import annotations + +from typing import TYPE_CHECKING +from unittest.mock import MagicMock, patch + +import jingrow +from jingrow.tests.utils import JingrowTestCase + +from jcloud.jcloud.pagetype.cluster.test_cluster import create_test_cluster +from jcloud.jcloud.pagetype.root_domain.test_root_domain import create_test_root_domain +from jcloud.jcloud.pagetype.virtual_machine.virtual_machine import VirtualMachine + +if TYPE_CHECKING: + from jcloud.jcloud.pagetype.cluster.cluster import Cluster + + +@patch.object(VirtualMachine, "client", new=MagicMock()) +def create_test_virtual_machine( + ip: str | None = None, + cluster: Cluster = None, + series: str = "m", +) -> VirtualMachine: + """Create test Virtual Machine pg""" + if not ip: + ip = jingrow.mock("ipv4") + if not cluster: + cluster = create_test_cluster() + return jingrow.get_pg( + { + "pagetype": "Virtual Machine", + "domain": create_test_root_domain("fc.dev", cluster.name).name, + "series": series, + "status": "Running", + "machine_type": "r5.xlarge", + "disk_size": 100, + "cluster": cluster.name, + "instance_id": "i-1234567890", + "vcpu": 4, + } + ).insert(ignore_if_duplicate=True) + + +@patch.object(VirtualMachine, "client", new=MagicMock()) +class TestVirtualMachine(JingrowTestCase): + def tearDown(self): + jingrow.db.rollback() + + def test_database_server_creation_works(self): + """Test if database server creation works""" + vm = create_test_virtual_machine() + try: + vm.create_database_server() + except Exception as e: + self.fail(e) diff --git a/jcloud/jcloud/pagetype/virtual_machine/virtual_machine.js b/jcloud/jcloud/pagetype/virtual_machine/virtual_machine.js new file mode 100644 index 0000000..302d579 --- /dev/null +++ b/jcloud/jcloud/pagetype/virtual_machine/virtual_machine.js @@ -0,0 +1,345 @@ +// Copyright (c) 2021, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Virtual Machine', { + refresh: function (frm) { + [ + [__('Sync'), 'sync'], + [__('Provision'), 'provision', true, frm.pg.status == 'Draft'], + [__('Reboot'), 'reboot', true, frm.pg.status == 'Running'], + [__('Stop'), 'stop', true, frm.pg.status == 'Running'], + [__('Force Stop'), 'force_stop', true, frm.pg.status == 'Running'], + [__('Start'), 'start', true, frm.pg.status == 'Stopped'], + [__('Terminate'), 'terminate', true, !frm.pg.termination_protection], + [ + __('Force Terminate'), + 'force_terminate', + true, + Boolean(jingrow.boot.developer_mode), + ], + [ + __('Disable Termination Protection'), + 'disable_termination_protection', + true, + frm.pg.termination_protection, + ], + [ + __('Enable Termination Protection'), + 'enable_termination_protection', + true, + !frm.pg.termination_protection, + ], + [__('Increase Disk Size'), 'increase_disk_size', true], + [__('Create Image'), 'create_image', true, frm.pg.status == 'Stopped'], + [ + __('Create Snapshots'), + 'create_snapshots', + true, + frm.pg.status == 'Running', + ], + [__('Create Server'), 'create_server', true, frm.pg.series === 'f'], + [ + __('Create Database Server'), + 
'create_database_server', + false, + frm.pg.series === 'm', + ], + [ + __('Create Proxy Server'), + 'create_proxy_server', + false, + frm.pg.series === 'n', + ], + [ + __('Create Registry Server'), + 'create_registry_server', + false, + frm.pg.series === 'r', + ], + [ + __('Create Monitor Server'), + 'create_monitor_server', + false, + frm.pg.series === 'm', + ], + [ + __('Create Log Server'), + 'create_log_server', + false, + frm.pg.series === 'e', + ], + [ + __('Reboot with serial console'), + 'reboot_with_serial_console', + true, + frm.pg.status === 'Running' && frm.pg.cloud_provider === 'AWS EC2', + ], + ].forEach(([label, method, confirm, condition]) => { + if (typeof condition === 'undefined' || condition) { + frm.add_custom_button( + label, + () => { + if (confirm) { + jingrow.confirm( + `Are you sure you want to ${label.toLowerCase()}?`, + () => + frm.call(method).then((r) => { + if (r.message) { + jingrow.msgprint(r.message); + } else { + frm.refresh(); + } + }), + ); + } else { + frm.call(method).then((r) => { + if (r.message) { + jingrow.msgprint(r.message); + } else { + frm.refresh(); + } + }); + } + }, + __('Actions'), + ); + } + }); + [ + [ + __('Resize'), + 'resize', + frm.pg.status == 'Stopped' || + (frm.pg.cloud_provider == 'OCI' && frm.pg.status != 'Draft'), + ], + ].forEach(([label, method, condition]) => { + if (typeof condition === 'undefined' || condition) { + frm.add_custom_button( + label, + () => { + jingrow.prompt( + { + fieldtype: 'Data', + label: 'Machine Type', + fieldname: 'machine_type', + reqd: 1, + }, + ({ machine_type }) => { + frm + .call(method, { + machine_type, + }) + .then((r) => frm.refresh()); + }, + __('Resize Virtual Machine'), + ); + }, + __('Actions'), + ); + } + }); + [ + [ + __('Update OCI Volume Performance'), + 'update_oci_volume_performance', + frm.pg.cloud_provider == 'OCI', + ], + ].forEach(([label, method, condition]) => { + if (typeof condition === 'undefined' || condition) { + frm.add_custom_button( + label, + () => { + jingrow.prompt( + [ + { + fieldtype: 'Int', + label: 'VPUs / GB', + fieldname: 'vpus', + reqd: 1, + default: + (frm.pg.volumes[0].iops / frm.pg.volumes[0].size - 45) / + 1.5, + }, + ], + ({ vpus }) => { + frm + .call(method, { + vpus, + }) + .then((r) => frm.refresh()); + }, + __('Update OCI Volume Performance'), + ); + }, + __('Actions'), + ); + } + }); + [ + [ + __('Convert to ARM'), + 'convert_to_arm', + frm.pg.cloud_provider == 'AWS EC2' && frm.pg.platform == 'x86_64', + ], + ].forEach(([label, method, condition]) => { + if (typeof condition === 'undefined' || condition) { + frm.add_custom_button( + label, + () => { + jingrow.prompt( + [ + { + fieldtype: 'Link', + label: 'Virtual Machine Image', + fieldname: 'virtual_machine_image', + options: 'Virtual Machine Image', + reqd: 1, + get_query: function () { + return { + filters: { + platform: 'arm64', + cluster: frm.pg.cluster, + status: 'Available', + series: frm.pg.series, + }, + }; + }, + }, + { + fieldtype: 'Data', + label: 'Machine Type', + fieldname: 'machine_type', + reqd: 1, + }, + ], + ({ virtual_machine_image, machine_type }) => { + frm + .call(method, { + virtual_machine_image, + machine_type, + }) + .then((r) => frm.refresh()); + }, + __(label), + ); + }, + __('Actions'), + ); + } + }); + if (frm.pg.status == 'Running') { + frm.add_custom_button( + 'Attach New Volume', + () => { + jingrow.prompt( + [ + { + fieldtype: 'Int', + label: 'Size', + fieldname: 'size', + reqd: 1, + default: 10, + }, + ], + ({ size }) => { + frm + .call('attach_new_volume', { + 
size, + }) + .then((r) => frm.refresh()); + }, + __('Attach New Volume'), + ); + }, + __('Actions'), + ); + } + if (frm.pg.instance_id) { + if (frm.pg.cloud_provider === 'AWS EC2') { + frm.add_web_link( + `https://${frm.pg.region}.console.aws.amazon.com/ec2/v2/home?region=${frm.pg.region}#InstanceDetails:instanceId=${frm.pg.instance_id}`, + __('Visit AWS Dashboard'), + ); + } else if (frm.pg.cloud_provider === 'OCI') { + frm.add_web_link( + `https://cloud.oracle.com/compute/instances/${frm.pg.instance_id}?region=${frm.pg.region}`, + __('Visit OCI Dashboard'), + ); + } + } + }, +}); + +jingrow.ui.form.on('Virtual Machine Volume', { + detach(frm, cdt, cdn) { + let row = frm.selected_pg; + jingrow.confirm( + `Are you sure you want to detach volume ${row.volume_id}?`, + () => + frm + .call('detach', { volume_id: row.volume_id }) + .then((r) => frm.refresh()), + ); + }, + delete_volume(frm, cdt, cdn) { + let row = frm.selected_pg; + jingrow.confirm( + `Are you sure you want to delete volume ${row.volume_id}?`, + () => + frm + .call('delete_volume', { volume_id: row.volume_id }) + .then((r) => frm.refresh()), + ); + }, + increase_disk_size(frm, cdt, cdn) { + let row = frm.selected_pg; + jingrow.prompt( + { + fieldtype: 'Int', + label: 'Increment (GB)', + fieldname: 'increment', + reqd: 1, + }, + ({ increment }) => { + frm + .call('increase_disk_size', { + volume_id: row.volume_id, + increment, + }) + .then((r) => frm.refresh()); + }, + __('Increase Disk Size'), + ); + }, + update_ebs_performance(frm, cdt, cdn) { + let row = frm.selected_pg; + jingrow.prompt( + [ + { + fieldtype: 'Int', + label: 'IOPS', + fieldname: 'iops', + reqd: 1, + default: row.iops, + }, + { + fieldtype: 'Int', + label: 'Throughput (MB/s)', + fieldname: 'throughput', + reqd: 1, + default: row.throughput, + }, + ], + ({ iops, throughput }) => { + frm + .call('update_ebs_performance', { + volume_id: row.volume_id, + iops, + throughput, + }) + .then((r) => frm.refresh()); + }, + __('Update EBS Performance'), + ); + }, +}); diff --git a/jcloud/jcloud/pagetype/virtual_machine/virtual_machine.json b/jcloud/jcloud/pagetype/virtual_machine/virtual_machine.json new file mode 100644 index 0000000..cb5765b --- /dev/null +++ b/jcloud/jcloud/pagetype/virtual_machine/virtual_machine.json @@ -0,0 +1,387 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2021-09-06 18:18:49.887153", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "cluster", + "status", + "team", + "column_break_3", + "cloud_provider", + "region", + "availability_zone", + "naming_section", + "domain", + "series", + "column_break_32", + "index", + "section_break_5", + "machine_type", + "instance_id", + "platform", + "column_break_hgcr", + "disk_size", + "root_disk_size", + "vcpu", + "ram", + "column_break_8", + "virtual_machine_image", + "machine_image", + "ssh_key", + "networking_section", + "subnet_id", + "private_ip_address", + "public_ip_address", + "column_break_15", + "subnet_cidr_block", + "public_dns_name", + "private_dns_name", + "security_section", + "security_group_id", + "column_break_18", + "termination_protection", + "volumes_section", + "has_data_volume", + "volumes", + "temporary_volumes" + ], + "fields": [ + { + "fetch_from": "cluster.cloud_provider", + "fieldname": "cloud_provider", + "fieldtype": "Select", + "in_list_view": 1, + "label": "Cloud Provider", + "options": "\nAWS EC2\nOCI\nHetzner", + "read_only": 1, + "reqd": 1 + }, + { + "fetch_from": "cluster.region", + "fieldname": "region", + "fieldtype": 
"Link", + "label": "Region", + "options": "Cloud Region", + "read_only": 1, + "reqd": 1 + }, + { + "fetch_from": "cluster.availability_zone", + "fieldname": "availability_zone", + "fieldtype": "Data", + "label": "Availability Zone", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "cluster", + "fieldtype": "Link", + "in_standard_filter": 1, + "label": "Cluster", + "options": "Cluster", + "reqd": 1, + "set_only_once": 1 + }, + { + "fieldname": "section_break_5", + "fieldtype": "Section Break" + }, + { + "fieldname": "column_break_3", + "fieldtype": "Column Break" + }, + { + "fieldname": "column_break_8", + "fieldtype": "Column Break" + }, + { + "fetch_from": "virtual_machine_image.image_id", + "fetch_if_empty": 1, + "fieldname": "machine_image", + "fieldtype": "Data", + "label": "Machine Image" + }, + { + "fieldname": "machine_type", + "fieldtype": "Data", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Machine Type", + "reqd": 1 + }, + { + "fieldname": "networking_section", + "fieldtype": "Section Break", + "label": "Networking" + }, + { + "fetch_from": "cluster.subnet_cidr_block", + "fieldname": "subnet_cidr_block", + "fieldtype": "Data", + "label": "Subnet CIDR Block", + "read_only": 1 + }, + { + "fieldname": "private_ip_address", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Private IP Address" + }, + { + "fieldname": "security_section", + "fieldtype": "Section Break", + "label": "Security" + }, + { + "fetch_from": "cluster.ssh_key", + "fieldname": "ssh_key", + "fieldtype": "Link", + "label": "SSH Key", + "options": "SSH Key", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "column_break_18", + "fieldtype": "Column Break" + }, + { + "fieldname": "public_ip_address", + "fieldtype": "Data", + "label": "Public IP Address", + "read_only": 1 + }, + { + "fieldname": "column_break_15", + "fieldtype": "Column Break" + }, + { + "fieldname": "public_dns_name", + "fieldtype": "Data", + "label": "Public DNS Name", + "read_only": 1 + }, + { + "fieldname": "private_dns_name", + "fieldtype": "Data", + "label": "Private DNS Name", + "read_only": 1 + }, + { + "default": "Draft", + "fieldname": "status", + "fieldtype": "Select", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Status", + "options": "Draft\nPending\nRunning\nStopped\nTerminated", + "read_only": 1, + "search_index": 1 + }, + { + "default": "8", + "fieldname": "disk_size", + "fieldtype": "Int", + "label": "Disk Size", + "reqd": 1 + }, + { + "fieldname": "volumes_section", + "fieldtype": "Section Break", + "label": "Volumes" + }, + { + "fieldname": "volumes", + "fieldtype": "Table", + "label": "Volumes", + "options": "Virtual Machine Volume", + "read_only_depends_on": "eval: pg.volumes.length > 0" + }, + { + "fieldname": "virtual_machine_image", + "fieldtype": "Link", + "label": "Virtual Machine Image", + "link_filters": "[[\"Virtual Machine Image\",\"status\",\"=\",\"Available\"]]", + "options": "Virtual Machine Image", + "read_only_depends_on": "eval: pg.virtual_machine_image" + }, + { + "default": "0", + "fieldname": "termination_protection", + "fieldtype": "Check", + "label": "Termination Protection", + "read_only": 1 + }, + { + "fieldname": "team", + "fieldtype": "Link", + "label": "Team", + "options": "Team" + }, + { + "collapsible": 1, + "fieldname": "naming_section", + "fieldtype": "Section Break", + "label": "Naming" + }, + { + "fieldname": "index", + "fieldtype": "Int", + "label": "Index", + "read_only": 1 + }, + { + "fieldname": "column_break_32", + "fieldtype": "Column Break" + }, + { + 
"fieldname": "series", + "fieldtype": "Select", + "label": "Series", + "options": "n\nf\nm\nc\np\ne\nr", + "reqd": 1 + }, + { + "fieldname": "domain", + "fieldtype": "Link", + "label": "Domain", + "options": "Root Domain", + "reqd": 1 + }, + { + "fieldname": "column_break_hgcr", + "fieldtype": "Column Break" + }, + { + "fieldname": "ram", + "fieldtype": "Int", + "label": "RAM (MB)", + "read_only": 1 + }, + { + "fieldname": "vcpu", + "fieldtype": "Int", + "label": "vCPU", + "read_only": 1 + }, + { + "fetch_from": "cluster.subnet_id", + "fieldname": "subnet_id", + "fieldtype": "Data", + "label": "Subnet ID", + "read_only": 1 + }, + { + "fetch_from": "cluster.security_group_id", + "fieldname": "security_group_id", + "fieldtype": "Data", + "label": "Security Group ID", + "read_only": 1 + }, + { + "fieldname": "instance_id", + "fieldtype": "Data", + "in_standard_filter": 1, + "label": "Instance ID", + "read_only": 1 + }, + { + "fieldname": "platform", + "fieldtype": "Select", + "label": "Platform", + "options": "x86_64\narm64", + "reqd": 1 + }, + { + "default": "8", + "fieldname": "root_disk_size", + "fieldtype": "Int", + "label": "Root Disk Size", + "reqd": 1 + }, + { + "default": "0", + "fieldname": "has_data_volume", + "fieldtype": "Check", + "label": "Has Data Volume" + }, + { + "description": "Data volumes that have been temporarily attached for tasks such as Physical Backup Restoration.", + "fieldname": "temporary_volumes", + "fieldtype": "Table", + "label": "Temporary Volumes", + "options": "Virtual Machine Temporary Volume" + } + ], + "index_web_pages_for_search": 1, + "links": [ + { + "group": "Servers", + "link_pagetype": "Server", + "link_fieldname": "virtual_machine" + }, + { + "group": "Servers", + "link_pagetype": "Database Server", + "link_fieldname": "virtual_machine" + }, + { + "group": "Servers", + "link_pagetype": "Proxy Server", + "link_fieldname": "virtual_machine" + }, + { + "group": "Snapshots", + "link_pagetype": "Virtual Disk Snapshot", + "link_fieldname": "virtual_machine" + }, + { + "group": "Snapshots", + "link_pagetype": "Virtual Machine Image", + "link_fieldname": "virtual_machine" + }, + { + "group": "Migration", + "link_pagetype": "Virtual Machine Migration", + "link_fieldname": "virtual_machine" + } + ], + "modified": "2025-01-15 11:53:47.156275", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Virtual Machine", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + }, + { + "create": 1, + "read": 1, + "role": "Jcloud Admin", + "write": 1 + }, + { + "create": 1, + "read": 1, + "role": "Jcloud Member", + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/virtual_machine/virtual_machine.py b/jcloud/jcloud/pagetype/virtual_machine/virtual_machine.py new file mode 100644 index 0000000..fcae975 --- /dev/null +++ b/jcloud/jcloud/pagetype/virtual_machine/virtual_machine.py @@ -0,0 +1,1540 @@ +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt +from __future__ import annotations + +import base64 +import ipaddress +import time + +import boto3 +import botocore +import jingrow +import rq +from jingrow.core.utils import find +from jingrow.desk.utils import slug +from jingrow.model.document import Document +from jingrow.model.naming import make_autoname 
+from hcloud import APIException, Client +from hcloud.images import Image +from hcloud.servers.domain import ServerCreatePublicNetwork +from oci.core import BlockstorageClient, ComputeClient, VirtualNetworkClient +from oci.core.models import ( + CreateBootVolumeBackupDetails, + CreateVnicDetails, + CreateVolumeBackupDetails, + InstanceOptions, + InstanceSourceViaImageDetails, + LaunchInstanceDetails, + LaunchInstancePlatformConfig, + LaunchInstanceShapeConfigDetails, + UpdateBootVolumeDetails, + UpdateInstanceDetails, + UpdateInstanceShapeConfigDetails, + UpdateVolumeDetails, +) +from oci.exceptions import TransientServiceError + +from jcloud.overrides import get_permission_query_conditions_for_pagetype +from jcloud.utils import log_error +from jcloud.utils.jobs import has_job_timeout_exceeded + +server_doctypes = [ + "Server", + "Database Server", + "Proxy Server", + "Monitor Server", + "Log Server", +] + + +class VirtualMachine(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + from jcloud.jcloud.pagetype.virtual_machine_temporary_volume.virtual_machine_temporary_volume import ( + VirtualMachineTemporaryVolume, + ) + from jcloud.jcloud.pagetype.virtual_machine_volume.virtual_machine_volume import VirtualMachineVolume + + availability_zone: DF.Data + cloud_provider: DF.Literal["", "AWS EC2", "OCI", "Hetzner"] + cluster: DF.Link + disk_size: DF.Int + domain: DF.Link + has_data_volume: DF.Check + index: DF.Int + instance_id: DF.Data | None + machine_image: DF.Data | None + machine_type: DF.Data + platform: DF.Literal["x86_64", "arm64"] + private_dns_name: DF.Data | None + private_ip_address: DF.Data | None + public_dns_name: DF.Data | None + public_ip_address: DF.Data | None + ram: DF.Int + region: DF.Link + root_disk_size: DF.Int + security_group_id: DF.Data | None + series: DF.Literal["n", "f", "m", "c", "p", "e", "r"] + ssh_key: DF.Link + status: DF.Literal["Draft", "Pending", "Running", "Stopped", "Terminated"] + subnet_cidr_block: DF.Data | None + subnet_id: DF.Data | None + team: DF.Link | None + temporary_volumes: DF.Table[VirtualMachineTemporaryVolume] + termination_protection: DF.Check + vcpu: DF.Int + virtual_machine_image: DF.Link | None + volumes: DF.Table[VirtualMachineVolume] + # end: auto-generated types + + def autoname(self): + series = f"{self.series}-{slug(self.cluster)}.#####" + self.index = int(make_autoname(series)[-5:]) + self.name = f"{self.series}{self.index}-{slug(self.cluster)}.{self.domain}" + + def after_insert(self): + if self.virtual_machine_image: + image = jingrow.get_pg("Virtual Machine Image", self.virtual_machine_image) + if image.has_data_volume: + # We have two separate volumes for root and data + # Copy their sizes correctly + self.disk_size = max(self.disk_size, image.size) + self.root_disk_size = max(self.root_disk_size, image.root_size) + else: + # We have only one volume. 
Both root and data are the same + self.disk_size = max(self.disk_size, image.size) + self.root_disk_size = self.disk_size + self.machine_image = image.image_id + self.has_data_volume = image.has_data_volume + if not self.machine_image: + self.machine_image = self.get_latest_ubuntu_image() + self.save() + + def validate(self): + if not self.private_ip_address: + ip = ipaddress.IPv4Interface(self.subnet_cidr_block).ip + index = self.index + 356 + if self.series == "n": + self.private_ip_address = str(ip + index) + else: + offset = ["f", "m", "c", "p", "e", "r"].index(self.series) + self.private_ip_address = str(ip + 256 * (2 * (index // 256) + offset) + (index % 256)) + + def on_trash(self): + snapshots = jingrow.get_all( + "Virtual Disk Snapshot", + {"virtual_machine": self.name, "status": "Unavailable"}, + pluck="name", + ) + for snapshot in snapshots: + jingrow.delete_pg("Virtual Disk Snapshot", snapshot) + + images = jingrow.get_all( + "Virtual Machine Image", + {"virtual_machine": self.name, "status": "Unavailable"}, + pluck="name", + ) + for image in images: + jingrow.delete_pg("Virtual Machine Image", image) + + def on_update(self): + if self.has_value_changed("has_data_volume"): + server = self.get_server() + if server: + server.has_data_volume = self.has_data_volume + server.save() + + @jingrow.whitelist() + def provision(self): + if self.cloud_provider == "AWS EC2": + return self._provision_aws() + if self.cloud_provider == "OCI": + return self._provision_oci() + if self.cloud_provider == "Hetzner": + return self._provision_hetzner() + return None + + def _provision_hetzner(self): + cluster = jingrow.get_pg("Cluster", self.cluster) + server_type = self.client().server_types.get_by_name(self.machine_type) + location = self.client().locations.get_by_name(cluster.region) + network = self.client().networks.get_by_id(cluster.vpc_id) + public_net = ServerCreatePublicNetwork(enable_ipv4=True, enable_ipv6=False) + ssh_key_name = self.ssh_key + ssh_key = self.client().ssh_keys.get_by_name(ssh_key_name) + server_response = self.client().servers.create( + name=f"{self.name}", + server_type=server_type, + image=Image(name="ubuntu-22.04"), + networks=[network], + location=location, + public_net=public_net, + ssh_keys=[ssh_key], + ) + server = server_response.server + # We assing only one private IP, so should be fine + self.private_ip_address = server.private_net[0].ip + + self.public_ip_address = server.public_net.ipv4.ip + + self.instance_id = server.id + + self.status = self.get_hetzner_status_map()[server.status] + + self.save() + + def _provision_aws(self): + additional_volumes = [] + if self.virtual_machine_image: + image = jingrow.get_pg("Virtual Machine Image", self.virtual_machine_image) + if image.has_data_volume: + volume = image.get_data_volume() + additional_volumes.append( + { + "DeviceName": volume.device, + "Ebs": { + "DeleteOnTermination": True, + "VolumeSize": max(self.disk_size, volume.size), + "VolumeType": volume.volume_type, + }, + } + ) + + for index, volume in enumerate(self.volumes, start=len(additional_volumes)): + device_name_index = chr(ord("f") + index) + volume_options = { + "DeviceName": f"/dev/sd{device_name_index}", + "Ebs": { + "DeleteOnTermination": True, + "VolumeSize": volume.size, + "VolumeType": volume.volume_type, + }, + } + if volume.iops: + volume_options["Ebs"]["Iops"] = volume.iops + if volume.throughput: + volume_options["Ebs"]["Throughput"] = volume.throughput + additional_volumes.append(volume_options) + + options = { + "BlockDeviceMappings": [ + 
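+            # The first mapping below is the root volume on /dev/sda1; the image data
+            # volume (if any) and the user-defined volumes collected above follow it
+            # via additional_volumes.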
*[ + { + "DeviceName": "/dev/sda1", + "Ebs": { + "DeleteOnTermination": True, + "VolumeSize": self.root_disk_size, # This in GB. Fucking AWS! + "VolumeType": "gp3", + }, + } + ], + *additional_volumes, + ], + "ImageId": self.machine_image, + "InstanceType": self.machine_type, + "KeyName": self.ssh_key, + "MaxCount": 1, + "MinCount": 1, + "Monitoring": {"Enabled": False}, + "Placement": { + "AvailabilityZone": self.availability_zone, + "Tenancy": "default", + }, + "NetworkInterfaces": [ + { + "AssociatePublicIpAddress": True, + "DeleteOnTermination": True, + "DeviceIndex": 0, + "PrivateIpAddress": self.private_ip_address, + "Groups": self.get_security_groups(), + "SubnetId": self.subnet_id, + }, + ], + "DisableApiTermination": True, + "InstanceInitiatedShutdownBehavior": "stop", + "TagSpecifications": [ + { + "ResourceType": "instance", + "Tags": [{"Key": "Name", "Value": f"Jingrow - {self.name}"}], + }, + ], + "UserData": self.get_cloud_init() if self.virtual_machine_image else "", + } + if self.machine_type.startswith("t"): + options["CreditSpecification"] = {"CpuCredits": "unlimited" if self.series == "n" else "standard"} + response = self.client().run_instances(**options) + + self.instance_id = response["Instances"][0]["InstanceId"] + self.status = self.get_aws_status_map()[response["Instances"][0]["State"]["Name"]] + self.save() + + def _provision_oci(self): + cluster = jingrow.get_pg("Cluster", self.cluster) + # OCI doesn't have machine types. So let's make up our own. + # nxm = n vcpus and m GB ram + vcpu, ram_in_gbs = map(int, self.machine_type.split("x")) + instance = ( + self.client() + .launch_instance( + LaunchInstanceDetails( + compartment_id=cluster.oci_tenancy, + availability_domain=self.availability_zone, + display_name=self.name, + create_vnic_details=CreateVnicDetails( + private_ip=self.private_ip_address, + assign_private_dns_record=True, + nsg_ids=self.get_security_groups(), + ), + subnet_id=self.subnet_id, + instance_options=InstanceOptions(are_legacy_imds_endpoints_disabled=True), + source_details=InstanceSourceViaImageDetails( + image_id=self.machine_image, + boot_volume_size_in_gbs=max(self.root_disk_size, 50), + boot_volume_vpus_per_gb=30, + ), + shape="VM.Standard.E4.Flex", + shape_config=LaunchInstanceShapeConfigDetails( + ocpus=vcpu // 2, vcpus=vcpu, memory_in_gbs=ram_in_gbs + ), + platform_config=LaunchInstancePlatformConfig( + type="AMD_VM", + ), + is_pv_encryption_in_transit_enabled=True, + metadata={ + "ssh_authorized_keys": jingrow.db.get_value("SSH Key", self.ssh_key, "public_key"), + "user_data": ( + base64.b64encode(self.get_cloud_init().encode()).decode() + if self.virtual_machine_image + else "" + ), + }, + ) + ) + .data + ) + self.instance_id = instance.id + self.status = self.get_oci_status_map()[instance.lifecycle_state] + self.save() + + def get_cloud_init(self): + server = self.get_server() + if not server: + return "" + log_server, kibana_password = server.get_log_server() + cloud_init_template = "jcloud/jcloud/pagetype/virtual_machine/cloud-init.yml.jinja2" + context = { + "server": server, + "machine": self.name, + "ssh_key": jingrow.db.get_value("SSH Key", self.ssh_key, "public_key"), + "agent_password": server.get_password("agent_password"), + "monitoring_password": server.get_monitoring_password(), + "statsd_exporter_service": jingrow.render_template( + "jcloud/playbooks/roles/statsd_exporter/templates/statsd_exporter.service", + {"private_ip": self.private_ip_address}, + is_path=True, + ), + "filebeat_config": jingrow.render_template( + 
"jcloud/playbooks/roles/filebeat/templates/filebeat.yml", + { + "server_type": server.pagetype, + "server": self.name, + "log_server": log_server, + "kibana_password": kibana_password, + }, + is_path=True, + ), + } + if server.pagetype == "Database Server": + mariadb_context = { + "server_id": server.server_id, + "private_ip": self.private_ip_address, + "ansible_memtotal_mb": jingrow.db.get_value("Server Plan", server.plan, "memory") or 1024, + "mariadb_root_password": server.get_password("mariadb_root_password"), + } + + context.update( + { + "log_requests": True, + "mariadb_config": jingrow.render_template( + "jcloud/playbooks/roles/mariadb/templates/mariadb.cnf", + mariadb_context, + is_path=True, + ), + "mariadb_systemd_config": jingrow.render_template( + "jcloud/playbooks/roles/mariadb_systemd_limits/templates/memory.conf", + mariadb_context, + is_path=True, + ), + "mariadb_root_config": jingrow.render_template( + "jcloud/playbooks/roles/mariadb/templates/my.cnf", + mariadb_context, + is_path=True, + ), + "mariadb_exporter_config": jingrow.render_template( + "jcloud/playbooks/roles/mysqld_exporter/templates/mysqld_exporter.service", + mariadb_context, + is_path=True, + ), + "deadlock_logger_config": jingrow.render_template( + "jcloud/playbooks/roles/deadlock_logger/templates/deadlock_logger.service", + mariadb_context, + is_path=True, + ), + } + ) + + return jingrow.render_template(cloud_init_template, context, is_path=True) + + def get_server(self): + for pagetype in server_doctypes: + server = jingrow.db.get_value(pagetype, {"virtual_machine": self.name}, "name") + if server: + return jingrow.get_pg(pagetype, server) + return None + + def get_hetzner_status_map(self): + # Hetzner has not status for Terminating or Terminated. Just returns a server not found. 
+ return { + "running": "Running", + "initializing": "Pending", + "starting": "Pending", + "stopping": "Pending", + "off": "Stopped", + "deleting": "Pending", + "migrating": "Pending", + "rebuilding": "Pending", + "unknown": "Pending", + } + + def get_aws_status_map(self): + return { + "pending": "Pending", + "running": "Running", + "shutting-down": "Pending", + "stopping": "Pending", + "stopped": "Stopped", + "terminated": "Terminated", + } + + def get_oci_status_map(self): + return { + "MOVING": "Pending", + "PROVISIONING": "Pending", + "RUNNING": "Running", + "STARTING": "Pending", + "STOPPING": "Pending", + "STOPPED": "Stopped", + "CREATING_IMAGE": "Pending", + "TERMINATING": "Pending", + "TERMINATED": "Terminated", + } + + def get_latest_ubuntu_image(self): + if self.cloud_provider == "AWS EC2": + architecture = {"x86_64": "amd64", "arm64": "arm64"}[self.platform] + return self.client("ssm").get_parameter( + Name=f"/aws/service/canonical/ubuntu/server/20.04/stable/current/{architecture}/hvm/ebs-gp2/ami-id" + )["Parameter"]["Value"] + if self.cloud_provider == "OCI": + cluster = jingrow.get_pg("Cluster", self.cluster) + client = ComputeClient(cluster.get_oci_config()) + images = client.list_images( + compartment_id=cluster.oci_tenancy, + operating_system="Canonical Ubuntu", + operating_system_version="20.04", + shape="VM.Standard3.Flex", + lifecycle_state="AVAILABLE", + ).data + return images[0].id + return None + + @jingrow.whitelist() + def reboot(self): + if self.cloud_provider == "AWS EC2": + self.client().reboot_instances(InstanceIds=[self.instance_id]) + elif self.cloud_provider == "OCI": + self.client().instance_action(instance_id=self.instance_id, action="RESET") + self.sync() + + @jingrow.whitelist() + def increase_disk_size(self, volume_id=None, increment=50): + if not increment: + return + if not volume_id: + volume_id = self.volumes[0].volume_id + + volume = find(self.volumes, lambda v: v.volume_id == volume_id) + volume.size += int(increment) + self.disk_size = self.get_data_volume().size + self.root_disk_size = self.get_root_volume().size + volume.last_updated_at = jingrow.utils.now_datetime() + if self.cloud_provider == "AWS EC2": + self.client().modify_volume(VolumeId=volume.volume_id, Size=volume.size) + elif self.cloud_provider == "OCI": + if ".bootvolume." 
in volume.volume_id: + self.client(BlockstorageClient).update_boot_volume( + boot_volume_id=volume.volume_id, + update_boot_volume_details=UpdateBootVolumeDetails(size_in_gbs=volume.size), + ) + else: + self.client(BlockstorageClient).update_volume( + volume_id=volume.volume_id, + update_volume_details=UpdateVolumeDetails(size_in_gbs=volume.size), + ) + self.save() + + def get_volumes(self): + if self.cloud_provider == "AWS EC2": + response = self.client().describe_volumes( + Filters=[{"Name": "attachment.instance-id", "Values": [self.instance_id]}] + ) + return response["Volumes"] + if self.cloud_provider == "OCI": + cluster = jingrow.get_pg("Cluster", self.cluster) + return ( + self.client() + .list_boot_volume_attachments( + compartment_id=cluster.oci_tenancy, + availability_domain=self.availability_zone, + instance_id=self.instance_id, + ) + .data + + self.client() + .list_volume_attachments( + compartment_id=cluster.oci_tenancy, + instance_id=self.instance_id, + ) + .data + ) + return None + + def convert_to_gp3(self): + for volume in self.volumes: + if volume.volume_type != "gp3": + volume.volume_type = "gp3" + volume.iops = max(3000, volume.iops) + volume.throughput = 250 if volume.size > 340 else 125 + self.client().modify_volume( + VolumeId=volume.volume_id, + VolumeType=volume.volume_type, + Iops=volume.iops, + Throughput=volume.throughput, + ) + self.save() + + @jingrow.whitelist() + def sync(self, *args, **kwargs): + try: + jingrow.db.get_value(self.pagetype, self.name, "status", for_update=True) + except jingrow.QueryTimeoutError: # lock wait timeout + return None + if self.cloud_provider == "AWS EC2": + return self._sync_aws(*args, **kwargs) + if self.cloud_provider == "OCI": + return self._sync_oci(*args, **kwargs) + if self.cloud_provider == "Hetzner": + return self._sync_hetzner(*args, **kwargs) + return None + + def _sync_hetzner(self, server_instance=None): + is_deleted = False + if not server_instance: + try: + server_instance = self.client().servers.get_by_id(self.instance_id) + except APIException: + is_deleted = True + if server_instance and not is_deleted: + # cluster: Document = jingrow.get_pg("Cluster", self.cluster) + self.status = self.get_hetzner_status_map()[server_instance.status] + self.machine_type = server_instance.server_type.name + self.private_ip_address = server_instance.private_net[0].ip + self.public_ip_address = server_instance.public_net.ipv4.ip + else: + self.status = "Terminated" + self.save() + + def _sync_oci(self, instance=None): # noqa: C901 + if not instance: + instance = self.client().get_instance(instance_id=self.instance_id).data + if instance and instance.lifecycle_state != "TERMINATED": + cluster = jingrow.get_pg("Cluster", self.cluster) + + self.status = self.get_oci_status_map()[instance.lifecycle_state] + + self.ram = instance.shape_config.memory_in_gbs * 1024 + self.vcpu = instance.shape_config.vcpus + self.machine_type = f"{int(self.vcpu)}x{int(instance.shape_config.memory_in_gbs)}" + + for vnic_attachment in ( + self.client() + .list_vnic_attachments(compartment_id=cluster.oci_tenancy, instance_id=self.instance_id) + .data + ): + try: + vnic = self.client(VirtualNetworkClient).get_vnic(vnic_id=vnic_attachment.vnic_id).data + self.public_ip_address = vnic.public_ip + except Exception: + log_error( + title="OCI VNIC Fetch Error", + virtual_machine=self.name, + vnic_attachment=vnic_attachment, + ) + + available_volumes = [] + for volume in self.get_volumes(): + try: + if hasattr(volume, "volume_id"): + volume = 
self.client(BlockstorageClient).get_volume(volume_id=volume.volume_id).data + else: + volume = ( + self.client(BlockstorageClient) + .get_boot_volume(boot_volume_id=volume.boot_volume_id) + .data + ) + existing_volume = find(self.volumes, lambda v: v.volume_id == volume.id) + if existing_volume: + row = existing_volume + else: + row = jingrow._dict() + row.volume_id = volume.id + row.size = volume.size_in_gbs + + vpus = volume.vpus_per_gb + # Reference: https://docs.oracle.com/en-us/iaas/Content/Block/Concepts/blockvolumeperformance.htm + row.iops = min(1.5 * vpus + 45, 2500 * vpus) * row.size + row.throughput = min(12 * vpus + 360, 20 * vpus + 280) * row.size // 1000 + + if row.volume_id: + available_volumes.append(row.volume_id) + + if not existing_volume and row.volume_id: + self.append("volumes", row) + except Exception: + log_error( + title="OCI Volume Fetch Error", + virtual_machine=self.name, + volume=volume, + ) + if self.volumes: + self.disk_size = self.get_data_volume().size + self.root_disk_size = self.get_root_volume().size + + for volume in list(self.volumes): + if volume.volume_id not in available_volumes: + self.remove(volume) + + else: + self.status = "Terminated" + self.save() + self.update_servers() + + def _sync_aws(self, response=None): # noqa: C901 + if not response: + try: + response = self.client().describe_instances(InstanceIds=[self.instance_id]) + except botocore.exceptions.ClientError as e: + if e.response.get("Error", {}).get("Code") == "InvalidInstanceID.NotFound": + response = {"Reservations": []} + if response["Reservations"]: + instance = response["Reservations"][0]["Instances"][0] + + self.status = self.get_aws_status_map()[instance["State"]["Name"]] + self.machine_type = instance.get("InstanceType") + + self.public_ip_address = instance.get("PublicIpAddress") + self.private_ip_address = instance.get("PrivateIpAddress") + + self.public_dns_name = instance.get("PublicDnsName") + self.private_dns_name = instance.get("PrivateDnsName") + self.platform = instance.get("Architecture", "x86_64") + + attached_volumes = [] + attached_devices = [] + for volume_index, volume in enumerate(self.get_volumes(), start=1): # idx starts from 1 + existing_volume = find(self.volumes, lambda v: v.volume_id == volume["VolumeId"]) + if existing_volume: + row = existing_volume + else: + row = jingrow._dict() + row.volume_id = volume["VolumeId"] + attached_volumes.append(row.volume_id) + row.volume_type = volume["VolumeType"] + row.size = volume["Size"] + row.iops = volume["Iops"] + row.device = volume["Attachments"][0]["Device"] + attached_devices.append(row.device) + + if "Throughput" in volume: + row.throughput = volume["Throughput"] + + row.idx = volume_index + if not existing_volume: + self.append("volumes", row) + + self.disk_size = self.get_data_volume().size + self.root_disk_size = self.get_root_volume().size + + for volume in list(self.volumes): + if volume.volume_id not in attached_volumes: + self.remove(volume) + + for volume in list(self.temporary_volumes): + if volume.device not in attached_devices: + self.remove(volume) + + self.termination_protection = self.client().describe_instance_attribute( + InstanceId=self.instance_id, Attribute="disableApiTermination" + )["DisableApiTermination"]["Value"] + + instance_type_response = self.client().describe_instance_types(InstanceTypes=[self.machine_type]) + self.ram = instance_type_response["InstanceTypes"][0]["MemoryInfo"]["SizeInMiB"] + self.vcpu = instance_type_response["InstanceTypes"][0]["VCpuInfo"]["DefaultVCpus"] + else: 
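+            # No reservations were returned (e.g. InvalidInstanceID.NotFound above),
+            # so the instance no longer exists and the machine is marked Terminated.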
+ self.status = "Terminated" + self.save() + self.update_servers() + + def get_root_volume(self): + if len(self.volumes) == 1: + return self.volumes[0] + + ROOT_VOLUME_FILTERS = { + "AWS EC2": lambda v: v.device == "/dev/sda1", + "OCI": lambda v: ".bootvolume." in v.volume_id, + } + root_volume_filter = ROOT_VOLUME_FILTERS.get(self.cloud_provider) + volume = find(self.volumes, root_volume_filter) + if volume: # Un-provisioned machines might not have any volumes + return volume + return jingrow._dict({"size": 0}) + + def get_data_volume(self): + if not self.has_data_volume: + return self.get_root_volume() + + if len(self.volumes) == 1: + return self.volumes[0] + + temporary_volume_devices = [x.device for x in self.temporary_volumes] + + DATA_VOLUME_FILTERS = { + "AWS EC2": lambda v: v.device != "/dev/sda1" and v.device not in temporary_volume_devices, + "OCI": lambda v: ".bootvolume." not in v.volume_id and v.device not in temporary_volume_devices, + } + data_volume_filter = DATA_VOLUME_FILTERS.get(self.cloud_provider) + volume = find(self.volumes, data_volume_filter) + if volume: # Un-provisioned machines might not have any volumes + return volume + return jingrow._dict({"size": 0}) + + def update_servers(self): + status_map = { + "Pending": "Pending", + "Running": "Active", + "Terminated": "Archived", + "Stopped": "Pending", + } + for pagetype in server_doctypes: + server = jingrow.get_all(pagetype, {"virtual_machine": self.name}, pluck="name") + if server: + server = server[0] + jingrow.db.set_value(pagetype, server, "ip", self.public_ip_address) + if pagetype in ["Server", "Database Server"]: + jingrow.db.set_value(pagetype, server, "ram", self.ram) + if self.public_ip_address and self.has_value_changed("public_ip_address"): + jingrow.get_pg(pagetype, server).create_dns_record() + jingrow.db.set_value(pagetype, server, "status", status_map[self.status]) + + def update_name_tag(self, name): + if self.cloud_provider == "AWS EC2": + self.client().create_tags( + Resources=[self.instance_id], + Tags=[ + {"Key": "Name", "Value": name}, + ], + ) + + @jingrow.whitelist() + def create_image(self, public=True): + image = jingrow.get_pg( + { + "pagetype": "Virtual Machine Image", + "virtual_machine": self.name, + "public": public, + "has_data_volume": self.has_data_volume, + } + ).insert() + return image.name + + @jingrow.whitelist() + def create_snapshots(self, exclude_boot_volume=False, created_for_site_update=False): + """ + exclude_boot_volume is applicable only for Servers with data volume + """ + if not self.has_data_volume: + exclude_boot_volume = False + + # Store the newly created snapshots reference in the flags + # So that, we can get the correct reference of snapshots created in current session + self.flags.created_snapshots = [] + if self.cloud_provider == "AWS EC2": + self._create_snapshots_aws(exclude_boot_volume, created_for_site_update) + elif self.cloud_provider == "OCI": + self._create_snapshots_oci(exclude_boot_volume) + + def _create_snapshots_aws(self, exclude_boot_volume: bool, created_for_site_update: bool): + response = self.client().create_snapshots( + InstanceSpecification={"InstanceId": self.instance_id, "ExcludeBootVolume": exclude_boot_volume}, + Description=f"Jingrow - {self.name} - {jingrow.utils.now()}", + TagSpecifications=[ + { + "ResourceType": "snapshot", + "Tags": [{"Key": "Name", "Value": f"Jingrow - {self.name} - {jingrow.utils.now()}"}], + }, + ], + ) + for snapshot in response.get("Snapshots", []): + try: + pg = jingrow.get_pg( + { + "pagetype": "Virtual 
Disk Snapshot", + "virtual_machine": self.name, + "snapshot_id": snapshot["SnapshotId"], + "created_for_site_update": created_for_site_update, + } + ).insert() + self.flags.created_snapshots.append(pg.name) + except Exception: + log_error(title="Virtual Disk Snapshot Error", virtual_machine=self.name, snapshot=snapshot) + + def _create_snapshots_oci(self, exclude_boot_volume: bool): + for volume in self.volumes: + try: + if ".bootvolume." in volume.volume_id: + if exclude_boot_volume: + continue + snapshot = ( + self.client(BlockstorageClient) + .create_boot_volume_backup( + CreateBootVolumeBackupDetails( + boot_volume_id=volume.volume_id, + type="INCREMENTAL", + display_name=f"Jingrow - {self.name} - {volume.name} - {jingrow.utils.now()}", + ) + ) + .data + ) + else: + snapshot = ( + self.client(BlockstorageClient) + .create_volume_backup( + CreateVolumeBackupDetails( + volume_id=volume.volume_id, + type="INCREMENTAL", + display_name=f"Jingrow - {self.name} - {volume.name} - {jingrow.utils.now()}", + ) + ) + .data + ) + pg = jingrow.get_pg( + { + "pagetype": "Virtual Disk Snapshot", + "virtual_machine": self.name, + "snapshot_id": snapshot.id, + } + ).insert() + self.flags.created_snapshots.append(pg.name) + except TransientServiceError: + # We've hit OCI rate limit for creating snapshots + # Let's try again later + pass + except Exception: + log_error(title="Virtual Disk Snapshot Error", virtual_machine=self.name, snapshot=snapshot) + + @jingrow.whitelist() + def disable_termination_protection(self): + if self.cloud_provider == "AWS EC2": + self.client().modify_instance_attribute( + InstanceId=self.instance_id, DisableApiTermination={"Value": False} + ) + self.sync() + + @jingrow.whitelist() + def enable_termination_protection(self): + if self.cloud_provider == "AWS EC2": + self.client().modify_instance_attribute( + InstanceId=self.instance_id, DisableApiTermination={"Value": True} + ) + self.sync() + + @jingrow.whitelist() + def start(self): + if self.cloud_provider == "AWS EC2": + self.client().start_instances(InstanceIds=[self.instance_id]) + elif self.cloud_provider == "OCI": + self.client().instance_action(instance_id=self.instance_id, action="START") + self.sync() + + @jingrow.whitelist() + def stop(self, force=False): + if self.cloud_provider == "AWS EC2": + self.client().stop_instances(InstanceIds=[self.instance_id], Force=bool(force)) + elif self.cloud_provider == "OCI": + self.client().instance_action(instance_id=self.instance_id, action="STOP") + self.sync() + + @jingrow.whitelist() + def force_stop(self): + self.stop(force=True) + + @jingrow.whitelist() + def force_terminate(self): + if not jingrow.conf.developer_mode: + return + if self.cloud_provider == "AWS EC2": + self.client().modify_instance_attribute( + InstanceId=self.instance_id, DisableApiTermination={"Value": False} + ) + self.client().terminate_instances(InstanceIds=[self.instance_id]) + + @jingrow.whitelist() + def terminate(self): + if self.cloud_provider == "AWS EC2": + self.client().terminate_instances(InstanceIds=[self.instance_id]) + elif self.cloud_provider == "OCI": + self.client().terminate_instance(instance_id=self.instance_id) + + @jingrow.whitelist() + def resize(self, machine_type): + if self.cloud_provider == "AWS EC2": + self.client().modify_instance_attribute( + InstanceId=self.instance_id, + InstanceType={"Value": machine_type}, + ) + elif self.cloud_provider == "OCI": + vcpu, ram_in_gbs = map(int, machine_type.split("x")) + self.client().update_instance( + self.instance_id, + 
UpdateInstanceDetails( + shape_config=UpdateInstanceShapeConfigDetails( + ocpus=vcpu // 2, vcpus=vcpu, memory_in_gbs=ram_in_gbs + ) + ), + ) + self.machine_type = machine_type + self.save() + + @jingrow.whitelist() + def get_ebs_performance(self): + if self.cloud_provider == "AWS EC2": + volume = self.volumes[0] + return volume.iops, volume.throughput + return None + + @jingrow.whitelist() + def update_ebs_performance(self, volume_id, iops, throughput): + if self.cloud_provider == "AWS EC2": + volume = find(self.volumes, lambda v: v.volume_id == volume_id) + new_iops = int(iops) or volume.iops + new_throughput = int(throughput) or volume.throughput + self.client().modify_volume( + VolumeId=volume.volume_id, + Iops=new_iops, + Throughput=new_throughput, + ) + self.sync() + + @jingrow.whitelist() + def get_oci_volume_performance(self): + if self.cloud_provider == "OCI": + volume = self.volumes[0] + return ((volume.iops / volume.size) - 45) / 1.5 + return None + + @jingrow.whitelist() + def update_oci_volume_performance(self, vpus): + if self.cloud_provider == "OCI": + volume = self.volumes[0] + if ".bootvolume." in volume.volume_id: + self.client(BlockstorageClient).update_boot_volume( + boot_volume_id=volume.volume_id, + update_boot_volume_details=UpdateBootVolumeDetails(vpus_per_gb=int(vpus)), + ) + else: + self.client(BlockstorageClient).update_volume( + volume_id=volume.volume_id, + update_volume_details=UpdateVolumeDetails(vpus_per_gb=int(vpus)), + ) + self.sync() + + def client(self, client_type=None): + cluster = jingrow.get_pg("Cluster", self.cluster) + if self.cloud_provider == "AWS EC2": + return boto3.client( + client_type or "ec2", + region_name=self.region, + aws_access_key_id=cluster.aws_access_key_id, + aws_secret_access_key=cluster.get_password("aws_secret_access_key"), + ) + if self.cloud_provider == "OCI": + return (client_type or ComputeClient)(cluster.get_oci_config()) + if self.cloud_provider == "Hetzner": + settings = jingrow.get_single("Jcloud Settings") + api_token = settings.get_password("hetzner_api_token") + return Client(token=api_token) + + return None + + @jingrow.whitelist() + def create_server(self): + document = { + "pagetype": "Server", + "hostname": f"{self.series}{self.index}-{slug(self.cluster)}", + "domain": self.domain, + "cluster": self.cluster, + "provider": self.cloud_provider, + "virtual_machine": self.name, + "team": self.team, + } + + if self.virtual_machine_image: + document["is_server_prepared"] = True + document["is_server_setup"] = True + document["is_server_renamed"] = True + document["is_upstream_setup"] = True + + return jingrow.get_pg(document).insert() + + @jingrow.whitelist() + def create_database_server(self): + document = { + "pagetype": "Database Server", + "hostname": f"{self.series}{self.index}-{slug(self.cluster)}", + "domain": self.domain, + "cluster": self.cluster, + "provider": self.cloud_provider, + "virtual_machine": self.name, + "server_id": self.index, + "is_primary": True, + "team": self.team, + } + + if self.virtual_machine_image: + document["is_server_prepared"] = True + document["is_server_setup"] = True + document["is_server_renamed"] = True + document["mariadb_root_password"] = jingrow.get_pg( + "Virtual Machine Image", self.virtual_machine_image + ).get_password("mariadb_root_password") + + return jingrow.get_pg(document).insert() + + @jingrow.whitelist() + def create_proxy_server(self): + document = { + "pagetype": "Proxy Server", + "hostname": f"{self.series}{self.index}-{slug(self.cluster)}", + "domain": 
self.domain, + "cluster": self.cluster, + "provider": self.cloud_provider, + "virtual_machine": self.name, + "team": self.team, + } + if self.virtual_machine_image: + document["is_server_setup"] = True + + return jingrow.get_pg(document).insert() + + @jingrow.whitelist() + def create_monitor_server(self): + document = { + "pagetype": "Monitor Server", + "hostname": f"{self.series}{self.index}-{slug(self.cluster)}", + "domain": self.domain, + "cluster": self.cluster, + "provider": self.cloud_provider, + "virtual_machine": self.name, + "team": self.team, + } + if self.virtual_machine_image: + document["is_server_setup"] = True + + return jingrow.get_pg(document).insert() + + @jingrow.whitelist() + def create_log_server(self): + document = { + "pagetype": "Log Server", + "hostname": f"{self.series}{self.index}-{slug(self.cluster)}", + "domain": self.domain, + "cluster": self.cluster, + "provider": self.cloud_provider, + "virtual_machine": self.name, + "team": self.team, + } + if self.virtual_machine_image: + document["is_server_setup"] = True + + return jingrow.get_pg(document).insert() + + @jingrow.whitelist() + def create_registry_server(self): + document = { + "pagetype": "Registry Server", + "hostname": f"{self.series}{self.index}-{slug(self.cluster)}", + "domain": self.domain, + "cluster": self.cluster, + "provider": "AWS EC2", + "virtual_machine": self.name, + "team": self.team, + } + if self.virtual_machine_image: + document["is_server_setup"] = True + + return jingrow.get_pg(document).insert() + + def get_security_groups(self): + groups = [self.security_group_id] + if self.series == "n": + groups.append(jingrow.db.get_value("Cluster", self.cluster, "proxy_security_group_id")) + return groups + + @jingrow.whitelist() + def get_serial_console_credentials(self): + client = self.client("ec2-instance-connect") + client.send_serial_console_ssh_public_key( + InstanceId=self.instance_id, + SSHPublicKey=jingrow.db.get_value("SSH Key", self.ssh_key, "public_key"), + ) + serial_console_endpoint = AWS_SERIAL_CONSOLE_ENDPOINT_MAP[self.region] + username = f"{self.instance_id}.port0" + host = serial_console_endpoint["endpoint"] + return { + "username": username, + "host": host, + "fingerprint": serial_console_endpoint["fingerprint"], + "command": f"ssh {username}@{host}", + } + + @jingrow.whitelist() + def reboot_with_serial_console(self): + if self.cloud_provider == "AWS EC2": + self.get_server().reboot_with_serial_console() + self.sync() + + @classmethod + def bulk_sync_aws(cls): + for cluster in jingrow.get_all( + "Virtual Machine", + ["cluster", "cloud_provider", "max(`index`) as max_index"], + { + "status": ("not in", ("Terminated", "Draft")), + "cloud_provider": "AWS EC2", + }, + group_by="cluster", + ): + CHUNK_SIZE = 25 # Each call will pick up ~50 machines (2 x CHUNK_SIZE) + # Generate closed bounds for 25 indexes at a time + # (1, 25), (26, 50), (51, 75), ... 
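+            # The last chunk may extend past max_index; that is harmless because the
+            # bounds are only used as `index` range filters when picking machines.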
+ # We might have uneven chunks because of missing indexes + chunks = [(ii, ii + CHUNK_SIZE - 1) for ii in range(1, cluster.max_index, CHUNK_SIZE)] + for start, end in chunks: + # Pick a random machine + # TODO: This probably should be a method on the Cluster + machines = cls._get_active_machines_within_chunk_range( + cluster.cloud_provider, cluster.cluster, start, end + ) + if not machines: + # There might not be any running machines in the chunk range + continue + + jingrow.enqueue_pg( + "Virtual Machine", + machines[0].name, + method="bulk_sync_aws_cluster", + start=start, + end=end, + queue="sync", + job_id=f"bulk_sync_aws:{cluster.cluster}:{start}-{end}", + deduplicate=True, + ) + + def bulk_sync_aws_cluster(self, start, end): + client = self.client() + machines = self.__class__._get_active_machines_within_chunk_range( + self.cloud_provider, self.cluster, start, end + ) + instance_ids = [machine.instance_id for machine in machines] + response = client.describe_instances(Filters=[{"Name": "instance-id", "Values": instance_ids}]) + for reservation in response["Reservations"]: + for instance in reservation["Instances"]: + machine: VirtualMachine = jingrow.get_pg( + "Virtual Machine", {"instance_id": instance["InstanceId"]} + ) + try: + machine.sync({"Reservations": [{"Instances": [instance]}]}) + jingrow.db.commit() # release lock + except Exception: + log_error("Virtual Machine Sync Error", virtual_machine=machine.name) + jingrow.db.rollback() + + @classmethod + def _get_active_machines_within_chunk_range(cls, provider, cluster, start, end): + return jingrow.get_all( + "Virtual Machine", + fields=["name", "instance_id"], + filters=[ + ["status", "not in", ("Terminated", "Draft")], + ["cloud_provider", "=", provider], + ["cluster", "=", cluster], + ["instance_id", "is", "set"], + ["index", ">=", start], + ["index", "<=", end], + ], + ) + + @classmethod + def bulk_sync_oci(cls): + for cluster in jingrow.get_all( + "Virtual Machine", + ["cluster", "cloud_provider", "max(`index`) as max_index"], + { + "status": ("not in", ("Terminated", "Draft")), + "cloud_provider": "OCI", + }, + group_by="cluster", + ): + CHUNK_SIZE = 15 # Each call will pick up ~30 machines (2 x CHUNK_SIZE) + # Generate closed bounds for 15 indexes at a time + # (1, 15), (16, 30), (31, 45), ... 
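+            # Unlike the AWS path, bulk_sync_oci_cluster() lists every instance in the
+            # compartment and skips ids outside this chunk, and it stops early once the
+            # rq job timeout has been exceeded.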
+ # We might have uneven chunks because of missing indexes + chunks = [(ii, ii + CHUNK_SIZE - 1) for ii in range(1, cluster.max_index, CHUNK_SIZE)] + for start, end in chunks: + # Pick a random machine + # TODO: This probably should be a method on the Cluster + machines = cls._get_active_machines_within_chunk_range( + cluster.cloud_provider, cluster.cluster, start, end + ) + if not machines: + # There might not be any running machines in the chunk range + continue + + jingrow.enqueue_pg( + "Virtual Machine", + machines[0].name, + method="bulk_sync_oci_cluster", + start=start, + end=end, + queue="sync", + job_id=f"bulk_sync_oci:{cluster.cluster}:{start}-{end}", + deduplicate=True, + ) + + def bulk_sync_oci_cluster(self, start, end): + cluster = jingrow.get_pg("Cluster", self.cluster) + machines = self.__class__._get_active_machines_within_chunk_range( + self.cloud_provider, self.cluster, start, end + ) + instance_ids = set([machine.instance_id for machine in machines]) + response = self.client().list_instances(compartment_id=cluster.oci_tenancy).data + for instance in response: + if instance.id not in instance_ids: + continue + machine: VirtualMachine = jingrow.get_pg("Virtual Machine", {"instance_id": instance.id}) + if has_job_timeout_exceeded(): + return + try: + machine.sync(instance) + jingrow.db.commit() # release lock + except rq.timeouts.JobTimeoutException: + return + except Exception: + log_error("Virtual Machine Sync Error", virtual_machine=machine.name) + jingrow.db.rollback() + + def disable_delete_on_termination_for_all_volumes(self): + attached_volumes = self.client().describe_instance_attribute( + InstanceId=self.instance_id, Attribute="blockDeviceMapping" + ) + + modified_volumes = [] + for volume in attached_volumes["BlockDeviceMappings"]: + volume["Ebs"]["DeleteOnTermination"] = False + volume["Ebs"].pop("AttachTime", None) + volume["Ebs"].pop("Status", None) + modified_volumes.append(volume) + + self.client().modify_instance_attribute( + InstanceId=self.instance_id, BlockDeviceMappings=modified_volumes + ) + + @jingrow.whitelist() + def convert_to_arm(self, virtual_machine_image, machine_type): + return jingrow.new_pg( + "Virtual Machine Migration", + virtual_machine=self.name, + virtual_machine_image=virtual_machine_image, + machine_type=machine_type, + ).insert() + + @jingrow.whitelist() + def attach_new_volume(self, size, iops=None, throughput=None): + if self.cloud_provider != "AWS EC2": + return None + volume_options = { + "AvailabilityZone": self.availability_zone, + "Size": size, + "VolumeType": "gp3", + "TagSpecifications": [ + { + "ResourceType": "volume", + "Tags": [{"Key": "Name", "Value": f"Jingrow - {self.name}"}], + }, + ], + } + if iops: + volume_options["Iops"] = iops + if throughput: + volume_options["Throughput"] = throughput + volume_id = self.client().create_volume(**volume_options)["VolumeId"] + self.wait_for_volume_to_be_available(volume_id) + self.attach_volume(volume_id) + return volume_id + + def wait_for_volume_to_be_available(self, volume_id): + # AWS EC2 specific + while self.get_state_of_volume(volume_id) != "available": + time.sleep(1) + + def get_state_of_volume(self, volume_id): + if self.cloud_provider != "AWS EC2": + raise NotImplementedError + try: + # AWS EC2 specific + # https://docs.aws.amazon.com/ebs/latest/userguide/ebs-describing-volumes.html + return self.client().describe_volumes(VolumeIds=[volume_id])["Volumes"][0]["State"] + except botocore.exceptions.ClientError as e: + if e.response.get("Error", {}).get("Code") == 
"InvalidVolume.NotFound": + return "deleted" + + def get_volume_modifications(self, volume_id): + if self.cloud_provider != "AWS EC2": + raise NotImplementedError + + # AWS EC2 specific https://docs.aws.amazon.com/ebs/latest/userguide/monitoring-volume-modifications.html + + try: + return self.client().describe_volumes_modifications(VolumeIds=[volume_id])[ + "VolumesModifications" + ][0] + except botocore.exceptions.ClientError as e: + if e.response.get("Error", {}).get("Code") == "InvalidVolumeModification.NotFound": + return None + + def attach_volume(self, volume_id) -> str: + if self.cloud_provider != "AWS EC2": + raise NotImplementedError + # Attach a volume to the instance and return the device name + device_name = self.get_next_volume_device_name() + self.client().attach_volume( + Device=device_name, + InstanceId=self.instance_id, + VolumeId=volume_id, + ) + # add the volume to the list of temporary volumes + self.append("temporary_volumes", {"device": device_name}) + self.save() + # sync + self.sync() + return device_name + + def get_next_volume_device_name(self): + # Hold the lock, so that we dont allocate same device name to multiple volumes + jingrow.db.get_value(self.pagetype, self.name, "status", for_update=True) + # First volume starts from /dev/sdf + used_devices = {v.device for v in self.volumes} | {v.device for v in self.temporary_volumes} + for i in range(5, 26): # 'f' to 'z' + device_name = f"/dev/sd{chr(ord('a') + i)}" + if device_name not in used_devices: + return device_name + jingrow.throw("No device name available for new volume") + return None + + @jingrow.whitelist() + def detach(self, volume_id): + volume = find(self.volumes, lambda v: v.volume_id == volume_id) + if not volume: + return False + self.client().detach_volume( + Device=volume.device, InstanceId=self.instance_id, VolumeId=volume.volume_id + ) + self.sync() + return True + + @jingrow.whitelist() + def delete_volume(self, volume_id): + if self.detach(volume_id): + self.wait_for_volume_to_be_available(volume_id) + self.client().delete_volume(VolumeId=volume_id) + self.sync() + + +get_permission_query_conditions = get_permission_query_conditions_for_pagetype("Virtual Machine") + + +@jingrow.whitelist() +def sync_virtual_machines(): + VirtualMachine.bulk_sync_aws() + VirtualMachine.bulk_sync_oci() + + +def snapshot_virtual_machines(): + machines = jingrow.get_all("Virtual Machine", {"status": "Running"}) + for machine in machines: + # Skip if a snapshot has already been created today + if jingrow.get_all( + "Virtual Disk Snapshot", + {"virtual_machine": machine.name, "creation": (">=", jingrow.utils.today())}, + limit=1, + ): + continue + try: + jingrow.get_pg("Virtual Machine", machine.name).create_snapshots() + jingrow.db.commit() + except Exception: + jingrow.db.rollback() + log_error(title="Virtual Machine Snapshot Error", virtual_machine=machine.name) + + +AWS_SERIAL_CONSOLE_ENDPOINT_MAP = { + "us-east-2": { + "endpoint": "serial-console.ec2-instance-connect.us-east-2.aws", + "fingerprint": "SHA256:EhwPkTzRtTY7TRSzz26XbB0/HvV9jRM7mCZN0xw/d/0", + }, + "us-east-1": { + "endpoint": "serial-console.ec2-instance-connect.us-east-1.aws", + "fingerprint": "SHA256:dXwn5ma/xadVMeBZGEru5l2gx+yI5LDiJaLUcz0FMmw", + }, + "us-west-1": { + "endpoint": "serial-console.ec2-instance-connect.us-west-1.aws", + "fingerprint": "SHA256:OHldlcMET8u7QLSX3jmRTRAPFHVtqbyoLZBMUCqiH3Y", + }, + "us-west-2": { + "endpoint": "serial-console.ec2-instance-connect.us-west-2.aws", + "fingerprint": 
"SHA256:EMCIe23TqKaBI6yGHainqZcMwqNkDhhAVHa1O2JxVUc", + }, + "af-south-1": { + "endpoint": "ec2-serial-console.af-south-1.api.aws", + "fingerprint": "SHA256:RMWWZ2fVePeJUqzjO5jL2KIgXsczoHlz21Ed00biiWI", + }, + "ap-east-1": { + "endpoint": "ec2-serial-console.ap-east-1.api.aws", + "fingerprint": "SHA256:T0Q1lpiXxChoZHplnAkjbP7tkm2xXViC9bJFsjYnifk", + }, + "ap-south-2": { + "endpoint": "ec2-serial-console.ap-south-2.api.aws", + "fingerprint": "SHA256:WJgPBSwV4/shN+OPITValoewAuYj15DVW845JEhDKRs", + }, + "ap-southeast-3": { + "endpoint": "ec2-serial-console.ap-southeast-3.api.aws", + "fingerprint": "SHA256:5ZwgrCh+lfns32XITqL/4O0zIfbx4bZgsYFqy3o8mIk", + }, + "ap-southeast-4": { + "endpoint": "ec2-serial-console.ap-southeast-4.api.aws", + "fingerprint": "SHA256:Avaq27hFgLvjn5gTSShZ0oV7h90p0GG46wfOeT6ZJvM", + }, + "ap-south-1": { + "endpoint": "serial-console.ec2-instance-connect.ap-south-1.aws", + "fingerprint": "SHA256:oBLXcYmklqHHEbliARxEgH8IsO51rezTPiSM35BsU40", + }, + "ap-northeast-3": { + "endpoint": "ec2-serial-console.ap-northeast-3.api.aws", + "fingerprint": "SHA256:Am0/jiBKBnBuFnHr9aXsgEV3G8Tu/vVHFXE/3UcyjsQ", + }, + "ap-northeast-2": { + "endpoint": "serial-console.ec2-instance-connect.ap-northeast-2.aws", + "fingerprint": "SHA256:FoqWXNX+DZ++GuNTztg9PK49WYMqBX+FrcZM2dSrqrI", + }, + "ap-southeast-1": { + "endpoint": "serial-console.ec2-instance-connect.ap-southeast-1.aws", + "fingerprint": "SHA256:PLFNn7WnCQDHx3qmwLu1Gy/O8TUX7LQgZuaC6L45CoY", + }, + "ap-southeast-2": { + "endpoint": "serial-console.ec2-instance-connect.ap-southeast-2.aws", + "fingerprint": "SHA256:yFvMwUK9lEUQjQTRoXXzuN+cW9/VSe9W984Cf5Tgzo4", + }, + "ap-northeast-1": { + "endpoint": "serial-console.ec2-instance-connect.ap-northeast-2.aws", + "fingerprint": "SHA256:RQfsDCZTOfQawewTRDV1t9Em/HMrFQe+CRlIOT5um4k", + }, + "ca-central-1": { + "endpoint": "serial-console.ec2-instance-connect.ca-central-1.aws", + "fingerprint": "SHA256:P2O2jOZwmpMwkpO6YW738FIOTHdUTyEv2gczYMMO7s4", + }, + "cn-north-1": { + "endpoint": "ec2-serial-console.cn-north-1.api.amazonwebservices.com.cn", + "fingerprint": "SHA256:2gHVFy4H7uU3+WaFUxD28v/ggMeqjvSlgngpgLgGT+Y", + }, + "cn-northwest-1": { + "endpoint": "ec2-serial-console.cn-northwest-1.api.amazonwebservices.com.cn", + "fingerprint": "SHA256:TdgrNZkiQOdVfYEBUhO4SzUA09VWI5rYOZGTogpwmiM", + }, + "eu-central-1": { + "endpoint": "serial-console.ec2-instance-connect.eu-central-1.aws", + "fingerprint": "SHA256:aCMFS/yIcOdOlkXvOl8AmZ1Toe+bBnrJJ3Fy0k0De2c", + }, + "eu-west-1": { + "endpoint": "serial-console.ec2-instance-connect.eu-west-1.aws", + "fingerprint": "SHA256:h2AaGAWO4Hathhtm6ezs3Bj7udgUxi2qTrHjZAwCW6E", + }, + "eu-west-2": { + "endpoint": "serial-console.ec2-instance-connect.eu-west-2.aws", + "fingerprint": "SHA256:a69rd5CE/AEG4Amm53I6lkD1ZPvS/BCV3tTPW2RnJg8", + }, + "eu-south-1": { + "endpoint": "ec2-serial-console.eu-south-1.api.aws", + "fingerprint": "SHA256:lC0kOVJnpgFyBVrxn0A7n99ecLbXSX95cuuS7X7QK30", + }, + "eu-west-3": { + "endpoint": "serial-console.ec2-instance-connect.eu-west-3.aws", + "fingerprint": "SHA256:q8ldnAf9pymeNe8BnFVngY3RPAr/kxswJUzfrlxeEWs", + }, + "eu-south-2": { + "endpoint": "ec2-serial-console.eu-south-2.api.aws", + "fingerprint": "SHA256:GoCW2DFRlu669QNxqFxEcsR6fZUz/4F4n7T45ZcwoEc", + }, + "eu-north-1": { + "endpoint": "serial-console.ec2-instance-connect.eu-north-1.aws", + "fingerprint": "SHA256:tkGFFUVUDvocDiGSS3Cu8Gdl6w2uI32EPNpKFKLwX84", + }, + "eu-central-2": { + "endpoint": "ec2-serial-console.eu-central-2.api.aws", + "fingerprint": 
"SHA256:8Ppx2mBMf6WdCw0NUlzKfwM4/IfRz4OaXFutQXWp6mk", + }, + "me-south-1": { + "endpoint": "ec2-serial-console.me-south-1.api.aws", + "fingerprint": "SHA256:nPjLLKHu2QnLdUq2kVArsoK5xvPJOMRJKCBzCDqC3k8", + }, + "me-central-1": { + "endpoint": "ec2-serial-console.me-central-1.api.aws", + "fingerprint": "SHA256:zpb5duKiBZ+l0dFwPeyykB4MPBYhI/XzXNeFSDKBvLE", + }, + "sa-east-1": { + "endpoint": "ec2-serial-console.sa-east-1.api.aws", + "fingerprint": "SHA256:rd2+/32Ognjew1yVIemENaQzC+Botbih62OqAPDq1dI", + }, + "us-gov-east-1": { + "endpoint": "serial-console.ec2-instance-connect.us-gov-east-1.amazonaws.com", + "fingerprint": "SHA256:tIwe19GWsoyLClrtvu38YEEh+DHIkqnDcZnmtebvF28", + }, + "us-gov-west-1": { + "endpoint": "serial-console.ec2-instance-connect.us-gov-west-1.amazonaws.com", + "fingerprint": "SHA256:kfOFRWLaOZfB+utbd3bRf8OlPf8nGO2YZLqXZiIw5DQ", + }, +} diff --git a/jcloud/jcloud/pagetype/virtual_machine/virtual_machine_list.js b/jcloud/jcloud/pagetype/virtual_machine/virtual_machine_list.js new file mode 100644 index 0000000..d8e9e7d --- /dev/null +++ b/jcloud/jcloud/pagetype/virtual_machine/virtual_machine_list.js @@ -0,0 +1,13 @@ +jingrow.listview_settings['Virtual Machine'] = { + onload: function (list) { + list.page.add_menu_item(__('Sync'), function () { + jingrow.call({ + method: + 'jcloud.jcloud.pagetype.virtual_machine.virtual_machine.sync_virtual_machines', + callback: function () { + listview.refresh(); + }, + }); + }); + }, +}; diff --git a/jcloud/jcloud/pagetype/virtual_machine_image/__init__.py b/jcloud/jcloud/pagetype/virtual_machine_image/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/virtual_machine_image/patches/rename_aws_fields.py b/jcloud/jcloud/pagetype/virtual_machine_image/patches/rename_aws_fields.py new file mode 100644 index 0000000..b8b5d3f --- /dev/null +++ b/jcloud/jcloud/pagetype/virtual_machine_image/patches/rename_aws_fields.py @@ -0,0 +1,12 @@ +# Copyright (c) 2023, JINGROW +# For license information, please see license.txt + +import jingrow +from jingrow.model.utils.rename_field import rename_field + + +def execute(): + jingrow.reload_pagetype("Virtual Machine Image") + rename_field("Virtual Machine Image", "aws_instance_id", "instance_id") + rename_field("Virtual Machine Image", "aws_ami_id", "image_id") + rename_field("Virtual Machine Image", "aws_snapshot_id", "snapshot_id") diff --git a/jcloud/jcloud/pagetype/virtual_machine_image/patches/set_root_size.py b/jcloud/jcloud/pagetype/virtual_machine_image/patches/set_root_size.py new file mode 100644 index 0000000..2ecbd20 --- /dev/null +++ b/jcloud/jcloud/pagetype/virtual_machine_image/patches/set_root_size.py @@ -0,0 +1,32 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt +from __future__ import annotations + +import jingrow + + +def execute(): + # Set `root_size` to size` + jingrow.db.sql("UPDATE `tabVirtual Machine Image` SET `root_size` = `size`") + + # Set `disk_size` and `root_disk_size` on images with multiple volumes + multi_volume_images = jingrow.db.sql( + """ + SELECT image.name + FROM `tabVirtual Machine Image` image + LEFT JOIN `tabVirtual Machine Image Volume` volume + ON volume.parent = image.name + WHERE image.status = 'Available' + GROUP BY image.name + HAVING COUNT(volume.name) > 1 + """, + as_dict=True, + ) + for image_name in multi_volume_images: + image = jingrow.get_pg("Virtual Machine Image", image_name) + image.has_data_volume = True + image.save() + size = image.get_data_volume().size + root_size = 
image.get_root_volume().size + jingrow.db.set_value("Virtual Machine Image", image.name, "size", size) + jingrow.db.set_value("Virtual Machine Image", image.name, "root_size", root_size) diff --git a/jcloud/jcloud/pagetype/virtual_machine_image/test_virtual_machine_image.py b/jcloud/jcloud/pagetype/virtual_machine_image/test_virtual_machine_image.py new file mode 100644 index 0000000..e5f3851 --- /dev/null +++ b/jcloud/jcloud/pagetype/virtual_machine_image/test_virtual_machine_image.py @@ -0,0 +1,51 @@ +# Copyright (c) 2022, JINGROW +# See license.txt +from __future__ import annotations + +from typing import TYPE_CHECKING +from unittest.mock import MagicMock, patch + +import jingrow +from jingrow.tests.utils import JingrowTestCase + +from jcloud.jcloud.pagetype.cluster.test_cluster import create_test_cluster +from jcloud.jcloud.pagetype.virtual_machine_image.virtual_machine_image import ( + VirtualMachineImage, +) + +if TYPE_CHECKING: + from jcloud.jcloud.pagetype.cluster.cluster import Cluster + + +@patch.object(VirtualMachineImage, "client", new=MagicMock()) +@patch.object(VirtualMachineImage, "after_insert", new=MagicMock()) +def create_test_virtual_machine_image( + ip: str | None = None, + cluster: Cluster = None, + series: str = "m", +) -> VirtualMachineImage: + """Create test Virtual Machine Image pg""" + if not ip: + ip = jingrow.mock("ipv4") + if not cluster: + cluster = create_test_cluster() + from jcloud.jcloud.pagetype.virtual_machine.test_virtual_machine import ( + create_test_virtual_machine, + ) + + vm = create_test_virtual_machine(cluster=cluster, series=series) + + return jingrow.get_pg( + { + "pagetype": "Virtual Machine Image", + "virtual_machine": vm.name, + "region": vm.region, + "status": "Available", + "image_id": "ami-1234567890", + "mariadb_root_password": "password", + } + ).insert(ignore_if_duplicate=True) + + +class TestVirtualMachineImage(JingrowTestCase): + pass diff --git a/jcloud/jcloud/pagetype/virtual_machine_image/virtual_machine_image.js b/jcloud/jcloud/pagetype/virtual_machine_image/virtual_machine_image.js new file mode 100644 index 0000000..70fc316 --- /dev/null +++ b/jcloud/jcloud/pagetype/virtual_machine_image/virtual_machine_image.js @@ -0,0 +1,62 @@ +// Copyright (c) 2022, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Virtual Machine Image', { + refresh: function (frm) { + [ + [__('Sync'), 'sync'], + [__('Delete'), 'delete_image'], + ].forEach(([label, method]) => { + frm.add_custom_button( + label, + () => { + frm.call(method).then((r) => frm.refresh()); + }, + __('Actions'), + ); + }); + frm.add_custom_button( + __('Copy'), + () => { + const dialog = new jingrow.ui.Dialog({ + title: __('Copy'), + fields: [ + { + fieldtype: 'Link', + options: 'Cluster', + label: __('Destination Cluster'), + fieldname: 'cluster', + get_query: () => { + return { + filters: [ + ['name', '!=', frm.pg.cluster], + ['cloud_provider', '=', 'AWS EC2'], + ], + }; + }, + }, + ], + }); + dialog.set_primary_action(__('Copy'), (args) => { + frm + .call('copy_image', { + cluster: args.cluster, + }) + .then((r) => { + console.log(r); + dialog.hide(); + frm.refresh(); + }); + }); + dialog.show(); + }, + __('Actions'), + ); + if (frm.pg.image_id) { + frm.add_web_link( + `https://${frm.pg.region}.console.aws.amazon.com/ec2/v2/home?region=${frm.pg.region}#ImageDetails:imageId=${frm.pg.image_id}`, + __('Visit AWS Dashboard'), + ); + } + }, +}); diff --git a/jcloud/jcloud/pagetype/virtual_machine_image/virtual_machine_image.json 
b/jcloud/jcloud/pagetype/virtual_machine_image/virtual_machine_image.json new file mode 100644 index 0000000..7b1a883 --- /dev/null +++ b/jcloud/jcloud/pagetype/virtual_machine_image/virtual_machine_image.json @@ -0,0 +1,190 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2022-08-30 12:16:32.761458", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "virtual_machine", + "instance_id", + "image_id", + "snapshot_id", + "status", + "size", + "root_size", + "column_break_5", + "cluster", + "region", + "platform", + "series", + "copied_from", + "public", + "credentials_section", + "mariadb_root_password", + "section_break_acrc", + "has_data_volume", + "volumes" + ], + "fields": [ + { + "fieldname": "virtual_machine", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Virtual Machine", + "options": "Virtual Machine", + "reqd": 1, + "set_only_once": 1 + }, + { + "fieldname": "platform", + "fieldtype": "Data", + "label": "Platform", + "read_only": 1 + }, + { + "fetch_from": "cluster.region", + "fieldname": "region", + "fieldtype": "Link", + "label": "Region", + "options": "Cloud Region", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "status", + "fieldtype": "Select", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Status", + "options": "Pending\nAvailable\nUnavailable", + "read_only": 1 + }, + { + "fieldname": "column_break_5", + "fieldtype": "Column Break" + }, + { + "fetch_from": "virtual_machine.series", + "fieldname": "series", + "fieldtype": "Select", + "label": "Series", + "options": "n\nf\nm\nc\np\ne\nr", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "size", + "fieldtype": "Int", + "label": "Size", + "read_only": 1 + }, + { + "fieldname": "credentials_section", + "fieldtype": "Section Break", + "label": "Credentials" + }, + { + "fieldname": "mariadb_root_password", + "fieldtype": "Password", + "label": "MariaDB Root Password", + "read_only": 1 + }, + { + "fieldname": "copied_from", + "fieldtype": "Link", + "label": "Copied From", + "options": "Virtual Machine Image", + "read_only": 1 + }, + { + "fetch_from": "virtual_machine.cluster", + "fetch_if_empty": 1, + "fieldname": "cluster", + "fieldtype": "Link", + "in_standard_filter": 1, + "label": "Cluster", + "options": "Cluster", + "reqd": 1 + }, + { + "fetch_from": "virtual_machine.instance_id", + "fieldname": "instance_id", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Instance ID", + "read_only": 1, + "reqd": 1 + }, + { + "fieldname": "image_id", + "fieldtype": "Data", + "in_standard_filter": 1, + "label": "Image ID", + "read_only": 1 + }, + { + "fieldname": "snapshot_id", + "fieldtype": "Data", + "label": "Snapshot ID", + "read_only": 1 + }, + { + "default": "1", + "fieldname": "public", + "fieldtype": "Check", + "label": "Public" + }, + { + "fieldname": "section_break_acrc", + "fieldtype": "Section Break", + "label": "Volumes" + }, + { + "fieldname": "volumes", + "fieldtype": "Table", + "label": "Volumes", + "options": "Virtual Machine Image Volume", + "read_only": 1 + }, + { + "fieldname": "root_size", + "fieldtype": "Int", + "label": "Root Size", + "read_only": 1 + }, + { + "default": "0", + "fieldname": "has_data_volume", + "fieldtype": "Check", + "label": "Has Data Volume", + "read_only": 1 + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2025-01-02 16:51:28.443140", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Virtual Machine Image", + "owner": "Administrator", + "permissions": [ + { + 
"create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "show_title_field_in_link": 1, + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "title_field": "virtual_machine" +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/virtual_machine_image/virtual_machine_image.py b/jcloud/jcloud/pagetype/virtual_machine_image/virtual_machine_image.py new file mode 100644 index 0000000..42bd92b --- /dev/null +++ b/jcloud/jcloud/pagetype/virtual_machine_image/virtual_machine_image.py @@ -0,0 +1,268 @@ +# Copyright (c) 2022, JINGROW +# For license information, please see license.txt +from __future__ import annotations + +import boto3 +import jingrow +from jingrow.core.utils import find +from jingrow.model.document import Document +from oci.core import ComputeClient +from oci.core.models import CreateImageDetails +from tenacity import retry, stop_after_attempt, wait_fixed +from tenacity.retry import retry_if_result + + +class VirtualMachineImage(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + from jcloud.jcloud.pagetype.virtual_machine_image_volume.virtual_machine_image_volume import ( + VirtualMachineImageVolume, + ) + + cluster: DF.Link + copied_from: DF.Link | None + has_data_volume: DF.Check + image_id: DF.Data | None + instance_id: DF.Data + mariadb_root_password: DF.Password | None + platform: DF.Data | None + public: DF.Check + region: DF.Link + root_size: DF.Int + series: DF.Literal["n", "f", "m", "c", "p", "e", "r"] + size: DF.Int + snapshot_id: DF.Data | None + status: DF.Literal["Pending", "Available", "Unavailable"] + virtual_machine: DF.Link + volumes: DF.Table[VirtualMachineImageVolume] + # end: auto-generated types + + PAGETYPE = "Virtual Machine Image" + + def after_insert(self): + self.set_credentials() + if self.copied_from: + self.create_image_from_copy() + else: + self.create_image() + + def create_image(self): + cluster = jingrow.get_pg("Cluster", self.cluster) + if cluster.cloud_provider == "AWS EC2": + volumes = self.get_volumes_from_virtual_machine() + response = self.client.create_image( + InstanceId=self.instance_id, + Name=f"Jingrow {self.name} - {self.virtual_machine}", + BlockDeviceMappings=volumes, + ) + self.image_id = response["ImageId"] + elif cluster.cloud_provider == "OCI": + image = self.client.create_image( + CreateImageDetails( + compartment_id=cluster.oci_tenancy, + display_name=f"Jingrow {self.name} - {self.virtual_machine}", + instance_id=self.instance_id, + ) + ).data + self.image_id = image.id + self.sync() + + def create_image_from_copy(self): + source = jingrow.get_pg("Virtual Machine Image", self.copied_from) + response = self.client.copy_image( + Name=f"Jingrow {self.name} - {self.virtual_machine}", + SourceImageId=source.image_id, + SourceRegion=source.region, + ) + self.image_id = response["ImageId"] + self.sync() + + def set_credentials(self): + if self.series == "m" and jingrow.db.exists("Database Server", self.virtual_machine): + self.mariadb_root_password = jingrow.get_pg("Database Server", self.virtual_machine).get_password( + "mariadb_root_password" + ) + + @jingrow.whitelist() + def sync(self): # noqa: C901 + cluster = jingrow.get_pg("Cluster", self.cluster) + if cluster.cloud_provider == "AWS EC2": + images = 
self.client.describe_images(ImageIds=[self.image_id])["Images"] + if images: + image = images[0] + self.status = self.get_aws_status_map(image["State"]) + self.platform = image["Architecture"] + volume = find(image["BlockDeviceMappings"], lambda x: "Ebs" in x) + # This information is not accurate for images created from multiple volumes + attached_snapshots = [] + if volume and "SnapshotId" in volume["Ebs"]: + self.snapshot_id = volume["Ebs"]["SnapshotId"] + for volume in image["BlockDeviceMappings"]: + if "Ebs" not in volume: + # We don't care about non-EBS (instance store) volumes + continue + snapshot_id = volume["Ebs"].get("SnapshotId") + if not snapshot_id: + # We don't care about volumes without snapshots + continue + attached_snapshots.append(snapshot_id) + existing = find(self.volumes, lambda x: x.snapshot_id == snapshot_id) + device = volume["DeviceName"] + volume_type = volume["Ebs"]["VolumeType"] + size = volume["Ebs"]["VolumeSize"] + if existing: + existing.device = device + existing.volume_type = volume_type + existing.size = size + else: + self.append( + "volumes", + { + "snapshot_id": snapshot_id, + "device": device, + "volume_type": volume_type, + "size": size, + }, + ) + for volume in list(self.volumes): + if volume.snapshot_id not in attached_snapshots: + self.remove(volume) + + self.size = self.get_data_volume().size + self.root_size = self.get_data_volume().size + else: + self.status = "Unavailable" + elif cluster.cloud_provider == "OCI": + image = self.client.get_image(self.image_id).data + self.status = self.get_oci_status_map(image.lifecycle_state) + if image.size_in_mbs: + self.size = image.size_in_mbs // 1024 + + self.save() + return self.status + + @retry( + retry=retry_if_result(lambda result: result != "Available"), + wait=wait_fixed(60), + stop=stop_after_attempt(10), + ) + def wait_for_availability(self): + """Retries sync until the image is available""" + return self.sync() + + @jingrow.whitelist() + def copy_image(self, cluster: str): + image = jingrow.copy_pg(self) + image.copied_from = self.name + image.cluster = cluster + return image.insert() + + @jingrow.whitelist() + def delete_image(self): + cluster = jingrow.get_pg("Cluster", self.cluster) + if cluster.cloud_provider == "AWS EC2": + self.client.deregister_image(ImageId=self.image_id) + if self.snapshot_id: + self.client.delete_snapshot(SnapshotId=self.snapshot_id) + elif cluster.cloud_provider == "OCI": + self.client.delete_image(self.image_id) + self.sync() + + def get_aws_status_map(self, status): + return { + "pending": "Pending", + "available": "Available", + }.get(status, "Unavailable") + + def get_oci_status_map(self, status): + return { + "PROVISIONING": "Pending", + "IMPORTING": "Pending", + "AVAILABLE": "Available", + "EXPORTING": "Pending", + "DISABLED": "Unavailable", + "DELETED": "Unavailable", + }.get(status, "Unavailable") + + def get_volumes_from_virtual_machine(self): + machine = jingrow.get_pg("Virtual Machine", self.virtual_machine) + volumes = [] + for volume in machine.volumes: + volumes.append( + { + "DeviceName": volume.device, + "Ebs": { + "DeleteOnTermination": True, + "VolumeSize": volume.size, + "VolumeType": volume.volume_type, + }, + } + ) + return volumes + + @property + def client(self): + cluster = jingrow.get_pg("Cluster", self.cluster) + if cluster.cloud_provider == "AWS EC2": + return boto3.client( + "ec2", + region_name=self.region, + aws_access_key_id=cluster.aws_access_key_id, + aws_secret_access_key=cluster.get_password("aws_secret_access_key"), + ) + if 
cluster.cloud_provider == "OCI": + return ComputeClient(cluster.get_oci_config()) + return None + + @classmethod + def get_available_for_series( + cls, series: str, region: str | None = None, platform: str | None = None + ) -> str | None: + images = jingrow.qb.PageType(cls.PAGETYPE) + get_available_images = ( + jingrow.qb.from_(images) + .select("name") + .where(images.status == "Available") + .where(images.public == 1) + .where( + images.series == series, + ) + .orderby(images.creation, order=jingrow.qb.desc) + ) + if region: + get_available_images = get_available_images.where(images.region == region) + if platform: + get_available_images = get_available_images.where(images.platform == platform) + available_images = get_available_images.run(as_dict=True) + if not available_images: + return None + return available_images[0].name + + def get_root_volume(self): + # This only works for AWS + if len(self.volumes) == 1: + return self.volumes[0] + + volume = find(self.volumes, lambda v: v.device == "/dev/sda1") + if volume: + return volume + return jingrow._dict({"size": 0}) + + def get_data_volume(self): + if not self.has_data_volume: + return self.get_root_volume() + + # This only works for AWS + if len(self.volumes) == 1: + return self.volumes[0] + + volume = find(self.volumes, lambda v: v.device != "/dev/sda1") + if volume: + return volume + return jingrow._dict({"size": 0}) diff --git a/jcloud/jcloud/pagetype/virtual_machine_image_volume/__init__.py b/jcloud/jcloud/pagetype/virtual_machine_image_volume/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/virtual_machine_image_volume/virtual_machine_image_volume.json b/jcloud/jcloud/pagetype/virtual_machine_image_volume/virtual_machine_image_volume.json new file mode 100644 index 0000000..240f056 --- /dev/null +++ b/jcloud/jcloud/pagetype/virtual_machine_image_volume/virtual_machine_image_volume.json @@ -0,0 +1,63 @@ +{ + "actions": [], + "autoname": "autoincrement", + "creation": "2024-09-25 14:07:29.049839", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "snapshot_id", + "device", + "column_break_ygbk", + "volume_type", + "size" + ], + "fields": [ + { + "fieldname": "volume_type", + "fieldtype": "Select", + "in_list_view": 1, + "label": "Volume Type", + "options": "gp3\ngp2", + "read_only": 1 + }, + { + "fieldname": "size", + "fieldtype": "Int", + "in_list_view": 1, + "label": "Size", + "read_only": 1 + }, + { + "fieldname": "column_break_ygbk", + "fieldtype": "Column Break" + }, + { + "fieldname": "device", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Device", + "read_only": 1 + }, + { + "fieldname": "snapshot_id", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Snapshot ID", + "read_only": 1 + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2024-09-25 14:23:33.386098", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Virtual Machine Image Volume", + "naming_rule": "Autoincrement", + "owner": "Administrator", + "permissions": [], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/virtual_machine_image_volume/virtual_machine_image_volume.py b/jcloud/jcloud/pagetype/virtual_machine_image_volume/virtual_machine_image_volume.py new file mode 100644 index 0000000..13a8bc5 --- /dev/null +++ b/jcloud/jcloud/pagetype/virtual_machine_image_volume/virtual_machine_image_volume.py @@ -0,0 +1,29 @@ +# Copyright 
(c) 2024, JINGROW +# For license information, please see license.txt + +# import jingrow +from __future__ import annotations + +from jingrow.model.document import Document + + +class VirtualMachineImageVolume(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + device: DF.Data | None + name: DF.Int | None + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + size: DF.Int + snapshot_id: DF.Data | None + volume_type: DF.Literal["gp3", "gp2"] + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/virtual_machine_temporary_volume/__init__.py b/jcloud/jcloud/pagetype/virtual_machine_temporary_volume/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/virtual_machine_temporary_volume/virtual_machine_temporary_volume.json b/jcloud/jcloud/pagetype/virtual_machine_temporary_volume/virtual_machine_temporary_volume.json new file mode 100644 index 0000000..6e49ada --- /dev/null +++ b/jcloud/jcloud/pagetype/virtual_machine_temporary_volume/virtual_machine_temporary_volume.json @@ -0,0 +1,32 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2025-01-15 11:46:59.223545", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "device" + ], + "fields": [ + { + "fieldname": "device", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Device", + "reqd": 1 + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2025-01-15 11:48:44.056676", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Virtual Machine Temporary Volume", + "owner": "Administrator", + "permissions": [], + "sort_field": "creation", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/virtual_machine_temporary_volume/virtual_machine_temporary_volume.py b/jcloud/jcloud/pagetype/virtual_machine_temporary_volume/virtual_machine_temporary_volume.py new file mode 100644 index 0000000..ea97d9c --- /dev/null +++ b/jcloud/jcloud/pagetype/virtual_machine_temporary_volume/virtual_machine_temporary_volume.py @@ -0,0 +1,23 @@ +# Copyright (c) 2025, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class VirtualMachineTemporaryVolume(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + device: DF.Data + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/virtual_machine_volume/__init__.py b/jcloud/jcloud/pagetype/virtual_machine_volume/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/virtual_machine_volume/patches/rename_aws_fields.py b/jcloud/jcloud/pagetype/virtual_machine_volume/patches/rename_aws_fields.py new file mode 100644 index 0000000..09f87e5 --- /dev/null +++ b/jcloud/jcloud/pagetype/virtual_machine_volume/patches/rename_aws_fields.py @@ -0,0 +1,10 @@ +# Copyright (c) 2023, JINGROW +# For license information, please see license.txt + +import jingrow +from jingrow.model.utils.rename_field import rename_field + + +def execute(): + jingrow.reload_pagetype("Virtual Machine Volume") + rename_field("Virtual Machine Volume", "aws_volume_id", "volume_id") diff --git a/jcloud/jcloud/pagetype/virtual_machine_volume/virtual_machine_volume.json b/jcloud/jcloud/pagetype/virtual_machine_volume/virtual_machine_volume.json new file mode 100644 index 0000000..fafa50e --- /dev/null +++ b/jcloud/jcloud/pagetype/virtual_machine_volume/virtual_machine_volume.json @@ -0,0 +1,126 @@ +{ + "actions": [], + "autoname": "autoincrement", + "creation": "2022-06-28 21:42:34.933564", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "volume_id", + "volume_type", + "size", + "column_break_ygbk", + "iops", + "throughput", + "last_updated_at", + "section_break_frlu", + "device", + "column_break_buwy", + "detach", + "delete_volume", + "increase_disk_size", + "update_ebs_performance" + ], + "fields": [ + { + "columns": 1, + "fieldname": "volume_type", + "fieldtype": "Select", + "in_list_view": 1, + "label": "Volume Type", + "options": "gp3\ngp2", + "read_only_depends_on": "eval: pg.volume_type" + }, + { + "columns": 1, + "fieldname": "size", + "fieldtype": "Int", + "in_list_view": 1, + "label": "Size", + "read_only_depends_on": "eval: pg.size" + }, + { + "columns": 1, + "fieldname": "iops", + "fieldtype": "Int", + "in_list_view": 1, + "label": "IOPS", + "read_only_depends_on": "eval: pg.iops" + }, + { + "columns": 1, + "fieldname": "throughput", + "fieldtype": "Int", + "in_list_view": 1, + "label": "Throughput", + "read_only_depends_on": "eval: pg.throughput" + }, + { + "columns": 4, + "fieldname": "volume_id", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Volume ID", + "read_only": 1 + }, + { + "fieldname": "last_updated_at", + "fieldtype": "Datetime", + "label": "Last updated at", + "read_only": 1 + }, + { + "fieldname": "column_break_ygbk", + "fieldtype": "Column Break" + }, + { + "fieldname": "section_break_frlu", + "fieldtype": "Section Break" + }, + { + "columns": 2, + "fieldname": "device", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Device", + "read_only": 1 + }, + { + "fieldname": "column_break_buwy", + "fieldtype": "Column Break" + }, + { + "fieldname": "detach", + "fieldtype": "Button", + "label": "Detach" + }, + { + "fieldname": "increase_disk_size", + "fieldtype": "Button", + "label": "Increase Disk Size" + }, + { + "fieldname": "delete_volume", + "fieldtype": "Button", + "label": "Delete Volume" + }, + { + "fieldname": "update_ebs_performance", + "fieldtype": "Button", + "label": "Update EBS Performance" + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": 
"2025-02-07 20:38:39.733552", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Virtual Machine Volume", + "naming_rule": "Autoincrement", + "owner": "Administrator", + "permissions": [], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/virtual_machine_volume/virtual_machine_volume.py b/jcloud/jcloud/pagetype/virtual_machine_volume/virtual_machine_volume.py new file mode 100644 index 0000000..bb6d52a --- /dev/null +++ b/jcloud/jcloud/pagetype/virtual_machine_volume/virtual_machine_volume.py @@ -0,0 +1,32 @@ +# Copyright (c) 2022, JINGROW +# For license information, please see license.txt + +# import jingrow +from __future__ import annotations + +from jingrow.model.document import Document + + +class VirtualMachineVolume(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + device: DF.Data | None + iops: DF.Int + last_updated_at: DF.Datetime | None + name: DF.Int | None + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + size: DF.Int + throughput: DF.Int + volume_id: DF.Data | None + volume_type: DF.Literal["gp3", "gp2"] + # end: auto-generated types + + pass diff --git a/jcloud/jcloud/pagetype/wechatpay_payment_record/__init__.py b/jcloud/jcloud/pagetype/wechatpay_payment_record/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/wechatpay_payment_record/test_wechatpay_payment_record.py b/jcloud/jcloud/pagetype/wechatpay_payment_record/test_wechatpay_payment_record.py new file mode 100644 index 0000000..c2cbd6d --- /dev/null +++ b/jcloud/jcloud/pagetype/wechatpay_payment_record/test_wechatpay_payment_record.py @@ -0,0 +1,9 @@ +# Copyright (c) 2025, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests.utils import JingrowTestCase + + +class TestWechatpayPaymentRecord(JingrowTestCase): + pass diff --git a/jcloud/jcloud/pagetype/wechatpay_payment_record/wechatpay_payment_record.js b/jcloud/jcloud/pagetype/wechatpay_payment_record/wechatpay_payment_record.js new file mode 100644 index 0000000..b0879ac --- /dev/null +++ b/jcloud/jcloud/pagetype/wechatpay_payment_record/wechatpay_payment_record.js @@ -0,0 +1,8 @@ +// Copyright (c) 2025, JINGROW +// For license information, please see license.txt + +// jingrow.ui.form.on("Wechatpay Payment Record", { +// refresh(frm) { + +// }, +// }); diff --git a/jcloud/jcloud/pagetype/wechatpay_payment_record/wechatpay_payment_record.json b/jcloud/jcloud/pagetype/wechatpay_payment_record/wechatpay_payment_record.json new file mode 100644 index 0000000..dd2b266 --- /dev/null +++ b/jcloud/jcloud/pagetype/wechatpay_payment_record/wechatpay_payment_record.json @@ -0,0 +1,79 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2025-03-18 22:27:22.084966", + "pagetype": "PageType", + "engine": "InnoDB", + "field_order": [ + "team", + "payment_id", + "order_id", + "amount", + "status" + ], + "fields": [ + { + "fieldname": "team", + "fieldtype": "Link", + "in_standard_filter": 1, + "label": "Team", + "options": "Team" + }, + { + "fieldname": "payment_id", + "fieldtype": "Data", + "label": "Payment ID", + "read_only": 1, + "unique": 1 + }, + { + "fieldname": "order_id", + "fieldtype": "Data", + "label": "Order ID", + "read_only": 1, + "unique": 1 + }, + { + "fieldname": "amount", + "fieldtype": "Float", + "in_list_view": 1, + "label": "Amount", + 
"precision": "2", + "read_only": 1 + }, + { + "fieldname": "status", + "fieldtype": "Select", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Status", + "options": "Pending\nSuccess\nFailed", + "read_only": 1 + } + ], + "grid_page_length": 50, + "index_web_pages_for_search": 1, + "links": [], + "modified": "2025-03-19 04:23:13.203918", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Wechatpay Payment Record", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/wechatpay_payment_record/wechatpay_payment_record.py b/jcloud/jcloud/pagetype/wechatpay_payment_record/wechatpay_payment_record.py new file mode 100644 index 0000000..11baee9 --- /dev/null +++ b/jcloud/jcloud/pagetype/wechatpay_payment_record/wechatpay_payment_record.py @@ -0,0 +1,23 @@ +# Copyright (c) 2025, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class WechatpayPaymentRecord(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + amount: DF.Float + order_id: DF.Data | None + payment_id: DF.Data | None + status: DF.Literal["Pending", "Success", "Failed"] + team: DF.Link | None + # end: auto-generated types + pass diff --git a/jcloud/jcloud/pagetype/wireguard_peer/__init__.py b/jcloud/jcloud/pagetype/wireguard_peer/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/pagetype/wireguard_peer/templates/wg0.conf b/jcloud/jcloud/pagetype/wireguard_peer/templates/wg0.conf new file mode 100644 index 0000000..4606f2c --- /dev/null +++ b/jcloud/jcloud/pagetype/wireguard_peer/templates/wg0.conf @@ -0,0 +1,10 @@ +[Interface] +Address = {{ wireguard_network }} +PrivateKey = {{ wireguard_private_key }} +{% if peers %}{% for peer in (peers) %} +[Peer] +PublicKey = {{ peer.public_key }} +Endpoint = {{ peer.endpoint }} +AllowedIPs = {{ peer.allowed_ips}} +PersistentKeepalive = 25 +{% endfor %}{% endif %} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/wireguard_peer/test_wireguard_peer.py b/jcloud/jcloud/pagetype/wireguard_peer/test_wireguard_peer.py new file mode 100644 index 0000000..1107442 --- /dev/null +++ b/jcloud/jcloud/pagetype/wireguard_peer/test_wireguard_peer.py @@ -0,0 +1,9 @@ +# Copyright (c) 2023, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests.utils import JingrowTestCase + + +class TestWireguardPeer(JingrowTestCase): + pass diff --git a/jcloud/jcloud/pagetype/wireguard_peer/wireguard_peer.js b/jcloud/jcloud/pagetype/wireguard_peer/wireguard_peer.js new file mode 100644 index 0000000..5dc1733 --- /dev/null +++ b/jcloud/jcloud/pagetype/wireguard_peer/wireguard_peer.js @@ -0,0 +1,48 @@ +// Copyright (c) 2023, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Wireguard Peer', { + refresh: function (frm) { + frm.add_fetch('server_name', 'ip', 'ip'); + frm.add_fetch('server_name', 'private_ip', 'private_ip'); + frm.add_fetch('server_name', 'title', 'peer_name'); + + [ + [__('Setup Wireguard'), 'setup_wireguard', false], + [__('Ping Peer'), 'ping_peer', false], + [__('Fetch Private 
Network'), 'fetch_peer_private_network', false], + [__('Generate Config'), 'generate_config', false], + [__('Generate QR'), 'generate_qr_code', false], + ].forEach(([label, method, confirm, condition]) => { + if (typeof condition === 'undefined' || condition) { + frm.add_custom_button( + label, + () => { + if (confirm) { + jingrow.confirm( + `Are you sure you want to ${label.toLowerCase()}?`, + () => + frm.call(method).then((r) => { + if (r.message) { + jingrow.msgprint(r.message); + } else { + frm.refresh(); + } + }), + ); + } else { + frm.call(method).then((r) => { + if (r.message) { + jingrow.msgprint(r.message); + } else { + frm.refresh(); + } + }); + } + }, + __('Actions'), + ); + } + }); + }, +}); diff --git a/jcloud/jcloud/pagetype/wireguard_peer/wireguard_peer.json b/jcloud/jcloud/pagetype/wireguard_peer/wireguard_peer.json new file mode 100644 index 0000000..71fb41c --- /dev/null +++ b/jcloud/jcloud/pagetype/wireguard_peer/wireguard_peer.json @@ -0,0 +1,178 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2023-08-07 17:54:33.377864", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "server_section", + "server_type", + "server_name", + "peer_name", + "upstream_proxy", + "column_break_bsvs", + "status", + "ip", + "private_ip", + "peer_private_network", + "wireguard_section", + "wireguard_network", + "peer_ip", + "allowed_ips", + "column_break_aqot", + "is_wireguard_setup", + "private_key", + "public_key", + "config_tab", + "peer_config" + ], + "fields": [ + { + "fieldname": "peer_name", + "fieldtype": "Data", + "label": "Peer Name", + "reqd": 1, + "unique": 1 + }, + { + "default": "Active", + "fieldname": "status", + "fieldtype": "Select", + "label": "Status", + "options": "Active\nBroken\nArchived" + }, + { + "fieldname": "peer_ip", + "fieldtype": "Data", + "label": "Peer IP", + "read_only": 1, + "unique": 1 + }, + { + "fieldname": "column_break_bsvs", + "fieldtype": "Column Break" + }, + { + "fieldname": "private_key", + "fieldtype": "Password", + "label": "Private Key" + }, + { + "fieldname": "public_key", + "fieldtype": "Data", + "label": "Public Key" + }, + { + "description": "Comma Seperated CIDR blocks. 
EG: 10.122.0.0/20,10.7.0.1/32", + "fieldname": "allowed_ips", + "fieldtype": "Data", + "label": "Allowed IPs", + "read_only": 1 + }, + { + "fieldname": "peer_private_network", + "fieldtype": "Data", + "label": "Peer Private Network" + }, + { + "fieldname": "upstream_proxy", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Upstream Proxy", + "mandatory_depends_on": "eval:pg.setup_wireguard", + "options": "Proxy Server", + "reqd": 1 + }, + { + "default": "0", + "fieldname": "is_wireguard_setup", + "fieldtype": "Check", + "label": "Is Wireguard Setup" + }, + { + "fieldname": "private_ip", + "fieldtype": "Data", + "label": "Private IP" + }, + { + "fetch_from": "upstream_proxy.wireguard_network", + "fieldname": "wireguard_network", + "fieldtype": "Data", + "label": "Wireguard Network", + "read_only": 1 + }, + { + "fieldname": "ip", + "fieldtype": "Data", + "label": "Public IP" + }, + { + "fieldname": "config_tab", + "fieldtype": "Tab Break", + "label": "Config" + }, + { + "fieldname": "peer_config", + "fieldtype": "Code", + "label": "Peer Config", + "options": "conf", + "read_only": 1 + }, + { + "fieldname": "server_type", + "fieldtype": "Select", + "label": "Server Type", + "options": "Server\nDatabase Server", + "reqd": 1 + }, + { + "fieldname": "server_name", + "fieldtype": "Dynamic Link", + "in_list_view": 1, + "label": "Server Name", + "options": "server_type", + "reqd": 1 + }, + { + "fieldname": "server_section", + "fieldtype": "Section Break", + "label": "Server" + }, + { + "fieldname": "wireguard_section", + "fieldtype": "Section Break", + "label": "Wireguard" + }, + { + "fieldname": "column_break_aqot", + "fieldtype": "Column Break" + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2024-03-11 18:18:39.689700", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Wireguard Peer", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "search_fields": "peer_name,status", + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "title_field": "peer_name" +} \ No newline at end of file diff --git a/jcloud/jcloud/pagetype/wireguard_peer/wireguard_peer.py b/jcloud/jcloud/pagetype/wireguard_peer/wireguard_peer.py new file mode 100644 index 0000000..55dc7c4 --- /dev/null +++ b/jcloud/jcloud/pagetype/wireguard_peer/wireguard_peer.py @@ -0,0 +1,192 @@ +# Copyright (c) 2023, JINGROW +# For license information, please see license.txt + +import ipaddress +import json +import subprocess + +import jingrow +from jingrow.model.document import Document + +from jcloud.runner import Ansible +from jcloud.utils import log_error + + +class WireguardPeer(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + allowed_ips: DF.Data | None + ip: DF.Data | None + is_wireguard_setup: DF.Check + peer_config: DF.Code | None + peer_ip: DF.Data | None + peer_name: DF.Data + peer_private_network: DF.Data | None + private_ip: DF.Data | None + private_key: DF.Password | None + public_key: DF.Data | None + server_name: DF.DynamicLink + server_type: DF.Literal["Server", "Database Server"] + status: DF.Literal["Active", "Broken", "Archived"] + upstream_proxy: DF.Link + wireguard_network: DF.Data | None + # end: auto-generated types + + def validate(self): + self.next_ip_address() + if not self.private_ip: + self.allowed_ips = self.peer_ip + else: + self.allowed_ips = f"{self.peer_ip},{self.peer_private_network}" + + def next_ip_address(self): + try: + if self.is_new() and not self.peer_ip: + network_address = ipaddress.ip_network(self.wireguard_network) + ips = jingrow.get_list( + "Wireguard Peer", + filters={"wireguard_network": self.wireguard_network}, + pluck="peer_ip", + fields=["peer_ip"], + ) + if not ips: + self.peer_ip = str(network_address[2]) + return + last_ip_address = ipaddress.ip_address(max(ips)) + next_ip_addr = last_ip_address + 1 + while next_ip_addr not in network_address: + next_ip_addr += 1 + self.peer_ip = str(next_ip_addr) + except Exception: + log_error("Wireguard Peer IP Exception", server=self.as_dict()) + jingrow.throw("Invalid Wireguard Network") + + @jingrow.whitelist() + def setup_wireguard(self): + jingrow.enqueue_pg("Wireguard Peer", self.name, "_setup_peer_wg") + + @jingrow.whitelist() + def ping_peer(self): + try: + ansible = Ansible( + playbook="ping.yml", + server=self, + ) + play = ansible.run() + if play.status == "Success": + if not self.peer_private_network: + self.fetch_peer_private_network(play) + except Exception: + log_error("Server Ping Exception", server=self.as_dict()) + + @jingrow.whitelist() + def fetch_peer_private_network(self, play=None): + if not play: + play = jingrow.get_last_pg( + "Ansible Play", {"status": "Success", "server": self.name, "play": "Ping Server"} + ) + res = jingrow.get_last_pg( + "Ansible Task", {"status": "Success", "play": play.name, "task": "Gather Facts"} + ).result + facts = json.loads(res)["ansible_facts"] + self.private_ip = facts["eth1"]["ipv4"]["address"] + self.peer_private_network = str( + ipaddress.IPv4Network( + f'{facts["eth1"]["ipv4"]["address"]}/{facts["eth1"]["ipv4"]["netmask"]}', + strict=False, + ) + ) + self.save() + + def _setup_peer_wg(self): + proxy = jingrow.get_pg("Proxy Server", self.upstream_proxy) + try: + ansible = Ansible( + playbook="wireguard.yml", + server=self, + variables={ + "wireguard_port": proxy.wireguard_port, + "interface_id": proxy.private_ip_interface_id, + "wireguard_network": self.peer_ip + "/" + self.wireguard_network.split("/")[1], + "wireguard_private_key": self.get_password("private_key") + if self.private_key + else False, + "wireguard_public_key": self.get_password("public_key") + if self.public_key + else False, + "peers": json.dumps( + [ + { + "public_key": proxy.get_password("wireguard_public_key"), + "allowed_ips": self.wireguard_network, + "peer_ip": proxy.name, + } + ] + ), + }, + ) + play = ansible.run() + if play.status == "Success": + self.reload() + self.is_wireguard_setup = True + try: + if not self.private_key and not self.public_key: + self.private_key = jingrow.get_pg( + "Ansible Task", {"play": play.name, "task": "Generate Wireguard Private Key"} + ).output + self.public_key = 
jingrow.get_pg( + "Ansible Task", {"play": play.name, "task": "Generate Wireguard Public Key"} + ).output + except Exception: + log_error("Wireguard Key Save error", server=self.as_dict()) + if not self.peer_private_network: + self.peer_private_network = jingrow.get_pg( + "Ansible Task", {"play": play.name, "task": "Get Subnet Mask of eth1"} + ).output + self.save() + proxy.reload_wireguard() + except Exception: + log_error("Wireguard Setup Exception", server=self.as_dict()) + + @jingrow.whitelist() + def generate_config(self): + if not self.private_key or not self.public_key: + self.private_key = subprocess.check_output(["wg", "genkey"]).decode().strip() + self.public_key = ( + subprocess.check_output([f"echo '{self.private_key}' | wg pubkey"], shell=True) + .decode() + .strip() + ) + self.save() + proxy = jingrow.get_pg("Proxy Server", self.upstream_proxy) + variables = { + "wireguard_network": self.peer_ip + "/" + self.wireguard_network.split("/")[1], + "wireguard_private_key": self.get_password("private_key"), + "wireguard_port": proxy.wireguard_port, + "peers": [ + { + "public_key": proxy.get_password("wireguard_public_key"), + "endpoint": proxy.name + ":" + str(proxy.wireguard_port), + "allowed_ips": self.wireguard_network, + "peer_ip": proxy.name, + } + ], + } + outputText = jingrow.render_template( + "jcloud/pagetype/wireguard_peer/templates/wg0.conf", variables, is_path=True + ) + self.peer_config = outputText + self.save() + proxy.reload_wireguard() + + @jingrow.whitelist() + def download_config(self): + jingrow.local.response.filename = f"{self.name}.conf" + jingrow.local.response.filecontent = self.peer_config + jingrow.local.response.type = "download" diff --git a/jcloud/jcloud/pagetype/wireguard_peer/wireguard_peer_dashboard.py b/jcloud/jcloud/pagetype/wireguard_peer/wireguard_peer_dashboard.py new file mode 100644 index 0000000..5ac0535 --- /dev/null +++ b/jcloud/jcloud/pagetype/wireguard_peer/wireguard_peer_dashboard.py @@ -0,0 +1,11 @@ +from jingrow import _ + + +def get_data(): + return { + "fieldname": "Server", + "non_standard_fieldnames": {"Server": "Wireguard Peer"}, + "transactions": [ + {"label": _("Logs"), "items": ["Ansible Play"]}, + ], + } diff --git a/jcloud/jcloud/report/__init__.py b/jcloud/jcloud/report/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/report/agent_versions/__init__.py b/jcloud/jcloud/report/agent_versions/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/report/agent_versions/agent_versions.js b/jcloud/jcloud/report/agent_versions/agent_versions.js new file mode 100644 index 0000000..e69b8cc --- /dev/null +++ b/jcloud/jcloud/report/agent_versions/agent_versions.js @@ -0,0 +1,27 @@ +// Copyright (c) 2023, JINGROW +// For license information, please see license.txt +/* eslint-disable */ + +jingrow.query_reports['Agent Versions'] = { + onload: function (report) { + report.page.add_button(__('Update Agent'), () => { + let filters = { + server_type: jingrow.query_report.get_filter_value('server_type'), + exclude_self_hosted: jingrow.query_report.get_filter_value( + 'exclude_self_hosted', + ), + }; + let team = jingrow.query_report.get_filter_value('team'); + if (team) { + filters['team'] = team; + } + jingrow + .call('jcloud.jcloud.report.agent_versions.agent_versions.update_agent', { + filters: filters, + }) + .then((r) => { + jingrow.query_report.refresh(); + }); + }); + }, +}; diff --git a/jcloud/jcloud/report/agent_versions/agent_versions.json 
b/jcloud/jcloud/report/agent_versions/agent_versions.json new file mode 100644 index 0000000..713ae8b --- /dev/null +++ b/jcloud/jcloud/report/agent_versions/agent_versions.json @@ -0,0 +1,73 @@ +{ + "add_total_row": 0, + "columns": [], + "creation": "2023-01-30 17:16:09.288587", + "disabled": 0, + "docstatus": 0, + "pagetype": "Report", + "filters": [ + { + "fieldname": "server_type", + "fieldtype": "Select", + "label": "Server Type", + "mandatory": 0, + "options": "\nServer\nDatabase Server\nProxy Server\nLog Server\nMonitor Server\nRegistry Server\nTrace Server\nAnalytics Server", + "wildcard_filter": 0 + }, + { + "fieldname": "server_name", + "fieldtype": "Data", + "label": "Server Name", + "mandatory": 0, + "options": "", + "wildcard_filter": 0 + }, + { + "fieldname": "team", + "fieldtype": "Link", + "label": "Team", + "mandatory": 0, + "options": "Team", + "wildcard_filter": 1 + }, + { + "fieldname": "exclude_self_hosted", + "fieldtype": "Check", + "label": "Exclude Self Hosted", + "mandatory": 0, + "wildcard_filter": 0 + }, + { + "fieldname": "cluster", + "fieldtype": "Link", + "label": "Cluster", + "mandatory": 0, + "options": "Cluster", + "wildcard_filter": 0 + } + ], + "idx": 0, + "is_standard": "Yes", + "letterhead": null, + "modified": "2025-01-17 10:49:27.237566", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Agent Versions", + "owner": "Administrator", + "prepared_report": 0, + "ref_pagetype": "Server", + "report_name": "Agent Versions", + "report_type": "Script Report", + "roles": [ + { + "role": "System Manager" + }, + { + "role": "Jcloud Member" + }, + { + "role": "Jcloud Admin" + } + ], + "timeout": 0 +} \ No newline at end of file diff --git a/jcloud/jcloud/report/agent_versions/agent_versions.py b/jcloud/jcloud/report/agent_versions/agent_versions.py new file mode 100644 index 0000000..8b84c39 --- /dev/null +++ b/jcloud/jcloud/report/agent_versions/agent_versions.py @@ -0,0 +1,89 @@ +# Copyright (c) 2023, JINGROW +# For license information, please see license.txt + +import json + +import jingrow + +from jcloud.agent import Agent +from jcloud.jcloud.report.server_stats.server_stats import get_servers + + +def execute(filters=None): + jingrow.only_for("System Manager") + columns = [ + { + "fieldname": "server", + "label": jingrow._("Server"), + "fieldtype": "Dynamic Link", + "options": "server_type", + "width": 200, + }, + { + "fieldname": "server_type", + "label": jingrow._("Server Type"), + "fieldtype": "Link", + "options": "PageType", + "width": 140, + }, + { + "fieldname": "commit", + "label": jingrow._("Commit"), + "fieldtype": "Data", + "width": 360, + }, + { + "fieldname": "status", + "label": jingrow._("Status"), + "fieldtype": "Long Text", + "width": 100, + }, + { + "fieldname": "upstream", + "label": jingrow._("Upstream"), + "fieldtype": "Data", + "width": 260, + }, + { + "fieldname": "show", + "label": jingrow._("Show"), + "fieldtype": "Long Text", + "width": 100, + }, + { + "fieldname": "python", + "label": jingrow._("Python Version"), + "fieldtype": "Data", + "width": 100, + }, + ] + + data = get_data(filters) + return columns, data + + +def get_data(filters): + rows = [] + for server in get_servers(filters): + try: + version = Agent(server.name, server.server_type).get_version() + if version is None: + version = {} + except Exception: + version = {} + rows.append( + { + "server": server.name, + "server_type": server.server_type, + **version, + } + ) + return rows + + +@jingrow.whitelist() +def update_agent(filters): + 
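# Restricted to System Manager: loads each server matching the report filters
# and triggers its Ansible-based agent update (update_agent_ansible).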
jingrow.only_for("System Manager") + for server in get_servers(jingrow._dict(json.loads(filters))): + server = jingrow.get_pg(server.server_type, server.name) + server.update_agent_ansible() diff --git a/jcloud/jcloud/report/aws_instance_pricing/__init__.py b/jcloud/jcloud/report/aws_instance_pricing/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/report/aws_instance_pricing/aws_instance_pricing.js b/jcloud/jcloud/report/aws_instance_pricing/aws_instance_pricing.js new file mode 100644 index 0000000..eef1c01 --- /dev/null +++ b/jcloud/jcloud/report/aws_instance_pricing/aws_instance_pricing.js @@ -0,0 +1,5 @@ +// Copyright (c) 2022, JINGROW +// For license information, please see license.txt +/* eslint-disable */ + +jingrow.query_reports['AWS Instance Pricing'] = {}; diff --git a/jcloud/jcloud/report/aws_instance_pricing/aws_instance_pricing.json b/jcloud/jcloud/report/aws_instance_pricing/aws_instance_pricing.json new file mode 100644 index 0000000..9d47f07 --- /dev/null +++ b/jcloud/jcloud/report/aws_instance_pricing/aws_instance_pricing.json @@ -0,0 +1,158 @@ +{ + "add_total_row": 0, + "columns": [ + { + "fieldname": "cluster", + "fieldtype": "Link", + "label": "Cluster", + "options": "Cluster", + "width": 0 + }, + { + "fieldname": "instance", + "fieldtype": "Data", + "label": "Instance", + "width": 200 + }, + { + "fieldname": "instance_type", + "fieldtype": "Data", + "label": "Instance Type", + "width": 0 + }, + { + "fieldname": "vcpu", + "fieldtype": "Int", + "label": "vCPU", + "width": 0 + }, + { + "fieldname": "memory", + "fieldtype": "Float", + "label": "Memory", + "width": 0 + }, + { + "fieldname": "processor", + "fieldtype": "Data", + "label": "Processor", + "width": 0 + }, + { + "fieldname": "family", + "fieldtype": "Data", + "label": "Family", + "width": 0 + }, + { + "fieldname": "generation", + "fieldtype": "Int", + "label": "Generation", + "width": 0 + }, + { + "fieldname": "is_latest_generation", + "fieldtype": "Check", + "label": "Is Latest Generation?", + "width": 0 + }, + { + "fieldname": "size", + "fieldtype": "Data", + "label": "Size", + "width": 0 + }, + { + "fieldname": "size_multiplier", + "fieldtype": "Float", + "label": "Size Multiplier", + "width": 0 + }, + { + "fieldname": "on_demand", + "fieldtype": "Float", + "label": "On-Demand", + "width": 0 + }, + { + "fieldname": "1yr_instance", + "fieldtype": "Float", + "label": "1 Year Instance", + "width": 0 + }, + { + "fieldname": "1yr_compute", + "fieldtype": "Float", + "label": "1 Year Compute", + "width": 0 + }, + { + "fieldname": "3yr_instance", + "fieldtype": "Float", + "label": "3 Year Instance", + "width": 0 + }, + { + "fieldname": "3yr_compute", + "fieldtype": "Float", + "label": "3 Year Compute", + "width": 0 + } + ], + "creation": "2022-09-19 17:12:10.701432", + "disabled": 0, + "docstatus": 0, + "pagetype": "Report", + "filters": [ + { + "fieldname": "cluster", + "fieldtype": "Link", + "label": "Cluster", + "mandatory": 0, + "options": "Cluster", + "wildcard_filter": 0 + }, + { + "fieldname": "instance_family", + "fieldtype": "Select", + "label": "Instance Family", + "mandatory": 0, + "options": "\nGeneral Purpose\nCompute Optimized\nMemory Optimized", + "wildcard_filter": 0 + }, + { + "default": "Intel", + "fieldname": "processor", + "fieldtype": "Select", + "label": "Processor", + "mandatory": 0, + "options": "Intel\nAMD\nGraviton\n", + "wildcard_filter": 0 + }, + { + "fieldname": "latest_generation_only", + "fieldtype": "Check", + "label": "Latest Generation Only", + 
"mandatory": 0, + "wildcard_filter": 0 + } + ], + "idx": 0, + "is_standard": "Yes", + "json": "{}", + "letterhead": null, + "modified": "2023-11-06 19:08:45.728372", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "AWS Instance Pricing", + "owner": "Administrator", + "prepared_report": 0, + "ref_pagetype": "Cluster", + "report_name": "AWS Instance Pricing", + "report_type": "Script Report", + "roles": [ + { + "role": "System Manager" + } + ] +} \ No newline at end of file diff --git a/jcloud/jcloud/report/aws_instance_pricing/aws_instance_pricing.py b/jcloud/jcloud/report/aws_instance_pricing/aws_instance_pricing.py new file mode 100644 index 0000000..af97506 --- /dev/null +++ b/jcloud/jcloud/report/aws_instance_pricing/aws_instance_pricing.py @@ -0,0 +1,210 @@ +# Copyright (c) 2022, JINGROW +# For license information, please see license.txt + +import json + +import boto3 +import jingrow +from jingrow.core.utils import find +from jingrow.utils import cint, flt + + +def execute(filters=None): + jingrow.only_for("System Manager") + data = get_data(filters) + columns = jingrow.get_pg("Report", "AWS Instance Pricing").get_columns() + return columns, data + + +def get_data(filters): + if filters.cluster: + clusters = [filters.cluster] + else: + clusters = jingrow.get_all( + "Cluster", filters={"public": 1, "cloud_provider": "AWS EC2"}, pluck="name" + ) + data = [] + for cluster in clusters: + data.extend(get_cluster_data(filters, cluster)) + return data + + +def get_cluster_data(filters, cluster_name): + cluster = jingrow.get_pg("Cluster", cluster_name) + client = boto3.client( + "pricing", + region_name="ap-south-1", + aws_access_key_id=cluster.aws_access_key_id, + aws_secret_access_key=cluster.get_password("aws_secret_access_key"), + ) + + paginator = client.get_paginator("get_products") + + product_filters = [ + {"Type": "TERM_MATCH", "Field": "regionCode", "Value": cluster.region}, + {"Type": "TERM_MATCH", "Field": "capacitystatus", "Value": "Used"}, + {"Type": "TERM_MATCH", "Field": "currentGeneration", "Value": "Yes"}, + {"Type": "TERM_MATCH", "Field": "tenancy", "Value": "Shared"}, + {"Type": "TERM_MATCH", "Field": "preInstalledSw", "Value": "NA"}, + {"Type": "TERM_MATCH", "Field": "operatingSystem", "Value": "Linux"}, + ] + + if filters.instance_family: + product_filters.append( + { + "Type": "TERM_MATCH", + "Field": "instanceFamily", + "Value": filters.instance_family, + } + ) + + response_iterator = paginator.paginate( + ServiceCode="AmazonEC2", Filters=product_filters, PaginationConfig={"PageSize": 100} + ) + rows = [] + for response in response_iterator: + for item in response["PriceList"]: + product = json.loads(item) + if filters.processor: + if filters.processor not in product["product"]["attributes"]["physicalProcessor"]: + continue + + row = { + "cluster": cluster.name, + "instance_type": product["product"]["attributes"]["instanceType"].split(".")[0], + "instance": product["product"]["attributes"]["instanceType"], + "vcpu": cint(product["product"]["attributes"]["vcpu"], 0), + "memory": flt(product["product"]["attributes"]["memory"][:-4]), + } + for term in product["terms"].get("OnDemand", {}).values(): + row["on_demand"] = ( + flt(list(term["priceDimensions"].values())[0]["pricePerUnit"]["USD"]) * 750 + ) + instance_type = parse_instance_type(row["instance"]) + if not instance_type: + continue + + family, generation, processor, size = instance_type + + row.update( + { + "family": family, + "generation": generation, + "processor": processor, + "size": size, + 
"size_multiplier": parse_size_multiplier(size), + } + ) + rows.append(row) + + latest_generation = max(row["generation"] for row in rows) + for row in rows: + if row["generation"] == latest_generation: + row["is_latest_generation"] = True + + if filters.latest_generation_only: + rows = [row for row in rows if row.get("is_latest_generation")] + + client = boto3.client( + "savingsplans", + aws_access_key_id=cluster.aws_access_key_id, + aws_secret_access_key=cluster.get_password("aws_secret_access_key"), + ) + + response = client.describe_savings_plans_offering_rates( + savingsPlanPaymentOptions=["No Upfront"], + savingsPlanTypes=["Compute", "EC2Instance"], + products=["EC2"], + serviceCodes=["AmazonEC2"], + filters=[ + {"name": "tenancy", "values": ["shared"]}, + {"name": "region", "values": [cluster.region]}, + {"name": "instanceType", "values": [row["instance"] for row in rows]}, + {"name": "productDescription", "values": ["Linux/UNIX"]}, + ], + ) + + for rate in response["searchResults"]: + if "BoxUsage" in rate["usageType"]: + instance = find(rate["properties"], lambda x: x["name"] == "instanceType")["value"] + row = find(rows, lambda x: x["instance"] == instance) + years = rate["savingsPlanOffering"]["durationSeconds"] // 31536000 + plan = ( + "compute" if rate["savingsPlanOffering"]["planType"] == "Compute" else "instance" + ) + row[f"{years}yr_{plan}"] = flt(rate["rate"]) * 750 + + rows.sort(key=lambda x: (x["instance_type"], x["vcpu"], x["memory"])) + return rows + + +FAMILIES = [ + "c", + "d", + "f", + "g", + "hpc", + "inf", + "i", + "mac", + "m", + "p", + "r", + "trn", + "t", + "u", + "vt", + "x", +] +PREFERRED_FAMILIES = [ + "c", + "m", + "r", +] +PROCESSORS = ["a", "g", "i"] + + +def parse_instance_type(instance_type): + instance_type, size = instance_type.split(".") + # Skip metal instances + if "metal" in size: + return + + family = None + for ff in FAMILIES: + if instance_type.startswith(ff): + family = ff + break + + # Ignore other instance families + if family not in PREFERRED_FAMILIES: + return + + rest = instance_type.removeprefix(family) + generation = int(rest[0]) + rest = rest[1:] + + # If processor isn't mentioned, assume it's an Intel + if rest and rest[0] in PROCESSORS: + processor = rest[0] + rest = rest[1:] + else: + processor = "i" + + if rest: + return + + return family, generation, processor, size + + +def parse_size_multiplier(size): + SIZES = { + "medium": 1 / 4, + "large": 1 / 2, + "xlarge": 1, + } + if size in SIZES: + return SIZES[size] + else: + size = size.removesuffix("xlarge") + return float(size) diff --git a/jcloud/jcloud/report/aws_rightsizing_recommendation/__init__.py b/jcloud/jcloud/report/aws_rightsizing_recommendation/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/report/aws_rightsizing_recommendation/aws_rightsizing_recommendation.js b/jcloud/jcloud/report/aws_rightsizing_recommendation/aws_rightsizing_recommendation.js new file mode 100644 index 0000000..4f589c8 --- /dev/null +++ b/jcloud/jcloud/report/aws_rightsizing_recommendation/aws_rightsizing_recommendation.js @@ -0,0 +1,23 @@ +// Copyright (c) 2024, JINGROW +// For license information, please see license.txt + +jingrow.query_reports['AWS Rightsizing Recommendation'] = { + onload: function (report) { + report.page.add_button(__('Rightsize'), () => { + jingrow + .call( + 'jcloud.jcloud.report.aws_rightsizing_recommendation.aws_rightsizing_recommendation.rightsize', + { + filters: { + resource_type: + 
jingrow.query_report.get_filter_value('resource_type'), + action_type: jingrow.query_report.get_filter_value('action_type'), + }, + }, + ) + .then((r) => { + jingrow.query_report.refresh(); + }); + }); + }, +}; diff --git a/jcloud/jcloud/report/aws_rightsizing_recommendation/aws_rightsizing_recommendation.json b/jcloud/jcloud/report/aws_rightsizing_recommendation/aws_rightsizing_recommendation.json new file mode 100644 index 0000000..449abf0 --- /dev/null +++ b/jcloud/jcloud/report/aws_rightsizing_recommendation/aws_rightsizing_recommendation.json @@ -0,0 +1,175 @@ +{ + "add_total_row": 1, + "columns": [ + { + "fieldname": "virtual_machine", + "fieldtype": "Link", + "label": "Virtual Machine", + "options": "Virtual Machine", + "width": 0 + }, + { + "fieldname": "resource_type", + "fieldtype": "Data", + "label": "Resource Type", + "options": "", + "width": 0 + }, + { + "fieldname": "estimated_cost", + "fieldtype": "Currency", + "label": "Estimated Cost", + "options": "currency", + "width": 0 + }, + { + "fieldname": "estimated_savings", + "fieldtype": "Currency", + "label": "Estimated Savings", + "options": "currency", + "width": 0 + }, + { + "fieldname": "estimated_savings_percentage", + "fieldtype": "Int", + "label": "Estimated Savings Percentage", + "width": 0 + }, + { + "fieldname": "current_iops", + "fieldtype": "Int", + "label": "Current IOPS", + "width": 0 + }, + { + "fieldname": "current_throughput", + "fieldtype": "Int", + "label": "Current Throughput", + "width": 0 + }, + { + "fieldname": "recommended_iops", + "fieldtype": "Int", + "label": "Recommended IOPS", + "width": 0 + }, + { + "fieldname": "recommended_throughput", + "fieldtype": "Int", + "label": "Recommended Throughput", + "width": 0 + }, + { + "fieldname": "current_instance_type", + "fieldtype": "Data", + "label": "Current Instance Type", + "width": 0 + }, + { + "fieldname": "recommended_instance_type", + "fieldtype": "Data", + "label": "Recommended Instance Type", + "width": 0 + }, + { + "fieldname": "current_usage", + "fieldtype": "Data", + "label": "Current Usage", + "width": 0 + }, + { + "fieldname": "recommended_usage", + "fieldtype": "Data", + "label": "Recommended Usage", + "width": 0 + }, + { + "fieldname": "volume_id", + "fieldtype": "Data", + "label": "Volume ID", + "width": 0 + }, + { + "fieldname": "region", + "fieldtype": "Data", + "label": "Region", + "width": 0 + }, + { + "fieldname": "server_type", + "fieldtype": "Link", + "label": "Server Type", + "options": "PageType", + "width": 0 + }, + { + "fieldname": "server", + "fieldtype": "Dynamic Link", + "label": "Server", + "options": "server_type", + "width": 0 + }, + { + "fieldname": "team", + "fieldtype": "Link", + "label": "Team", + "options": "Team", + "width": 0 + }, + { + "fieldname": "public", + "fieldtype": "Check", + "label": "Public", + "width": 0 + }, + { + "fieldname": "currency", + "fieldtype": "Link", + "label": "Currency", + "options": "Currency", + "width": 0 + } + ], + "creation": "2024-09-10 15:22:38.545636", + "disabled": 0, + "docstatus": 0, + "pagetype": "Report", + "filters": [ + { + "default": "Compute", + "fieldname": "resource_type", + "fieldtype": "Select", + "label": "Resource Type", + "mandatory": 0, + "options": "\nCompute\nStorage", + "wildcard_filter": 0 + }, + { + "default": "Rightsize", + "fieldname": "action_type", + "fieldtype": "Select", + "label": "Action Type", + "mandatory": 1, + "options": "Rightsize\nMigrate to Graviton", + "wildcard_filter": 0 + } + ], + "idx": 0, + "is_standard": "Yes", + "json": "", + "letterhead": 
null, + "modified": "2024-09-17 15:45:42.606684", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "AWS Rightsizing Recommendation", + "owner": "Administrator", + "prepared_report": 0, + "ref_pagetype": "Virtual Machine", + "report_name": "AWS Rightsizing Recommendation", + "report_type": "Script Report", + "roles": [ + { + "role": "System Manager" + } + ] +} \ No newline at end of file diff --git a/jcloud/jcloud/report/aws_rightsizing_recommendation/aws_rightsizing_recommendation.py b/jcloud/jcloud/report/aws_rightsizing_recommendation/aws_rightsizing_recommendation.py new file mode 100644 index 0000000..7565fe4 --- /dev/null +++ b/jcloud/jcloud/report/aws_rightsizing_recommendation/aws_rightsizing_recommendation.py @@ -0,0 +1,170 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt +import json + +import boto3 +import jingrow +from jingrow.core.utils import find +from jingrow.utils import cint + + +def execute(filters=None): + jingrow.only_for("System Manager") + columns = jingrow.get_pg("Report", "AWS Rightsizing Recommendation").get_columns() + resource_type = filters.get("resource_type") + columns_to_remove = [] + if resource_type == "Compute": + columns_to_remove = [ + "volume_id", + "current_iops", + "recommended_iops", + "current_throughput", + "recommended_throughput", + ] + elif resource_type == "Storage": + columns_to_remove = ["current_instance_type", "recommended_instance_type"] + columns = [column for column in columns if column.fieldname not in columns_to_remove] + data = get_data(resource_type, filters.get("action_type")) + return columns, data + + +def get_data(resource_type, action_type): # noqa: C901 + settings = jingrow.get_single("Jcloud Settings") + client = boto3.client( + "cost-optimization-hub", + region_name="us-east-1", + aws_access_key_id=settings.aws_access_key_id, + aws_secret_access_key=settings.get_password("aws_secret_access_key"), + ) + + resource_types = { + "Compute": ["Ec2Instance"], + "Storage": ["EbsVolume"], + }.get(resource_type, ["Ec2Instance", "EbsVolume"]) + + action_types = { + "Rightsize": ["Rightsize"], + "Migrate to Graviton": ["MigrateToGraviton"], + }.get(action_type) + + paginator = client.get_paginator("list_recommendations") + response_iterator = paginator.paginate( + filter={ + "resourceTypes": resource_types, + "actionTypes": action_types, + }, + ) + + results = [] + for response in response_iterator: + for row in response["items"]: + resource_type = { + "Ec2Instance": "Virtual Machine", + "EbsVolume": "Virtual Machine Volume", + }[row["currentResourceType"]] + + if resource_type == "Virtual Machine": + virtual_machine = jingrow.get_all( + resource_type, {"instance_id": row["resourceId"]}, pluck="name" + ) + elif resource_type == "Virtual Machine Volume": + virtual_machine = jingrow.get_all( + resource_type, {"volume_id": row["resourceId"]}, pluck="parent" + ) + + if not virtual_machine: + # This resource is not managed by Jcloud. 
Ignore + continue + virtual_machine = virtual_machine[0] + + server_type = { + "f": "Server", + "m": "Database Server", + "n": "Proxy Server", + }[jingrow.db.get_value("Virtual Machine", virtual_machine, "series")] + + server = jingrow.db.get_value( + server_type, + {"virtual_machine": virtual_machine}, + ["name", "team", "public"], + as_dict=True, + ) + + if not server: + continue + + data = { + "resource_type": resource_type, + "virtual_machine": virtual_machine, + "server_type": server_type, + "server": server.name, + "team": server.team, + "public": server.public, + "region": row["region"], + "estimated_cost": row["estimatedMonthlyCost"], + "estimated_savings": row["estimatedMonthlySavings"], + "estimated_savings_percentage": row["estimatedSavingsPercentage"], + "current_usage": row["currentResourceSummary"], + "recommended_usage": row["recommendedResourceSummary"], + "currency": "USD", + } + + if resource_type == "Virtual Machine": + data["current_instance_type"] = row["currentResourceSummary"] + data["recommended_instance_type"] = row["recommendedResourceSummary"] + elif resource_type == "Virtual Machine Volume": + data["volume_id"] = row["resourceId"] + # Splits "99.0 GB Storage/3000.0 IOPS/125.0 MB/s Throughput" into + # ["99.0 GB Storage", "3000.0 IOPS", "125.0 MB", "/s Throughput"] + _, iops, throughput, _ = row["currentResourceSummary"].split("/") + data["current_iops"] = cint(iops.split()[0]) + data["current_throughput"] = cint(throughput.split()[0]) + + _, iops, throughput, _ = row["recommendedResourceSummary"].split("/") + data["recommended_iops"] = cint(iops.split()[0]) + data["recommended_throughput"] = cint(throughput.split()[0]) + + results.append(data) + results.sort(key=lambda x: x["estimated_savings"], reverse=True) + return results + + +@jingrow.whitelist() +def rightsize(filters): + filters = jingrow._dict(json.loads(filters)) + if filters.resource_type == "Storage": + jingrow.enqueue( + "jcloud.jcloud.report.aws_rightsizing_recommendation.aws_rightsizing_recommendation.rightsize_volumes", + filters=filters, + queue="long", + ) + + +def rightsize_volumes(filters): + for row in execute(filters)[1]: + row = jingrow._dict(row) + + machine = jingrow.get_pg("Virtual Machine", row.virtual_machine) + volume = find(machine.volumes, lambda v: v.volume_id == row.volume_id) + + if not volume: + # This volume is not managed by Jcloud. Ignore + continue + + # Always downgrade performance + iops = min(row.recommended_iops, volume.iops) + throughput = min(row.recommended_throughput, volume.throughput) + + # Already at recommended performance. 
Ignore + if volume.iops == iops and volume.throughput == throughput: + continue + + try: + machine.update_ebs_performance(volume.volume_id, iops, throughput) + machine.add_comment( + "Comment", + f"Rightsized EBS volume {volume.volume_id} from {volume.iops} IOPS and {volume.throughput} MB/s to {iops} IOPS and {throughput} MB/s", + ) + jingrow.db.commit() + except Exception: + jingrow.db.rollback() diff --git a/jcloud/jcloud/report/bench_memory_limits/__init__.py b/jcloud/jcloud/report/bench_memory_limits/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/report/bench_memory_limits/bench_memory_limits.js b/jcloud/jcloud/report/bench_memory_limits/bench_memory_limits.js new file mode 100644 index 0000000..24e6214 --- /dev/null +++ b/jcloud/jcloud/report/bench_memory_limits/bench_memory_limits.js @@ -0,0 +1,4 @@ +// Copyright (c) 2023, JINGROW +// For license information, please see license.txt + +jingrow.query_reports['Bench Memory Limits'] = {}; diff --git a/jcloud/jcloud/report/bench_memory_limits/bench_memory_limits.json b/jcloud/jcloud/report/bench_memory_limits/bench_memory_limits.json new file mode 100644 index 0000000..80078ef --- /dev/null +++ b/jcloud/jcloud/report/bench_memory_limits/bench_memory_limits.json @@ -0,0 +1,47 @@ +{ + "add_total_row": 1, + "columns": [], + "creation": "2023-10-17 19:23:39.391050", + "disabled": 0, + "docstatus": 0, + "pagetype": "Report", + "filters": [ + { + "fieldname": "server", + "fieldtype": "Link", + "label": "Server", + "mandatory": 1, + "options": "Server", + "wildcard_filter": 0 + } + ], + "idx": 0, + "is_standard": "Yes", + "letter_head": "", + "letterhead": null, + "modified": "2023-10-20 11:26:37.053554", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Bench Memory Limits", + "owner": "Administrator", + "prepared_report": 0, + "query": "\t\tbench_workloads = {}\n\t\tbenches = jingrow.get_all(\n\t\t\t\"Bench\",\n\t\t\tfilters={\"server\": self.name, \"status\": \"Active\", \"auto_scale_workers\": True},\n\t\t\tpluck=\"name\",\n\t\t)\n\t\tfor bench_name in benches:\n\t\t\tbench = jingrow.get_pg(\"Bench\", bench_name)\n\t\t\tbench_workloads[bench_name] = bench.work_load\n\n\t\ttotal_workload = sum(bench_workloads.values())\n", + "ref_pagetype": "Bench", + "report_name": "Bench Memory Limits", + "report_script": "", + "report_type": "Script Report", + "roles": [ + { + "role": "Jcloud Admin" + }, + { + "role": "System Manager" + }, + { + "role": "Jcloud Member" + }, + { + "role": "Site Manager" + } + ] +} \ No newline at end of file diff --git a/jcloud/jcloud/report/bench_memory_limits/bench_memory_limits.py b/jcloud/jcloud/report/bench_memory_limits/bench_memory_limits.py new file mode 100644 index 0000000..e86a8f7 --- /dev/null +++ b/jcloud/jcloud/report/bench_memory_limits/bench_memory_limits.py @@ -0,0 +1,119 @@ +# Copyright (c) 2023, JINGROW +# For license information, please see license.txt + +import jingrow + +from jcloud.api.server import prometheus_query + + +def execute(filters=None): + columns = [ + { + "fieldname": "bench", + "label": jingrow._("Bench"), + "fieldtype": "Link", + "options": "Bench", + "width": 200, + }, + { + "fieldname": "workload", + "label": jingrow._("Workload"), + "fieldtype": "Data", + "width": 200, + }, + { + "fieldname": "allocated_ram", + "label": jingrow._("Allocated RAM (based on current workers)"), + "fieldtype": "Float", + "width": 200, + }, + { + "fieldname": "5m_avg_server_ram", + "label": jingrow._("5m average RAM"), + "fieldtype": "Float", + "width": 
200, + }, + { + "fieldname": "6h_avg_server_ram", + "label": jingrow._("6h average RAM"), + "fieldtype": "Float", + "width": 200, + }, + { + "fieldname": "max_server_ram", + "label": jingrow._("6h max RAM"), + "fieldtype": "Float", + "width": 200, + }, + ] + + return columns, get_data(filters) + + +def get_data(filters): + server_name = filters.get("server") + benches = jingrow.get_all( + "Bench", + filters={ + "server": server_name, + "status": "Active", + "auto_scale_workers": True, + }, + pluck="name", + ) + server = jingrow.get_pg("Server", server_name) + result = [] + for bench_name in benches: + bench = jingrow.get_pg("Bench", bench_name) + + gn, bg = bench.allocate_workers( + server.workload, server.max_gunicorn_workers, server.max_bg_workers + ) + result.append( + { + "bench": bench_name, + "workload": bench.workload, + "allocated_ram": gn * 150 + bg * (3 * 80), + } + ) + + prom_res = prometheus_query( + f'sum(avg_over_time(container_memory_rss{{instance="{server_name}", name=~".+"}}[5m])) by (name)', + lambda x: x, + "Asia/Kolkata", + 60, + 60, + )["datasets"] + for row in result: + for prom_row in prom_res: + if row["bench"] == prom_row["name"]["name"]: + row["5m_avg_server_ram"] = prom_row["values"][-1] / 1024 / 1024 + break + + prom_res = prometheus_query( + f'sum(avg_over_time(container_memory_rss{{instance="{server_name}", name=~".+"}}[6h])) by (name)', + lambda x: x, + "Asia/Kolkata", + 6 * 3600, + 60, + )["datasets"] + for row in result: + for prom_row in prom_res: + if row["bench"] == prom_row["name"]["name"]: + row["6h_avg_server_ram"] = prom_row["values"][-1] / 1024 / 1024 + break + + prom_res = prometheus_query( + f'sum(max_over_time(container_memory_rss{{instance="{server_name}", name=~".+"}}[6h])) by (name)', + lambda x: x, + "Asia/Kolkata", + 6 * 3600, + 60, + )["datasets"] + for row in result: + for prom_row in prom_res: + if row["bench"] == prom_row["name"]["name"]: + row["max_server_ram"] = prom_row["values"][-1] / 1024 / 1024 + break + + return result diff --git a/jcloud/jcloud/report/binary_log_browser/__init__.py b/jcloud/jcloud/report/binary_log_browser/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/report/binary_log_browser/binary_log_browser.js b/jcloud/jcloud/report/binary_log_browser/binary_log_browser.js new file mode 100644 index 0000000..85243e5 --- /dev/null +++ b/jcloud/jcloud/report/binary_log_browser/binary_log_browser.js @@ -0,0 +1,45 @@ +// Copyright (c) 2016, JINGROW +// For license information, please see license.txt +/* eslint-disable */ + +jingrow.query_reports['Binary Log Browser'] = { + filters: [ + { + fieldname: 'site', + label: __('Site'), + fieldtype: 'Link', + options: 'Site', + reqd: 1, + }, + { + fieldname: 'start_datetime', + label: __('Start From'), + fieldtype: 'Datetime', + reqd: 1, + }, + { + fieldname: 'stop_datetime', + label: __('End At'), + fieldtype: 'Datetime', + reqd: 1, + }, + { + fieldname: 'pattern', + label: __('Search Pattern'), + fieldtype: 'Data', + default: '.*', + reqd: 1, + }, + { + fieldname: 'max_lines', + label: __('Max Lines'), + fieldtype: 'Int', + default: 4000, + }, + { + fieldname: 'format_queries', + label: __('Format Queries'), + fieldtype: 'Check', + }, + ], +}; diff --git a/jcloud/jcloud/report/binary_log_browser/binary_log_browser.json b/jcloud/jcloud/report/binary_log_browser/binary_log_browser.json new file mode 100644 index 0000000..be4d1d6 --- /dev/null +++ b/jcloud/jcloud/report/binary_log_browser/binary_log_browser.json @@ -0,0 +1,29 @@ +{ + "add_total_row": 
0, + "columns": [], + "creation": "2021-10-13 16:36:50.690799", + "disable_prepared_report": 0, + "disabled": 0, + "docstatus": 0, + "pagetype": "Report", + "filters": [], + "idx": 0, + "is_standard": "Yes", + "modified": "2022-11-08 17:10:27.848613", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Binary Log Browser", + "owner": "Administrator", + "prepared_report": 0, + "ref_pagetype": "Site", + "report_name": "Binary Log Browser", + "report_type": "Script Report", + "roles": [ + { + "role": "System Manager" + }, + { + "role": "Site Manager" + } + ] +} \ No newline at end of file diff --git a/jcloud/jcloud/report/binary_log_browser/binary_log_browser.py b/jcloud/jcloud/report/binary_log_browser/binary_log_browser.py new file mode 100644 index 0000000..5432580 --- /dev/null +++ b/jcloud/jcloud/report/binary_log_browser/binary_log_browser.py @@ -0,0 +1,111 @@ +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt +from __future__ import annotations + +import jingrow +import pytz +import sqlparse +from jingrow.core.pagetype.access_log.access_log import make_access_log +from jingrow.utils import ( + get_datetime, + get_datetime_str, + get_system_timezone, +) + +from jcloud.agent import Agent + +try: + from jingrow.utils import convert_utc_to_user_timezone +except ImportError: + from jingrow.utils import convert_utc_to_system_timezone as convert_utc_to_user_timezone + + +def execute(filters=None): + jingrow.only_for(["System Manager", "Site Manager"]) + filters.database = jingrow.get_pg("Site", filters.site).fetch_info()["config"]["db_name"] + + make_access_log( + pagetype="Site", + document=filters.site, + file_type="Binary Log", + report_name="Binary Log Browser", + filters=filters, + ) + + data = get_data(filters) + + columns = [ + { + "fieldname": "timestamp", + "label": jingrow._("Timestamp"), + "fieldtype": "Datetime", + "width": 160, + }, + { + "fieldname": "query", + "label": jingrow._("Query"), + "fieldtype": "Data", + "width": 1200, + }, + ] + return columns, data + + +def get_data(filters): + server = jingrow.db.get_value("Site", filters.site, "server") + database_server = jingrow.db.get_value("Server", server, "database_server") + agent = Agent(database_server, "Database Server") + + data = { + "database": filters.database, + "start_datetime": convert_user_timezone_to_utc(filters.start_datetime), + "stop_datetime": convert_user_timezone_to_utc(filters.stop_datetime), + "search_pattern": filters.pattern, + "max_lines": filters.max_lines or 4000, + } + + files = agent.get("database/binary/logs") + + files_in_timespan = get_files_in_timespan(files, data["start_datetime"], data["stop_datetime"]) + + results = [] + for file in files_in_timespan: + rows = agent.post(f"database/binary/logs/{file}", data=data) + for row in rows or []: + if filters.format_queries: + row["query"] = sqlparse.format(row["query"].strip(), keyword_case="upper", reindent=True) + row["timestamp"] = get_datetime_str(convert_utc_to_user_timezone(get_datetime(row["timestamp"]))) + results.append(row) + + if len(results) > data["max_lines"]: + return results + + return results + + +def get_files_in_timespan(files: list[dict[str, str]], start: str, stop: str) -> list[str]: + files.sort(key=lambda f: f["modified"]) + + files_in_timespan = [] + + for file in files: + if file["modified"] > stop: + # This is last file that captures timespan + # Include it and dont process any further. 
+ files_in_timespan.append(file["name"]) + break + + if start > file["modified"]: + # Modified timestamp is *usually* last time when log file was touched, + # i.e. last query logged on file + continue + + files_in_timespan.append(file["name"]) + + return files_in_timespan + + +def convert_user_timezone_to_utc(datetime): + timezone = pytz.timezone(get_system_timezone()) + datetime = get_datetime(datetime) + return get_datetime_str(timezone.localize(datetime).astimezone(pytz.utc)) diff --git a/jcloud/jcloud/report/mariadb_deadlock_browser/__init__.py b/jcloud/jcloud/report/mariadb_deadlock_browser/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/report/mariadb_deadlock_browser/mariadb_deadlock_browser.js b/jcloud/jcloud/report/mariadb_deadlock_browser/mariadb_deadlock_browser.js new file mode 100644 index 0000000..f30393f --- /dev/null +++ b/jcloud/jcloud/report/mariadb_deadlock_browser/mariadb_deadlock_browser.js @@ -0,0 +1,33 @@ +// Copyright (c) 2023, JINGROW +// For license information, please see license.txt +/* eslint-disable */ + +jingrow.query_reports['MariaDB Deadlock Browser'] = { + filters: [ + { + fieldname: 'site', + label: __('Site'), + fieldtype: 'Link', + options: 'Site', + reqd: 1, + }, + { + fieldname: 'start_datetime', + label: __('Start From'), + fieldtype: 'Datetime', + reqd: 1, + }, + { + fieldname: 'stop_datetime', + label: __('End At'), + fieldtype: 'Datetime', + reqd: 1, + }, + { + fieldname: 'max_log_size', + label: __('Max Log Size'), + fieldtype: 'Int', + default: 500, + }, + ], +}; diff --git a/jcloud/jcloud/report/mariadb_deadlock_browser/mariadb_deadlock_browser.json b/jcloud/jcloud/report/mariadb_deadlock_browser/mariadb_deadlock_browser.json new file mode 100644 index 0000000..99245c5 --- /dev/null +++ b/jcloud/jcloud/report/mariadb_deadlock_browser/mariadb_deadlock_browser.json @@ -0,0 +1,28 @@ +{ + "add_total_row": 0, + "columns": [], + "creation": "2023-05-18 21:37:22.195557", + "disabled": 0, + "docstatus": 0, + "pagetype": "Report", + "filters": [], + "idx": 0, + "is_standard": "Yes", + "modified": "2023-05-18 21:38:03.790092", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "MariaDB Deadlock Browser", + "owner": "Administrator", + "prepared_report": 0, + "ref_pagetype": "Site", + "report_name": "MariaDB Deadlock Browser", + "report_type": "Script Report", + "roles": [ + { + "role": "System Manager" + }, + { + "role": "Site Manager" + } + ] +} diff --git a/jcloud/jcloud/report/mariadb_deadlock_browser/mariadb_deadlock_browser.py b/jcloud/jcloud/report/mariadb_deadlock_browser/mariadb_deadlock_browser.py new file mode 100644 index 0000000..b2b3a0e --- /dev/null +++ b/jcloud/jcloud/report/mariadb_deadlock_browser/mariadb_deadlock_browser.py @@ -0,0 +1,296 @@ +# Copyright (c) 2023, JINGROW +# For license information, please see license.txt + +from __future__ import annotations + +import contextlib +import re +from typing import TYPE_CHECKING + +import jingrow +from elasticsearch import Elasticsearch +from jingrow.core.pagetype.access_log.access_log import make_access_log +from jingrow.utils import get_datetime +from jingrow.utils.password import get_decrypted_password + +if TYPE_CHECKING: + from datetime import datetime + + +def fetch_mariadb_error_logs( + site: str, start_datetime: datetime, end_datetime: datetime, log_size: int +) -> list[tuple[str, str]]: + server = jingrow.get_value("Site", site, "server") + database_server = jingrow.get_value("Server", server, "database_server") + log_server = 
jingrow.db.get_single_value("Jcloud Settings", "log_server") + if not log_server: + return [] + + query = { + "bool": { + "filter": [ + { + "bool": { + "filter": [ + { + "bool": { + "minimum_should_match": 1, + "should": [{"term": {"host.name": {"value": database_server}}}], + } + }, + { + "bool": { + "minimum_should_match": 1, + "should": [{"term": {"event.dataset": {"value": "mysql.error"}}}], + } + }, + { + "bool": { + "minimum_should_match": 1, + "should": [{"term": {"log.level": {"value": "Note"}}}], + } + }, + { + "bool": { + "minimum_should_match": 1, + "should": [{"match_phrase": {"message": "InnoDB:"}}], + } + }, + ] + } + }, + { + "range": { + "@timestamp": { + "gte": int(start_datetime.timestamp() * 1000), + "lte": int(end_datetime.timestamp() * 1000), + } + } + }, + ], + "must": [], + "must_not": [], + "should": [], + } + } + + url = f"https://{log_server}/elasticsearch/" + password = get_decrypted_password("Log Server", log_server, "kibana_password") + client = Elasticsearch(url, basic_auth=("jingrow", password)) + + data = client.search( + size=log_size, + index="filebeat-*", + query=query, + ) + + if not data: + return [] + + # prepare logs + log_map = {} + log_timestamp = {} + + for record in data.get("hits", {}).get("hits", []): + if record["_source"]["mysql"] and record["_source"]["mysql"]["thread_id"]: + thread_id = record["_source"]["mysql"]["thread_id"] + if thread_id not in log_map: + log_map[thread_id] = [] + log_timestamp[thread_id] = record["_source"]["@timestamp"] + # Strip `InnoDB: ` -> 8 characters + log_map[thread_id].append((record["_source"]["log"]["offset"], record["_source"]["message"][8:])) + + # merge logs + logs = [] # list of tuples (timestamp, log) + + for thread_id in log_map: + # sort in order of offset + records = sorted(log_map[thread_id], key=lambda x: x[0]) + records = [x[1] for x in records] + logs.append((log_timestamp[thread_id], "".join(records))) + + return logs + + +# Regex for parsing database logs +# *** (1) TRANSACTION: +transaction_pattern = re.compile(r"^\*\*\* \(\d+\) TRANSACTION:") +# TRANSACTION 988653582, ACTIVE 6 sec starting index read +transaction_id_pattern = re.compile(r"TRANSACTION (\d+),") +query_pattern = re.compile(r"MariaDB thread id .*\n([\s\S]*)\*\*\* WAITING FOR THIS LOCK TO BE GRANTED") +actual_transaction_pattern = re.compile(r"\*\*\* WAITING FOR THIS LOCK TO BE GRANTED:\nRECORD LOCKS (.*)\n") +conflicted_transaction_pattern = re.compile(r"\*\*\* CONFLICTING WITH:\nRECORD LOCKS (.*)\n") +trx_id_pattern = re.compile(r"trx id (\d+)") +db_table_pattern = re.compile(r"table `([^`]+)`.`([^`]+)`") + + +class DatabaseTransactionLog: + @staticmethod + def parse(data: str, database: str): + transaction_info = actual_transaction_pattern.search(data).group(1) + found_database = db_table_pattern.search(transaction_info).group(1) + if database != found_database: + return None + + return DatabaseTransactionLog(data) + + def __init__(self, data: str): + self.transaction_id = transaction_id_pattern.search(data).group(1) + actual_transaction_info = actual_transaction_pattern.search(data).group(1) + db_table_info = db_table_pattern.search(actual_transaction_info) + self.database = db_table_info.group(1) + self.table = db_table_info.group(2) + self.query = query_pattern.search(data).group(1) + + conflicted_transaction_info = conflicted_transaction_pattern.search(data).group(1) + self.conflicted_transaction_id = trx_id_pattern.search(conflicted_transaction_info).group(1) + conflicted_db_table = 
db_table_pattern.search(conflicted_transaction_info) + self.conflicted_table = conflicted_db_table.group(2) + + +def parse_log(log: str, database: str) -> list[DatabaseTransactionLog]: + log_lines = log.split("\n") + log_lines = [line.strip() for line in log_lines] + log_lines = [line for line in log_lines if line != ""] + transactions_content = [] + + started_transaction_index = None + for index, line in enumerate(log_lines): + if transaction_pattern.match(line): + if started_transaction_index is not None: + transactions_content.append("\n".join(log_lines[started_transaction_index:index])) + started_transaction_index = index + + if started_transaction_index is not None: + transactions_content.append("\n".join(log_lines[started_transaction_index:])) + + transactions = [] + for transaction_content in transactions_content: + with contextlib.suppress(Exception): + trx = DatabaseTransactionLog.parse(transaction_content, database) + if trx is not None: + transactions.append(trx) + + return transactions + + +def deadlock_summary(transactions: list[DatabaseTransactionLog]) -> list[dict]: + transaction_map: dict[str, DatabaseTransactionLog] = {} + for transaction in transactions: + transaction_map[transaction.transaction_id] = transaction + + deadlock_transaction_ids = {} + + for transaction in transactions: + # usually if there is a deadlock, there will be two records + # one record for deadlock of query A due to query B + # and another record for deadlock of query B due to query A + # so, we want to record only one instance of deadlock + if ( + transaction.conflicted_transaction_id + and ( + transaction.conflicted_transaction_id not in deadlock_transaction_ids + or deadlock_transaction_ids[transaction.conflicted_transaction_id] + != transaction.transaction_id + ) + and transaction.transaction_id != transaction.conflicted_transaction_id + ): + deadlock_transaction_ids[transaction.transaction_id] = transaction.conflicted_transaction_id + + deadlock_infos = [] + for transaction_id in deadlock_transaction_ids: + if transaction_id not in transaction_map: + continue + if transaction.conflicted_transaction_id not in transaction_map: + continue + transaction = transaction_map[transaction_id] + conflicted_transaction = transaction_map[transaction.conflicted_transaction_id] + deadlock_infos.append( + { + "txn_id": transaction.transaction_id, + "table": transaction.table, + "conflicted_txn_id": transaction.conflicted_transaction_id, + "conflicted_table": transaction.conflicted_table, + "query": transaction.query, + "conflicted_query": conflicted_transaction.query, + } + ) + return deadlock_infos + + +# Report +COLUMNS = [ + { + "fieldname": "timestamp", + "label": "Timestamp", + "fieldtype": "Datetime", + "width": 160, + }, + { + "fieldname": "table", + "label": "Table", + "fieldtype": "Data", + "width": 180, + }, + { + "fieldname": "transaction_id", + "label": "Transaction", + "fieldtype": "Data", + "width": 120, + }, + { + "fieldname": "query", + "label": "Query", + "fieldtype": "Data", + "width": 1400, + }, +] + + +def execute(filters=None): + jingrow.only_for(["System Manager", "Site Manager", "Jcloud Admin", "Jcloud Member"]) + filters.database = jingrow.db.get_value("Site", filters.site, "database_name") + if not filters.database: + jingrow.throw( + f"Database name not found for site {filters.site}\nRun `Sync Info` from Site pagetype actions to set the database name.\nThen retry again." 
+ ) + + make_access_log( + pagetype="Site", + document=filters.site, + file_type="MariaDB Deadlock Browser", + report_name="MariaDB Deadlock Browser", + filters=filters, + ) + records = fetch_mariadb_error_logs( + filters.site, + get_datetime(filters.start_datetime), + get_datetime(filters.stop_datetime), + filters.max_log_size, + ) + data = [] + + for record in records: + timestamp = record[0] + transactions = parse_log(record[1], filters.database) + summaries = deadlock_summary(transactions) + for summary in summaries: + data.append( + { + "timestamp": timestamp, + "table": summary["table"], + "transaction_id": summary["txn_id"], + "query": summary["query"], + } + ) + data.append( + { + "timestamp": "", + "table": summary["conflicted_table"], + "transaction_id": summary["conflicted_txn_id"], + "query": summary["conflicted_query"], + } + ) + data.append({}) # empty line to separate records + + return COLUMNS, data diff --git a/jcloud/jcloud/report/mariadb_locks_list/__init__.py b/jcloud/jcloud/report/mariadb_locks_list/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/report/mariadb_locks_list/mariadb_locks_list.js b/jcloud/jcloud/report/mariadb_locks_list/mariadb_locks_list.js new file mode 100644 index 0000000..6dc971d --- /dev/null +++ b/jcloud/jcloud/report/mariadb_locks_list/mariadb_locks_list.js @@ -0,0 +1,21 @@ +// Copyright (c) 2024, JINGROW +// For license information, please see license.txt + +jingrow.query_reports['MariaDB Locks List'] = { + after_refresh(report) { + let should_poll = report.get_filter_value('poll'); + if (!should_poll || report.polling_interval) return; + + jingrow.toast( + 'This report will be auto-refreshed every 5 seconds till we find a lock wait.', + ); + + report.polling_interval = setInterval(() => { + if (!report.data.length) { + report.refresh(); + } else { + clearInterval(report.polling_interval); + } + }, 5000); + }, +}; diff --git a/jcloud/jcloud/report/mariadb_locks_list/mariadb_locks_list.json b/jcloud/jcloud/report/mariadb_locks_list/mariadb_locks_list.json new file mode 100644 index 0000000..d33736c --- /dev/null +++ b/jcloud/jcloud/report/mariadb_locks_list/mariadb_locks_list.json @@ -0,0 +1,43 @@ +{ + "add_total_row": 0, + "columns": [], + "creation": "2024-03-24 11:50:26.510957", + "disabled": 0, + "docstatus": 0, + "pagetype": "Report", + "filters": [ + { + "fieldname": "database_server", + "fieldtype": "Link", + "label": "Database Server", + "mandatory": 1, + "options": "Database Server", + "wildcard_filter": 0 + }, + { + "default": "0", + "fieldname": "poll", + "fieldtype": "Check", + "label": "Poll every 5 seconds", + "mandatory": 0, + "wildcard_filter": 0 + } + ], + "idx": 0, + "is_standard": "Yes", + "letterhead": null, + "modified": "2024-03-24 12:46:56.826427", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "MariaDB Locks List", + "owner": "Administrator", + "prepared_report": 0, + "ref_pagetype": "Site", + "report_name": "MariaDB Locks List", + "report_type": "Script Report", + "roles": [ + { + "role": "System Manager" + } + ] +} \ No newline at end of file diff --git a/jcloud/jcloud/report/mariadb_locks_list/mariadb_locks_list.py b/jcloud/jcloud/report/mariadb_locks_list/mariadb_locks_list.py new file mode 100644 index 0000000..21633e0 --- /dev/null +++ b/jcloud/jcloud/report/mariadb_locks_list/mariadb_locks_list.py @@ -0,0 +1,98 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt + +import jingrow + +from jcloud.agent import Agent + + +def 
execute(filters=None): + jingrow.only_for(("System Manager", "Support Team")) + data = get_data(filters) + return get_columns(), data + + +def get_data(filters): + server = jingrow.get_pg("Database Server", filters.database_server) + agent = Agent(server.name, "Database Server") + + data = { + "private_ip": server.private_ip, + "mariadb_root_password": server.get_password("mariadb_root_password"), + } + return agent.post("database/locks", data=data) + + +def get_columns(): + return [ + { + "fieldname": "lock_id", + "label": "Lock ID", + "fieldtype": "Data", + "width": 150, + }, + { + "fieldname": "trx_id", + "label": "Transaction ID", + "fieldtype": "Data", + "width": 70, + }, + { + "fieldname": "trx_query", + "label": "Query", + "fieldtype": "Data", + "width": 500, + }, + { + "fieldname": "lock_mode", + "label": "Lock Mode", + "fieldtype": "Data", + "width": 70, + }, + { + "fieldname": "lock_type", + "label": "Lock Type", + "fieldtype": "Data", + "width": 150, + }, + { + "fieldname": "lock_table", + "label": "Lock Table", + "fieldtype": "Data", + "width": 150, + }, + { + "fieldname": "lock_index", + "label": "Lock Index", + "fieldtype": "Data", + "width": 150, + }, + { + "fieldname": "trx_state", + "label": "Transaction State", + "fieldtype": "Data", + "width": 150, + }, + { + "fieldname": "trx_operation_state", + "label": "Transaction Operation State", + "fieldtype": "Data", + "width": 150, + }, + { + "fieldname": "trx_started", + "label": "Transaction Started At", + "fieldtype": "Data", # Avoid timezones, we only need to compare two txn + "width": 150, + }, + { + "fieldname": "trx_rows_locked", + "label": "Rows Locked", + "fieldtype": "Int", + }, + { + "fieldname": "trx_rows_modified", + "label": "Rows Modified", + "fieldtype": "Int", + }, + ] diff --git a/jcloud/jcloud/report/mariadb_process_list/__init__.py b/jcloud/jcloud/report/mariadb_process_list/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/report/mariadb_process_list/mariadb_process_list.js b/jcloud/jcloud/report/mariadb_process_list/mariadb_process_list.js new file mode 100644 index 0000000..3cfa36e --- /dev/null +++ b/jcloud/jcloud/report/mariadb_process_list/mariadb_process_list.js @@ -0,0 +1,38 @@ +// Copyright (c) 2022, JINGROW +// For license information, please see license.txt +/* eslint-disable */ + +jingrow.query_reports['MariaDB Process List'] = { + onload: function (report) { + report.page.add_button(__('Kill Processes'), () => { + const dialog = new jingrow.ui.Dialog({ + title: __('Kill Processes'), + fields: [ + { + fieldtype: 'Int', + default: 120, + label: __('Kill Processes Running Longer Than (Seconds)'), + fieldname: 'kill_threshold', + }, + ], + }); + + dialog.set_primary_action(__('Kill Processes'), (args) => { + jingrow + .call( + 'jcloud.jcloud.report.mariadb_process_list.mariadb_process_list.kill', + { + database_server: + jingrow.query_report.get_filter_value('database_server'), + kill_threshold: args.kill_threshold, + }, + ) + .then((r) => { + dialog.hide(); + jingrow.query_report.refresh(); + }); + }); + dialog.show(); + }); + }, +}; diff --git a/jcloud/jcloud/report/mariadb_process_list/mariadb_process_list.json b/jcloud/jcloud/report/mariadb_process_list/mariadb_process_list.json new file mode 100644 index 0000000..86a7cf0 --- /dev/null +++ b/jcloud/jcloud/report/mariadb_process_list/mariadb_process_list.json @@ -0,0 +1,38 @@ +{ + "add_total_row": 0, + "columns": [], + "creation": "2022-04-27 21:05:57.049047", + "disable_prepared_report": 0, + "disabled": 0, + 
"docstatus": 0, + "pagetype": "Report", + "filters": [ + { + "fieldname": "database_server", + "fieldtype": "Link", + "label": "Database Server", + "mandatory": 1, + "options": "Database Server", + "wildcard_filter": 0 + } + ], + "idx": 0, + "is_standard": "Yes", + "modified": "2022-11-08 17:10:07.815004", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "MariaDB Process List", + "owner": "Administrator", + "prepared_report": 0, + "ref_pagetype": "Site", + "report_name": "MariaDB Process List", + "report_type": "Script Report", + "roles": [ + { + "role": "System Manager" + }, + { + "role": "Site Manager" + } + ] +} \ No newline at end of file diff --git a/jcloud/jcloud/report/mariadb_process_list/mariadb_process_list.py b/jcloud/jcloud/report/mariadb_process_list/mariadb_process_list.py new file mode 100644 index 0000000..aff7e25 --- /dev/null +++ b/jcloud/jcloud/report/mariadb_process_list/mariadb_process_list.py @@ -0,0 +1,103 @@ +# Copyright (c) 2022, JINGROW +# For license information, please see license.txt + +import jingrow +import sqlparse +from jingrow.utils import cint + +from jcloud.agent import Agent + + +def execute(filters=None): + jingrow.only_for(["System Manager", "Site Manager"]) + + columns = [ + { + "fieldname": "Id", + "label": jingrow._("ID"), + "fieldtype": "Int", + "width": 70, + }, + { + "fieldname": "User", + "label": jingrow._("User"), + "fieldtype": "Data", + "width": 150, + }, + { + "fieldname": "Host", + "label": jingrow._("Host"), + "fieldtype": "Data", + "width": 150, + }, + { + "fieldname": "db", + "label": jingrow._("Database"), + "fieldtype": "Data", + "width": 150, + }, + { + "fieldname": "Command", + "label": jingrow._("Command"), + "fieldtype": "Data", + "width": 150, + }, + { + "fieldname": "Time", + "label": jingrow._("Time"), + "fieldtype": "Int", + "width": 70, + }, + { + "fieldname": "State", + "label": jingrow._("State"), + "fieldtype": "Data", + "width": 150, + }, + { + "fieldname": "Info", + "label": jingrow._("Info"), + "fieldtype": "Data", + "width": 400, + }, + { + "fieldname": "Progress", + "label": jingrow._("Progress"), + "fieldtype": "Float", + "width": 80, + }, + ] + + data = get_data(filters) + return columns, data + + +def get_data(filters): + server = jingrow.get_pg("Database Server", filters.database_server) + agent = Agent(server.name, "Database Server") + + data = { + "private_ip": server.private_ip, + "mariadb_root_password": server.get_password("mariadb_root_password"), + } + rows = agent.post("database/processes", data=data) + + for row in rows: + row["Info"] = sqlparse.format( + (row["Info"] or "").strip(), keyword_case="upper", reindent=True + ) + return rows + + +@jingrow.whitelist() +def kill(database_server, kill_threshold): + jingrow.only_for("System Manager") + server = jingrow.get_pg("Database Server", database_server) + agent = Agent(server.name, "Database Server") + + data = { + "private_ip": server.private_ip, + "mariadb_root_password": server.get_password("mariadb_root_password"), + "kill_threshold": cint(kill_threshold), + } + agent.post("database/processes/kill", data=data) diff --git a/jcloud/jcloud/report/mariadb_slow_queries/__init__.py b/jcloud/jcloud/report/mariadb_slow_queries/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/report/mariadb_slow_queries/mariadb_slow_queries.js b/jcloud/jcloud/report/mariadb_slow_queries/mariadb_slow_queries.js new file mode 100644 index 0000000..725cce8 --- /dev/null +++ 
b/jcloud/jcloud/report/mariadb_slow_queries/mariadb_slow_queries.js @@ -0,0 +1,82 @@ +// Copyright (c) 2021, JINGROW +// For license information, please see license.txt +/* eslint-disable */ + +jingrow.query_reports['MariaDB Slow Queries'] = { + filters: [ + { + fieldname: 'site', + label: __('Site'), + fieldtype: 'Link', + options: 'Site', + reqd: 1, + get_query: function () { + return { + filters: { status: ["!=", "Archived"] }, + }; + }, + }, + { + fieldname: 'start_datetime', + label: __('Start From'), + fieldtype: 'Datetime', + default: jingrow.datetime.add_days(jingrow.datetime.now_datetime(), -1), + reqd: 1, + }, + { + fieldname: 'stop_datetime', + label: __('End At'), + fieldtype: 'Datetime', + default: jingrow.datetime.now_datetime(), + reqd: 1, + }, + { + fieldname: 'normalize_queries', + label: __('Normalize Queries'), + fieldtype: 'Check', + }, + { + fieldname: 'max_lines', + label: __('Max Lines'), + default: 1000, + fieldtype: 'Int', + }, + { + fieldname: 'search_pattern', + label: __('Search Pattern'), + fieldtype: 'Data', + default: '.*', + }, + ], + get_datatable_options(options) { + return Object.assign(options, { + checkboxColumn: true, + }); + }, + + onload(report) { + report.page.add_inner_button(__('Add Selected Indexes'), () => { + let site = report.get_values().site; + let checked_rows = + jingrow.query_report.datatable.rowmanager.getCheckedRows(); + let indexes = checked_rows + .map((i) => jingrow.query_report.data[i]) + .map((row) => row.suggested_index) + .filter(Boolean); + + if (!indexes.length) { + jingrow.throw(__('Please select rows to create indexes')); + } + + jingrow.confirm('Are you sure you want to add these indexes?', () => { + jingrow.xcall( + 'jcloud.jcloud.report.mariadb_slow_queries.mariadb_slow_queries.add_suggested_index', + { + indexes, + name: site, + }, + ); + }); + }); + }, +}; diff --git a/jcloud/jcloud/report/mariadb_slow_queries/mariadb_slow_queries.json b/jcloud/jcloud/report/mariadb_slow_queries/mariadb_slow_queries.json new file mode 100644 index 0000000..7938f54 --- /dev/null +++ b/jcloud/jcloud/report/mariadb_slow_queries/mariadb_slow_queries.json @@ -0,0 +1,30 @@ +{ + "add_total_row": 0, + "columns": [], + "creation": "2024-12-23 11:36:40.301426", + "disabled": 0, + "docstatus": 0, + "pagetype": "Report", + "filters": [], + "idx": 0, + "is_standard": "Yes", + "letterhead": null, + "modified": "2024-12-23 11:36:40.301426", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "MariaDB Slow Queries", + "owner": "Administrator", + "prepared_report": 0, + "ref_pagetype": "Site", + "report_name": "MariaDB Slow Queries", + "report_type": "Script Report", + "roles": [ + { + "role": "System Manager" + }, + { + "role": "Site Manager" + } + ], + "timeout": 0 +} \ No newline at end of file diff --git a/jcloud/jcloud/report/mariadb_slow_queries/mariadb_slow_queries.py b/jcloud/jcloud/report/mariadb_slow_queries/mariadb_slow_queries.py new file mode 100644 index 0000000..3d24caf --- /dev/null +++ b/jcloud/jcloud/report/mariadb_slow_queries/mariadb_slow_queries.py @@ -0,0 +1,192 @@ +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + +from __future__ import annotations + +import re +from collections import defaultdict + +import jingrow +import requests +import sqlparse +from jingrow.core.pagetype.access_log.access_log import make_access_log +from jingrow.utils import convert_utc_to_timezone, get_system_timezone +from jingrow.utils.password import get_decrypted_password + + +def execute(filters=None): + 
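+ # Script report entry point: pulls slow query log entries for the site's database
+ # from the log server (Elasticsearch/filebeat) and, when "Normalize Queries" is
+ # checked, groups identical queries after replacing literals with "?".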
jingrow.only_for(["System Manager", "Site Manager", "Jcloud Admin", "Jcloud Member"]) + filters.database = jingrow.db.get_value("Site", filters.site, "database_name") + + make_access_log( + pagetype="Site", + document=filters.site, + file_type="MariaDB Slow Query", + report_name="MariaDB Slow Queries", + filters=filters, + ) + + columns = [ + { + "fieldname": "timestamp", + "label": jingrow._("Timestamp"), + "fieldtype": "Datetime", + "width": 160, + }, + { + "fieldname": "query", + "label": jingrow._("Query"), + "fieldtype": "Data", + "width": 1200, + }, + { + "fieldname": "duration", + "label": jingrow._("Duration"), + "fieldtype": "Float", + "width": 140, + }, + { + "fieldname": "rows_examined", + "label": jingrow._("Rows Examined"), + "fieldtype": "Int", + "width": 140, + }, + { + "fieldname": "rows_sent", + "label": jingrow._("Rows Sent"), + "fieldtype": "Int", + "width": 140, + }, + ] + + if filters.normalize_queries: + columns = [c for c in columns if c["fieldname"] not in ("timestamp",)] + columns.append( + { + "fieldname": "count", + "label": jingrow._("Count"), + "fieldtype": "Int", + }, + ) + columns.append( + { + "fieldname": "example", + "label": jingrow._("Example Query"), + "fieldtype": "Data", + "width": 1200, + }, + ) + + data = get_data(filters) + return columns, data + + +def get_data(filters): + from jcloud.utils import convert_user_timezone_to_utc + + rows = get_slow_query_logs( + filters.database, + convert_user_timezone_to_utc(filters.start_datetime), + convert_user_timezone_to_utc(filters.stop_datetime), + filters.search_pattern, + int(filters.max_lines) or 100, + ) + for row in rows: + row["timestamp"] = convert_utc_to_timezone( + jingrow.utils.get_datetime(row["timestamp"]).replace(tzinfo=None), + get_system_timezone(), + ) + + # Filter out queries starting with `SET` + dql_stmt = ("select", "update", "delete", "insert") + rows = [x for x in rows if x["query"].lower().lstrip().startswith(dql_stmt)] + + if filters.normalize_queries: + rows = summarize_by_query(rows) + + return rows + + +def get_slow_query_logs(database, start_datetime, end_datetime, search_pattern, size): + log_server = jingrow.db.get_single_value("Jcloud Settings", "log_server") + if not log_server: + return [] + + url = f"https://{log_server}/elasticsearch/filebeat-*/_search" + password = get_decrypted_password("Log Server", log_server, "kibana_password") + + query = { + "query": { + "bool": { + "filter": [ + {"exists": {"field": "mysql.slowlog.query"}}, + {"match_phrase": {"mysql.slowlog.schema": database}}, + {"range": {"@timestamp": {"gt": start_datetime, "lte": end_datetime}}}, + ], + } + }, + "size": size, + } + + if search_pattern and search_pattern != ".*": + query["query"]["bool"]["filter"].append({"regexp": {"mysql.slowlog.query": search_pattern}}) + + response = requests.post(url, json=query, auth=("jingrow", password)).json() + + out = [] + for d in response["hits"]["hits"]: + data = d["_source"]["mysql"]["slowlog"] + data["timestamp"] = d["_source"]["@timestamp"] + data["duration"] = d["_source"].get("event", {}).get("duration", 0) / 1e9 + out.append(data) + return out + + +def normalize_query(query: str) -> str: + q = sqlparse.parse(query)[0] + for token in q.flatten(): + token_type = str(token.ttype) + if "Token.Literal" in token_type or token_type == "Token.Keyword.Order": + token.value = "?" + + # Format query consistently so identical queries can be matched + q = format_query(q, strip_comments=True) + + # Transform IN parts like this: IN (?, ?, ?) -> IN (?) 
+ return re.sub(r" IN \(\?[\s\n\?\,]*\)", " IN (?)", q, flags=re.IGNORECASE) + + +def format_query(q, strip_comments=False): + return sqlparse.format( + str(q).strip(), + keyword_case="upper", + reindent=True, + strip_comments=strip_comments, + ) + + +def summarize_by_query(data): + queries = defaultdict(lambda: defaultdict(float)) + for row in data: + query = row["query"] + if "SQL_NO_CACHE" in query and "WHERE" not in query: + # These are mysqldump queries, there's no real way to optimize these, it's just dumping entire table. + continue + + normalized_query = normalize_query(query) + entry = queries[normalized_query] + entry["count"] += 1 + entry["query"] = normalized_query + entry["duration"] += row["duration"] + entry["rows_examined"] += row["rows_examined"] + entry["rows_sent"] += row["rows_sent"] + entry["example"] = query + + result = list(queries.values()) + result.sort(key=lambda r: r["duration"] * r["count"], reverse=True) + + return result + + +def get_pagetype_name(table_name: str) -> str: + return table_name.removeprefix("tab") diff --git a/jcloud/jcloud/report/marketplace_app_repository_visibility/__init__.py b/jcloud/jcloud/report/marketplace_app_repository_visibility/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/report/marketplace_app_repository_visibility/marketplace_app_repository_visibility.js b/jcloud/jcloud/report/marketplace_app_repository_visibility/marketplace_app_repository_visibility.js new file mode 100644 index 0000000..ea74535 --- /dev/null +++ b/jcloud/jcloud/report/marketplace_app_repository_visibility/marketplace_app_repository_visibility.js @@ -0,0 +1,19 @@ +// Copyright (c) 2025, JINGROW +// For license information, please see license.txt + +jingrow.query_reports['Marketplace App Repository Visibility'] = { + filters: [], + onload: async function (report) { + report.page.add_inner_button(__('Send Email to Developers'), () => { + jingrow.confirm('Are you sure you want to send out the e-mails?', () => { + jingrow.xcall( + 'jcloud.jcloud.report.marketplace_app_repository_visibility.marketplace_app_repository_visibility.send_emails', + { + columns: JSON.stringify(report.columns), + data: JSON.stringify(report.data), + }, + ); + }); + }); + }, +}; diff --git a/jcloud/jcloud/report/marketplace_app_repository_visibility/marketplace_app_repository_visibility.json b/jcloud/jcloud/report/marketplace_app_repository_visibility/marketplace_app_repository_visibility.json new file mode 100644 index 0000000..5b7bdc7 --- /dev/null +++ b/jcloud/jcloud/report/marketplace_app_repository_visibility/marketplace_app_repository_visibility.json @@ -0,0 +1,31 @@ +{ + "add_total_row": 0, + "columns": [], + "creation": "2025-01-21 17:35:11.471086", + "disabled": 0, + "docstatus": 0, + "pagetype": "Report", + "filters": [], + "idx": 0, + "is_standard": "Yes", + "letterhead": null, + "modified": "2025-01-21 20:53:27.388232", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Marketplace App Repository Visibility", + "owner": "Administrator", + "prepared_report": 1, + "query": "SELECT \n ma.name AS app_name,\n mav.version AS version,\n mav.source AS source,\n asrc.repository_url AS repository_url,\n asrc.branch AS branch\nFROM \n `tabMarketplace App` ma\nJOIN \n `tabMarketplace App Version` mav ON ma.name = mav.parent\nJOIN \n `tabApp Source` asrc ON mav.source = asrc.name\n", + "ref_pagetype": "Marketplace App", + "report_name": "Marketplace App Repository Visibility", + "report_type": "Script Report", + "roles": [ + { + "role": 
"System Manager" + }, + { + "role": "Jcloud Admin" + } + ], + "timeout": 3000 +} diff --git a/jcloud/jcloud/report/marketplace_app_repository_visibility/marketplace_app_repository_visibility.py b/jcloud/jcloud/report/marketplace_app_repository_visibility/marketplace_app_repository_visibility.py new file mode 100644 index 0000000..a647a62 --- /dev/null +++ b/jcloud/jcloud/report/marketplace_app_repository_visibility/marketplace_app_repository_visibility.py @@ -0,0 +1,120 @@ +import json + +import jingrow +import requests + + +def send_developer_email(email, app_name, branch, repository_url, version): + dev = jingrow.get_pg("User", {"email": email}) + developer_name = dev.full_name + email_args = { + "recipients": email, + "subject": "Jingrow: Make your app's GitHub Repository Public", + "template": "marketplace_app_visibility", + "args": { + "developer_name": developer_name, + "app_name": app_name, + "branch": branch, + "version": version, + "repository_url": repository_url, + }, + } + jingrow.enqueue(method=jingrow.sendmail, queue="short", timeout=300, **email_args) + + +@jingrow.whitelist() +def send_emails(columns, data): + jingrow.only_for("System Manager") + data = json.loads(data) + for row in data: + visibility = row.get("visibility") + if visibility != "Private": + continue + app_name = row.get("app_name") + branch = row.get("branch") + repository_url = row.get("repository_url") + email = row.get("team") + version = row.get("version") + send_developer_email(email, app_name, branch, repository_url, version) + + +def check_repository_visibility(repository_url, personal_access_token): + try: + repo_parts = repository_url.split("git.jingrow.com:3000/")[1].rstrip(".git").split("/") + owner = repo_parts[0] + repo_name = repo_parts[1] + except IndexError: + return "Error: Invalid repository URL format." 
+ + api_url = f"http://git.jingrow.com:3000/api/v1/repos/{owner}/{repo_name}" + + headers = {"Authorization": f"token {personal_access_token}"} + + try: + response = requests.get(api_url, headers=headers) + + if response.status_code == 200: + repo_data = response.json() + if repo_data.get("private"): + return "Private" + return "Public" + if response.status_code == 404: + return "Private" + return "Private" + except Exception: + return "Error" + + +def execute(filters=None): + jingrow.only_for("System Manager") + + columns = [ + {"fieldname": "app_name", "label": "Application Name", "fieldtype": "Data", "width": 200}, + {"fieldname": "team", "label": "Team", "fieldtype": "Data", "width": 200}, + {"fieldname": "version", "label": "Version", "fieldtype": "Data", "width": 100}, + {"fieldname": "source", "label": "Source", "fieldtype": "Data", "width": 100}, + {"fieldname": "repository_url", "label": "Repository URL", "fieldtype": "Data", "width": 300}, + {"fieldname": "branch", "label": "Branch", "fieldtype": "Data", "width": 100}, + { + "fieldname": "visibility", + "label": "Visibility", + "fieldtype": "Data", + "width": 100, + }, + ] + + data = jingrow.db.sql( + """ + SELECT + ma.name AS app_name, + t.user AS team, + mav.version AS version, + mav.source AS source, + asrc.repository_url AS repository_url, + asrc.branch AS branch + FROM + `tabMarketplace App` ma + JOIN + `tabMarketplace App Version` mav ON ma.name = mav.parent + JOIN + `tabApp Source` asrc ON mav.source = asrc.name + JOIN + `tabTeam` t ON ma.team = t.name + """, + as_dict=True, + ) + personal_access_token = jingrow.db.get_value("Jcloud Settings", "None", "github_pat_token") + + visibility_cache = {} + for row in data: + repo_url = row["repository_url"] + # Check if the visibility status is already cached for this repository URL + if repo_url in visibility_cache: + row["visibility"] = visibility_cache[repo_url] + else: + # Check visibility status and cache it + visibility_status = check_repository_visibility(repo_url, personal_access_token) + row["visibility"] = visibility_status + # Store the result in the cache for future reference + visibility_cache[repo_url] = visibility_status + return columns, data diff --git a/jcloud/jcloud/report/server_stats/__init__.py b/jcloud/jcloud/report/server_stats/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/report/server_stats/server_stats.js b/jcloud/jcloud/report/server_stats/server_stats.js new file mode 100644 index 0000000..036ebc5 --- /dev/null +++ b/jcloud/jcloud/report/server_stats/server_stats.js @@ -0,0 +1,5 @@ +// Copyright (c) 2022, JINGROW +// For license information, please see license.txt +/* eslint-disable */ + +jingrow.query_reports['Server Stats'] = {}; diff --git a/jcloud/jcloud/report/server_stats/server_stats.json b/jcloud/jcloud/report/server_stats/server_stats.json new file mode 100644 index 0000000..6f506b6 --- /dev/null +++ b/jcloud/jcloud/report/server_stats/server_stats.json @@ -0,0 +1,55 @@ +{ + "add_total_row": 0, + "columns": [], + "creation": "2022-12-15 09:04:10.284944", + "disabled": 0, + "docstatus": 0, + "pagetype": "Report", + "filters": [ + { + "fieldname": "team", + "fieldtype": "Link", + "label": "Team", + "mandatory": 0, + "options": "Team", + "wildcard_filter": 1 + }, + { + "fieldname": "server_type", + "fieldtype": "Select", + "label": "Server Type", + "mandatory": 0, + "options": "\nServer\nDatabase Server\nProxy Server\nLog Server\nMonitor Server\nRegistry Server\nTrace Server\nAnalytics Server", + 
"wildcard_filter": 0 + }, + { + "fieldname": "exclude_self_hosted", + "fieldtype": "Check", + "label": "Exclude Self Hosted", + "mandatory": 0, + "wildcard_filter": 0 + } + ], + "idx": 0, + "is_standard": "Yes", + "modified": "2023-05-17 18:33:39.554734", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Server Stats", + "owner": "Administrator", + "prepared_report": 0, + "ref_pagetype": "Server", + "report_name": "Server Stats", + "report_type": "Script Report", + "roles": [ + { + "role": "System Manager" + }, + { + "role": "Jcloud Member" + }, + { + "role": "Jcloud Admin" + } + ] +} \ No newline at end of file diff --git a/jcloud/jcloud/report/server_stats/server_stats.py b/jcloud/jcloud/report/server_stats/server_stats.py new file mode 100644 index 0000000..7557cd0 --- /dev/null +++ b/jcloud/jcloud/report/server_stats/server_stats.py @@ -0,0 +1,238 @@ +# Copyright (c) 2022, JINGROW +# For license information, please see license.txt + +import jingrow +from jingrow.utils import rounded + +from jcloud.api.server import calculate_swap, prometheus_query, total_resource, usage + + +def execute(filters=None): + jingrow.only_for("System Manager") + columns = [ + { + "fieldname": "server", + "label": jingrow._("Server"), + "fieldtype": "Dynamic Link", + "options": "server_type", + "width": 200, + }, + { + "fieldname": "server_type", + "label": jingrow._("Server Type"), + "fieldtype": "Link", + "options": "PageType", + "width": 200, + }, + { + "fieldname": "cpu", + "label": jingrow._("vCPUs"), + "fieldtype": "Int", + "width": 100, + }, + { + "fieldname": "cpu_used", + "label": jingrow._("CPU Utilization(%)"), + "fieldtype": "Float", + "width": 100, + }, + { + "fieldname": "disk", + "label": jingrow._("Space (GB)"), + "fieldtype": "Float", + "width": 100, + }, + { + "fieldname": "disk_used", + "label": jingrow._("Space Used(%)"), + "fieldtype": "Float", + "width": 100, + }, + { + "fieldname": "disk_free", + "label": jingrow._("Space Free (GB)"), + "fieldtype": "Float", + "width": 100, + }, + { + "fieldname": "memory", + "label": jingrow._("Memory (GB)"), + "fieldtype": "Float", + "width": 100, + }, + { + "fieldname": "memory_used", + "label": jingrow._("Memory Used(%)"), + "fieldtype": "Float", + "width": 100, + }, + { + "fieldname": "swap", + "label": jingrow._("Swap (GB)"), + "fieldtype": "Float", + "width": 100, + }, + { + "fieldname": "swap_used", + "label": jingrow._("Swap Used(%)"), + "fieldtype": "Float", + "width": 100, + }, + { + "fieldname": "memory_required", + "label": jingrow._("Memory Required (GB)"), + "fieldtype": "Float", + "width": 100, + }, + { + "fieldname": "memory_shortage", + "label": jingrow._("Memory Shortage (GB)"), + "fieldtype": "Float", + "width": 100, + }, + { + "fieldname": "ram_assigned", + "label": jingrow._("Ram Assigned for Workers (GB)"), + "fieldtype": "Float", + "width": 100, + }, + { + "fieldname": "new_worker_allocation", + "label": jingrow._("New Worker Allocation"), + "fieldtype": "Check", + "width": 100, + }, + { + "fieldname": "load_1", + "label": jingrow._("Load Average 1 (%)"), + "fieldtype": "Float", + "width": 100, + }, + { + "fieldname": "load_5", + "label": jingrow._("Load Average 5 (%)"), + "fieldtype": "Float", + "width": 100, + }, + { + "fieldname": "load_15", + "label": jingrow._("Load Average 15 (%)"), + "fieldtype": "Float", + "width": 100, + }, + ] + + data = get_data(filters) + return columns, data + + +def calculate_load(server): + query_map = { + "load_1": ( + f"""avg(node_load1{{instance="{server}", job="node"}}) / 
count(count(node_cpu_seconds_total{{instance="{server}", job="node"}}) by (cpu)) * 100""", + lambda x: x, + ), + "load_5": ( + f"""avg(node_load5{{instance="{server}", job="node"}}) / count(count(node_cpu_seconds_total{{instance="{server}", job="node"}}) by (cpu)) * 100""", + lambda x: x, + ), + "load_15": ( + f"""avg(node_load15{{instance="{server}", job="node"}}) / count(count(node_cpu_seconds_total{{instance="{server}", job="node"}}) by (cpu)) * 100""", + lambda x: x, + ), + } + + result = {} + for usage_type, query in query_map.items(): + response = prometheus_query(query[0], query[1], "Asia/Kolkata", 120, 120)["datasets"] + if response: + result[usage_type] = response[0]["values"][-1] + return result + + +def get_data(filters): + rows = [] + for server in get_servers(filters): + used_data = usage(server.name) + available_data = total_resource(server.name) + swap_memory = calculate_swap(server.name) + load = calculate_load(server.name) + + row = { + "server": server.name, + "server_type": server.server_type, + "cpu": available_data.get("vcpu", 0), + "cpu_used": rounded(used_data.get("vcpu", 0) * 100, 1), + "disk": rounded(available_data.get("disk", 0), 2), + "disk_used": rounded( + (used_data.get("disk", 0) / available_data.get("disk", 1)) * 100, 1 + ), + "disk_free": available_data.get("disk", 0) - used_data.get("disk", 0), + "memory": rounded(available_data.get("memory", 0) / 1024, 2), + "memory_used": rounded( + (used_data.get("memory", 0) / available_data.get("memory", 1)) * 100, 1 + ), + "swap": rounded(swap_memory.get("swap", 0), 1), + "swap_used": rounded(swap_memory.get("swap_used", 0), 1), + "memory_required": rounded(swap_memory.get("required", 0), 1), + "memory_shortage": max( + rounded(swap_memory.get("required", 0), 1) + - rounded(available_data.get("memory", 0) / 1024, 2), + 0, + ), + "load_1": rounded(load.get("load_1", 0), 1), + "load_5": rounded(load.get("load_5", 0), 1), + "load_15": rounded(load.get("load_15", 0), 1), + } + if server.server_type == "Server": + row.update( + { + "new_worker_allocation": jingrow.db.get_value( + server.server_type, server.name, "new_worker_allocation" + ), + "ram_assigned": (jingrow.db.get_value(server.server_type, server.name, "ram") or 0) + / 1024, + } + ) + rows.append(row) + + return rows + + +def get_servers(filters): + server_filters = {"status": "Active", **filters} + + if filters.server_name: + server_filters["name"] = ("like", f"%{filters.server_name}%") + server_filters.pop("server_name", None) + + if filters.exclude_self_hosted: + server_filters["is_self_hosted"] = False + server_filters.pop("exclude_self_hosted", None) + + server_types = ( + [filters.server_type] + if filters.server_type + else [ + "Server", + "Database Server", + "Proxy Server", + "Log Server", + "Monitor Server", + "Registry Server", + "Trace Server", + "Analytics Server", + ] + ) + servers = [] + for server_type in server_types: + server_type_filters = server_filters.copy() + for field in server_filters: + if field == "name": + continue + if not jingrow.get_meta(server_type).has_field(field): + server_type_filters.pop(field, None) + for server in jingrow.get_all(server_type, server_type_filters): + server.update({"server_type": server_type}) + servers.append(server) + return servers diff --git a/jcloud/jcloud/report/shared_app_server_stats/__init__.py b/jcloud/jcloud/report/shared_app_server_stats/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/jcloud/report/shared_app_server_stats/shared_app_server_stats.js 
b/jcloud/jcloud/report/shared_app_server_stats/shared_app_server_stats.js new file mode 100644 index 0000000..a3004b0 --- /dev/null +++ b/jcloud/jcloud/report/shared_app_server_stats/shared_app_server_stats.js @@ -0,0 +1,7 @@ +// Copyright (c) 2023, JINGROW +// For license information, please see license.txt +/* eslint-disable */ + +jingrow.query_reports['Shared App Server Stats'] = { + filters: [], +}; diff --git a/jcloud/jcloud/report/shared_app_server_stats/shared_app_server_stats.json b/jcloud/jcloud/report/shared_app_server_stats/shared_app_server_stats.json new file mode 100644 index 0000000..d148c78 --- /dev/null +++ b/jcloud/jcloud/report/shared_app_server_stats/shared_app_server_stats.json @@ -0,0 +1,32 @@ +{ + "add_total_row": 0, + "columns": [], + "creation": "2023-01-23 10:44:06.770366", + "disable_prepared_report": 1, + "disabled": 0, + "docstatus": 0, + "pagetype": "Report", + "filters": [], + "idx": 0, + "is_standard": "Yes", + "modified": "2023-01-23 10:44:06.770366", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Shared App Server Stats", + "owner": "Administrator", + "prepared_report": 0, + "ref_pagetype": "Server", + "report_name": "Shared App Server Stats", + "report_type": "Script Report", + "roles": [ + { + "role": "System Manager" + }, + { + "role": "Jcloud Admin" + }, + { + "role": "Jcloud Member" + } + ] +} \ No newline at end of file diff --git a/jcloud/jcloud/report/shared_app_server_stats/shared_app_server_stats.py b/jcloud/jcloud/report/shared_app_server_stats/shared_app_server_stats.py new file mode 100644 index 0000000..e46e772 --- /dev/null +++ b/jcloud/jcloud/report/shared_app_server_stats/shared_app_server_stats.py @@ -0,0 +1,202 @@ +# Copyright (c) 2022, JINGROW +# For license information, please see license.txt + +import jingrow +from jingrow.utils import rounded + +from jcloud.api.server import calculate_swap, prometheus_query, total_resource, usage + + +def execute(filters=None): + jingrow.only_for("System Manager") + columns = [ + { + "fieldname": "server", + "label": jingrow._("Server"), + "fieldtype": "Link", + "options": "Server", + "width": 200, + }, + { + "fieldname": "cpu", + "label": jingrow._("vCPUs"), + "fieldtype": "Int", + "width": 100, + }, + { + "fieldname": "cpu_used", + "label": jingrow._("CPU Utilization(%)"), + "fieldtype": "Float", + "width": 100, + }, + { + "fieldname": "disk", + "label": jingrow._("Space (GB)"), + "fieldtype": "Float", + "width": 100, + }, + { + "fieldname": "disk_used", + "label": jingrow._("Space Used(%)"), + "fieldtype": "Float", + "width": 100, + }, + { + "fieldname": "disk_free", + "label": jingrow._("Space Free (GB)"), + "fieldtype": "Float", + "width": 100, + }, + { + "fieldname": "memory", + "label": jingrow._("Memory (GB)"), + "fieldtype": "Float", + "width": 100, + }, + { + "fieldname": "memory_used", + "label": jingrow._("Memory Used(%)"), + "fieldtype": "Float", + "width": 100, + }, + { + "fieldname": "swap", + "label": jingrow._("Swap (GB)"), + "fieldtype": "Float", + "width": 100, + }, + { + "fieldname": "swap_used", + "label": jingrow._("Swap Used(%)"), + "fieldtype": "Float", + "width": 100, + }, + { + "fieldname": "memory_required", + "label": jingrow._("Memory Required (GB)"), + "fieldtype": "Float", + "width": 100, + }, + { + "fieldname": "memory_shortage", + "label": jingrow._("Memory Shortage (GB)"), + "fieldtype": "Float", + "width": 100, + }, + { + "fieldname": "ram_assigned", + "label": jingrow._("Ram Assigned for Workers (GB)"), + "fieldtype": "Float", + "width": 100, + 
}, + { + "fieldname": "new_worker_allocation", + "label": jingrow._("New Worker Allocation"), + "fieldtype": "Check", + "width": 100, + }, + { + "fieldname": "load_1", + "label": jingrow._("Load Average 1 (%)"), + "fieldtype": "Float", + "width": 100, + }, + { + "fieldname": "load_5", + "label": jingrow._("Load Average 5 (%)"), + "fieldtype": "Float", + "width": 100, + }, + { + "fieldname": "load_15", + "label": jingrow._("Load Average 15 (%)"), + "fieldtype": "Float", + "width": 100, + }, + ] + + data = get_data() + return columns, data + + +def calculate_load(server): + query_map = { + "load_1": ( + f"""avg(node_load1{{instance="{server}", job="node"}}) / count(count(node_cpu_seconds_total{{instance="{server}", job="node"}}) by (cpu)) * 100""", + lambda x: x, + ), + "load_5": ( + f"""avg(node_load5{{instance="{server}", job="node"}}) / count(count(node_cpu_seconds_total{{instance="{server}", job="node"}}) by (cpu)) * 100""", + lambda x: x, + ), + "load_15": ( + f"""avg(node_load15{{instance="{server}", job="node"}}) / count(count(node_cpu_seconds_total{{instance="{server}", job="node"}}) by (cpu)) * 100""", + lambda x: x, + ), + } + + result = {} + for usage_type, query in query_map.items(): + response = prometheus_query(query[0], query[1], "Asia/Kolkata", 120, 120)["datasets"] + if response: + result[usage_type] = response[0]["values"][-1] + return result + + +def get_data(): + servers = jingrow.db.sql_list( + """ + select + server.name + from + tabServer server + where + ( + server.team like "%%jingrow.com" + or server.team = "" + ) + and server.provider = "AWS EC2" + and status = "Active" + """ + ) + + rows = [] + for server in servers: + used_data = usage(server) + available_data = total_resource(server) + swap_memory = calculate_swap(server) + load = calculate_load(server) + + rows.append( + { + "server": server, + "cpu": available_data.get("vcpu", 0), + "cpu_used": rounded(used_data.get("vcpu", 0) * 100, 1), + "disk": rounded(available_data.get("disk", 0), 2), + "disk_used": rounded( + (used_data.get("disk", 0) / available_data.get("disk", 1)) * 100, 1 + ), + "disk_free": available_data.get("disk", 0) - used_data.get("disk", 0), + "memory": rounded(available_data.get("memory", 0) / 1024, 2), + "memory_used": rounded( + (used_data.get("memory", 0) / available_data.get("memory", 1)) * 100, 1 + ), + "swap": rounded(swap_memory.get("swap", 0), 1), + "swap_used": rounded(swap_memory.get("swap_used", 0), 1), + "memory_required": rounded(swap_memory.get("required", 0), 1), + "memory_shortage": max( + rounded(swap_memory.get("required", 0), 1) + - rounded(available_data.get("memory", 0) / 1024, 2), + 0, + ), + "new_worker_allocation": jingrow.db.get_value( + "Server", server, "new_worker_allocation" + ), + "ram_assigned": (jingrow.db.get_value("Server", server, "ram") or 0) / 1024, + "load_1": rounded(load.get("load_1", 0), 1), + "load_5": rounded(load.get("load_5", 0), 1), + "load_15": rounded(load.get("load_15", 0), 1), + } + ) + + return rows diff --git a/jcloud/jcloud/workspace/jcloud/jcloud.json b/jcloud/jcloud/workspace/jcloud/jcloud.json new file mode 100644 index 0000000..c86da4f --- /dev/null +++ b/jcloud/jcloud/workspace/jcloud/jcloud.json @@ -0,0 +1,225 @@ +{ + "charts": [], + "content": 
"[{\"id\":\"zYUVFJZX-t\",\"type\":\"header\",\"data\":{\"text\":\"Jingrow\",\"col\":12}},{\"id\":\"VFzeFzlnB5\",\"type\":\"spacer\",\"data\":{\"col\":12}},{\"id\":\"iNJ8WoS9kD\",\"type\":\"header\",\"data\":{\"text\":\"Sites\",\"col\":12}},{\"id\":\"EYeJrbXv1P\",\"type\":\"shortcut\",\"data\":{\"shortcut_name\":\"Active Sites\",\"col\":3}},{\"id\":\"7jAM0HBrde\",\"type\":\"shortcut\",\"data\":{\"shortcut_name\":\"Broken Sites\",\"col\":3}},{\"id\":\"ttO5vXfzL_\",\"type\":\"shortcut\",\"data\":{\"shortcut_name\":\"Pending Sites\",\"col\":3}},{\"id\":\"nvLoQ_N15n\",\"type\":\"shortcut\",\"data\":{\"shortcut_name\":\"Suspended Sites\",\"col\":3}},{\"id\":\"AVO8JCkksy\",\"type\":\"spacer\",\"data\":{\"col\":12}},{\"id\":\"aLqo1uNPYu\",\"type\":\"header\",\"data\":{\"text\":\"Benches\",\"col\":12}},{\"id\":\"BcJPVEkRcy\",\"type\":\"shortcut\",\"data\":{\"shortcut_name\":\"Active Benches\",\"col\":3}},{\"id\":\"PhHBf-f1ej\",\"type\":\"shortcut\",\"data\":{\"shortcut_name\":\"Broken Benches\",\"col\":3}},{\"id\":\"JJB6cOEiXy\",\"type\":\"spacer\",\"data\":{\"col\":12}},{\"id\":\"7a18ugroq8\",\"type\":\"header\",\"data\":{\"text\":\"Servers\",\"col\":12}},{\"id\":\"M3urSE6cor\",\"type\":\"shortcut\",\"data\":{\"shortcut_name\":\"Application Server\",\"col\":3}},{\"id\":\"5qOJXl1CkE\",\"type\":\"shortcut\",\"data\":{\"shortcut_name\":\"Database Server\",\"col\":3}},{\"id\":\"PWrCW7DruI\",\"type\":\"shortcut\",\"data\":{\"shortcut_name\":\"Proxy Server\",\"col\":3}},{\"id\":\"_GVOmg7C_U\",\"type\":\"shortcut\",\"data\":{\"shortcut_name\":\"Hybrid Servers\",\"col\":3}},{\"id\":\"XHWA0NCImO\",\"type\":\"spacer\",\"data\":{\"col\":12}},{\"id\":\"X1dteEUHoR\",\"type\":\"header\",\"data\":{\"text\":\"Settings\",\"col\":12}},{\"id\":\"eeEbYjVj_n\",\"type\":\"shortcut\",\"data\":{\"shortcut_name\":\"Jcloud Settings\",\"col\":3}},{\"id\":\"EiWS2tWYwu\",\"type\":\"shortcut\",\"data\":{\"shortcut_name\":\"Root Domain\",\"col\":3}},{\"id\":\"EoLO5YYzdR\",\"type\":\"shortcut\",\"data\":{\"shortcut_name\":\"Tls Certificate\",\"col\":3}},{\"id\":\"-Jx1Irf-28\",\"type\":\"shortcut\",\"data\":{\"shortcut_name\":\"Team\",\"col\":3}},{\"id\":\"n-T0ehr2ca\",\"type\":\"spacer\",\"data\":{\"col\":12}},{\"id\":\"xZEclbenJQ\",\"type\":\"header\",\"data\":{\"text\":\"Masters\",\"col\":12}},{\"id\":\"P6nCUrJreH\",\"type\":\"shortcut\",\"data\":{\"shortcut_name\":\"App\",\"col\":3}},{\"id\":\"1kAyHtrIrJ\",\"type\":\"shortcut\",\"data\":{\"shortcut_name\":\"App Source\",\"col\":3}},{\"id\":\"-N-Xb5MVPV\",\"type\":\"shortcut\",\"data\":{\"shortcut_name\":\"App Release\",\"col\":3}},{\"id\":\"ND9oElFB7R\",\"type\":\"spacer\",\"data\":{\"col\":12}},{\"id\":\"0DYlbnEtOT\",\"type\":\"header\",\"data\":{\"text\":\"Subscription\",\"col\":12}},{\"id\":\"eddRzzqwSZ\",\"type\":\"shortcut\",\"data\":{\"shortcut_name\":\"Server Plan\",\"col\":3}},{\"id\":\"GYk3Frsy1L\",\"type\":\"shortcut\",\"data\":{\"shortcut_name\":\"Site Plan\",\"col\":3}},{\"id\":\"o_aDSbQlPf\",\"type\":\"shortcut\",\"data\":{\"shortcut_name\":\"Subscription\",\"col\":3}},{\"id\":\"VZSHmE1jzA\",\"type\":\"shortcut\",\"data\":{\"shortcut_name\":\"Invoice\",\"col\":3}},{\"id\":\"B8T-3IZ8Qg\",\"type\":\"spacer\",\"data\":{\"col\":12}},{\"id\":\"WDyzNHuOas\",\"type\":\"header\",\"data\":{\"text\":\"Operations\",\"col\":12}},{\"id\":\"Ziby8rOfsU\",\"type\":\"shortcut\",\"data\":{\"shortcut_name\":\"Agent Job\",\"col\":3}},{\"id\":\"Vm4Tn0dxiD\",\"type\":\"shortcut\",\"data\":{\"shortcut_name\":\"Ansible 
Play\",\"col\":3}},{\"id\":\"n6VE51hPkc\",\"type\":\"shortcut\",\"data\":{\"shortcut_name\":\"Jcloud Job\",\"col\":3}}]", + "creation": "2024-07-30 18:43:18.421196", + "custom_blocks": [], + "docstatus": 0, + "pagetype": "Workspace", + "for_user": "", + "hide_custom": 0, + "icon": "tool", + "idx": 0, + "indicator_color": "", + "is_hidden": 1, + "label": "Jcloud", + "links": [], + "modified": "2024-08-02 16:33:40.351403", + "modified_by": "Administrator", + "module": "Jcloud", + "name": "Jcloud", + "number_cards": [], + "owner": "Administrator", + "parent_page": "", + "public": 1, + "quick_lists": [], + "roles": [], + "sequence_id": 28.0, + "shortcuts": [ + { + "color": "Green", + "pg_view": "List", + "label": "Root Domain", + "link_to": "Root Domain", + "stats_filter": "[]", + "type": "PageType" + }, + { + "color": "Grey", + "pg_view": "List", + "label": "Agent Job", + "link_to": "Agent Job", + "stats_filter": "[]", + "type": "PageType" + }, + { + "color": "Green", + "pg_view": "List", + "label": "Server Plan", + "link_to": "Server Plan", + "stats_filter": "[[\"Server Plan\",\"enabled\",\"=\",1,false]]", + "type": "PageType" + }, + { + "color": "Grey", + "pg_view": "List", + "label": "Ansible Play", + "link_to": "Ansible Play", + "stats_filter": "[]", + "type": "PageType" + }, + { + "color": "Green", + "pg_view": "List", + "label": "Team", + "link_to": "Team", + "stats_filter": "[[\"Team\",\"enabled\",\"=\",1,false]]", + "type": "PageType" + }, + { + "color": "Grey", + "pg_view": "List", + "label": "Jcloud Job", + "link_to": "Jcloud Job", + "stats_filter": "[]", + "type": "PageType" + }, + { + "color": "Green", + "pg_view": "List", + "format": "", + "label": "Active Sites", + "link_to": "Site", + "stats_filter": "[[\"Site\",\"status\",\"=\",\"Active\",false]]", + "type": "PageType" + }, + { + "color": "Green", + "pg_view": "List", + "label": "Site Plan", + "link_to": "Site Plan", + "stats_filter": "[[\"Site Plan\",\"enabled\",\"=\",1,false]]", + "type": "PageType" + }, + { + "color": "Green", + "pg_view": "List", + "label": "Active Benches", + "link_to": "Bench", + "stats_filter": "[[\"Bench\",\"status\",\"=\",\"Active\",false]]", + "type": "PageType" + }, + { + "color": "Green", + "pg_view": "List", + "label": "Subscription", + "link_to": "Subscription", + "stats_filter": "[]", + "type": "PageType" + }, + { + "color": "Green", + "pg_view": "List", + "label": "Tls Certificate", + "link_to": "TLS Certificate", + "stats_filter": "[[\"TLS Certificate\",\"status\",\"=\",\"Active\",false]]", + "type": "PageType" + }, + { + "color": "Grey", + "pg_view": "List", + "label": "Invoice", + "link_to": "Invoice", + "stats_filter": "[]", + "type": "PageType" + }, + { + "color": "Red", + "pg_view": "List", + "format": "", + "label": "Broken Sites", + "link_to": "Site", + "stats_filter": "[[\"Site\",\"status\",\"=\",\"Broken\",false]]", + "type": "PageType" + }, + { + "color": "Red", + "pg_view": "List", + "label": "Broken Benches", + "link_to": "Bench", + "stats_filter": "[[\"Bench\",\"status\",\"=\",\"Broken\",false]]", + "type": "PageType" + }, + { + "color": "Grey", + "pg_view": "List", + "label": "App", + "link_to": "App", + "stats_filter": "[]", + "type": "PageType" + }, + { + "color": "Yellow", + "pg_view": "List", + "format": "", + "label": "Pending Sites", + "link_to": "Site", + "stats_filter": "[[\"Site\",\"status\",\"=\",\"Pending\",false]]", + "type": "PageType" + }, + { + "color": "Green", + "pg_view": "List", + "label": "Application Server", + "link_to": "Server", + "stats_filter": 
"[[\"Server\",\"status\",\"=\",\"Active\",false]]", + "type": "PageType" + }, + { + "color": "Green", + "pg_view": "List", + "label": "App Source", + "link_to": "App Source", + "stats_filter": "[]", + "type": "PageType" + }, + { + "color": "Grey", + "pg_view": "List", + "format": "", + "label": "Suspended Sites", + "link_to": "Site", + "stats_filter": "[[\"Site\",\"status\",\"=\",\"Suspended\",false]]", + "type": "PageType" + }, + { + "color": "Green", + "pg_view": "List", + "label": "Database Server", + "link_to": "Database Server", + "stats_filter": "[[\"Database Server\",\"status\",\"=\",\"Active\",false]]", + "type": "PageType" + }, + { + "color": "Grey", + "pg_view": "List", + "label": "App Release", + "link_to": "App Release", + "stats_filter": "[]", + "type": "PageType" + }, + { + "color": "Green", + "pg_view": "List", + "label": "Proxy Server", + "link_to": "Proxy Server", + "stats_filter": "[[\"Proxy Server\",\"status\",\"=\",\"Active\",false]]", + "type": "PageType" + }, + { + "color": "Green", + "pg_view": "List", + "label": "Hybrid Servers", + "link_to": "Self Hosted Server", + "stats_filter": "[[\"Self Hosted Server\",\"status\",\"=\",\"Active\",false]]", + "type": "PageType" + }, + { + "color": "Grey", + "pg_view": "List", + "label": "Jcloud Settings", + "link_to": "Jcloud Settings", + "type": "PageType" + } + ], + "title": "Jcloud" +} \ No newline at end of file diff --git a/jcloud/marketplace/README.md b/jcloud/marketplace/README.md new file mode 100644 index 0000000..937103a --- /dev/null +++ b/jcloud/marketplace/README.md @@ -0,0 +1,4 @@ +# Marketplace + +## PageTypes at a Glance + diff --git a/jcloud/marketplace/__init__.py b/jcloud/marketplace/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/marketplace/pagetype/__init__.py b/jcloud/marketplace/pagetype/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/marketplace/pagetype/app_plan_version/__init__.py b/jcloud/marketplace/pagetype/app_plan_version/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/marketplace/pagetype/app_plan_version/app_plan_version.json b/jcloud/marketplace/pagetype/app_plan_version/app_plan_version.json new file mode 100644 index 0000000..cf2ac12 --- /dev/null +++ b/jcloud/marketplace/pagetype/app_plan_version/app_plan_version.json @@ -0,0 +1,33 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2022-01-27 02:04:09.352870", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "version" + ], + "fields": [ + { + "fieldname": "version", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Version", + "options": "Jingrow Version", + "reqd": 1 + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2022-01-27 02:05:51.900721", + "modified_by": "Administrator", + "module": "Marketplace", + "name": "App Plan Version", + "owner": "Administrator", + "permissions": [], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/marketplace/pagetype/app_plan_version/app_plan_version.py b/jcloud/marketplace/pagetype/app_plan_version/app_plan_version.py new file mode 100644 index 0000000..f78c356 --- /dev/null +++ b/jcloud/marketplace/pagetype/app_plan_version/app_plan_version.py @@ -0,0 +1,9 @@ +# Copyright (c) 2022, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class AppPlanVersion(Document): + pass diff --git 
a/jcloud/marketplace/pagetype/app_release_approval_code_comments/__init__.py b/jcloud/marketplace/pagetype/app_release_approval_code_comments/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/marketplace/pagetype/app_release_approval_code_comments/app_release_approval_code_comments.json b/jcloud/marketplace/pagetype/app_release_approval_code_comments/app_release_approval_code_comments.json new file mode 100644 index 0000000..ec2fd24 --- /dev/null +++ b/jcloud/marketplace/pagetype/app_release_approval_code_comments/app_release_approval_code_comments.json @@ -0,0 +1,65 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2024-08-20 14:44:43.708918", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "filename", + "line_number", + "comment", + "time", + "commented_by" + ], + "fields": [ + { + "fieldname": "filename", + "fieldtype": "Small Text", + "in_list_view": 1, + "label": "File Name", + "reqd": 1 + }, + { + "fieldname": "line_number", + "fieldtype": "Int", + "in_list_view": 1, + "label": "Line Number", + "reqd": 1 + }, + { + "fieldname": "time", + "fieldtype": "Datetime", + "in_list_view": 1, + "label": "Datetime", + "reqd": 1 + }, + { + "fieldname": "commented_by", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Commented By", + "options": "User", + "reqd": 1 + }, + { + "fieldname": "comment", + "fieldtype": "Small Text", + "in_list_view": 1, + "label": "Comment", + "reqd": 1 + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2024-08-22 23:00:39.858129", + "modified_by": "Administrator", + "module": "Marketplace", + "name": "App Release Approval Code Comments", + "owner": "Administrator", + "permissions": [], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/marketplace/pagetype/app_release_approval_code_comments/app_release_approval_code_comments.py b/jcloud/marketplace/pagetype/app_release_approval_code_comments/app_release_approval_code_comments.py new file mode 100644 index 0000000..a9596fc --- /dev/null +++ b/jcloud/marketplace/pagetype/app_release_approval_code_comments/app_release_approval_code_comments.py @@ -0,0 +1,26 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class AppReleaseApprovalCodeComments(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. 
+ + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + comment: DF.SmallText + commented_by: DF.Link + filename: DF.SmallText + line_number: DF.Int + parent: DF.Data + parentfield: DF.Data + parenttype: DF.Data + time: DF.Datetime + # end: auto-generated types + pass diff --git a/jcloud/marketplace/pagetype/app_user_review/__init__.py b/jcloud/marketplace/pagetype/app_user_review/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/marketplace/pagetype/app_user_review/app_user_review.js b/jcloud/marketplace/pagetype/app_user_review/app_user_review.js new file mode 100644 index 0000000..14f046b --- /dev/null +++ b/jcloud/marketplace/pagetype/app_user_review/app_user_review.js @@ -0,0 +1,7 @@ +// Copyright (c) 2022, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('App User Review', { + // refresh: function(frm) { + // } +}); diff --git a/jcloud/marketplace/pagetype/app_user_review/app_user_review.json b/jcloud/marketplace/pagetype/app_user_review/app_user_review.json new file mode 100644 index 0000000..2df0ecc --- /dev/null +++ b/jcloud/marketplace/pagetype/app_user_review/app_user_review.json @@ -0,0 +1,85 @@ +{ + "actions": [], + "allow_rename": 1, + "autoname": "format:{app}-review-{####}", + "creation": "2022-01-02 00:04:50.287198", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "title", + "rating", + "app", + "reviewer", + "review" + ], + "fields": [ + { + "fieldname": "rating", + "fieldtype": "Rating", + "in_list_view": 1, + "label": "Rating" + }, + { + "fieldname": "app", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "App", + "options": "Marketplace App", + "reqd": 1 + }, + { + "fieldname": "reviewer", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Reviewer ", + "options": "User", + "reqd": 1 + }, + { + "fieldname": "review", + "fieldtype": "Long Text", + "label": "Review" + }, + { + "fieldname": "title", + "fieldtype": "Data", + "label": "Title", + "reqd": 1 + } + ], + "index_web_pages_for_search": 1, + "links": [ + { + "group": "General", + "link_pagetype": "Developer Review Reply", + "link_fieldname": "review" + } + ], + "modified": "2022-01-18 08:28:31.459866", + "modified_by": "Administrator", + "module": "Marketplace", + "name": "App User Review", + "name_case": "UPPER CASE", + "naming_rule": "Expression", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "title_field": "app" +} \ No newline at end of file diff --git a/jcloud/marketplace/pagetype/app_user_review/app_user_review.py b/jcloud/marketplace/pagetype/app_user_review/app_user_review.py new file mode 100644 index 0000000..75a4b62 --- /dev/null +++ b/jcloud/marketplace/pagetype/app_user_review/app_user_review.py @@ -0,0 +1,30 @@ +# Copyright (c) 2022, JINGROW +# For license information, please see license.txt + +import jingrow +from jingrow.model.document import Document + + +class AppUserReview(Document): + def after_insert(self): + self.update_average_rating() + + def update_average_rating(self): + ratings = jingrow.db.get_all( + "App User Review", + filters={"app": self.app}, + fields=["rating"], + pluck="rating", + ) + + if ratings: + average_rating = (sum(ratings) / len(ratings)) * 5 + average_rating = 
round(average_rating, 2) + + jingrow.db.set_value( + "Marketplace App", self.app, "average_rating", average_rating, update_modified=False + ) + else: + jingrow.db.set_value( + "Marketplace App", self.app, "average_rating", 0, update_modified=False + ) diff --git a/jcloud/marketplace/pagetype/app_user_review/patches/add_rating_values_to_apps.py b/jcloud/marketplace/pagetype/app_user_review/patches/add_rating_values_to_apps.py new file mode 100644 index 0000000..f42aec3 --- /dev/null +++ b/jcloud/marketplace/pagetype/app_user_review/patches/add_rating_values_to_apps.py @@ -0,0 +1,29 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt + +import jingrow +from tqdm import tqdm + + +def execute(): + marketplace_app_names = jingrow.get_all("Marketplace App", pluck="name") + + for app in tqdm(marketplace_app_names): + ratings = jingrow.db.get_all( + "App User Review", + filters={"app": app}, + fields=["rating"], + pluck="rating", + ) + + if ratings: + average_rating = (sum(ratings) / len(ratings)) * 5 + average_rating = round(average_rating, 2) + + jingrow.db.set_value( + "Marketplace App", app, "average_rating", average_rating, update_modified=False + ) + else: + jingrow.db.set_value( + "Marketplace App", app, "average_rating", 0, update_modified=False + ) diff --git a/jcloud/marketplace/pagetype/app_user_review/test_app_user_review.py b/jcloud/marketplace/pagetype/app_user_review/test_app_user_review.py new file mode 100644 index 0000000..7c81903 --- /dev/null +++ b/jcloud/marketplace/pagetype/app_user_review/test_app_user_review.py @@ -0,0 +1,9 @@ +# Copyright (c) 2022, JINGROW +# See license.txt + +# import jingrow +import unittest + + +class TestAppUserReview(unittest.TestCase): + pass diff --git a/jcloud/marketplace/pagetype/auto_release_team/__init__.py b/jcloud/marketplace/pagetype/auto_release_team/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/marketplace/pagetype/auto_release_team/auto_release_team.json b/jcloud/marketplace/pagetype/auto_release_team/auto_release_team.json new file mode 100644 index 0000000..6bcc9f6 --- /dev/null +++ b/jcloud/marketplace/pagetype/auto_release_team/auto_release_team.json @@ -0,0 +1,35 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2023-03-23 19:06:56.123030", + "default_view": "List", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "team" + ], + "fields": [ + { + "fieldname": "team", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Team", + "options": "Team", + "reqd": 1 + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2023-03-23 19:08:49.440063", + "modified_by": "Administrator", + "module": "Marketplace", + "name": "Auto Release Team", + "owner": "Administrator", + "permissions": [], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/marketplace/pagetype/auto_release_team/auto_release_team.py b/jcloud/marketplace/pagetype/auto_release_team/auto_release_team.py new file mode 100644 index 0000000..44e17dc --- /dev/null +++ b/jcloud/marketplace/pagetype/auto_release_team/auto_release_team.py @@ -0,0 +1,9 @@ +# Copyright (c) 2023, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class AutoReleaseTeam(Document): + pass diff --git a/jcloud/marketplace/pagetype/developer_review_reply/__init__.py 
b/jcloud/marketplace/pagetype/developer_review_reply/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/marketplace/pagetype/developer_review_reply/developer_review_reply.js b/jcloud/marketplace/pagetype/developer_review_reply/developer_review_reply.js new file mode 100644 index 0000000..60cdcd0 --- /dev/null +++ b/jcloud/marketplace/pagetype/developer_review_reply/developer_review_reply.js @@ -0,0 +1,7 @@ +// Copyright (c) 2022, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Developer Review Reply', { + // refresh: function(frm) { + // } +}); diff --git a/jcloud/marketplace/pagetype/developer_review_reply/developer_review_reply.json b/jcloud/marketplace/pagetype/developer_review_reply/developer_review_reply.json new file mode 100644 index 0000000..bf5276e --- /dev/null +++ b/jcloud/marketplace/pagetype/developer_review_reply/developer_review_reply.json @@ -0,0 +1,60 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2022-01-02 00:12:06.191929", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "review", + "description", + "developer" + ], + "fields": [ + { + "fieldname": "review", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Review", + "options": "App User Review", + "reqd": 1 + }, + { + "fieldname": "description", + "fieldtype": "Long Text", + "label": "Description" + }, + { + "fieldname": "developer", + "fieldtype": "Link", + "label": "Developer", + "options": "User", + "reqd": 1 + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2023-06-29 11:27:59.229923", + "modified_by": "Administrator", + "module": "Marketplace", + "name": "Developer Review Reply", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "title_field": "review" +} \ No newline at end of file diff --git a/jcloud/marketplace/pagetype/developer_review_reply/developer_review_reply.py b/jcloud/marketplace/pagetype/developer_review_reply/developer_review_reply.py new file mode 100644 index 0000000..9133041 --- /dev/null +++ b/jcloud/marketplace/pagetype/developer_review_reply/developer_review_reply.py @@ -0,0 +1,9 @@ +# Copyright (c) 2022, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class DeveloperReviewReply(Document): + pass diff --git a/jcloud/marketplace/pagetype/developer_review_reply/test_developer_review_reply.py b/jcloud/marketplace/pagetype/developer_review_reply/test_developer_review_reply.py new file mode 100644 index 0000000..476635f --- /dev/null +++ b/jcloud/marketplace/pagetype/developer_review_reply/test_developer_review_reply.py @@ -0,0 +1,9 @@ +# Copyright (c) 2022, JINGROW +# See license.txt + +# import jingrow +import unittest + + +class TestDeveloperReviewReply(unittest.TestCase): + pass diff --git a/jcloud/marketplace/pagetype/featured_app/__init__.py b/jcloud/marketplace/pagetype/featured_app/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/marketplace/pagetype/featured_app/featured_app.json b/jcloud/marketplace/pagetype/featured_app/featured_app.json new file mode 100644 index 0000000..8fdddde --- /dev/null +++ b/jcloud/marketplace/pagetype/featured_app/featured_app.json @@ -0,0 +1,33 @@ +{ + "actions": [], + 
"allow_rename": 1, + "creation": "2022-03-24 22:50:57.189870", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "app" + ], + "fields": [ + { + "fieldname": "app", + "fieldtype": "Link", + "in_list_view": 1, + "label": "App", + "options": "Marketplace App", + "reqd": 1 + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2022-03-24 22:50:57.189870", + "modified_by": "Administrator", + "module": "Marketplace", + "name": "Featured App", + "owner": "Administrator", + "permissions": [], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/marketplace/pagetype/featured_app/featured_app.py b/jcloud/marketplace/pagetype/featured_app/featured_app.py new file mode 100644 index 0000000..d06cace --- /dev/null +++ b/jcloud/marketplace/pagetype/featured_app/featured_app.py @@ -0,0 +1,9 @@ +# Copyright (c) 2022, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class FeaturedApp(Document): + pass diff --git a/jcloud/marketplace/pagetype/marketplace_add_on/__init__.py b/jcloud/marketplace/pagetype/marketplace_add_on/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/marketplace/pagetype/marketplace_add_on/marketplace_add_on.json b/jcloud/marketplace/pagetype/marketplace_add_on/marketplace_add_on.json new file mode 100644 index 0000000..9da28a9 --- /dev/null +++ b/jcloud/marketplace/pagetype/marketplace_add_on/marketplace_add_on.json @@ -0,0 +1,49 @@ +{ + "actions": [], + "autoname": "autoincrement", + "creation": "2022-09-07 18:18:28.399736", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "type", + "price_usd", + "price_cny" + ], + "fields": [ + { + "fieldname": "type", + "fieldtype": "Data", + "in_list_view": 1, + "in_preview": 1, + "label": "Type" + }, + { + "fieldname": "price_usd", + "fieldtype": "Currency", + "in_list_view": 1, + "in_preview": 1, + "label": "Price (USD)" + }, + { + "fieldname": "price_cny", + "fieldtype": "Currency", + "in_list_view": 1, + "in_preview": 1, + "label": "Price (CNY)" + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2022-09-08 10:48:18.037693", + "modified_by": "Administrator", + "module": "Marketplace", + "name": "Marketplace Add On", + "naming_rule": "Autoincrement", + "owner": "Administrator", + "permissions": [], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/marketplace/pagetype/marketplace_add_on/marketplace_add_on.py b/jcloud/marketplace/pagetype/marketplace_add_on/marketplace_add_on.py new file mode 100644 index 0000000..147976d --- /dev/null +++ b/jcloud/marketplace/pagetype/marketplace_add_on/marketplace_add_on.py @@ -0,0 +1,9 @@ +# Copyright (c) 2022, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class MarketplaceAddOn(Document): + pass diff --git a/jcloud/marketplace/pagetype/marketplace_app_payment/__init__.py b/jcloud/marketplace/pagetype/marketplace_app_payment/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/marketplace/pagetype/marketplace_app_payment/marketplace_app_payment.js b/jcloud/marketplace/pagetype/marketplace_app_payment/marketplace_app_payment.js new file mode 100644 index 0000000..220d74a --- /dev/null +++ 
b/jcloud/marketplace/pagetype/marketplace_app_payment/marketplace_app_payment.js @@ -0,0 +1,8 @@ +// Copyright (c) 2023, JINGROW +// For license information, please see license.txt + +// jingrow.ui.form.on("Marketplace App Payment", { +// refresh(frm) { + +// }, +// }); diff --git a/jcloud/marketplace/pagetype/marketplace_app_payment/marketplace_app_payment.json b/jcloud/marketplace/pagetype/marketplace_app_payment/marketplace_app_payment.json new file mode 100644 index 0000000..df4c362 --- /dev/null +++ b/jcloud/marketplace/pagetype/marketplace_app_payment/marketplace_app_payment.json @@ -0,0 +1,68 @@ +{ + "actions": [], + "allow_rename": 1, + "autoname": "field:app", + "creation": "2023-04-19 12:03:55.402147", + "default_view": "List", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "team", + "app", + "total_usd", + "total_cny" + ], + "fields": [ + { + "fieldname": "team", + "fieldtype": "Link", + "label": "Team", + "options": "Team" + }, + { + "fieldname": "app", + "fieldtype": "Link", + "label": "App", + "options": "Marketplace App", + "unique": 1 + }, + { + "default": "0", + "fieldname": "total_usd", + "fieldtype": "Currency", + "label": "Total (USD)" + }, + { + "default": "0", + "fieldname": "total_cny", + "fieldtype": "Currency", + "label": "Total (CNY)" + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2023-04-19 13:32:58.619981", + "modified_by": "Administrator", + "module": "Marketplace", + "name": "Marketplace App Payment", + "naming_rule": "By fieldname", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/marketplace/pagetype/marketplace_app_payment/marketplace_app_payment.py b/jcloud/marketplace/pagetype/marketplace_app_payment/marketplace_app_payment.py new file mode 100644 index 0000000..3599b51 --- /dev/null +++ b/jcloud/marketplace/pagetype/marketplace_app_payment/marketplace_app_payment.py @@ -0,0 +1,22 @@ +# Copyright (c) 2023, JINGROW +# For license information, please see license.txt + +import jingrow +from jingrow.model.document import Document + + +class MarketplaceAppPayment(Document): + def has_threshold_passed(self): + exchange_rate = jingrow.db.get_single_value("Jcloud Settings", "usd_rate") + total = self.total_usd + (self.total_cny / exchange_rate) if exchange_rate > 0 else 80 + return total >= jingrow.db.get_single_value("Jcloud Settings", "threshold") + + def get_commission(self, total): + # TODO: Handle partial commission + # if first month collection: $20, second month: $1000 and $500 - cap/threshold + # then commission should be calculated for $520 from second month collection onwards + return ( + total * jingrow.db.get_single_value("Jcloud Settings", "commission") + if self.has_threshold_passed() + else total + ) diff --git a/jcloud/marketplace/pagetype/marketplace_app_payment/test_marketplace_app_payment.py b/jcloud/marketplace/pagetype/marketplace_app_payment/test_marketplace_app_payment.py new file mode 100644 index 0000000..cb900bb --- /dev/null +++ b/jcloud/marketplace/pagetype/marketplace_app_payment/test_marketplace_app_payment.py @@ -0,0 +1,9 @@ +# Copyright (c) 2023, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests.utils import JingrowTestCase + + +class 
TestMarketplaceAppPayment(JingrowTestCase): + pass diff --git a/jcloud/marketplace/pagetype/marketplace_app_plan/__init__.py b/jcloud/marketplace/pagetype/marketplace_app_plan/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/marketplace/pagetype/marketplace_app_plan/marketplace_app_plan.js b/jcloud/marketplace/pagetype/marketplace_app_plan/marketplace_app_plan.js new file mode 100644 index 0000000..270e3f6 --- /dev/null +++ b/jcloud/marketplace/pagetype/marketplace_app_plan/marketplace_app_plan.js @@ -0,0 +1,16 @@ +// Copyright (c) 2021, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Marketplace App Plan', { + // refresh: function(frm) { + // } +}); +jingrow.ui.form.on('Marketplace App Plan', { + refresh: function (frm) { + frm.set_query('standard_hosting_plan', () => { + return { + filters: { document_type: 'Site', is_trial_plan: 0 }, + }; + }); + }, +}); diff --git a/jcloud/marketplace/pagetype/marketplace_app_plan/marketplace_app_plan.json b/jcloud/marketplace/pagetype/marketplace_app_plan/marketplace_app_plan.json new file mode 100644 index 0000000..9f8a497 --- /dev/null +++ b/jcloud/marketplace/pagetype/marketplace_app_plan/marketplace_app_plan.json @@ -0,0 +1,129 @@ +{ + "actions": [], + "allow_rename": 1, + "autoname": "format:MARKETPLACE-PLAN-{app}-{###}", + "creation": "2021-12-27 14:19:48.420915", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "enabled", + "title", + "app", + "interval", + "section_break_2", + "price_cny", + "column_break_3", + "price_usd", + "plan_features_section", + "available_on_versions", + "features", + "allowed_roles_section", + "roles" + ], + "fields": [ + { + "default": "1", + "fieldname": "enabled", + "fieldtype": "Check", + "label": "Enabled" + }, + { + "fieldname": "title", + "fieldtype": "Data", + "label": "Title" + }, + { + "fieldname": "app", + "fieldtype": "Link", + "in_list_view": 1, + "in_preview": 1, + "in_standard_filter": 1, + "label": "App", + "options": "Marketplace App", + "reqd": 1 + }, + { + "default": "Daily", + "fieldname": "interval", + "fieldtype": "Select", + "label": "Interval", + "options": "Daily\nMonthly\nYearly" + }, + { + "fieldname": "section_break_2", + "fieldtype": "Section Break", + "label": "Pricing" + }, + { + "fieldname": "price_cny", + "fieldtype": "Currency", + "label": "Price (CNY)", + "reqd": 1 + }, + { + "fieldname": "column_break_3", + "fieldtype": "Column Break" + }, + { + "fieldname": "price_usd", + "fieldtype": "Currency", + "label": "Price (USD)", + "reqd": 1 + }, + { + "fieldname": "plan_features_section", + "fieldtype": "Section Break", + "label": "Plan Features" + }, + { + "fieldname": "available_on_versions", + "fieldtype": "Table", + "label": "Available On Versions", + "options": "App Plan Version" + }, + { + "fieldname": "features", + "fieldtype": "Table", + "label": "Features", + "options": "Plan Feature", + "reqd": 1 + }, + { + "fieldname": "allowed_roles_section", + "fieldtype": "Section Break", + "label": "Allowed Roles" + }, + { + "fieldname": "roles", + "fieldtype": "Table", + "label": "Roles", + "options": "Has Role" + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2024-03-21 13:11:26.446979", + "modified_by": "Administrator", + "module": "Marketplace", + "name": "Marketplace App Plan", + "naming_rule": "Expression", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + 
"role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/marketplace/pagetype/marketplace_app_plan/marketplace_app_plan.py b/jcloud/marketplace/pagetype/marketplace_app_plan/marketplace_app_plan.py new file mode 100644 index 0000000..f4f85ad --- /dev/null +++ b/jcloud/marketplace/pagetype/marketplace_app_plan/marketplace_app_plan.py @@ -0,0 +1,85 @@ +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + +from typing import List + +import jingrow +from jingrow import cint + +from jcloud.jcloud.pagetype.site_plan.plan import Plan + + +class MarketplaceAppPlan(Plan): + dashboard_fields = ["app", "name", "title", "price_cny", "price_usd", "enabled"] + + @staticmethod + def get_list_query(query): + plans = query.run(as_dict=True) + for plan in plans: + plan["features"] = get_app_plan_features(plan.name) + + return plans + + def after_insert(self): + self.update_marketplace_app_subscription_type() + + def on_update(self): + self.update_marketplace_app_subscription_type() + + def update_marketplace_app_subscription_type(self): + if cint(self.price_cny) > 0 or cint(self.price_usd) > 0: + jingrow.db.set_value( + "Marketplace App", + self.app, + "subscription_type", + "Paid", + ) + + @staticmethod + def create_marketplace_app_subscription( + site_name, app_name, plan_name, team_name, while_site_creation=False + ): + marketplace_app = jingrow.db.get_value("Marketplace App", {"app": app_name}) + subscription = jingrow.db.exists( + "Subscription", + { + "site": site_name, + "document_type": "Marketplace App", + "document_name": marketplace_app, + }, + ) + + # If already exists, update the plan and activate + if subscription: + subscription = jingrow.get_pg( + "Subscription", + subscription, + for_update=True, + ) + + subscription.plan = plan_name + subscription.enabled = 1 + subscription.save(ignore_permissions=True) + subscription.reload() + + return subscription + + return jingrow.get_pg( + { + "pagetype": "Subscription", + "document_type": "Marketplace App", + "document_name": app_name, + "plan_type": "Marketplace App Plan", + "plan": plan_name, + "site": site_name, + "team": team_name, + } + ).insert(ignore_permissions=True) + + +def get_app_plan_features(app_plan: str) -> List[str]: + features = jingrow.get_all( + "Plan Feature", filters={"parent": app_plan}, pluck="description", order_by="idx" + ) + + return features diff --git a/jcloud/marketplace/pagetype/marketplace_app_plan/test_marketplace_app_plan.py b/jcloud/marketplace/pagetype/marketplace_app_plan/test_marketplace_app_plan.py new file mode 100644 index 0000000..971f588 --- /dev/null +++ b/jcloud/marketplace/pagetype/marketplace_app_plan/test_marketplace_app_plan.py @@ -0,0 +1,35 @@ +# Copyright (c) 2021, JINGROW +# See license.txt + +import unittest + +import jingrow + +from jcloud.jcloud.pagetype.app.test_app import create_test_app +from jcloud.jcloud.pagetype.marketplace_app.test_marketplace_app import ( + create_test_marketplace_app, +) + + +def create_test_marketplace_app_plan(app: str = "jingrow"): + """Create a test marketplace_app_plan""" + if not jingrow.db.exists("Marketplace App", app): + create_test_app(name=app) + create_test_marketplace_app(app) + + return jingrow.get_pg( + { + "pagetype": "Marketplace App Plan", + "title": "Test Plan", + "price_cny": 1000, + "price_usd": 12, + "app": app, + "versions": [{"version": "Version 14"}], + "features": [{"description": "Feature 
1"}], + "enabled": 1, + } + ).insert(ignore_permissions=True) + + +class TestMarketplaceAppPlan(unittest.TestCase): + pass diff --git a/jcloud/marketplace/pagetype/marketplace_app_plans/__init__.py b/jcloud/marketplace/pagetype/marketplace_app_plans/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/marketplace/pagetype/marketplace_app_plans/marketplace_app_plans.json b/jcloud/marketplace/pagetype/marketplace_app_plans/marketplace_app_plans.json new file mode 100644 index 0000000..fb45cd4 --- /dev/null +++ b/jcloud/marketplace/pagetype/marketplace_app_plans/marketplace_app_plans.json @@ -0,0 +1,34 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2021-11-16 20:06:51.033404", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "plan" + ], + "fields": [ + { + "fieldname": "plan", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Plan", + "options": "Marketplace App Plan", + "reqd": 1 + } + ], + "index_web_pages_for_search": 1, + "istable": 1, + "links": [], + "modified": "2021-12-27 14:29:06.633981", + "modified_by": "Administrator", + "module": "Marketplace", + "name": "Marketplace App Plans", + "owner": "Administrator", + "permissions": [], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/marketplace/pagetype/marketplace_app_plans/marketplace_app_plans.py b/jcloud/marketplace/pagetype/marketplace_app_plans/marketplace_app_plans.py new file mode 100644 index 0000000..2cf761c --- /dev/null +++ b/jcloud/marketplace/pagetype/marketplace_app_plans/marketplace_app_plans.py @@ -0,0 +1,9 @@ +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class MarketplaceAppPlans(Document): + pass diff --git a/jcloud/marketplace/pagetype/marketplace_app_subscription/__init__.py b/jcloud/marketplace/pagetype/marketplace_app_subscription/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/marketplace/pagetype/marketplace_app_subscription/marketplace_app_subscription.js b/jcloud/marketplace/pagetype/marketplace_app_subscription/marketplace_app_subscription.js new file mode 100644 index 0000000..731b713 --- /dev/null +++ b/jcloud/marketplace/pagetype/marketplace_app_subscription/marketplace_app_subscription.js @@ -0,0 +1,22 @@ +// Copyright (c) 2021, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Marketplace App Subscription', { + refresh: function (frm) { + if (frm.pg.status != 'Active') { + frm.add_custom_button( + __('Activate'), + () => { + frm.call('activate').then(() => { + jingrow.msgprint({ + title: 'Subscription Activated successfully.', + indicator: 'green', + }); + frm.refresh(); + }); + }, + 'Actions', + ); + } + }, +}); diff --git a/jcloud/marketplace/pagetype/marketplace_app_subscription/marketplace_app_subscription.json b/jcloud/marketplace/pagetype/marketplace_app_subscription/marketplace_app_subscription.json new file mode 100644 index 0000000..04b474c --- /dev/null +++ b/jcloud/marketplace/pagetype/marketplace_app_subscription/marketplace_app_subscription.json @@ -0,0 +1,134 @@ +{ + "actions": [], + "autoname": "format:app-subscription-{app}-{#####}", + "creation": "2022-03-06 20:52:51.924858", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "site", + "team", + "marketplace_app_plan", + "status", + "app", + "start_date", + 
"end_date", + "secret_key", + "interval", + "plan", + "while_site_creation", + "subscription" + ], + "fields": [ + { + "fieldname": "site", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Site", + "options": "Site", + "read_only": 1 + }, + { + "default": "Active", + "fieldname": "status", + "fieldtype": "Select", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "Status", + "options": "Active\nInactive\nDisabled" + }, + { + "fieldname": "app", + "fieldtype": "Link", + "in_list_view": 1, + "in_standard_filter": 1, + "label": "App", + "options": "Marketplace App", + "reqd": 1, + "set_only_once": 1 + }, + { + "default": "Today", + "fieldname": "start_date", + "fieldtype": "Date", + "label": "Start date" + }, + { + "fieldname": "end_date", + "fieldtype": "Date", + "label": "End date" + }, + { + "fieldname": "secret_key", + "fieldtype": "Data", + "label": "Secret Key", + "read_only": 1 + }, + { + "fieldname": "interval", + "fieldtype": "Select", + "label": "Interval", + "options": "Daily\nMonthly\nAnnually" + }, + { + "fieldname": "marketplace_app_plan", + "fieldtype": "Link", + "label": "Marketplace App Plan", + "options": "Marketplace App Plan", + "reqd": 1 + }, + { + "fieldname": "plan", + "fieldtype": "Link", + "label": "Plan", + "options": "Site Plan", + "read_only": 1 + }, + { + "default": "0", + "fieldname": "while_site_creation", + "fieldtype": "Check", + "label": "While Site Creation", + "read_only": 1 + }, + { + "fieldname": "team", + "fieldtype": "Link", + "label": "Team", + "options": "Team" + }, + { + "fieldname": "subscription", + "fieldtype": "Data", + "label": "Subscription" + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2024-02-05 23:05:21.509377", + "modified_by": "Administrator", + "module": "Marketplace", + "name": "Marketplace App Subscription", + "naming_rule": "Expression", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "title_field": "app", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/marketplace/pagetype/marketplace_app_subscription/marketplace_app_subscription.py b/jcloud/marketplace/pagetype/marketplace_app_subscription/marketplace_app_subscription.py new file mode 100644 index 0000000..95fe495 --- /dev/null +++ b/jcloud/marketplace/pagetype/marketplace_app_subscription/marketplace_app_subscription.py @@ -0,0 +1,164 @@ +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + +import jingrow +import requests +from jingrow.model.document import Document + +from jcloud.jcloud.pagetype.site.site import Site + + +class MarketplaceAppSubscription(Document): + def validate(self): + self.set_secret_key() + self.validate_marketplace_app_plan() + self.set_plan() + + def set_secret_key(self): + if not self.secret_key: + self.secret_key = jingrow.generate_hash(length=40) + self.create_site_config_key() + + def create_site_config_key(self): + if not jingrow.db.exists("Site Config Key", {"key": f"sk_{self.app}"}): + jingrow.get_pg( + pagetype="Site Config Key", internal=True, key=f"sk_{self.app}" + ).insert(ignore_permissions=True) + + def validate_marketplace_app_plan(self): + app = jingrow.db.get_value("Marketplace App Plan", self.marketplace_app_plan, "app") + + if app != self.app: + jingrow.throw( + f"Plan 
{self.marketplace_app_plan} is not for app {jingrow.bold(self.app)}!" + ) + + def set_plan(self): + if not self.plan or self.has_value_changed("marketplace_app_plan"): + self.plan = jingrow.db.get_value( + "Marketplace App Plan", self.marketplace_app_plan, "plan" + ) + + def validate_duplicate_subscription(self): + if not self.site: + return + + already_exists = jingrow.db.exists( + "Marketplace App Subscription", {"app": self.app, "site": self.site} + ) + + if already_exists: + jingrow.throw( + f"Subscription for app '{jingrow.bold(self.app)}' already exists for" + f" site '{jingrow.bold(self.site)}'!" + ) + + def before_insert(self): + self.validate_duplicate_subscription() + + def on_update(self): + if self.has_value_changed("marketplace_app_plan"): + self.plan = jingrow.db.get_value( + "Marketplace App Plan", self.marketplace_app_plan, "plan" + ) + jingrow.db.set_value("Subscription", self.subscription, "plan", self.plan) + + if self.has_value_changed("team"): + jingrow.db.set_value("Subscription", self.subscription, "team", self.team) + + if self.has_value_changed("status"): + jingrow.db.set_value( + "Subscription", self.subscription, "enabled", 1 if self.status == "Active" else 0 + ) + + def after_insert(self): + # TODO: Check if this key already exists + if not self.while_site_creation: + self.set_keys_in_site_config() + + subscription = jingrow.get_pg( + { + "pagetype": "Subscription", + "team": self.team, + "document_type": "Marketplace App", + "document_name": self.app, + "marketplace_app_subscription": self.name, + "plan": jingrow.get_value("Marketplace App Plan", self.marketplace_app_plan, "plan"), + } + ).insert(ignore_permissions=True) + self.subscription = subscription.name + self.save() + + self.update_subscription_hook() + + def set_keys_in_site_config(self): + site_pg: Site = jingrow.get_pg("Site", self.site) + + key_id = f"sk_{self.app}" + secret_key = self.secret_key + + old_config = [ + {"key": x.key, "value": x.value, "type": x.type} + for x in list(filter(lambda x: not x.internal, site_pg.configuration)) + ] + config = [ + {"key": key_id, "value": secret_key, "type": "String"}, + { + "key": "subscription", + "value": {"secret_key": secret_key}, + "type": "JSON", + }, + ] + if "prepaid" == jingrow.db.get_value( + "Saas Settings", self.app, "billing_type" + ) and jingrow.db.get_value("Site", self.site, "trial_end_date"): + config.append( + { + "key": "app_include_js", + "value": [jingrow.db.get_single_value("Jcloud Settings", "app_include_script")], + "type": "JSON", + } + ) + + config = config + old_config + + expiry = jingrow.db.get_value("Site", self.site, "trial_end_date") + if expiry: + config[1]["value"].update({"expiry": str(expiry)}) + + site_pg.update_site_config(config) + + @jingrow.whitelist() + def activate(self): + if self.status == "Active": + jingrow.throw("Subscription is already active.") + + self.status = "Active" + self.save() + + def disable(self): + if self.status == "Disabled": + return + self.status = "Disabled" + self.save(ignore_permissions=True) + jingrow.db.set_value("Subscription", self.subscription, "enabled", 0) + + def update_subscription_hook(self): + # sends app name and plan whenever a subscription is created for other apps + # this can be used for activating and deactivating workspaces + if self.app in ["jerp", "hrms"]: + paths = jingrow.get_list( + "Marketplace App", + {"subscription_update_hook": ("is", "set")}, + pluck="subscription_update_hook", + ) + try: + for path in paths: + requests.post( + 
f"https://{self.site}/api/method/{path}", + data={"app": self.app, "plan": self.plan}, + ) + except Exception: + pass + else: + return diff --git a/jcloud/marketplace/pagetype/marketplace_app_subscription/test_marketplace_app_subscription.py b/jcloud/marketplace/pagetype/marketplace_app_subscription/test_marketplace_app_subscription.py new file mode 100644 index 0000000..a0f8fa6 --- /dev/null +++ b/jcloud/marketplace/pagetype/marketplace_app_subscription/test_marketplace_app_subscription.py @@ -0,0 +1,45 @@ +# Copyright (c) 2021, JINGROW +# See license.txt + +import unittest + +import jingrow + +from jcloud.marketplace.pagetype.marketplace_app_plan.test_marketplace_app_plan import ( + create_test_marketplace_app_plan, +) +from jcloud.jcloud.pagetype.app.test_app import create_test_app +from jcloud.jcloud.pagetype.marketplace_app.test_marketplace_app import ( + create_test_marketplace_app, +) +from jcloud.jcloud.pagetype.site.test_site import create_test_site +from jcloud.jcloud.pagetype.team.test_team import create_test_team + + +def create_test_marketplace_app_subscription( + site: str = None, app: str = None, plan: str = None, team: str = None +): + app = ( + app if app and jingrow.db.exists("Marketplace App", app) else create_test_app().name + ) + create_test_marketplace_app(app) + plan = plan if plan else create_test_marketplace_app_plan().name + team = team if team else create_test_team().name + site = site if site else create_test_site(team=team).name + print(jingrow.db.exists("Marketplace App Plan", plan)) + subscription = jingrow.get_pg( + { + "pagetype": "Subscription", + "document_type": "Marketplace App", + "document_name": app, + "plan_type": "Marketplace App Plan", + "plan": plan, + "site": site, + "team": team, + } + ).insert(ignore_if_duplicate=True) + return subscription + + +class TestMarketplaceAppSubscription(unittest.TestCase): + pass diff --git a/jcloud/marketplace/pagetype/marketplace_promotional_banner/__init__.py b/jcloud/marketplace/pagetype/marketplace_promotional_banner/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/marketplace/pagetype/marketplace_promotional_banner/marketplace_promotional_banner.js b/jcloud/marketplace/pagetype/marketplace_promotional_banner/marketplace_promotional_banner.js new file mode 100644 index 0000000..39909a7 --- /dev/null +++ b/jcloud/marketplace/pagetype/marketplace_promotional_banner/marketplace_promotional_banner.js @@ -0,0 +1,7 @@ +// Copyright (c) 2022, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Marketplace Promotional Banner', { + // refresh: function(frm) { + // } +}); diff --git a/jcloud/marketplace/pagetype/marketplace_promotional_banner/marketplace_promotional_banner.json b/jcloud/marketplace/pagetype/marketplace_promotional_banner/marketplace_promotional_banner.json new file mode 100644 index 0000000..49f99ce --- /dev/null +++ b/jcloud/marketplace/pagetype/marketplace_promotional_banner/marketplace_promotional_banner.json @@ -0,0 +1,76 @@ +{ + "actions": [], + "allow_rename": 1, + "autoname": "format:{marketplace_app}-marketplace-promotion-{##}", + "creation": "2022-07-05 13:26:19.810839", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "marketplace_app", + "is_active", + "column_break_3", + "alert_title", + "alert_message" + ], + "fields": [ + { + "fieldname": "alert_message", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Alert Message", + "reqd": 1 + }, + { + "fieldname": "marketplace_app", + 
"fieldtype": "Link", + "in_list_view": 1, + "label": "Marketplace App", + "options": "Marketplace App", + "reqd": 1 + }, + { + "default": "0", + "fieldname": "is_active", + "fieldtype": "Check", + "label": "Is Active" + }, + { + "fieldname": "column_break_3", + "fieldtype": "Column Break" + }, + { + "fieldname": "alert_title", + "fieldtype": "Data", + "label": "Alert Title", + "reqd": 1 + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2022-07-05 13:45:04.722312", + "modified_by": "Administrator", + "module": "Marketplace", + "name": "Marketplace Promotional Banner", + "naming_rule": "Expression", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "quick_entry": 1, + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/marketplace/pagetype/marketplace_promotional_banner/marketplace_promotional_banner.py b/jcloud/marketplace/pagetype/marketplace_promotional_banner/marketplace_promotional_banner.py new file mode 100644 index 0000000..b3d26fe --- /dev/null +++ b/jcloud/marketplace/pagetype/marketplace_promotional_banner/marketplace_promotional_banner.py @@ -0,0 +1,9 @@ +# Copyright (c) 2022, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class MarketplacePromotionalBanner(Document): + pass diff --git a/jcloud/marketplace/pagetype/marketplace_promotional_banner/test_marketplace_promotional_banner.py b/jcloud/marketplace/pagetype/marketplace_promotional_banner/test_marketplace_promotional_banner.py new file mode 100644 index 0000000..1114207 --- /dev/null +++ b/jcloud/marketplace/pagetype/marketplace_promotional_banner/test_marketplace_promotional_banner.py @@ -0,0 +1,9 @@ +# Copyright (c) 2022, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests.utils import JingrowTestCase + + +class TestMarketplacePromotionalBanner(JingrowTestCase): + pass diff --git a/jcloud/marketplace/pagetype/marketplace_publisher_profile/__init__.py b/jcloud/marketplace/pagetype/marketplace_publisher_profile/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/marketplace/pagetype/marketplace_publisher_profile/marketplace_publisher_profile.js b/jcloud/marketplace/pagetype/marketplace_publisher_profile/marketplace_publisher_profile.js new file mode 100644 index 0000000..dff7135 --- /dev/null +++ b/jcloud/marketplace/pagetype/marketplace_publisher_profile/marketplace_publisher_profile.js @@ -0,0 +1,7 @@ +// Copyright (c) 2021, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Marketplace Publisher Profile', { + // refresh: function(frm) { + // } +}); diff --git a/jcloud/marketplace/pagetype/marketplace_publisher_profile/marketplace_publisher_profile.json b/jcloud/marketplace/pagetype/marketplace_publisher_profile/marketplace_publisher_profile.json new file mode 100644 index 0000000..0697c50 --- /dev/null +++ b/jcloud/marketplace/pagetype/marketplace_publisher_profile/marketplace_publisher_profile.json @@ -0,0 +1,141 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2021-10-14 19:06:47.319840", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "team", + "display_name", + "email_for_notifications", + "column_break_4", + "website", + "contact_email", + 
"payouts_tab", + "preferred_payout_method", + "gstin", + "column_break_10", + "bank_account_holder_name", + "bank_account_number", + "other_bank_details", + "paypal_id" + ], + "fields": [ + { + "fieldname": "team", + "fieldtype": "Link", + "in_list_view": 1, + "label": "Team", + "options": "Team", + "reqd": 1, + "unique": 1 + }, + { + "fieldname": "email_for_notifications", + "fieldtype": "Data", + "label": "Email For Notifications", + "options": "Email" + }, + { + "fieldname": "contact_email", + "fieldtype": "Data", + "label": "Contact Email", + "options": "Email" + }, + { + "fieldname": "website", + "fieldtype": "Data", + "label": "Website", + "options": "URL" + }, + { + "fetch_if_empty": 1, + "fieldname": "display_name", + "fieldtype": "Data", + "in_list_view": 1, + "label": "Display Name" + }, + { + "fieldname": "column_break_4", + "fieldtype": "Column Break" + }, + { + "fieldname": "payouts_tab", + "fieldtype": "Tab Break", + "label": "Payouts" + }, + { + "default": "Jingrow Credits", + "fieldname": "preferred_payout_method", + "fieldtype": "Select", + "label": "Preferred Payout Method", + "options": "Jingrow Credits\nBank Transfer\nPayPal" + }, + { + "depends_on": "eval:pg.preferred_payout_method==\"PayPal\"", + "fieldname": "paypal_id", + "fieldtype": "Data", + "label": "PayPal ID", + "mandatory_depends_on": "eval:pg.preferred_payout_method==\"PayPal\"" + }, + { + "depends_on": "eval:pg.preferred_payout_method!=\"Jingrow Credits\"", + "fieldname": "gstin", + "fieldtype": "Data", + "label": "GSTIN (If Applicable)" + }, + { + "fieldname": "column_break_10", + "fieldtype": "Column Break" + }, + { + "depends_on": "eval:pg.preferred_payout_method==\"Bank Transfer\"", + "fieldname": "bank_account_holder_name", + "fieldtype": "Data", + "label": "Bank Account Holder Name", + "mandatory_depends_on": "eval:pg.preferred_payout_method==\"Bank Transfer\"" + }, + { + "depends_on": "eval:pg.preferred_payout_method==\"Bank Transfer\"", + "fieldname": "bank_account_number", + "fieldtype": "Data", + "label": "Bank Account Number", + "mandatory_depends_on": "eval:pg.preferred_payout_method==\"Bank Transfer\"" + }, + { + "depends_on": "eval:pg.preferred_payout_method==\"Bank Transfer\"", + "description": "IFSC Code, Bank Name, Branch etc.", + "fieldname": "other_bank_details", + "fieldtype": "Small Text", + "label": "Other Bank Details", + "mandatory_depends_on": "eval:pg.preferred_payout_method==\"Bank Transfer\"" + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2022-10-21 16:20:49.110120", + "modified_by": "Administrator", + "module": "Marketplace", + "name": "Marketplace Publisher Profile", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "title_field": "team", + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/marketplace/pagetype/marketplace_publisher_profile/marketplace_publisher_profile.py b/jcloud/marketplace/pagetype/marketplace_publisher_profile/marketplace_publisher_profile.py new file mode 100644 index 0000000..390e2bd --- /dev/null +++ b/jcloud/marketplace/pagetype/marketplace_publisher_profile/marketplace_publisher_profile.py @@ -0,0 +1,9 @@ +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class 
MarketplacePublisherProfile(Document): + pass diff --git a/jcloud/marketplace/pagetype/marketplace_publisher_profile/test_marketplace_publisher_profile.py b/jcloud/marketplace/pagetype/marketplace_publisher_profile/test_marketplace_publisher_profile.py new file mode 100644 index 0000000..24c0eea --- /dev/null +++ b/jcloud/marketplace/pagetype/marketplace_publisher_profile/test_marketplace_publisher_profile.py @@ -0,0 +1,9 @@ +# Copyright (c) 2021, JINGROW +# See license.txt + +# import jingrow +import unittest + + +class TestMarketplacePublisherProfile(unittest.TestCase): + pass diff --git a/jcloud/marketplace/pagetype/marketplace_settings/__init__.py b/jcloud/marketplace/pagetype/marketplace_settings/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/marketplace/pagetype/marketplace_settings/marketplace_settings.js b/jcloud/marketplace/pagetype/marketplace_settings/marketplace_settings.js new file mode 100644 index 0000000..5a47e2c --- /dev/null +++ b/jcloud/marketplace/pagetype/marketplace_settings/marketplace_settings.js @@ -0,0 +1,7 @@ +// Copyright (c) 2022, JINGROW +// For license information, please see license.txt + +jingrow.ui.form.on('Marketplace Settings', { + // refresh: function(frm) { + // } +}); diff --git a/jcloud/marketplace/pagetype/marketplace_settings/marketplace_settings.json b/jcloud/marketplace/pagetype/marketplace_settings/marketplace_settings.json new file mode 100644 index 0000000..a13ae6b --- /dev/null +++ b/jcloud/marketplace/pagetype/marketplace_settings/marketplace_settings.json @@ -0,0 +1,68 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2022-03-24 22:49:45.741578", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "featured_apps_section", + "featured_apps", + "release_auto_approval_section", + "auto_release_apps", + "auto_release_teams" + ], + "fields": [ + { + "fieldname": "featured_apps_section", + "fieldtype": "Section Break", + "label": "Featured Apps" + }, + { + "fieldname": "featured_apps", + "fieldtype": "Table", + "label": "Featured Apps", + "options": "Featured App" + }, + { + "fieldname": "release_auto_approval_section", + "fieldtype": "Section Break", + "label": "Auto Release Approval" + }, + { + "fieldname": "auto_release_apps", + "fieldtype": "Table", + "label": "Auto Release Apps", + "options": "Featured App" + }, + { + "fieldname": "auto_release_teams", + "fieldtype": "Table", + "label": "Auto Release Teams", + "options": "Auto Release Team" + } + ], + "index_web_pages_for_search": 1, + "issingle": 1, + "links": [], + "modified": "2023-03-23 19:08:26.722209", + "modified_by": "Administrator", + "module": "Marketplace", + "name": "Marketplace Settings", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "print": 1, + "read": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [] +} \ No newline at end of file diff --git a/jcloud/marketplace/pagetype/marketplace_settings/marketplace_settings.py b/jcloud/marketplace/pagetype/marketplace_settings/marketplace_settings.py new file mode 100644 index 0000000..2bfdc33 --- /dev/null +++ b/jcloud/marketplace/pagetype/marketplace_settings/marketplace_settings.py @@ -0,0 +1,9 @@ +# Copyright (c) 2022, JINGROW +# For license information, please see license.txt + +# import jingrow +from jingrow.model.document import Document + + +class MarketplaceSettings(Document): + pass diff --git 
a/jcloud/marketplace/pagetype/marketplace_settings/test_marketplace_settings.py b/jcloud/marketplace/pagetype/marketplace_settings/test_marketplace_settings.py new file mode 100644 index 0000000..0f6ba96 --- /dev/null +++ b/jcloud/marketplace/pagetype/marketplace_settings/test_marketplace_settings.py @@ -0,0 +1,9 @@ +# Copyright (c) 2022, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests.utils import JingrowTestCase + + +class TestMarketplaceSettings(JingrowTestCase): + pass diff --git a/jcloud/metrics.py b/jcloud/metrics.py new file mode 100644 index 0000000..59ed940 --- /dev/null +++ b/jcloud/metrics.py @@ -0,0 +1,81 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt + +import jingrow +from jingrow.utils import cint +from prometheus_client import ( + CollectorRegistry, + Gauge, + generate_latest, +) +from werkzeug.wrappers import Response + + +class MetricsRenderer: + def __init__(self, path, status_code=None): + self.path = path + self.registry = CollectorRegistry(auto_describe=True) + + def get_status(self, metric, pagetype, status_field="status", filters=None): + if filters is None: + filters = {} + c = Gauge(metric, "", [status_field], registry=self.registry) + rows = jingrow.get_all( + pagetype, + fields=[status_field, "count(*) as count"], + filters=filters, + group_by=status_field, + order_by=f"{status_field} asc", + ignore_ifnull=True, + ) + for row in rows: + c.labels(row[status_field]).set(row.count) + + def metrics(self): + suspended_builds = Gauge( + "jcloud_builds_suspended", "Are docker builds suspended", registry=self.registry + ) + suspended_builds.set( + cint(jingrow.db.get_value("Jcloud Settings", None, "suspend_builds")) + ) + self.get_status( + "jcloud_deploy_candidate_total", + "Deploy Candidate", + filters={"status": ("!=", "Success")}, + ) + + self.get_status("jcloud_site_total", "Site", filters={"status": ("!=", "Archived")}) + self.get_status("jcloud_bench_total", "Bench", filters={"status": ("!=", "Archived")}) + self.get_status("jcloud_server_total", "Server") + + self.get_status("jcloud_database_server_total", "Database Server") + self.get_status("jcloud_virtual_machine_total", "Virtual Machine") + + self.get_status( + "jcloud_site_backup_total", "Site Backup", filters={"status": ("!=", "Success")} + ) + self.get_status( + "jcloud_site_update_total", "Site Update", filters={"status": ("!=", "Success")} + ) + self.get_status("jcloud_site_migration_total", "Site Migration") + self.get_status("jcloud_site_upgrade_total", "Version Upgrade") + + self.get_status("jcloud_jcloud_job_total", "Jcloud Job") + self.get_status( + "jcloud_ansible_play_total", "Ansible Play", filters={"status": ("!=", "Success")} + ) + self.get_status( + "jcloud_agent_job_total", "Agent Job", filters={"status": ("!=", "Success")} + ) + + return generate_latest(self.registry).decode("utf-8") + + def can_render(self): + if self.path in ("metrics",): + return True + + def render(self): + response = Response() + response.mimetype = "text" + response.data = self.metrics() + return response diff --git a/jcloud/modules.txt b/jcloud/modules.txt new file mode 100644 index 0000000..d43f5d9 --- /dev/null +++ b/jcloud/modules.txt @@ -0,0 +1,6 @@ +Jcloud +Experimental +Marketplace +SaaS +Partner +Infrastructure \ No newline at end of file diff --git a/jcloud/notifications.py b/jcloud/notifications.py new file mode 100644 index 0000000..7c1e74b --- /dev/null +++ b/jcloud/notifications.py @@ -0,0 +1,18 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, 
JINGROW +# For license information, please see license.txt + + +# import jingrow + + +def get_notification_config(): + return { + "for_pagetype": { + "Site": {"status": "Active"}, + "Bench": {"status": "Active"}, + "Server": {"status": "Active"}, + "Database Server": {"status": "Active"}, + "Proxy Server": {"status": "Active"}, + }, + } diff --git a/jcloud/overrides.py b/jcloud/overrides.py new file mode 100644 index 0000000..da66d0c --- /dev/null +++ b/jcloud/overrides.py @@ -0,0 +1,238 @@ +# Copyright (c) 2019, JINGROW +# For license information, please see license.txt + + +from functools import partial + +import jingrow +from ansible.utils.path import cleanup_tmp_file +from jingrow.core.pagetype.user.user import User, ask_pass_update +from jingrow.handler import is_whitelisted +from jingrow.utils import cint + +from jcloud.runner import constants +from jcloud.utils import _get_current_team, _system_user + + +@jingrow.whitelist(allow_guest=True) +def upload_file(): + if jingrow.session.user == "Guest": + return None + + files = jingrow.request.files + is_private = jingrow.form_dict.is_private + pagetype = jingrow.form_dict.pagetype + docname = jingrow.form_dict.docname + fieldname = jingrow.form_dict.fieldname + file_url = jingrow.form_dict.file_url + folder = jingrow.form_dict.folder or "Home" + method = jingrow.form_dict.method + content = None + filename = None + + if "file" in files: + file = files["file"] + content = file.stream.read() + filename = file.filename + + jingrow.local.uploaded_file = content + jingrow.local.uploaded_filename = filename + + if method: + method = jingrow.get_attr(method) + is_whitelisted(method) + return method() + ret = jingrow.get_pg( + { + "pagetype": "File", + "attached_to_pagetype": pagetype, + "attached_to_name": docname, + "attached_to_field": fieldname, + "folder": folder, + "file_name": filename, + "file_url": file_url, + "is_private": cint(is_private), + "content": content, + } + ) + ret.save() + return ret + + +def on_session_creation(): + from jcloud.utils import get_current_team + + if ( + not jingrow.db.exists("Team", {"user": jingrow.session.user}) + and jingrow.session.data.user_type == "System User" + ): + return + + try: + team = get_current_team(get_pg=True) + route = team.get_route_on_login() + jingrow.local.response.update({"dashboard_route": route}) + except Exception: + pass + + +def before_job(): + jingrow.local.team = _get_current_team + jingrow.local.system_user = _system_user + + +def before_request(): + jingrow.local.team = _get_current_team + jingrow.local.system_user = _system_user + + +def cleanup_ansible_tmp_files(): + if hasattr(constants, "DEFAULT_LOCAL_TMP"): + cleanup_tmp_file(constants.DEFAULT_LOCAL_TMP) + + +def after_job(): + cleanup_ansible_tmp_files() + + +def update_website_context(context): + if (jingrow.request and jingrow.request.path.startswith("/docs")) and not jingrow.db.get_single_value( + "Jcloud Settings", "publish_docs" + ): + raise jingrow.DoesNotExistError + + +def has_permission(pg, ptype, user): + from jcloud.utils import get_current_team, has_role + + if not user: + user = jingrow.session.user + + user_type = jingrow.db.get_value("User", user, "user_type", cache=True) + if user_type == "System User": + return True + + if ptype == "create": + return True + + if has_role("Jcloud Support Agent", user) and ptype == "read": + return True + + team = get_current_team() + child_team_members = [d.name for d in jingrow.db.get_all("Team", {"parent_team": team}, ["name"])] + if pg.team == team or pg.team in 
child_team_members: + return True + + return False + + +def get_permission_query_conditions_for_pagetype_and_user(pagetype, user): + from jcloud.utils import get_current_team + + if not user: + user = jingrow.session.user + + user_type = jingrow.db.get_value("User", user, "user_type", cache=True) + if user_type == "System User": + return "" + + team = get_current_team() + + return f"(`tab{pagetype}`.`team` = {jingrow.db.escape(team)})" + + +def get_permission_query_conditions_for_pagetype(pagetype): + return partial(get_permission_query_conditions_for_pagetype_and_user, pagetype) + + +class CustomUser(User): + dashboard_fields = ("full_name", "email", "user_image", "enabled", "user_type") + + @staticmethod + def get_list_query(query): + team = jingrow.local.team() + allowed_users = [d.user for d in team.team_members] + User = jingrow.qb.PageType("User") + return query.where(User.name.isin(allowed_users)) + + def autoname(self): + """set name as Email Address""" + if self.get("is_admin") or self.get("is_guest"): + self.name = self.first_name + else: + self.name = self.username + + def validate(self): + # clear new password + self.__new_password = self.new_password + self.new_password = "" + + if not jingrow.flags.in_test: + self.password_strength_test() + + self.populate_role_profile_roles() + self.check_roles_added() + self.set_system_user() + self.set_full_name() + self.check_enable_disable() + self.ensure_unique_roles() + self.remove_all_roles_for_guest() + self.validate_username() + self.remove_disabled_roles() + self.validate_user_email_inbox() + ask_pass_update() + self.validate_allowed_modules() + self.validate_user_image() + self.set_time_zone() + + if self.language == "Loading...": + self.language = None + + if (self.name not in ["Administrator", "Guest"]) and (not self.get_social_login_userid("jingrow")): + self.set_social_login_userid("jingrow", jingrow.generate_hash(length=39)) + + def after_rename(self, old_name, new_name, merge=False): + """ + Changes: + - Excluding update operations on MyISAM tables + """ + myisam_tables = jingrow.db.sql_list( + """SELECT + TABLE_NAME FROM information_schema.TABLES + WHERE + ENGINE='MyISAM' + AND TABLE_SCHEMA NOT IN ('mysql','information_schema','performance_schema') + """ + ) + tables = [x for x in jingrow.db.get_tables() if x not in myisam_tables] + + for tab in tables: + desc = jingrow.db.get_table_columns_description(tab) + has_fields = [] + for d in desc: + if d.get("name") in ["owner", "modified_by"]: + has_fields.append(d.get("name")) + for field in has_fields: + jingrow.db.sql( + """UPDATE `{}` + SET `{}` = {} + WHERE `{}` = {}""".format(tab, field, "%s", field, "%s"), + (new_name, old_name), + ) + + for dt in ["Chat Profile", "Notification Settings"]: + if jingrow.db.exists(dt, old_name): + jingrow.rename_pg(dt, old_name, new_name, force=True, show_alert=False) + + # set username + jingrow.db.sql( + """UPDATE `tabUser` + SET username = %s + WHERE name = %s""", + (new_name, new_name), + ) + + +def before_after_migrate(): + # jingrow.clear_cache() on jcloud doesn't clear everything. 
See hooks.py + jingrow.cache.flushall() diff --git a/jcloud/partner/__init__.py b/jcloud/partner/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/partner/pagetype/__init__.py b/jcloud/partner/pagetype/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/partner/pagetype/partner_approval_request/__init__.py b/jcloud/partner/pagetype/partner_approval_request/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/partner/pagetype/partner_approval_request/partner_approval_request.js b/jcloud/partner/pagetype/partner_approval_request/partner_approval_request.js new file mode 100644 index 0000000..04ef25d --- /dev/null +++ b/jcloud/partner/pagetype/partner_approval_request/partner_approval_request.js @@ -0,0 +1,8 @@ +// Copyright (c) 2023, JINGROW +// For license information, please see license.txt + +// jingrow.ui.form.on("Partner Approval Request", { +// refresh(frm) { + +// }, +// }); diff --git a/jcloud/partner/pagetype/partner_approval_request/partner_approval_request.json b/jcloud/partner/pagetype/partner_approval_request/partner_approval_request.json new file mode 100644 index 0000000..9151a6a --- /dev/null +++ b/jcloud/partner/pagetype/partner_approval_request/partner_approval_request.json @@ -0,0 +1,104 @@ +{ + "actions": [], + "allow_rename": 1, + "creation": "2023-08-31 16:28:55.148891", + "default_view": "List", + "pagetype": "PageType", + "editable_grid": 1, + "engine": "InnoDB", + "field_order": [ + "requested_by", + "partner", + "key", + "column_break_cujq", + "status", + "send_mail", + "approved_by_jingrow", + "approved_by_partner" + ], + "fields": [ + { + "fieldname": "requested_by", + "fieldtype": "Link", + "label": "Requested By", + "options": "Team" + }, + { + "fieldname": "partner", + "fieldtype": "Link", + "label": "Partner", + "options": "Team" + }, + { + "default": "Pending", + "fieldname": "status", + "fieldtype": "Select", + "label": "Status", + "options": "Pending\nApproved\nRejected", + "read_only": 1 + }, + { + "fieldname": "column_break_cujq", + "fieldtype": "Column Break" + }, + { + "default": "0", + "fieldname": "send_mail", + "fieldtype": "Check", + "label": "Send Mail" + }, + { + "fieldname": "key", + "fieldtype": "Data", + "label": "Key" + }, + { + "default": "0", + "fieldname": "approved_by_jingrow", + "fieldtype": "Check", + "label": "Approved By Jingrow" + }, + { + "default": "0", + "fieldname": "approved_by_partner", + "fieldtype": "Check", + "label": "Approved By Partner" + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2024-08-23 15:21:24.754187", + "modified_by": "Administrator", + "module": "Partner", + "name": "Partner Approval Request", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + }, + { + "create": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "Jcloud Admin", + "share": 1, + "write": 1 + } + ], + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "track_changes": 1 +} \ No newline at end of file diff --git a/jcloud/partner/pagetype/partner_approval_request/partner_approval_request.py b/jcloud/partner/pagetype/partner_approval_request/partner_approval_request.py new file mode 100644 index 0000000..c67f1b6 --- /dev/null +++ b/jcloud/partner/pagetype/partner_approval_request/partner_approval_request.py @@ -0,0 +1,86 @@ +# Copyright (c) 2023, 
JINGROW +# For license information, please see license.txt +from __future__ import annotations + +import jingrow +from jingrow.model.document import Document +from jingrow.utils import get_url + +from jcloud.api.client import dashboard_whitelist + + +class PartnerApprovalRequest(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + approved_by_jingrow: DF.Check + approved_by_partner: DF.Check + key: DF.Data | None + partner: DF.Link | None + requested_by: DF.Link | None + send_mail: DF.Check + status: DF.Literal["Pending", "Approved", "Rejected"] + # end: auto-generated types + + dashboard_fields = ( + "requested_by", + "partner", + "status", + "approved_by_jingrow", + "approved_by_partner", + ) + + @staticmethod + def get_list_query(query, filters=None, **list_args): + data = query.run(as_dict=True) + for d in data: + user = jingrow.db.get_value("Team", d.requested_by, "user") + d.update({"customer_email": user}) + return list(data) + + def before_insert(self): + self.key = jingrow.generate_hash(15) + + def before_save(self): + if self.status == "Pending" and self.approved_by_partner and self.approved_by_jingrow: + self.status = "Approved" + + customer = jingrow.get_pg("Team", self.requested_by) + if not customer.partner_email: + partner = jingrow.get_pg("Team", self.partner) + customer.partner_email = partner.partner_email + customer.partnership_date = jingrow.utils.getdate(self.creation) + customer.save(ignore_permissions=True) + + @dashboard_whitelist() + def approve_partner_request(self): + if self.status == "Pending" and not self.approved_by_jingrow: + self.approved_by_partner = True + self.save(ignore_permissions=True) + self.reload() + self.send_approval_request_email() + + def send_approval_request_email(self): + from jcloud.utils.billing import get_jingrow_io_connection + + client = get_jingrow_io_connection() + email = jingrow.db.get_value("Team", self.partner, "partner_email") + partner_manager = client.get_value("Partner", "success_manager", {"email": email}) + if not partner_manager: + jingrow.throw("Failed to create approval request. 
Please contact support.") + customer = jingrow.db.get_value("Team", self.requested_by, "user") + + link = get_url(f"/api/method/jcloud.api.partner.approve_partner_request?key={self.key}") + + jingrow.sendmail( + subject="Partner Approval Request", + recipients=partner_manager["success_manager"], + template="partner_approval", + args={"link": link, "user": customer, "partner": email}, + now=True, + ) diff --git a/jcloud/partner/pagetype/partner_approval_request/test_partner_approval_request.py b/jcloud/partner/pagetype/partner_approval_request/test_partner_approval_request.py new file mode 100644 index 0000000..b0ab7f1 --- /dev/null +++ b/jcloud/partner/pagetype/partner_approval_request/test_partner_approval_request.py @@ -0,0 +1,9 @@ +# Copyright (c) 2023, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests.utils import JingrowTestCase + + +class TestPartnerApprovalRequest(JingrowTestCase): + pass diff --git a/jcloud/partner/pagetype/partner_tier/__init__.py b/jcloud/partner/pagetype/partner_tier/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/partner/pagetype/partner_tier/partner_tier.js b/jcloud/partner/pagetype/partner_tier/partner_tier.js new file mode 100644 index 0000000..e87a5bd --- /dev/null +++ b/jcloud/partner/pagetype/partner_tier/partner_tier.js @@ -0,0 +1,8 @@ +// Copyright (c) 2024, JINGROW +// For license information, please see license.txt + +// jingrow.ui.form.on("Partner Tier", { +// refresh(frm) { + +// }, +// }); diff --git a/jcloud/partner/pagetype/partner_tier/partner_tier.json b/jcloud/partner/pagetype/partner_tier/partner_tier.json new file mode 100644 index 0000000..2452b6f --- /dev/null +++ b/jcloud/partner/pagetype/partner_tier/partner_tier.json @@ -0,0 +1,70 @@ +{ + "actions": [], + "allow_rename": 1, + "autoname": "field:title", + "creation": "2024-10-29 23:03:54.259118", + "pagetype": "PageType", + "engine": "InnoDB", + "field_order": [ + "enabled", + "title", + "column_break_fnzp", + "target_in_cny", + "target_in_usd" + ], + "fields": [ + { + "default": "0", + "fieldname": "enabled", + "fieldtype": "Check", + "label": "Enabled" + }, + { + "fieldname": "title", + "fieldtype": "Data", + "label": "Title", + "unique": 1 + }, + { + "fieldname": "column_break_fnzp", + "fieldtype": "Column Break" + }, + { + "fieldname": "target_in_cny", + "fieldtype": "Float", + "label": "Target in CNY" + }, + { + "fieldname": "target_in_usd", + "fieldtype": "Float", + "label": "Target in USD" + } + ], + "index_web_pages_for_search": 1, + "links": [], + "modified": "2024-10-29 23:08:37.110935", + "modified_by": "Administrator", + "module": "Partner", + "name": "Partner Tier", + "naming_rule": "Expression (old style)", + "owner": "Administrator", + "permissions": [ + { + "create": 1, + "delete": 1, + "email": 1, + "export": 1, + "print": 1, + "read": 1, + "report": 1, + "role": "System Manager", + "share": 1, + "write": 1 + } + ], + "show_title_field_in_link": 1, + "sort_field": "modified", + "sort_order": "DESC", + "states": [], + "title_field": "title" +} \ No newline at end of file diff --git a/jcloud/partner/pagetype/partner_tier/partner_tier.py b/jcloud/partner/pagetype/partner_tier/partner_tier.py new file mode 100644 index 0000000..ecaf20c --- /dev/null +++ b/jcloud/partner/pagetype/partner_tier/partner_tier.py @@ -0,0 +1,24 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt +from __future__ import annotations + +# import jingrow +from jingrow.model.document import Document + + +class 
PartnerTier(Document): + # begin: auto-generated types + # This code is auto-generated. Do not modify anything in this block. + + from typing import TYPE_CHECKING + + if TYPE_CHECKING: + from jingrow.types import DF + + enabled: DF.Check + target_in_cny: DF.Float + target_in_usd: DF.Float + title: DF.Data | None + # end: auto-generated types + + pass diff --git a/jcloud/partner/pagetype/partner_tier/test_partner_tier.py b/jcloud/partner/pagetype/partner_tier/test_partner_tier.py new file mode 100644 index 0000000..62aa1ef --- /dev/null +++ b/jcloud/partner/pagetype/partner_tier/test_partner_tier.py @@ -0,0 +1,9 @@ +# Copyright (c) 2024, JINGROW +# See license.txt + +# import jingrow +from jingrow.tests.utils import JingrowTestCase + + +class TestPartnerTier(JingrowTestCase): + pass diff --git a/jcloud/patches.txt b/jcloud/patches.txt new file mode 100644 index 0000000..461c0c8 --- /dev/null +++ b/jcloud/patches.txt @@ -0,0 +1,141 @@ +[pre_model_sync] +jcloud.patches.v0_0_1.site_history_to_site_activity +jcloud.patches.v0_0_1.user_account_to_team #6 +jcloud.patches.v0_0_1.rename_transaction_currency_to_currency +jcloud.patches.v0_0_1.create_site_plan_change_log +execute:jingrow.reload_pg('jcloud', 'pagetype', 'team') +execute:jingrow.db.set_value('Team', {'enabled': 1}, {'free_credits_allocated': 1}, update_modified=False) +jcloud.patches.v0_0_1.update_proxy_for_suspended_and_inactive_sites +jcloud.patches.v0_0_1.rename_site_backup_fields # 2020-05-22 +jcloud.patches.v0_0_1.rename_archived_sites +jcloud.patches.v0_0_1.create_backup_uploads_folder +jcloud.patches.v0_0_1.mark_deployed_app_releases_as_approved_and_deployable # 2020-22-06 +jcloud.patches.v0_0_1.set_repository_in_jingrow_app # 2020-22-06 +jcloud.patches.v0_0_1.make_apps_in_public_release_group_public +jcloud.patches.v0_0_1.set_host_name_for_sites_with_domains +jcloud.patches.v0_0_1.set_rate_limit_config_based_on_plan +jcloud.patches.v0_0_1.enable_partner_privileges +jcloud.patches.v0_0_1.set_remote_file_location +jcloud.patches.v0_0_1.track_offsite_backups_via_remote_files +jcloud.patches.v0_0_1.move_domains_from_archived_to_active_sites +jcloud.patches.v0_0_1.remove_domains_linked_to_archived_sites +jcloud.patches.v0_0_1.delete_logs_from_archived_sites +jcloud.patches.v0_0_1.add_domains_to_site_config +execute:jingrow.reload_pg('jcloud', 'pagetype', 'Remote File') +# jcloud.patches.v0_0_1.add_site_to_remote_file # 2020-11-12 run via run-patch command in active site state +jcloud.patches.v0_0_1.new_onboarding +jcloud.patches.v0_0_1.remove_obsolete_doctypes +jcloud.patches.v0_0_1.make_default_site_domain +jcloud.patches.v0_0_1.update_site_config_pg +jcloud.patches.v0_0_1.create_certificate_authorities +jcloud.patches.v0_0_1.rename_columns_in_tls_certificate +jcloud.patches.v0_0_1.site_usage_convert_history +jcloud.patches.v0_0_1.set_document_type_in_plan +# Billing Refactor 3 +# these will be run separately with the execute command, to avoid downtime +# jcloud.patches.v0_0_1.create_site_subscriptions +# jcloud.patches.v0_0_1.ple_to_usage_record +# jcloud.patches.v0_0_1.patch_invoice +# jcloud.patches.v0_0_1.create_balance_transactions_from_stripe +# Backups "refactor" +# jcloud.patches.v0_0_1.update_backups_availability +# Update transaction details in paid invoices +# jcloud.jcloud.pagetype.invoice.patches.set_transaction_details +jcloud.patches.v0_0_1.set_billing_name_for_teams +# App Subsystem Refactor +jcloud.patches.v0_0_1.rename_jingrow_app_to_app +jcloud.patches.v0_0_1.rename_release_group_jingrow_app_to_release_group_app 
+jcloud.patches.v0_0_1.set_hostname_in_server +jcloud.patches.v0_0_1.set_domain_in_site +jcloud.patches.v0_0_1.breakdown_site_usage_into_multiple_fields +jcloud.patches.v0_0_1.create_default_cluster +jcloud.patches.v0_0_1.set_app_title_from_custom_field +jcloud.patches.v0_0_1.set_release_group_version_and_title_from_custom_field +jcloud.patches.v0_0_1.rename_installed_app_to_bench_app +jcloud.patches.v0_0_1.rename_deploy_candidate_app_release_to_deploy_candidate_app +jcloud.patches.v0_0_1.create_app_source_from_app +jcloud.patches.v0_0_1.rename_release_groups +jcloud.patches.v0_0_1.create_app_release_difference_from_deploy_candidate_difference +jcloud.patches.v0_0_1.truncate_server_status_table +jcloud.patches.v0_0_1.set_marketplace_app_app_field_from_name +jcloud.patches.v0_0_1.rename_workers_to_background_workers +jcloud.patches.v0_0_1.set_release_in_bench_app +jcloud.patches.v0_0_1.set_app_title_in_deploy_candidate_app +jcloud.patches.v0_0_1.set_release_group_in_site +jcloud.patches.v0_0_1.set_public_field_in_app_release_based_on_app_source_public +jcloud.patches.v0_0_1.set_team_field_for_permission_checks +jcloud.patches.v0_0_1.set_team_field_in_tls_certificate_based_on_domain_team +jcloud.patches.v0_0_1.create_root_domain_from_jcloud_settings +jcloud.patches.v0_0_1.set_cluster_in_jcloud_settings +jcloud.patches.v0_0_1.remove_period_from_plan +jcloud.patches.v0_0_1.add_site_index_to_site_migration +jcloud.patches.v0_0_1.set_dependencies_in_release_group +jcloud.patches.v0_0_1.set_monitoring_password_in_cluster +jcloud.patches.v0_0_1.add_domains_in_site_config_preview +jcloud.patches.v0_0_1.use_private_ip_for_upstreams +jcloud.jcloud.pagetype.site.patches.set_plan_in_site +jcloud.jcloud.pagetype.app_release.patches.set_status_to_draft +jcloud.patches.v0_0_4.remove_legacy_billing_doctypes +# jcloud.jcloud.pagetype.invoice.patches.set_free_credits # 2021-08-11 run via run-patch command +jcloud.jcloud.pagetype.team.patches.set_payment_mode +jcloud.patches.v0_0_1.add_team_name_as_default_notify_email +jcloud.jcloud.pagetype.team.patches.set_referrer_id +jcloud.jcloud.pagetype.team.patches.set_partner_email +jcloud.jcloud.pagetype.proxy_server.patches.generate_proxysql_monitor_password +jcloud.jcloud.pagetype.virtual_machine.patches.populate_volumes_table +jcloud.jcloud.pagetype.release_group.patches.set_bench_dependency_in_release_group +jcloud.jcloud.pagetype.jcloud_settings.patches.move_stripe_credentials_to_jcloud_settings +jcloud.patches.v0_0_4.disable_subscriptions_for_inactive_sites +jcloud.jcloud.pagetype.virtual_machine.patches.set_naming_fields +jcloud.jcloud.pagetype.virtual_machine.patches.set_virtual_machine_naming_series +jcloud.jcloud.pagetype.virtual_machine.patches.rename_virtual_machines +jcloud.jcloud.pagetype.user_ssh_key.patches.set_existing_keys_as_default +jcloud.jcloud.pagetype.jcloud_settings.patches.set_jcloud_monitoring_password +jcloud.jcloud.pagetype.deploy_candidate_app.patches.set_app_name_to_app +jcloud.jcloud.pagetype.site.patches.set_database_access_credentials +jcloud.jcloud.pagetype.team.patches.set_team_title +jcloud.jcloud.pagetype.telegram_group.patches.create_groups_from_jcloud_settings +jcloud.jcloud.pagetype.app_release.patches.set_clone_directory +jcloud.jcloud.pagetype.database_server_mariadb_variable.patches.add_unique_constraint +jcloud.jcloud.pagetype.release_group.patches.sync_common_site_config +execute:jingrow.delete_pg('Central Site Migration') +execute:jingrow.delete_pg('Central Server') +execute:jingrow.delete_pg('Feature Traction') 
+jcloud.patches.v0_7_0.set_hostname_abbreviation +jcloud.jcloud.pagetype.cluster.patches.rename_aws_fields +jcloud.jcloud.pagetype.virtual_machine.patches.rename_aws_fields +jcloud.jcloud.pagetype.virtual_machine_image.patches.rename_aws_fields +jcloud.jcloud.pagetype.virtual_disk_snapshot.patches.rename_aws_fields +jcloud.jcloud.pagetype.virtual_machine_volume.patches.rename_aws_fields +jcloud.patches.v0_7_0.convert_marketplace_description_to_html +jcloud.jcloud.pagetype.team.patches.remove_invalid_email_addresses +jcloud.saas.pagetype.product_trial.patches.rename_saas_product_doctypes_to_product_trial + +[post_model_sync] +jcloud.patches.v0_7_0.rename_plan_to_site_plan +jcloud.patches.v0_7_0.migrate_fields_from_plans_to_server_and_marketplace +jcloud.patches.v0_7_0.set_password_config_type +jcloud.jcloud.pagetype.agent_job.patches.update_status_for_undelivered_jobs #2024-04-23 +jcloud.jcloud.pagetype.jcloud_role.patches.migrate_permissions +jcloud.jcloud.pagetype.jcloud_role.patches.change_fields_from_enable_to_allow +jcloud.jcloud.pagetype.stripe_webhook_log.patches.add_payment_method_for_failed_events +jcloud.patches.v0_7_0.add_team_field_for_site_related_doctypes +jcloud.patches.v0_7_0.add_team_field_for_site_backups_archived +jcloud.jcloud.pagetype.server_storage_plan.patches.add_subscription_for_servers_with_additional_disk +jcloud.jcloud.pagetype.jcloud_notification.patches.link_reference_pagetype_to_notifications +jcloud.jcloud.pagetype.site.patches.set_plan_limit_in_site_config +jcloud.jcloud.pagetype.payout_order.patches.change_fields_from_recipient_to_team +jcloud.jcloud.pagetype.payout_order.patches.compute_total_amount +jcloud.jcloud.pagetype.marketplace_app.patches.change_field_from_first_site_creation_to_site_creation +jcloud.jcloud.pagetype.marketplace_app.patches.convert_images_to_webp +jcloud.marketplace.pagetype.app_user_review.patches.add_rating_values_to_apps +jcloud.jcloud.pagetype.site.patches.set_status_wizard_check_next_retry_datetime_in_site +jcloud.patches.v0_7_0.update_enable_performance_tuning +jcloud.jcloud.pagetype.server.patches.set_plan_and_subscription +jcloud.patches.v0_7_0.move_site_db_access_users_to_site_db_perm_manager +jcloud.jcloud.pagetype.drip_email.patches.set_correct_field_for_html +jcloud.patches.v0_7_0.set_label_for_site_database_user +jcloud.jcloud.pagetype.jcloud_settings.patches.set_redis_cache_size +jcloud.jcloud.pagetype.virtual_machine.patches.set_root_disk_size +jcloud.jcloud.pagetype.virtual_machine_image.patches.set_root_size +jcloud.patches.v0_7_0.fix_team_for_tls_certificates diff --git a/jcloud/patches/v0_0_1/add_domains_in_site_config_preview.py b/jcloud/patches/v0_0_1/add_domains_in_site_config_preview.py new file mode 100644 index 0000000..ae41d8b --- /dev/null +++ b/jcloud/patches/v0_0_1/add_domains_in_site_config_preview.py @@ -0,0 +1,29 @@ +"""Add domains key in jcloud's site configuration (No agent job).""" +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + +from itertools import groupby + +import jingrow + + +def execute(): + domain_site_list = jingrow.db.sql( + """ + SELECT site_domain.name, site.name + FROM `tabSite Domain` site_domain + JOIN tabSite site + ON site_domain.site = site.name + WHERE + site_domain.name != site_domain.site and + site_domain.status = "Active" and + site.status != "Archived" + ORDER BY + site.name + """ + ) + domain_site_list = groupby(domain_site_list, lambda x: x[1]) + for site, domains_tuple in domain_site_list: + domains = [t[0] for t in list(domains_tuple)] 
+ site = jingrow.get_cached_pg("Site", site) + site._update_configuration({"domains": domains}) diff --git a/jcloud/patches/v0_0_1/add_domains_to_site_config.py b/jcloud/patches/v0_0_1/add_domains_to_site_config.py new file mode 100644 index 0000000..b229e5e --- /dev/null +++ b/jcloud/patches/v0_0_1/add_domains_to_site_config.py @@ -0,0 +1,18 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +import jingrow +from jingrow.utils.fixtures import sync_fixtures + + +def execute(): + sync_fixtures("jcloud") + domains = jingrow.get_all( + "Site Domain", fields=["site", "domain", "name"], filters={"status": "Active"} + ) + + for domain in domains: + site_pg = jingrow.get_pg("Site", domain.site) + site_pg.add_domain_to_config(domain.domain) diff --git a/jcloud/patches/v0_0_1/add_site_index_to_site_migration.py b/jcloud/patches/v0_0_1/add_site_index_to_site_migration.py new file mode 100644 index 0000000..d2ed5bf --- /dev/null +++ b/jcloud/patches/v0_0_1/add_site_index_to_site_migration.py @@ -0,0 +1,9 @@ +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + + +import jingrow + + +def execute(): + jingrow.get_pg("PageType", "Site Migration").run_module_method("on_pagetype_update") diff --git a/jcloud/patches/v0_0_1/add_site_to_remote_file.py b/jcloud/patches/v0_0_1/add_site_to_remote_file.py new file mode 100644 index 0000000..82c564c --- /dev/null +++ b/jcloud/patches/v0_0_1/add_site_to_remote_file.py @@ -0,0 +1,25 @@ +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +import jingrow + + +def execute(): + """Add a site field to existing Remote File documents to track Offsite Backups via dashboard""" + offsite_backups = jingrow.get_all( + "Site Backup", + fields=["site", "remote_database_file", "remote_public_file", "remote_private_file"], + filters={"offsite": 1}, + ) + + for backup in offsite_backups: + remote_database_file, remote_public_file, remote_private_file = ( + backup.get("remote_database_file"), + backup.get("remote_public_file"), + backup.get("remote_private_file"), + ) + site = backup.get("site") + + for name in [remote_database_file, remote_public_file, remote_private_file]: + jingrow.db.set_value("Remote File", name, "site", site) diff --git a/jcloud/patches/v0_0_1/add_team_name_as_default_notify_email.py b/jcloud/patches/v0_0_1/add_team_name_as_default_notify_email.py new file mode 100644 index 0000000..9555139 --- /dev/null +++ b/jcloud/patches/v0_0_1/add_team_name_as_default_notify_email.py @@ -0,0 +1,15 @@ +import jingrow + + +def execute(): + """Set notify_email to the team name for existing teams and sites""" + jingrow.reload_pagetype("Team") + jingrow.reload_pagetype("Site") + + teams = jingrow.get_all("Team", pluck="name") + for team in teams: + jingrow.db.set_value("Team", team, "notify_email", team) + + sites = jingrow.get_all("Site", fields=["name", "team"]) + for site in sites: + jingrow.db.set_value("Site", site.name, "notify_email", site.team) diff --git a/jcloud/patches/v0_0_1/breakdown_site_usage_into_multiple_fields.py b/jcloud/patches/v0_0_1/breakdown_site_usage_into_multiple_fields.py new file mode 100644 index 0000000..a01b267 --- /dev/null +++ b/jcloud/patches/v0_0_1/breakdown_site_usage_into_multiple_fields.py @@ -0,0 +1,19 @@ +import json + +import jingrow + + +def execute(): + """Convert site._site_usages Data field into individual fields""" + jingrow.reload_pg("jcloud", "pagetype", "site") + non_archived_sites = jingrow.get_all( + "Site",
filters={"status": ("!=", "Archived")}, pluck="name" + ) + + for site in non_archived_sites: + site_pg = jingrow.get_pg("Site", site) + parsed_usage = json.loads(site_pg._site_usages or "{}") + site_pg.current_cpu_usage = parsed_usage.get("cpu", 0) * 100 + site_pg.current_database_usage = parsed_usage.get("database", 0) * 100 + site_pg.current_disk_usage = parsed_usage.get("disk", 0) * 100 + site_pg.save() diff --git a/jcloud/patches/v0_0_1/create_app_release_difference_from_deploy_candidate_difference.py b/jcloud/patches/v0_0_1/create_app_release_difference_from_deploy_candidate_difference.py new file mode 100644 index 0000000..5bda46f --- /dev/null +++ b/jcloud/patches/v0_0_1/create_app_release_difference_from_deploy_candidate_difference.py @@ -0,0 +1,44 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + + +import jingrow + + +def execute(): + jingrow.reload_pg("jcloud", "pagetype", "deploy_candidate_difference") + jingrow.reload_pg("jcloud", "pagetype", "deploy_candidate_difference_app") + jingrow.reload_pg("jcloud", "pagetype", "app_release_difference") + + jingrow.db.delete("Deploy Candidate Difference App", {"changed": False}) + differences = jingrow.get_all("Deploy Candidate Difference App", "*") + for difference in differences: + release_difference = jingrow.db.exists( + "App Release Difference", + { + "app": difference.app, + "source_release": difference.source_release, + "destination_release": difference.destination_release, + }, + ) + if not release_difference: + release_difference_pg = jingrow.get_pg( + { + "pagetype": "App Release Difference", + "app": difference.app, + "deploy_type": difference.deploy_type, + "source": jingrow.db.get_value("App Release", difference.source_release, "source"), + "source_release": difference.source_release, + "source_hash": difference.source_hash, + "destination_release": difference.destination_release, + "destination_hash": difference.destination_hash, + "files": difference.files, + "github_diff_url": difference.github_diff_url, + } + ) + release_difference_pg.db_insert() + release_difference = release_difference_pg.name + jingrow.db.set_value( + "Deploy Candidate Difference App", difference.name, "difference", release_difference + ) diff --git a/jcloud/patches/v0_0_1/create_app_source_from_app.py b/jcloud/patches/v0_0_1/create_app_source_from_app.py new file mode 100644 index 0000000..3b60096 --- /dev/null +++ b/jcloud/patches/v0_0_1/create_app_source_from_app.py @@ -0,0 +1,85 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + + +import jingrow +from jingrow.utils.fixtures import sync_fixtures + + +def execute(): + jingrow.reload_pg("jcloud", "pagetype", "app") + jingrow.reload_pg("jcloud", "pagetype", "app_source") + jingrow.reload_pg("jcloud", "pagetype", "jingrow_version") + sync_fixtures("jcloud") + jingrow.reload_pg("jcloud", "pagetype", "app_source_version") + jingrow.reload_pg("jcloud", "pagetype", "app_release") + jingrow.reload_pg("jcloud", "pagetype", "app_release_difference") + jingrow.reload_pg("jcloud", "pagetype", "release_group_app") + jingrow.reload_pg("jcloud", "pagetype", "bench_app") + jingrow.reload_pg("jcloud", "pagetype", "deploy_candidate_app") + distinct_apps = jingrow.get_all("App", ["title", "scrubbed"], group_by="scrubbed") + + for distinct_app in distinct_apps: + apps = jingrow.get_all( + "App", "*", {"scrubbed": distinct_app.scrubbed}, order_by="enabled desc" + ) + for app in apps: + versions = 
set(jingrow.get_all("Release Group", {"app": app.name}, pluck="version")) + if not versions: + groups = jingrow.get_all("Bench", {"app": app.name}, pluck="group") + versions = set( + jingrow.get_all("Release Group", {"name": ("in", groups)}, pluck="version") + ) + if not versions: + continue + source = { + "pagetype": "App Source", + "app": app.name, + "app_title": app.title, + "jingrow": app.jingrow, + "enabled": app.enabled, + "repository_url": app.url, + "repository": app.repo, + "repository_owner": app.repo_owner, + "branch": app.branch, + "github_installation_id": app.installation, + "public": app.public, + "team": app.team, + "versions": [{"version": version} for version in versions], + } + source = jingrow.get_pg(source) + source.name = "TEMP-SOURCE" + source.set_parent_in_children() + source.db_insert() + + for child in source.get_all_children(): + child.db_insert() + + jingrow.db.set_value("App Release", {"app": app.name}, "source", source.name) + jingrow.db.set_value("Bench App", {"app": app.name}, "source", source.name) + jingrow.db.set_value("Deploy Candidate App", {"app": app.name}, "source", source.name) + jingrow.db.set_value("Release Group App", {"app": app.name}, "source", source.name) + jingrow.db.set_value("Release Group App", {"app": app.name}, "title", app.title) + + existing = jingrow.db.exists("App", app.scrubbed, cache=False) + if existing and existing == app.scrubbed: + jingrow.rename_pg("App", app.name, app.scrubbed, merge=True) + else: + jingrow.rename_pg("App", app.name, app.scrubbed) + + old_source_name = source.name + source.reload() + source.autoname() + jingrow.rename_pg("App Source", old_source_name, source.name) + + +def delete(): + jingrow.db.set_value("App Release", {"cloned": False}, "source", None) + for difference in jingrow.get_all("App Release Difference"): + jingrow.delete_pg("App Release Difference", difference.name) + for source in jingrow.get_all("App Source"): + jingrow.delete_pg("App Source", source.name) + jingrow.db.delete( + "Patch Log", {"patch": "jcloud.patches.v0_0_1.create_app_source_from_app"} + ) diff --git a/jcloud/patches/v0_0_1/create_backup_uploads_folder.py b/jcloud/patches/v0_0_1/create_backup_uploads_folder.py new file mode 100644 index 0000000..4df48c0 --- /dev/null +++ b/jcloud/patches/v0_0_1/create_backup_uploads_folder.py @@ -0,0 +1,9 @@ +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +from jingrow.core.pagetype.file.file import create_new_folder + + +def execute(): + create_new_folder("Backup Uploads", "Home") diff --git a/jcloud/patches/v0_0_1/create_balance_transactions.py b/jcloud/patches/v0_0_1/create_balance_transactions.py new file mode 100644 index 0000000..69af6ca --- /dev/null +++ b/jcloud/patches/v0_0_1/create_balance_transactions.py @@ -0,0 +1,65 @@ +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +import jingrow +from jingrow.utils import update_progress_bar + +from jcloud.api.billing import get_stripe + + +def execute(): + jingrow.reload_pg("jcloud", "pagetype", "balance_transaction") + + partners = jingrow.db.get_all("Team", filters={"jerp_partner": 1}, pluck="name") + for i, name in enumerate(partners): + update_progress_bar("Creating Balance Transactions", i, len(partners)) + + if jingrow.db.exists( + "Balance Transaction", {"team": name, "description": "Initial Balance"} + ): + continue + + team = jingrow.get_pg("Team", name) + balance = team.get_stripe_balance() + if balance != 0: + stripe = get_stripe() + # reset customer balance 
on Stripe + stripe.Customer.create_balance_transaction( + team.stripe_customer_id, + # multiplied by 100 because Stripe wants amount in cents / paise + amount=int(balance * 100), + currency=team.currency.lower(), + description="Reset customer balance", + idempotency_key=team.name, + ) + free_credits_left = get_free_credits_left(team) + source = "" + if free_credits_left == balance: + source = "Free Credits" + + # set the balance as initial balance here + team.allocate_credit_amount(balance, source=source, remark="Stripe Balance") + + +def get_free_credits_left(team): + invoices = jingrow.db.get_all("Invoice", {"team": team.name, "status": ("!=", "Draft")}) + + settings = jingrow.get_pg("Jcloud Settings") + total_free_credits = ( + settings.free_credits_cny if team.currency == "CNY" else settings.free_credits_usd + ) + + if not invoices: + return total_free_credits + + def sum(list): + total = 0 + for d in list: + total += d + return total + + invoices_total = sum([invoice.total for invoice in invoices]) + if invoices_total < total_free_credits: + return total_free_credits - invoices_total + return 0 diff --git a/jcloud/patches/v0_0_1/create_balance_transactions_from_stripe.py b/jcloud/patches/v0_0_1/create_balance_transactions_from_stripe.py new file mode 100644 index 0000000..1f057ac --- /dev/null +++ b/jcloud/patches/v0_0_1/create_balance_transactions_from_stripe.py @@ -0,0 +1,187 @@ +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +from datetime import datetime + +import jingrow + +from jcloud.api.billing import get_stripe + +migrated_cache_key = "migrated_teams" + + +def execute(): + jingrow.reload_pg("jcloud", "pagetype", "balance_transaction") + + skip_teams = list(jingrow.cache().smembers(migrated_cache_key)) + teams = jingrow.db.get_all( + "Team", + filters={"name": ("not in", skip_teams), "stripe_customer_id": ("is", "set")}, + pluck="name", + ) + for name in teams: + try: + create_balance_transactions_for_team(name) + jingrow.cache().sadd(migrated_cache_key, name) + except Exception: + print(f"❗️ Failed for {name}") + print(jingrow.get_traceback()) + + +def create_balance_transactions_for_team(name): + team = jingrow.get_pg("Team", name) + stripe = get_stripe() + # skip if already done + if jingrow.db.exists( + "Balance Transaction", {"source": "Free Credits", "team": team.name} + ): + print(f"Skipping for {team.name}") + return + + print(f"Creating Balance Transactions for {team.name}") + + response = stripe.Customer.list_balance_transactions( + team.stripe_customer_id, limit=100 + ) + transactions = response.data + transactions.reverse() + + if team.free_credits_allocated: + free_credits_left = 1800 if team.currency == "CNY" else 25 + else: + free_credits_left = 0 + + balance_transactions = [] + free_credit_balance_created = False + for transaction in transactions: + amount = transaction.amount * -1 / 100 + type = ( + "Applied To Invoice" if transaction.type == "applied_to_invoice" else "Adjustment" + ) + source = "" + invoice_name = "" + if type == "Adjustment": + if amount > 0: + free_credits = 1800 if transaction.currency == "cny" else 25 + source = ( + "Free Credits" + if team.free_credits_allocated + and amount == free_credits + and not free_credit_balance_created + else "Transferred Credits" + ) + + if type == "Applied To Invoice": + invoice = jingrow.get_pg("Invoice", {"stripe_invoice_id": transaction.invoice}) + invoice_name = invoice.name + free_credits_applied = apply_to_invoice( + invoice, amount, free_credits_left, 
balance_transactions + ) + free_credits_left = jingrow.utils.rounded(free_credits_left - free_credits_applied, 2) + + bt = create_balance_transaction( + team, + amount=amount, + source=source, + type=type, + invoice=invoice_name, + creation=datetime.fromtimestamp(transaction.created), + ) + balance_transactions.append(bt) + if bt.source == "Free Credits": + free_credit_balance_created = True + + stripe_balance = team.get_stripe_balance() + team_balance = team.get_balance() + if stripe_balance == team_balance: + reset_customer_balance_on_stripe(team) + jingrow.db.commit() + print(f"✅ Successful for {team.name}") + else: + jingrow.db.rollback() + print( + f"❌ Balance mismatch for {team.name}. Team Balance: {team_balance}, Stripe" + f" Balance: {stripe_balance}" + ) + + +def apply_to_invoice(invoice, amount, free_credits_left, balance_transactions): + unallocated_bts = get_last_unallocated_balance_transactions(balance_transactions) + amount_to_apply = amount * -1 + applied = 0 + not_applied = amount_to_apply - applied + free_credits_applied = 0 + _free_credits_left = free_credits_left + for bt in unallocated_bts: + if applied == amount_to_apply: + break + + if _free_credits_left: + to_apply = min(not_applied, _free_credits_left) + if to_apply > bt.unallocated_amount: + to_apply = bt.unallocated_amount + free_credits_applied += to_apply + _free_credits_left -= to_apply + else: + to_apply = not_applied + if to_apply > bt.unallocated_amount: + to_apply = bt.unallocated_amount + + invoice.append( + "credit_allocations", + { + "transaction": bt.name, + "source": bt.source, + "amount": to_apply, + "currency": invoice.currency, + }, + ) + row = bt.append( + "allocated_to", + {"invoice": invoice.name, "amount": to_apply, "currency": invoice.currency}, + ) + bt.save() + bt.reload() + applied += to_apply + not_applied = amount_to_apply - applied + + for row in invoice.credit_allocations: + row.db_insert() + + return free_credits_applied + + +def reset_customer_balance_on_stripe(team): + stripe = get_stripe() + balance = team.get_stripe_balance() + if balance != 0: + stripe.Customer.create_balance_transaction( + team.stripe_customer_id, + # multiplied by 100 because Stripe wants amount in cents / paise + # to reset the balance we should provide negative value of the original value + # but we store the credit balance as a positive and stripe stores it as negative + # so a positive value will negate the value to 0 on stripe + amount=int(balance * 100), + currency=team.currency.lower(), + description="Reset customer balance", + idempotency_key=team.name, + ) + + +def get_last_unallocated_balance_transactions(balance_transactions): + out = [] + adjustments = [d for d in balance_transactions if d.type == "Adjustment"] + for bt in adjustments: + if bt.unallocated_amount > 0: + out.append(bt) + return out + + +def create_balance_transaction(team, **kwargs): + pg = jingrow.get_pg(pagetype="Balance Transaction", team=team.name) + pg.update(kwargs) + pg.insert(ignore_permissions=True) + pg.submit() + pg.reload() + return pg diff --git a/jcloud/patches/v0_0_1/create_certificate_authorities.py b/jcloud/patches/v0_0_1/create_certificate_authorities.py new file mode 100644 index 0000000..b46364a --- /dev/null +++ b/jcloud/patches/v0_0_1/create_certificate_authorities.py @@ -0,0 +1,16 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +import jingrow + +from jcloud.install import create_certificate_authorities + + +def execute(): + 
jingrow.reload_pg("jcloud", "pagetype", "certificate_authority") + if jingrow.get_site_config().developer_mode: + jingrow.conf.developer_mode = 1 + create_certificate_authorities() + jingrow.conf.developer_mode = 0 diff --git a/jcloud/patches/v0_0_1/create_child_table_records_in_teams.py b/jcloud/patches/v0_0_1/create_child_table_records_in_teams.py new file mode 100644 index 0000000..0bfb357 --- /dev/null +++ b/jcloud/patches/v0_0_1/create_child_table_records_in_teams.py @@ -0,0 +1,19 @@ +import jingrow + +from jcloud.utils import log_error + + +def execute(): + jingrow.reload_pg("jcloud", "pagetype", "team") + teams = jingrow.get_all("Team", pluck="name") + + for name in teams: + try: + team = jingrow.get_pg("Team", name) + team.append("communication_emails", {"type": "invoices", "value": team.name}) + team.append( + "communication_emails", {"type": "marketplace_notifications", "value": team.name} + ) + team.save() + except Exception as e: + log_error(title="Weird Country Name", data=e) diff --git a/jcloud/patches/v0_0_1/create_default_cluster.py b/jcloud/patches/v0_0_1/create_default_cluster.py new file mode 100644 index 0000000..a2d0d48 --- /dev/null +++ b/jcloud/patches/v0_0_1/create_default_cluster.py @@ -0,0 +1,15 @@ +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + + +import jingrow + + +def execute(): + jingrow.reload_pg("jcloud", "pagetype", "cluster") + cluster = jingrow.get_pg({"pagetype": "Cluster", "name": "Default", "default": True}) + cluster.insert() + doctypes = ["Server", "Proxy Server", "Database Server", "Bench", "Site"] + for pagetype in doctypes: + jingrow.reload_pg("jcloud", "pagetype", jingrow.scrub(pagetype)) + jingrow.db.set_value(pagetype, {"name": ("like", "%")}, "cluster", "Default") diff --git a/jcloud/patches/v0_0_1/create_invoice_for_past_ples.py b/jcloud/patches/v0_0_1/create_invoice_for_past_ples.py new file mode 100644 index 0000000..cd98872 --- /dev/null +++ b/jcloud/patches/v0_0_1/create_invoice_for_past_ples.py @@ -0,0 +1,162 @@ +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +from datetime import datetime + +import jingrow + +from jcloud.api.billing import get_stripe +from jcloud.jcloud.pagetype.team.team_invoice import TeamInvoice + +# - Cancel subscription +# - Create old invoices +# - Create running draft invoice + +migrated_cache_key = "migrated_teams" +traceback_cache_key = "team_traceback" +total_match_failure_key = "total_match_failure" +failed_subscriptions = "failed_subscriptions" + + +def execute(): + skip_teams = list(jingrow.cache().smembers(migrated_cache_key)) + for d in jingrow.db.get_all("Team", filters={"name": ("not in", skip_teams)}): + migrate_team(d.name) + + +def migrate_team(team): + try: + cancel_subscription(team) + last_invoice_period_end = create_past_invoices(team) + create_draft_invoice(team, last_invoice_period_end) + jingrow.db.commit() + jingrow.cache().sadd(migrated_cache_key, team) + log(team, message="migration_success") + except Exception: + jingrow.cache().hset(traceback_cache_key, team, jingrow.get_traceback()) + jingrow.db.rollback() + log(team, message="migration_failed ❌") + print() + + +def cancel_subscription(team): + stripe = get_stripe() + subscription_id = jingrow.db.get_value( + "Subscription", {"team": team}, "stripe_subscription_id" + ) + if subscription_id: + try: + stripe.Subscription.delete(subscription_id) + except Exception: + log(team, message="cancel_subscription_failed ❌") + jingrow.cache().sadd(failed_subscriptions, 
subscription_id) + + +def create_past_invoices(team): + team = jingrow.get_pg("Team", team) + stripe = get_stripe() + res = stripe.Invoice.list(customer=team.stripe_customer_id) + # remove the invoice with 0 amount + invoices = [d for d in res["data"] if d["total"] != 0] + # sort into ascending order of creation + invoices.reverse() + + last_invoice_period_end = None + + for index, invoice in enumerate(invoices): + i = jingrow.new_pg("Invoice") + i.team = team.name + i.customer_name = jingrow.utils.get_fullname(team.user) + i.customer_email = team.user + i.currency = team.currency + i.period_start = datetime.fromtimestamp(invoice["period_start"]) + if index != 0: + i.period_start = jingrow.utils.add_days(i.period_start, 1) + i.period_end = datetime.fromtimestamp(invoice["period_end"]) + i.stripe_invoice_id = invoice["id"] + i.starting_balance = invoice["starting_balance"] / 100 + i.ending_balance = (invoice["ending_balance"] or 0) / 100 + i.amount_due = invoice["amount_due"] / 100 + i.amount_paid = invoice["amount_paid"] / 100 + i.stripe_invoice_url = invoice["hosted_invoice_url"] + + if invoice["status"] == "paid": + i.payment_date = datetime.fromtimestamp(invoice["status_transitions"]["paid_at"]) + i.status = "Paid" + else: + i.status = "Overdue" + i.payment_attempt_count = invoice.get("attempt_count") + + last_invoice_period_end = i.period_end + + i.save() + i.reload() + + entries = jingrow.db.get_all( + "Payment Ledger Entry", + filters={ + "creation": ("between", [i.period_start, i.period_end]), + "purpose": "Site Consumption", + "team": team.name, + "docstatus": 1, + "free_usage": False, + }, + ) + for e in entries: + ledger_entry = jingrow.get_pg("Payment Ledger Entry", e.name) + TeamInvoice( + team, i.period_start.month, i.period_start.year + ).update_ledger_entry_in_invoice(ledger_entry, i) + + i.reload() + if i.total == (invoice["total"] / 100): + log( + team.name, + invoice=i.name, + stripe_invoice=invoice["id"], + message="total_match_success", + ) + i.db_set("docstatus", 1) + else: + log( + team.name, + invoice=i.name, + stripe_invoice=invoice["id"], + message="total_match_failure ❌", + ) + jingrow.cache().sadd(total_match_failure_key, i.name) + return last_invoice_period_end + + +def create_draft_invoice(team, last_invoice_period_end=None): + if not last_invoice_period_end: + # no invoices has been created yet + # create an invoice when they joined + period_start = jingrow.db.get_value("Team", team, "creation") + else: + period_start = jingrow.utils.add_days(last_invoice_period_end, 1) + + invoice = TeamInvoice(team, period_start.month, period_start.year).create(period_start) + + entries = jingrow.db.get_all( + "Payment Ledger Entry", + filters={ + "creation": ("between", [invoice.period_start, invoice.period_end]), + "purpose": "Site Consumption", + "team": team, + "docstatus": 1, + "free_usage": False, + }, + ) + for e in entries: + ledger_entry = jingrow.get_pg("Payment Ledger Entry", e.name) + ledger_entry.update_usage_in_invoice() + + log(team, invoice=invoice.name, message="created_draft_invoice") + + +def log(team, invoice=None, stripe_invoice=None, message=None): + text = " \t ".join([team, invoice or "", stripe_invoice or "", message or ""]) + jingrow.cache().lpush("migrate_log", text) + print(text) diff --git a/jcloud/patches/v0_0_1/create_root_domain_from_jcloud_settings.py b/jcloud/patches/v0_0_1/create_root_domain_from_jcloud_settings.py new file mode 100644 index 0000000..38ddc6f --- /dev/null +++ 
b/jcloud/patches/v0_0_1/create_root_domain_from_jcloud_settings.py @@ -0,0 +1,27 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + + +import jingrow + + +def execute(): + jingrow.reload_pg("jcloud", "pagetype", "root_domain") + jcloud_settings = jingrow.get_pg("Jcloud Settings", "Jcloud Settings") + if ( + jcloud_settings.domain + and jcloud_settings.aws_secret_access_key + and not jingrow.db.exists("Root Domain", jcloud_settings.domain) + ): + default_cluster = jingrow.db.get_value("Cluster", {"default": True}) + jingrow.get_pg( + { + "pagetype": "Root Domain", + "name": jcloud_settings.domain, + "default_cluster": default_cluster, + "dns_provider": jcloud_settings.dns_provider, + "aws_access_key_id": jcloud_settings.aws_access_key_id, + "aws_secret_access_key": jcloud_settings.get_password("aws_secret_access_key"), + } + ).insert() diff --git a/jcloud/patches/v0_0_1/create_site_plan_change_log.py b/jcloud/patches/v0_0_1/create_site_plan_change_log.py new file mode 100644 index 0000000..a3e7ab7 --- /dev/null +++ b/jcloud/patches/v0_0_1/create_site_plan_change_log.py @@ -0,0 +1,11 @@ +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +import jingrow + + +def execute(): + jingrow.reload_pg("jcloud", "pagetype", "site_plan_change") + for site in jingrow.db.get_all("Site", {"status": "Active"}, ["name"]): + jingrow.get_pg("Site", site.name)._create_initial_site_plan_change() diff --git a/jcloud/patches/v0_0_1/create_site_subscriptions.py b/jcloud/patches/v0_0_1/create_site_subscriptions.py new file mode 100644 index 0000000..be43261 --- /dev/null +++ b/jcloud/patches/v0_0_1/create_site_subscriptions.py @@ -0,0 +1,43 @@ +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +import jingrow +from jingrow.utils import update_progress_bar + + +def execute(): + jingrow.db.sql("DROP TABLE IF EXISTS `tabSubscription`") + + jingrow.reload_pg("jcloud", "pagetype", "subscription", force=True) + jingrow.reload_pg("jcloud", "pagetype", "site") + + active_sites = jingrow.db.get_all( + "Site", + filters={"status": "Active", "free": False, "team": ("is", "set")}, + fields=["name", "team", "plan"], + ) + for i, site in enumerate(active_sites): + update_progress_bar("Creating Subscriptions", i, len(active_sites)) + + # skip if already exists + if jingrow.db.exists( + "Subscription", + {"team": site.team, "document_type": "Site", "document_name": site.name}, + ): + continue + + try: + jingrow.get_pg( + pagetype="Subscription", + enabled=1, + team=site.team, + document_type="Site", + document_name=site.name, + plan=site.plan, + interval="Daily", + ).insert() + except jingrow.DuplicateEntryError: + print(f"Failed to create subscription for site {site}") + + print() diff --git a/jcloud/patches/v0_0_1/delete_logs_from_archived_sites.py b/jcloud/patches/v0_0_1/delete_logs_from_archived_sites.py new file mode 100644 index 0000000..c08d093 --- /dev/null +++ b/jcloud/patches/v0_0_1/delete_logs_from_archived_sites.py @@ -0,0 +1,14 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +import jingrow + +from jcloud.jcloud.pagetype.site.site import delete_logs + + +def execute(): + archived_sites = jingrow.get_all("Site", filters={"status": "Archived"}) + for site in archived_sites: + delete_logs(site.name) diff --git a/jcloud/patches/v0_0_1/enable_partner_privileges.py b/jcloud/patches/v0_0_1/enable_partner_privileges.py new file mode 
100644 index 0000000..8a1ef6d --- /dev/null +++ b/jcloud/patches/v0_0_1/enable_partner_privileges.py @@ -0,0 +1,12 @@ +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +import jingrow + + +def execute(): + for d in jingrow.db.get_all("Team"): + team = jingrow.get_pg("Team", d.name) + if team.has_partner_account_on_jerp_com(): + team.enable_jerp_partner_privileges() diff --git a/jcloud/patches/v0_0_1/make_apps_in_public_release_group_public.py b/jcloud/patches/v0_0_1/make_apps_in_public_release_group_public.py new file mode 100644 index 0000000..f034368 --- /dev/null +++ b/jcloud/patches/v0_0_1/make_apps_in_public_release_group_public.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2019, JINGROW +# For license information, please see license.txt + + +import jingrow + + +def execute(): + jingrow.reload_pagetype("Release Group") + jingrow.reload_pagetype("Jingrow App") + groups = jingrow.get_all("Release Group", filters={"public": True}) + for group in groups: + for app in jingrow.get_pg("Release Group", group.name).apps: + jingrow.db.set_value("Jingrow App", app.app, "public", True) diff --git a/jcloud/patches/v0_0_1/make_default_site_domain.py b/jcloud/patches/v0_0_1/make_default_site_domain.py new file mode 100644 index 0000000..f684a8e --- /dev/null +++ b/jcloud/patches/v0_0_1/make_default_site_domain.py @@ -0,0 +1,12 @@ +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +import jingrow + + +def execute(): + jingrow.reload_pg("jcloud", "pagetype", "site") + jingrow.reload_pg("jcloud", "pagetype", "site_domain") + for site in jingrow.db.get_all("Site", {"status": ("!=", "Archived")}, pluck="name"): + jingrow.get_pg("Site", site)._create_default_site_domain() diff --git a/jcloud/patches/v0_0_1/mark_deployed_app_releases_as_approved_and_deployable.py b/jcloud/patches/v0_0_1/mark_deployed_app_releases_as_approved_and_deployable.py new file mode 100644 index 0000000..b15b4a7 --- /dev/null +++ b/jcloud/patches/v0_0_1/mark_deployed_app_releases_as_approved_and_deployable.py @@ -0,0 +1,18 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2019, JINGROW +# For license information, please see license.txt + + +import jingrow + + +def execute(): + jingrow.reload_pagetype("App Release") + benches = jingrow.get_all( + "Bench", fields=["name", "candidate"], filters={"status": ("!=", "Archived")} + ) + candidates = list(set(bench.candidate for bench in benches)) + for candidate in candidates: + for app in jingrow.get_pg("Deploy Candidate", candidate).apps: + jingrow.db.set_value("App Release", app.release, "status", "Approved") + jingrow.db.set_value("App Release", app.release, "deployable", 1) diff --git a/jcloud/patches/v0_0_1/move_domains_from_archived_to_active_sites.py b/jcloud/patches/v0_0_1/move_domains_from_archived_to_active_sites.py new file mode 100644 index 0000000..5a147c4 --- /dev/null +++ b/jcloud/patches/v0_0_1/move_domains_from_archived_to_active_sites.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +import jingrow + + +def execute(): + domains = jingrow.get_all( + "Site Domain", + fields=["site", "domain", "name"], + filters={"status": "Active", "site": ("like", "%.archived%")}, + ) + for domain in domains: + archived_site = jingrow.get_value( + "Site", domain.site, ["status", "subdomain", "team"], as_dict=True + ) + + if archived_site.status != "Archived": + continue + + active_site = jingrow.db.get_value( + "Site", + 
{"subdomain": archived_site.subdomain, "status": ("!=", "Archived")}, + ["name", "team"], + as_dict=True, + ) + if active_site and archived_site.team == active_site.team: + jingrow.db.set_value("Site Domain", domain.name, "site", active_site.name) diff --git a/jcloud/patches/v0_0_1/new_onboarding.py b/jcloud/patches/v0_0_1/new_onboarding.py new file mode 100644 index 0000000..e7ef32b --- /dev/null +++ b/jcloud/patches/v0_0_1/new_onboarding.py @@ -0,0 +1,43 @@ +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +import jingrow +from jingrow.utils import update_progress_bar + + +def execute(): + jingrow.reload_pg("jcloud", "pagetype", "team") + jingrow.reload_pg("jcloud", "pagetype", "team_onboarding") + + teams = jingrow.db.get_all("Team") + for i, team in enumerate(teams): + update_progress_bar("Updating onboarding", i, len(teams)) + + pg = jingrow.get_pg("Team", team) + + if pg.onboarding: + continue + + pg.initialize_onboarding_steps() + + if pg.jerp_partner: + update_onboarding(pg, "Add Billing Information", "Skipped") + update_onboarding(pg, "Transfer Credits", "Skipped") + update_onboarding(pg, "Create Site", "Skipped") + + if pg.default_payment_method: + update_onboarding(pg, "Add Billing Information", "Completed") + + if jingrow.db.count("Site", {"team": pg.name}) > 0: + update_onboarding(pg, "Create Site", "Completed") + + pg.save() + + print() + + +def update_onboarding(team, step_name, status): + for step in team.onboarding: + if step.step_name == step_name: + step.status = status diff --git a/jcloud/patches/v0_0_1/patch_invoice.py b/jcloud/patches/v0_0_1/patch_invoice.py new file mode 100644 index 0000000..1d2d513 --- /dev/null +++ b/jcloud/patches/v0_0_1/patch_invoice.py @@ -0,0 +1,36 @@ +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +import jingrow + + +def execute(): + jingrow.reload_pg("jcloud", "pagetype", "invoice") + jingrow.reload_pg("jcloud", "pagetype", "invoice_item") + + # invoice site usage -> invoice item + jingrow.db.sql( + """ + update + `tabInvoice Item` i, + `tabInvoice Site Usage` u + set + i.document_type = 'Site', + i.document_name = u.site, + i.plan = u.plan + where + u.parent = i.parent + and u.idx = i.idx + """ + ) + + # compute applied_credits + jingrow.db.sql( + """ + update + tabInvoice + set + applied_credits = -1 * (starting_balance - ending_balance) + """ + ) diff --git a/jcloud/patches/v0_0_1/ple_to_usage_record.py b/jcloud/patches/v0_0_1/ple_to_usage_record.py new file mode 100644 index 0000000..905c7d4 --- /dev/null +++ b/jcloud/patches/v0_0_1/ple_to_usage_record.py @@ -0,0 +1,56 @@ +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +import jingrow + + +def execute(): + jingrow.reload_pg("jcloud", "pagetype", "usage_record") + # PLE to Usage Record + jingrow.db.sql( + """ + insert into + `tabUsage Record` ( + `name`, + `creation`, + `modified`, + `modified_by`, + `owner`, + `team`, + `document_type`, + `document_name`, + `date`, + `plan`, + `currency`, + `amount`, + `interval`, + `invoice`, + `remark`, + `docstatus` + ) + select + CONCAT('UT', SUBSTR(ple.name, 4)), + ple.creation, + ple.modified, + ple.modified_by, + ple.owner, + ple.team, + 'Site', + ple.site, + ple.date, + ple.plan, + ple.currency, + ple.amount * -1, + 'Daily', + ple.invoice, + ple.remark, + 1 + from + `tabPayment Ledger Entry` ple + where + ple.purpose = 'Site Consumption' + and ple.docstatus = 1 + and ple.free_usage = 0 + """ + ) diff --git 
a/jcloud/patches/v0_0_1/remove_domains_linked_to_archived_sites.py b/jcloud/patches/v0_0_1/remove_domains_linked_to_archived_sites.py new file mode 100644 index 0000000..9f615df --- /dev/null +++ b/jcloud/patches/v0_0_1/remove_domains_linked_to_archived_sites.py @@ -0,0 +1,14 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +import jingrow + + +def execute(): + domains = jingrow.get_all( + "Site Domain", filters={"status": "Active", "site": ("like", "%.archived%")} + ) + for domain in domains: + jingrow.delete_pg("Site Domain", domain.name) diff --git a/jcloud/patches/v0_0_1/remove_obsolete_doctypes.py b/jcloud/patches/v0_0_1/remove_obsolete_doctypes.py new file mode 100644 index 0000000..1be8e48 --- /dev/null +++ b/jcloud/patches/v0_0_1/remove_obsolete_doctypes.py @@ -0,0 +1,21 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +import jingrow + + +def execute(): + obsolete_doctypes = [ + "Credit Ledger Entry", + "Custom Domain", + "Site Analytics", + "Site History", + "Site Usage Ledger Entry", + "Usage Report", + "User Account", + ] + for pagetype in obsolete_doctypes: + if jingrow.db.exists("PageType", pagetype): + jingrow.delete_pg("PageType", pagetype) diff --git a/jcloud/patches/v0_0_1/remove_period_from_plan.py b/jcloud/patches/v0_0_1/remove_period_from_plan.py new file mode 100644 index 0000000..0e3d791 --- /dev/null +++ b/jcloud/patches/v0_0_1/remove_period_from_plan.py @@ -0,0 +1,9 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + +import jingrow + + +def execute(): + jingrow.db.sql_ddl("ALTER TABLE `tabPlan` DROP COLUMN `period`") diff --git a/jcloud/patches/v0_0_1/rename_archived_sites.py b/jcloud/patches/v0_0_1/rename_archived_sites.py new file mode 100644 index 0000000..cef0cac --- /dev/null +++ b/jcloud/patches/v0_0_1/rename_archived_sites.py @@ -0,0 +1,15 @@ +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +import jingrow + +from jcloud.jcloud.pagetype.site.site import release_name + + +def execute(): + jingrow.reload_pg("jcloud", "pagetype", "site") + sites = jingrow.get_all("Site", filters={"status": "Archived"}) + for site in sites: + release_name(site.name) + jingrow.db.commit() diff --git a/jcloud/patches/v0_0_1/rename_columns_in_tls_certificate.py b/jcloud/patches/v0_0_1/rename_columns_in_tls_certificate.py new file mode 100644 index 0000000..2f75900 --- /dev/null +++ b/jcloud/patches/v0_0_1/rename_columns_in_tls_certificate.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +import jingrow +from jingrow.model.utils.rename_field import rename_field + + +def execute(): + jingrow.reload_pagetype("TLS Certificate") + rename_field("TLS Certificate", "expiry", "expires_on") + rename_field("TLS Certificate", "privkey", "private_key") + rename_field("TLS Certificate", "fullchain", "full_chain") + rename_field("TLS Certificate", "chain", "intermediate_chain") diff --git a/jcloud/patches/v0_0_1/rename_deploy_candidate_app_release_to_deploy_candidate_app.py b/jcloud/patches/v0_0_1/rename_deploy_candidate_app_release_to_deploy_candidate_app.py new file mode 100644 index 0000000..6ac9843 --- /dev/null +++ b/jcloud/patches/v0_0_1/rename_deploy_candidate_app_release_to_deploy_candidate_app.py @@ -0,0 +1,13 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2021, JINGROW +# For license 
information, please see license.txt + + +import jingrow + + +def execute(): + from_pagetype = "Deploy Candidate App Release" + to_pagetype = "Deploy Candidate App" + if jingrow.db.table_exists(from_pagetype) and not jingrow.db.table_exists(to_pagetype): + jingrow.rename_pg("PageType", from_pagetype, to_pagetype, force=True) diff --git a/jcloud/patches/v0_0_1/rename_installed_app_to_bench_app.py b/jcloud/patches/v0_0_1/rename_installed_app_to_bench_app.py new file mode 100644 index 0000000..6db5fba --- /dev/null +++ b/jcloud/patches/v0_0_1/rename_installed_app_to_bench_app.py @@ -0,0 +1,13 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + + +import jingrow + + +def execute(): + from_pagetype = "Installed App" + to_pagetype = "Bench App" + if jingrow.db.table_exists(from_pagetype) and not jingrow.db.table_exists(to_pagetype): + jingrow.rename_pg("PageType", from_pagetype, to_pagetype, force=True) diff --git a/jcloud/patches/v0_0_1/rename_jingrow_app_to_app.py b/jcloud/patches/v0_0_1/rename_jingrow_app_to_app.py new file mode 100644 index 0000000..5836295 --- /dev/null +++ b/jcloud/patches/v0_0_1/rename_jingrow_app_to_app.py @@ -0,0 +1,13 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2019, JINGROW +# For license information, please see license.txt + + +import jingrow + + +def execute(): + from_pagetype = "Jingrow App" + to_pagetype = "App" + if jingrow.db.table_exists(from_pagetype) and not jingrow.db.table_exists(to_pagetype): + jingrow.rename_pg("PageType", from_pagetype, to_pagetype, force=True) diff --git a/jcloud/patches/v0_0_1/rename_release_group_jingrow_app_to_release_group_app.py b/jcloud/patches/v0_0_1/rename_release_group_jingrow_app_to_release_group_app.py new file mode 100644 index 0000000..a91aa3b --- /dev/null +++ b/jcloud/patches/v0_0_1/rename_release_group_jingrow_app_to_release_group_app.py @@ -0,0 +1,13 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +import jingrow + + +def execute(): + from_pagetype = "Release Group Jingrow App" + to_pagetype = "Release Group App" + if jingrow.db.table_exists(from_pagetype) and not jingrow.db.table_exists(to_pagetype): + jingrow.rename_pg("PageType", from_pagetype, to_pagetype, force=True) diff --git a/jcloud/patches/v0_0_1/rename_release_groups.py b/jcloud/patches/v0_0_1/rename_release_groups.py new file mode 100644 index 0000000..144341d --- /dev/null +++ b/jcloud/patches/v0_0_1/rename_release_groups.py @@ -0,0 +1,19 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + + +import jingrow + + +def execute(): + jingrow.reload_pg("jcloud", "pagetype", "release_group") + jingrow.reload_pg("jcloud", "pagetype", "release_group_app") + jingrow.reload_pg("jcloud", "pagetype", "release_group_server") + + groups = jingrow.get_all("Release Group") + for group in groups: + old_group_name = group.name + group = jingrow.get_pg("Release Group", group.name) + group.set_new_name(force=True) + jingrow.rename_pg("Release Group", old_group_name, group.name, force=True) diff --git a/jcloud/patches/v0_0_1/rename_site_backup_fields.py b/jcloud/patches/v0_0_1/rename_site_backup_fields.py new file mode 100644 index 0000000..8fc15ec --- /dev/null +++ b/jcloud/patches/v0_0_1/rename_site_backup_fields.py @@ -0,0 +1,16 @@ +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +import jingrow + + +def execute(): + jingrow.reload_pg("jcloud", "pagetype", 
"site_backup") + jingrow.db.sql( + """ + UPDATE `tabSite Backup` + SET `database_file` = `database`, `database_url` = `url`, `database_size` = `size` + WHERE `database` IS NOT NULL + """ + ) diff --git a/jcloud/patches/v0_0_1/rename_transaction_currency_to_currency.py b/jcloud/patches/v0_0_1/rename_transaction_currency_to_currency.py new file mode 100644 index 0000000..ec44c22 --- /dev/null +++ b/jcloud/patches/v0_0_1/rename_transaction_currency_to_currency.py @@ -0,0 +1,11 @@ +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +import jingrow +from jingrow.model.utils.rename_field import rename_field + + +def execute(): + jingrow.reload_pg("jcloud", "pagetype", "team") + rename_field("Team", "transaction_currency", "currency") diff --git a/jcloud/patches/v0_0_1/rename_workers_to_background_workers.py b/jcloud/patches/v0_0_1/rename_workers_to_background_workers.py new file mode 100644 index 0000000..1ebc12f --- /dev/null +++ b/jcloud/patches/v0_0_1/rename_workers_to_background_workers.py @@ -0,0 +1,12 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + + +import jingrow +from jingrow.model.utils.rename_field import rename_field + + +def execute(): + jingrow.reload_pagetype("Bench") + rename_field("Bench", "workers", "background_workers") diff --git a/jcloud/patches/v0_0_1/set_app_title_from_custom_field.py b/jcloud/patches/v0_0_1/set_app_title_from_custom_field.py new file mode 100644 index 0000000..ae84b11 --- /dev/null +++ b/jcloud/patches/v0_0_1/set_app_title_from_custom_field.py @@ -0,0 +1,13 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + + +import jingrow + + +def execute(): + jingrow.reload_pg("jcloud", "pagetype", "app") + apps = jingrow.get_all("App", ["name", "_title"], {"title": ("is", "not set")}) + for app in apps: + jingrow.db.set_value("App", app.name, "title", app._title) diff --git a/jcloud/patches/v0_0_1/set_app_title_in_deploy_candidate_app.py b/jcloud/patches/v0_0_1/set_app_title_in_deploy_candidate_app.py new file mode 100644 index 0000000..1338b9e --- /dev/null +++ b/jcloud/patches/v0_0_1/set_app_title_in_deploy_candidate_app.py @@ -0,0 +1,25 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + + +import jingrow + + +def execute(): + apps = jingrow.db.sql( + """ + SELECT + candidate.name, app.title + FROM + `tabDeploy Candidate App` candidate + LEFT JOIN + `tabApp` app + ON + candidate.app = app.name + """, + as_dict=True, + ) + + for app in apps: + jingrow.db.set_value("Deploy Candidate App", app.name, "title", app.title) diff --git a/jcloud/patches/v0_0_1/set_billing_name_for_teams.py b/jcloud/patches/v0_0_1/set_billing_name_for_teams.py new file mode 100644 index 0000000..c2de383 --- /dev/null +++ b/jcloud/patches/v0_0_1/set_billing_name_for_teams.py @@ -0,0 +1,17 @@ +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + + +import jingrow + + +def execute(): + scheme_before = jingrow.db.auto_commit_on_many_writes + jingrow.db.auto_commit_on_many_writes = True + jingrow.reload_pg("jcloud", "pagetype", "team") + + teams = jingrow.get_all("Team", pluck="name") + for team in teams: + jingrow.get_pg("Team", team).save() + + jingrow.db.auto_commit_on_many_writes = scheme_before diff --git a/jcloud/patches/v0_0_1/set_cluster_in_jcloud_settings.py b/jcloud/patches/v0_0_1/set_cluster_in_jcloud_settings.py new file mode 100644 index 
0000000..0468952 --- /dev/null +++ b/jcloud/patches/v0_0_1/set_cluster_in_jcloud_settings.py @@ -0,0 +1,18 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + + +import jingrow + + +def execute(): + jingrow.reload_pg("jcloud", "pagetype", "jerp_app") + jingrow.reload_pg("jcloud", "pagetype", "jcloud_settings") + jingrow.clear_cache() + jcloud_settings = jingrow.get_pg("Jcloud Settings", "Jcloud Settings") + if not jcloud_settings.get("cluster"): + jcloud_settings.cluster = jingrow.db.get_value( + "Root Domain", jcloud_settings.domain, "default_cluster" + ) + jcloud_settings.save() diff --git a/jcloud/patches/v0_0_1/set_dependencies_in_release_group.py b/jcloud/patches/v0_0_1/set_dependencies_in_release_group.py new file mode 100644 index 0000000..bb4cf82 --- /dev/null +++ b/jcloud/patches/v0_0_1/set_dependencies_in_release_group.py @@ -0,0 +1,23 @@ +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + + +import jingrow + + +def execute(): + jingrow.reload_pg("jcloud", "pagetype", "release_group_dependency") + jingrow.reload_pg("jcloud", "pagetype", "release_group") + + for name in jingrow.db.get_all("Release Group", pluck="name"): + release_group = jingrow.get_pg("Release Group", name) + release_group.extend( + "dependencies", + [ + {"dependency": "NVM_VERSION", "version": "0.36.0"}, + {"dependency": "NODE_VERSION", "version": "12.19.0"}, + {"dependency": "PYTHON_VERSION", "version": "3.7"}, + {"dependency": "WKHTMLTOPDF_VERSION", "version": "0.12.5"}, + ], + ) + release_group.db_update_all() diff --git a/jcloud/patches/v0_0_1/set_document_type_in_plan.py b/jcloud/patches/v0_0_1/set_document_type_in_plan.py new file mode 100644 index 0000000..e9c9935 --- /dev/null +++ b/jcloud/patches/v0_0_1/set_document_type_in_plan.py @@ -0,0 +1,10 @@ +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +import jingrow + + +def execute(): + jingrow.reload_pg("jcloud", "pagetype", "plan") + jingrow.db.sql('update tabPlan set document_type = "Site", `interval` = "Daily"') diff --git a/jcloud/patches/v0_0_1/set_domain_in_site.py b/jcloud/patches/v0_0_1/set_domain_in_site.py new file mode 100644 index 0000000..f2729bb --- /dev/null +++ b/jcloud/patches/v0_0_1/set_domain_in_site.py @@ -0,0 +1,13 @@ +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +import jingrow + + +def execute(): + jingrow.reload_pg("jcloud", "pagetype", "site") + domain = jingrow.db.get_single_value("Jcloud Settings", "domain") + jingrow.db.sql( + "UPDATE tabSite SET domain = %s WHERE IFNULL(domain, '') = ''", (domain,) + ) diff --git a/jcloud/patches/v0_0_1/set_host_name_for_sites_with_domains.py b/jcloud/patches/v0_0_1/set_host_name_for_sites_with_domains.py new file mode 100644 index 0000000..c3a3301 --- /dev/null +++ b/jcloud/patches/v0_0_1/set_host_name_for_sites_with_domains.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +import jingrow + + +def execute(): + jingrow.reload_pagetype("Site") + domains = jingrow.get_all( + "Site Domain", + fields=["site", "domain"], + filters={"status": "Active"}, + group_by="site", + ) + for domain in domains: + site = jingrow.get_pg("Site", domain.site) + if site.status == "Active": + site.set_host_name(domain.domain) diff --git a/jcloud/patches/v0_0_1/set_hostname_in_server.py b/jcloud/patches/v0_0_1/set_hostname_in_server.py new file mode 100644 index 
0000000..73c0cde --- /dev/null +++ b/jcloud/patches/v0_0_1/set_hostname_in_server.py @@ -0,0 +1,17 @@ +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +import jingrow + + +def execute(): + doctypes = ["Server", "Proxy Server", "Database Server"] + for pagetype in doctypes: + jingrow.reload_pg("jcloud", "pagetype", jingrow.scrub(pagetype)) + servers = jingrow.get_all(pagetype, {"hostname": ("is", "not set")}) + domain = jingrow.db.get_single_value("Jcloud Settings", "domain") + for server in servers: + hostname = server.name.replace(f".{domain}", "") + jingrow.db.set_value(pagetype, server.name, "hostname", hostname) + jingrow.db.set_value(pagetype, server.name, "domain", domain) diff --git a/jcloud/patches/v0_0_1/set_marketplace_app_app_field_from_name.py b/jcloud/patches/v0_0_1/set_marketplace_app_app_field_from_name.py new file mode 100644 index 0000000..50bc853 --- /dev/null +++ b/jcloud/patches/v0_0_1/set_marketplace_app_app_field_from_name.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + + +import jingrow + + +def execute(): + jingrow.reload_pg("jcloud", "pagetype", "marketplace_app") + apps = jingrow.get_all("Marketplace App", {"app": ("is", "not set")}) + for app in apps: + jingrow.db.set_value( + "Marketplace App", app.name, "app", app.name, update_modified=False, modified=False + ) diff --git a/jcloud/patches/v0_0_1/set_monitoring_password_in_cluster.py b/jcloud/patches/v0_0_1/set_monitoring_password_in_cluster.py new file mode 100644 index 0000000..28e4465 --- /dev/null +++ b/jcloud/patches/v0_0_1/set_monitoring_password_in_cluster.py @@ -0,0 +1,12 @@ +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + + +import jingrow + + +def execute(): + jingrow.reload_pg("jcloud", "pagetype", "cluster") + for name in jingrow.get_all("Cluster", pluck="name"): + cluster = jingrow.get_pg("Cluster", name) + cluster.save() diff --git a/jcloud/patches/v0_0_1/set_public_field_in_app_release_based_on_app_source_public.py b/jcloud/patches/v0_0_1/set_public_field_in_app_release_based_on_app_source_public.py new file mode 100644 index 0000000..26e5195 --- /dev/null +++ b/jcloud/patches/v0_0_1/set_public_field_in_app_release_based_on_app_source_public.py @@ -0,0 +1,18 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + + +import jingrow + + +def execute(): + jingrow.reload_pg("jcloud", "pagetype", "app_release") + jingrow.db.sql( + """ + UPDATE `tabApp Release` as `release` + INNER JOIN `tabApp Source` as source + ON `release`.source = `source`.name + SET `release`.public = `source`.public + """ + ) diff --git a/jcloud/patches/v0_0_1/set_rate_limit_config_based_on_plan.py b/jcloud/patches/v0_0_1/set_rate_limit_config_based_on_plan.py new file mode 100644 index 0000000..4390c4e --- /dev/null +++ b/jcloud/patches/v0_0_1/set_rate_limit_config_based_on_plan.py @@ -0,0 +1,24 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +import jingrow + +from jcloud.jcloud.pagetype.site_plan.site_plan import get_plan_config +from jcloud.utils import log_error + + +def execute(): + sites = jingrow.get_all( + "Site", fields=["name", "plan"], filters={"status": ("!=", "Archived")} + ) + for site in sites: + if not site.plan: + continue + plan_config = get_plan_config(site.plan) + site_pg = jingrow.get_pg("Site", site) + try: + 
site_pg.update_site_config(plan_config) + except Exception: + log_error("Rate Limit Patch Failure", site=site.name) diff --git a/jcloud/patches/v0_0_1/set_release_group_in_site.py b/jcloud/patches/v0_0_1/set_release_group_in_site.py new file mode 100644 index 0000000..c2d0810 --- /dev/null +++ b/jcloud/patches/v0_0_1/set_release_group_in_site.py @@ -0,0 +1,25 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + + +import jingrow + + +def execute(): + sites = jingrow.db.sql( + """ + SELECT + site.name, bench.group + FROM + `tabSite` site + LEFT JOIN + `tabBench` bench + ON + site.bench = bench.name + """, + as_dict=True, + ) + + for site in sites: + jingrow.db.set_value("Site", site.name, "group", site.group) diff --git a/jcloud/patches/v0_0_1/set_release_group_version_and_title_from_custom_field.py b/jcloud/patches/v0_0_1/set_release_group_version_and_title_from_custom_field.py new file mode 100644 index 0000000..7d0ae9f --- /dev/null +++ b/jcloud/patches/v0_0_1/set_release_group_version_and_title_from_custom_field.py @@ -0,0 +1,21 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + + +import jingrow + + +def execute(): + jingrow.reload_pg("jcloud", "pagetype", "release_group") + groups = jingrow.get_all( + "Release Group", ["name", "_title"], {"title": ("is", "not set")} + ) + for group in groups: + jingrow.db.set_value("Release Group", group.name, "title", group._title) + + groups = jingrow.get_all( + "Release Group", ["name", "_version"], {"version": ("is", "not set")} + ) + for group in groups: + jingrow.db.set_value("Release Group", group.name, "version", group._version) diff --git a/jcloud/patches/v0_0_1/set_release_in_bench_app.py b/jcloud/patches/v0_0_1/set_release_in_bench_app.py new file mode 100644 index 0000000..9747036 --- /dev/null +++ b/jcloud/patches/v0_0_1/set_release_in_bench_app.py @@ -0,0 +1,25 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + + +import jingrow + + +def execute(): + apps = jingrow.db.sql( + """ + SELECT + app.name, `release`.name as 'release' + FROM + `tabBench App` app + LEFT JOIN + `tabApp Release` `release` + ON + (`release`.hash = app.hash AND `release`.source = app.source) + """, + as_dict=True, + ) + + for app in apps: + jingrow.db.set_value("Bench App", app.name, "release", app.release) diff --git a/jcloud/patches/v0_0_1/set_remote_file_location.py b/jcloud/patches/v0_0_1/set_remote_file_location.py new file mode 100644 index 0000000..01a4384 --- /dev/null +++ b/jcloud/patches/v0_0_1/set_remote_file_location.py @@ -0,0 +1,21 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +import jingrow +from jingrow.desk.pagetype.tag.tag import add_tag + + +def execute(): + jingrow.reload_pg("jcloud", "pagetype", "remote_file") + remote_files = ( + x["name"] for x in jingrow.get_all("Remote File", [["bucket", "like", ""]]) + ) + uploads_bucket = jingrow.db.get_single_value("Jcloud Settings", "remote_uploads_bucket") + + for remote_file in remote_files: + jingrow.db.set_value("Remote File", remote_file, "bucket", uploads_bucket) + add_tag("Site Upload", "Remote File", remote_file) + + jingrow.db.commit() diff --git a/jcloud/patches/v0_0_1/set_repository_in_jingrow_app.py b/jcloud/patches/v0_0_1/set_repository_in_jingrow_app.py new file mode 100644 index 0000000..e283c14 --- /dev/null +++ 
b/jcloud/patches/v0_0_1/set_repository_in_jingrow_app.py @@ -0,0 +1,16 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2019, JINGROW +# For license information, please see license.txt + + +import jingrow + + +def execute(): + jingrow.reload_pagetype("Jingrow App") + apps = jingrow.get_all( + "Jingrow App", fields=["name", "url"], filters={"repo": ("is", "not set")} + ) + for app in apps: + repo = app.url.split("/")[-1].replace(".git", "") + jingrow.db.set_value("Jingrow App", app.name, "repo", repo) diff --git a/jcloud/patches/v0_0_1/set_team_field_for_permission_checks.py b/jcloud/patches/v0_0_1/set_team_field_for_permission_checks.py new file mode 100644 index 0000000..d8d5a1b --- /dev/null +++ b/jcloud/patches/v0_0_1/set_team_field_for_permission_checks.py @@ -0,0 +1,28 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + + +import jingrow + + +def execute(): + updates = [ + ["Site Domain", "Site", "site"], + ["App Release", "App Source", "source"], + ["Deploy Candidate", "Release Group", "group"], + ["Deploy Candidate Difference", "Release Group", "group"], + ["Deploy", "Release Group", "group"], + ["Bench", "Release Group", "group"], + ] + for target_pagetype, source_pagetype, link_fieldname in updates: + jingrow.reload_pg("jcloud", "pagetype", jingrow.scrub(target_pagetype)) + jingrow.db.sql( + f""" + UPDATE `tab{target_pagetype}` as target + INNER JOIN `tab{source_pagetype}` as source + ON `target`.`{link_fieldname}` = `source`.`name` + SET `target`.team = `source`.team + WHERE ifnull(`target`.team, '') = "" + """ + ) diff --git a/jcloud/patches/v0_0_1/set_team_field_in_tls_certificate_based_on_domain_team.py b/jcloud/patches/v0_0_1/set_team_field_in_tls_certificate_based_on_domain_team.py new file mode 100644 index 0000000..2fe3d82 --- /dev/null +++ b/jcloud/patches/v0_0_1/set_team_field_in_tls_certificate_based_on_domain_team.py @@ -0,0 +1,17 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + + +import jingrow + + +def execute(): + jingrow.reload_pg("jcloud", "pagetype", "site_domain") + jingrow.reload_pg("jcloud", "pagetype", "tls_certificate") + certificates = jingrow.get_all( + "TLS Certificate", ["name", "domain"], {"wildcard": False} + ) + for certificate in certificates: + team = jingrow.db.get_value("Site Domain", certificate.domain, "team") + jingrow.db.set_value("TLS Certificate", certificate.name, "team", team) diff --git a/jcloud/patches/v0_0_1/site_history_to_site_activity.py b/jcloud/patches/v0_0_1/site_history_to_site_activity.py new file mode 100644 index 0000000..73fc9de --- /dev/null +++ b/jcloud/patches/v0_0_1/site_history_to_site_activity.py @@ -0,0 +1,13 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2019, JINGROW +# For license information, please see license.txt + + +import jingrow + + +def execute(): + from_pagetype = "Site History" + to_pagetype = "Site Activity" + if jingrow.db.table_exists(from_pagetype) and not jingrow.db.table_exists(to_pagetype): + jingrow.rename_pg("PageType", from_pagetype, to_pagetype, force=True) diff --git a/jcloud/patches/v0_0_1/site_usage_convert_history.py b/jcloud/patches/v0_0_1/site_usage_convert_history.py new file mode 100644 index 0000000..1ed49cf --- /dev/null +++ b/jcloud/patches/v0_0_1/site_usage_convert_history.py @@ -0,0 +1,31 @@ +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +from math import ceil + +import jingrow +from jingrow.utils import cint + + +def execute(): 
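+ # Descriptive note (added): the loop below rescales the backups/database/public/private usage fields by 1024**2 (presumably bytes to MiB), rounding up, for every existing Site Usage record.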
+ pagetype = "Site Usage" + jingrow.reload_pagetype(pagetype) + scheme_before = jingrow.db.auto_commit_on_many_writes + jingrow.db.auto_commit_on_many_writes = True + records = jingrow.get_all(pagetype, pluck="name") + total = len(records) + + for current, record in enumerate(records): + print(f"Updated {current} of {total}", end="\r") + + fields = ["backups", "database", "public", "private"] + current_values = jingrow.db.get_value(pagetype, record, fields) + + for field, value in zip(fields, current_values): + value = ceil(cint(value) / (1024**2)) + jingrow.get_pg(pagetype, record).db_set(field, value, update_modified=False) + + jingrow.db.commit() + jingrow.db.auto_commit_on_many_writes = scheme_before + print(f"{total} {pagetype} records updated") diff --git a/jcloud/patches/v0_0_1/track_offsite_backups_via_remote_files.py b/jcloud/patches/v0_0_1/track_offsite_backups_via_remote_files.py new file mode 100644 index 0000000..addffbe --- /dev/null +++ b/jcloud/patches/v0_0_1/track_offsite_backups_via_remote_files.py @@ -0,0 +1,74 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +import json + +import jingrow +from jingrow.desk.pagetype.tag.tag import add_tag + + +def execute(): + jingrow.reload_pg("jcloud", "pagetype", "site_backup") + bucket = jingrow.db.get_single_value("Jcloud Settings", "aws_s3_bucket") + offsite_backups = [ + jingrow.get_pg("Site Backup", x["name"]) + for x in jingrow.get_all("Site Backup", {"offsite": 1}) + ] + + for offsite_backup in offsite_backups: + offsite_job_payload = json.loads(offsite_backup.offsite_backup or "{}") + if offsite_job_payload: + remote_database = offsite_job_payload.get(offsite_backup.get("database_file")) + remote_public = offsite_job_payload.get(offsite_backup.get("public_file")) + remote_private = offsite_job_payload.get(offsite_backup.get("private_file")) + + if remote_database: + remote_file = jingrow.get_pg( + { + "pagetype": "Remote File", + "file_name": offsite_backup.database_file, + "file_path": remote_database, + "file_size": offsite_backup.database_size, + "file_type": "application/x-gzip", + "bucket": bucket, + } + ) + remote_file.save() + add_tag("Offsite Backup", remote_file.pagetype, remote_file.name) + offsite_backup.remote_database_file = remote_file.name + + if remote_public: + remote_file = jingrow.get_pg( + { + "pagetype": "Remote File", + "file_name": offsite_backup.public_file, + "file_path": remote_public, + "file_size": offsite_backup.public_size, + "file_type": "application/x-tar", + "bucket": bucket, + } + ) + remote_file.save() + add_tag("Offsite Backup", remote_file.pagetype, remote_file.name) + offsite_backup.remote_public_file = remote_file.name + + if remote_private: + remote_file = jingrow.get_pg( + { + "pagetype": "Remote File", + "file_name": offsite_backup.private_file, + "file_path": remote_private, + "file_size": offsite_backup.private_size, + "file_type": "application/x-tar", + "bucket": bucket, + } + ) + remote_file.save() + add_tag("Offsite Backup", remote_file.pagetype, remote_file.name) + offsite_backup.remote_private_file = remote_file.name + + offsite_backup.save() + + jingrow.db.commit() diff --git a/jcloud/patches/v0_0_1/truncate_server_status_table.py b/jcloud/patches/v0_0_1/truncate_server_status_table.py new file mode 100644 index 0000000..b52211b --- /dev/null +++ b/jcloud/patches/v0_0_1/truncate_server_status_table.py @@ -0,0 +1,9 @@ +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + + +import 
jingrow + + +def execute(): + jingrow.db.sql("TRUNCATE `tabServer Status`") diff --git a/jcloud/patches/v0_0_1/update_backups_availability.py b/jcloud/patches/v0_0_1/update_backups_availability.py new file mode 100644 index 0000000..86c8a11 --- /dev/null +++ b/jcloud/patches/v0_0_1/update_backups_availability.py @@ -0,0 +1,13 @@ +import jingrow + + +def execute(): + jingrow.db.sql( + "update `tabSite Backup` set files_availability = 'Available' where `site` not" + " like '%jingrow.cloud.archived%'" + ) + jingrow.db.sql( + "update `tabSite Backup` set files_availability = 'Unavailable' where `site`" + " like '%jingrow.cloud.archived%'" + ) + jingrow.db.commit() diff --git a/jcloud/patches/v0_0_1/update_proxy_for_suspended_and_inactive_sites.py b/jcloud/patches/v0_0_1/update_proxy_for_suspended_and_inactive_sites.py new file mode 100644 index 0000000..03882fb --- /dev/null +++ b/jcloud/patches/v0_0_1/update_proxy_for_suspended_and_inactive_sites.py @@ -0,0 +1,19 @@ +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +import jingrow +from jingrow.utils.fixtures import sync_fixtures + + +def execute(): + sync_fixtures("jcloud") + sites = jingrow.get_all( + "Site", + fields=["name", "status"], + filters={"status": ("in", ("Suspended", "Inactive"))}, + ) + for site in sites: + site = jingrow.get_pg("Site", site.name) + proxy_status = {"Suspended": "suspended", "Inactive": "deactivated"} + site.update_site_status_on_proxy(proxy_status[site.status], skip_reload=True) diff --git a/jcloud/patches/v0_0_1/update_site_config_pg.py b/jcloud/patches/v0_0_1/update_site_config_pg.py new file mode 100644 index 0000000..a5c2e41 --- /dev/null +++ b/jcloud/patches/v0_0_1/update_site_config_pg.py @@ -0,0 +1,28 @@ +import json + +import jingrow + + +def execute(): + jingrow.reload_pg("jcloud", "pagetype", "site") + jingrow.reload_pg("jcloud", "pagetype", "site config") + sites = jingrow.get_all("Site", {"status": ("!=", "Archived")}) + + commit_scheme = jingrow.db.auto_commit_on_many_writes + jingrow.db.auto_commit_on_many_writes = 1 + + for _site in sites: + site = jingrow.get_pg("Site", _site.name) + if site.configuration: + continue + print(f"Updating Site Config for {site.name}") + config = json.loads(site.config) + for key, value in config.items(): + if isinstance(value, (dict, list)): + value = json.dumps(value) + else: + value = value + site.append("configuration", {"key": key, "value": value}) + site.save() + + jingrow.db.auto_commit_on_many_writes = commit_scheme diff --git a/jcloud/patches/v0_0_1/use_private_ip_for_upstreams.py b/jcloud/patches/v0_0_1/use_private_ip_for_upstreams.py new file mode 100644 index 0000000..521473c --- /dev/null +++ b/jcloud/patches/v0_0_1/use_private_ip_for_upstreams.py @@ -0,0 +1,28 @@ +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + + +import jingrow +from jingrow.utils.fixtures import sync_fixtures + +from jcloud.agent import Agent + + +def execute(): + settings = jingrow.get_pg("Jcloud Settings", "Jcloud Settings") + settings.agent_repository_owner = "jingrow" + + settings.agent_github_access_token = input("GitHub Access Token: ") + settings.save() + + sync_fixtures("jcloud") + + servers = jingrow.get_all( + "Server", {"is_upstream_setup": True, "status": "Active"}, ["name", "proxy_server"] + ) + for server in servers: + proxy_server = jingrow.get_pg("Proxy Server", server.proxy_server) + proxy_server.update_agent_ansible() + + agent = Agent(server.proxy_server, "Proxy Server") + 
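+ # Ask the proxy's agent to point this upstream at the server's private IP.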
agent.update_upstream_private_ip(server.name) diff --git a/jcloud/patches/v0_0_1/user_account_to_team.py b/jcloud/patches/v0_0_1/user_account_to_team.py new file mode 100644 index 0000000..2e0e840 --- /dev/null +++ b/jcloud/patches/v0_0_1/user_account_to_team.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +import jingrow + + +def execute(): + jingrow.reload_pg("jcloud", "pagetype", "site") + jingrow.reload_pg("jcloud", "pagetype", "account_request") + jingrow.reload_pg("jcloud", "pagetype", "team") + jingrow.reload_pg("jcloud", "pagetype", "team_member") + + user_accounts = jingrow.db.sql( + "SELECT user, account_key, creation FROM `tabUser Account`", as_dict=1 + ) + enabled_users = [d.name for d in jingrow.db.get_all("User", {"enabled": 1}, ["name"])] + + users = [d.user for d in user_accounts] + # create team for Administrator too + if "Administrator" not in users: + user_accounts.append(jingrow._dict({"user": "Administrator"})) + + for d in user_accounts: + if not d.user: + continue + # create team + team = jingrow.new_pg("Team") + team.name = d.user + team.append("team_members", {"user": d.user}) + team.enabled = d.user in enabled_users + team.creation = d.creation + team.modified = d.modified + team.insert() + + # create account request + if d.account_key: + account_request = jingrow.new_pg("Account Request") + account_request.request_key = d.account_key + account_request.email = d.user + account_request.team = d.user + account_request.role = "Jcloud Admin" + account_request.creation = d.creation + account_request.insert() + + # update team in sites + jingrow.db.set_value("Site", {"owner": d.user}, "team", team.name) + + jingrow.delete_pg_if_exists("PageType", "User Account") diff --git a/jcloud/patches/v0_0_4/disable_subscriptions_for_inactive_sites.py b/jcloud/patches/v0_0_4/disable_subscriptions_for_inactive_sites.py new file mode 100644 index 0000000..2a879b7 --- /dev/null +++ b/jcloud/patches/v0_0_4/disable_subscriptions_for_inactive_sites.py @@ -0,0 +1,21 @@ +# Copyright (c) 2022, JINGROW +# For license information, please see license.txt + +import jingrow + + +def execute(): + subscription = jingrow.qb.PageType("Subscription") + site = jingrow.qb.PageType("Site") + + inactive_sites = ( + jingrow.qb.from_(subscription) + .left_join(site) + .on(subscription.document_name == site.name) + .select(subscription.name) + .where(site.status.isin(["Archived", "Broken", "Suspended"])) + ).run(pluck=True) + + jingrow.db.set_value( + "Subscription", {"enabled": 1, "name": ("in", inactive_sites)}, "enabled", 0 + ) diff --git a/jcloud/patches/v0_0_4/remove_legacy_billing_doctypes.py b/jcloud/patches/v0_0_4/remove_legacy_billing_doctypes.py new file mode 100644 index 0000000..305e9b4 --- /dev/null +++ b/jcloud/patches/v0_0_4/remove_legacy_billing_doctypes.py @@ -0,0 +1,11 @@ +# Copyright (c) 2021, JINGROW +# For license information, please see license.txt + + +import jingrow + + +def execute(): + # these doctypes are only deleted from PageType table, their tables will exist + doctypes = ["Payment", "Payment Ledger Entry"] + jingrow.db.sql("DELETE from tabPageType where name in %s", [doctypes]) diff --git a/jcloud/patches/v0_7_0/add_team_field_for_site_backups_archived.py b/jcloud/patches/v0_7_0/add_team_field_for_site_backups_archived.py new file mode 100644 index 0000000..5210499 --- /dev/null +++ b/jcloud/patches/v0_7_0/add_team_field_for_site_backups_archived.py @@ -0,0 +1,18 @@ +# Copyright (c) 2024, 
JINGROW +# For license information, please see license.txt + + +import jingrow +from tqdm import tqdm + + +def execute(): + sites = jingrow.get_all("Site", fields=["name", "team"], filters={"status": "Archived"}) + + for site in tqdm(sites): + jingrow.db.set_value( + "Site Backup", + {"site": site["name"], "files_availability": "Available"}, + "team", + site["team"], + ) diff --git a/jcloud/patches/v0_7_0/add_team_field_for_site_related_doctypes.py b/jcloud/patches/v0_7_0/add_team_field_for_site_related_doctypes.py new file mode 100644 index 0000000..b0fa09c --- /dev/null +++ b/jcloud/patches/v0_7_0/add_team_field_for_site_related_doctypes.py @@ -0,0 +1,17 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt + + +import jingrow +from tqdm import tqdm + + +def execute(): + sites = jingrow.get_all( + "Site", fields=["name", "team"], filters={"status": ("!=", "Archived")} + ) + + for site in tqdm(sites): + jingrow.db.set_value("Site Update", {"site": site["name"]}, "team", site["team"]) + jingrow.db.set_value("Site Backup", {"site": site["name"]}, "team", site["team"]) + jingrow.db.set_value("Site Activity", {"site": site["name"]}, "team", site["team"]) diff --git a/jcloud/patches/v0_7_0/convert_marketplace_description_to_html.py b/jcloud/patches/v0_7_0/convert_marketplace_description_to_html.py new file mode 100644 index 0000000..b3280d7 --- /dev/null +++ b/jcloud/patches/v0_7_0/convert_marketplace_description_to_html.py @@ -0,0 +1,9 @@ +import jingrow + + +def execute(): + apps = jingrow.get_all("Marketplace App", fields=["name", "long_description"]) + + for app in apps: + html = jingrow.utils.md_to_html(app["long_description"]) + jingrow.db.set_value("Marketplace App", app["name"], "long_description", html) diff --git a/jcloud/patches/v0_7_0/fix_team_for_tls_certificates.py b/jcloud/patches/v0_7_0/fix_team_for_tls_certificates.py new file mode 100644 index 0000000..e23a033 --- /dev/null +++ b/jcloud/patches/v0_7_0/fix_team_for_tls_certificates.py @@ -0,0 +1,15 @@ +# Copyright (c) 2025, JINGROW +# For license information, please see license.txt + + +import jingrow +from tqdm import tqdm + + +def execute(): + site_domains = jingrow.get_all("Site Domain", fields=["tls_certificate", "team"]) + + for domain in tqdm(site_domains): + jingrow.db.set_value( + "TLS Certificate", domain["tls_certificate"], "team", domain["team"], update_modified=False + ) diff --git a/jcloud/patches/v0_7_0/migrate_fields_from_plans_to_server_and_marketplace.py b/jcloud/patches/v0_7_0/migrate_fields_from_plans_to_server_and_marketplace.py new file mode 100644 index 0000000..b1cabf1 --- /dev/null +++ b/jcloud/patches/v0_7_0/migrate_fields_from_plans_to_server_and_marketplace.py @@ -0,0 +1,51 @@ +import jingrow + + +def execute(): + plans = jingrow.get_all( + "Site Plan", + { + "document_type": ( + "in", + ["Server", "Database Server", "Proxy Server", "Self Hosted Server"], + ) + }, + pluck="name", + ) + + for plan in plans: + plan_pg = jingrow.get_pg("Site Plan", plan) + server_plan_pg = jingrow.get_pg( + { + "pagetype": "Server Plan", + "name": plan_pg.name, + "title": plan_pg.plan_title, + "price_cny": plan_pg.price_cny, + "price_usd": plan_pg.price_usd, + "server_type": plan_pg.document_type, + "cluster": plan_pg.cluster, + "instance_type": plan_pg.instance_type, + "vcpu": plan_pg.vcpu, + "memory": plan_pg.memory, + "disk": plan_pg.disk, + "enabled": plan_pg.enabled, + } + ) + server_plan_pg.roles = plan_pg.roles + server_plan_pg.insert(ignore_if_duplicate=True) + + for 
marketplace_plan in jingrow.get_all("Marketplace App Plan", pluck="name"): + map_pg = jingrow.get_pg("Marketplace App Plan", marketplace_plan) + plan = jingrow.get_all( + "Site Plan", {"name": map_pg.plan}, ["plan_title", "price_usd", "price_cny"] + ) + + if plan: + plan = plan[0] + else: + continue + + map_pg.title = plan.plan_title + map_pg.price_cny = plan.price_cny + map_pg.price_usd = plan.price_usd + map_pg.save() diff --git a/jcloud/patches/v0_7_0/move_site_db_access_users_to_site_db_perm_manager.py b/jcloud/patches/v0_7_0/move_site_db_access_users_to_site_db_perm_manager.py new file mode 100644 index 0000000..65c7d67 --- /dev/null +++ b/jcloud/patches/v0_7_0/move_site_db_access_users_to_site_db_perm_manager.py @@ -0,0 +1,33 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt +import jingrow + + +def execute(): + sites = jingrow.get_all( + "Site", + filters={ + "status": ("!=", "Archived"), + "is_database_access_enabled": 1, + "database_access_mode": ["in", ("read_only", "read_write")], + }, + pluck="name", + ) + if sites: + for site_name in sites: + site = jingrow.get_pg("Site", site_name) + db_user = jingrow.get_pg( + { + "pagetype": "Site Database User", + "site": site.name, + "team": site.team, + "mode": site.database_access_mode, + "user_created_in_database": True, + "user_added_in_proxysql": True, + "username": site.database_access_user, + "password": site.get_password("database_access_password"), + } + ) + db_user.flags.ignore_after_insert_hooks = True + db_user.insert(ignore_permissions=True) + jingrow.db.set_value("Site Database User", db_user.name, "status", "Active") diff --git a/jcloud/patches/v0_7_0/rename_plan_to_site_plan.py b/jcloud/patches/v0_7_0/rename_plan_to_site_plan.py new file mode 100644 index 0000000..4254cee --- /dev/null +++ b/jcloud/patches/v0_7_0/rename_plan_to_site_plan.py @@ -0,0 +1,8 @@ +import jingrow + + +def execute(): + from_pagetype = "Plan" + to_pagetype = "Site Plan" + if jingrow.db.table_exists(from_pagetype) and not jingrow.db.table_exists(to_pagetype): + jingrow.rename_pg("PageType", from_pagetype, to_pagetype, force=True) diff --git a/jcloud/patches/v0_7_0/set_hostname_abbreviation.py b/jcloud/patches/v0_7_0/set_hostname_abbreviation.py new file mode 100644 index 0000000..6de813f --- /dev/null +++ b/jcloud/patches/v0_7_0/set_hostname_abbreviation.py @@ -0,0 +1,15 @@ +import jingrow + +from jcloud.jcloud.pagetype.server.server import get_hostname_abbreviation + + +def execute(): + for pagetype in ["Server", "Database Server", "Proxy Server"]: + jingrow.reload_pg("jcloud", "pagetype", pagetype) + + for pg in jingrow.get_all(pagetype, fields=["name", "hostname"]): + abbr = get_hostname_abbreviation(pg.hostname) + + jingrow.db.set_value( + pagetype, pg.name, "hostname_abbreviation", abbr, update_modified=False + ) diff --git a/jcloud/patches/v0_7_0/set_label_for_site_database_user.py b/jcloud/patches/v0_7_0/set_label_for_site_database_user.py new file mode 100644 index 0000000..d3940c2 --- /dev/null +++ b/jcloud/patches/v0_7_0/set_label_for_site_database_user.py @@ -0,0 +1,12 @@ +import jingrow + + +def execute(): + db_users = jingrow.get_all( + "Site Database User", + filters={"status": ("!=", "Archived")}, + fields=["name", "username"], + ) + + for db_user in db_users: + jingrow.db.set_value("Site Database User", db_user.name, "label", f"User {db_user.username}") diff --git a/jcloud/patches/v0_7_0/set_password_config_type.py b/jcloud/patches/v0_7_0/set_password_config_type.py new file mode 100644 index 
0000000..e2b7983 --- /dev/null +++ b/jcloud/patches/v0_7_0/set_password_config_type.py @@ -0,0 +1,25 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt + + +import jingrow + + +def execute(): + secret_keys = jingrow.get_all( + "Site Config Key", filters={"type": "Password"}, pluck="key" + ) + + site_config_keys_that_should_be_secret = jingrow.get_all( + "Site Config", filters={"key": ("in", secret_keys)}, pluck="name" + ) + + for key in site_config_keys_that_should_be_secret: + jingrow.db.set_value("Site Config", key, "type", "Password") + + common_site_config_keys_that_should_be_secret = jingrow.get_all( + "Common Site Config", filters={"key": ("in", secret_keys)}, pluck="name" + ) + + for key in common_site_config_keys_that_should_be_secret: + jingrow.db.set_value("Common Site Config", key, "type", "Password") diff --git a/jcloud/patches/v0_7_0/update_enable_performance_tuning.py b/jcloud/patches/v0_7_0/update_enable_performance_tuning.py new file mode 100644 index 0000000..4233241 --- /dev/null +++ b/jcloud/patches/v0_7_0/update_enable_performance_tuning.py @@ -0,0 +1,12 @@ +import jingrow + + +def execute(): + # Set enable_performance_tuning to True for all records in Team PageType + # Default value in Team pagetype has been set to True + jingrow.db.sql( + """ + UPDATE `tabTeam` + SET enable_performance_tuning = 1 + """ + ) diff --git a/jcloud/playbooks/agent_sentry.yml b/jcloud/playbooks/agent_sentry.yml new file mode 100644 index 0000000..1f17930 --- /dev/null +++ b/jcloud/playbooks/agent_sentry.yml @@ -0,0 +1,8 @@ +--- +- name: Setup Agent Sentry + hosts: all + become: yes + become_user: root + gather_facts: yes + roles: + - role: agent_sentry diff --git a/jcloud/playbooks/agent_set_proxy_ip.yml b/jcloud/playbooks/agent_set_proxy_ip.yml new file mode 100644 index 0000000..e1fa78c --- /dev/null +++ b/jcloud/playbooks/agent_set_proxy_ip.yml @@ -0,0 +1,8 @@ +--- +- name: Set Proxy IP for Agent + hosts: all + become: yes + become_user: root + gather_facts: no + roles: + - role: agent_set_proxy_ip diff --git a/jcloud/playbooks/analytics.yml b/jcloud/playbooks/analytics.yml new file mode 100644 index 0000000..8651fab --- /dev/null +++ b/jcloud/playbooks/analytics.yml @@ -0,0 +1,15 @@ +--- +- name: Setup Analytics Server + hosts: all + become: yes + become_user: root + gather_facts: yes + roles: + - role: essentials + - role: user + - role: nginx + - role: agent + - role: node_exporter + - role: filebeat + - role: docker + - role: plausible diff --git a/jcloud/playbooks/aws.yml b/jcloud/playbooks/aws.yml new file mode 100644 index 0000000..12c79b2 --- /dev/null +++ b/jcloud/playbooks/aws.yml @@ -0,0 +1,8 @@ +--- +- name: Prepare AWS EC2 Server + hosts: all + become: yes + become_user: root + gather_facts: no + roles: + - role: aws diff --git a/jcloud/playbooks/central_site_migration.yml b/jcloud/playbooks/central_site_migration.yml new file mode 100644 index 0000000..fe7828a --- /dev/null +++ b/jcloud/playbooks/central_site_migration.yml @@ -0,0 +1,5 @@ +--- +- name: Migrate central site + hosts: all + roles: + - migrate_to_fc diff --git a/jcloud/playbooks/clamav.yml b/jcloud/playbooks/clamav.yml new file mode 100644 index 0000000..1449ca4 --- /dev/null +++ b/jcloud/playbooks/clamav.yml @@ -0,0 +1,9 @@ +--- +- name: Install ClamAV + hosts: all + become_user: root + become_method: sudo + gather_facts: yes + roles: + - role: clamav + diff --git a/jcloud/playbooks/configure_ssh_logging.yml b/jcloud/playbooks/configure_ssh_logging.yml new file mode 100644 
index 0000000..0f22743 --- /dev/null +++ b/jcloud/playbooks/configure_ssh_logging.yml @@ -0,0 +1,5 @@ +--- +- name: Configure ssh session activites logging + hosts: all + roles: + - role: configure_ssh_logging \ No newline at end of file diff --git a/jcloud/playbooks/convert.yml b/jcloud/playbooks/convert.yml new file mode 100644 index 0000000..5413e12 --- /dev/null +++ b/jcloud/playbooks/convert.yml @@ -0,0 +1,10 @@ +--- +- name: Convert Jingrow Server to Database Server + hosts: all + become: yes + become_user: root + gather_facts: true + roles: + - role: convert + - role: mysqld_exporter + - role: deadlock_logger diff --git a/jcloud/playbooks/copy.yml b/jcloud/playbooks/copy.yml new file mode 100644 index 0000000..8f53a97 --- /dev/null +++ b/jcloud/playbooks/copy.yml @@ -0,0 +1,8 @@ +--- +- name: Copy Files + hosts: all + become: yes + become_user: root + gather_facts: no + roles: + - role: copy diff --git a/jcloud/playbooks/database.yml b/jcloud/playbooks/database.yml new file mode 100644 index 0000000..3ea4038 --- /dev/null +++ b/jcloud/playbooks/database.yml @@ -0,0 +1,26 @@ +--- +- name: Setup Database Server + hosts: all + become: yes + become_user: root + gather_facts: yes + roles: + - role: essentials + - role: user + - role: mount + - role: mariadb + - role: mariadb_memory_allocator + - role: nginx + - role: agent + - role: node_exporter + - role: mysqld_exporter + - role: deadlock_logger + - role: filebeat + - role: clamav + - role: gpg_config + - role: aide + - role: additional_process_hardening + - role: warning_banners + - role: auditd + - role: sshd_hardening + - role: pam diff --git a/jcloud/playbooks/database_exporters.yml b/jcloud/playbooks/database_exporters.yml new file mode 100644 index 0000000..6c3ee6e --- /dev/null +++ b/jcloud/playbooks/database_exporters.yml @@ -0,0 +1,10 @@ +--- +- name: Setup Database Exporters + hosts: all + become: yes + become_user: root + gather_facts: yes + roles: + - role: node_exporter + - role: mysqld_exporter + - role: monitoring_password diff --git a/jcloud/playbooks/database_memory_limits.yml b/jcloud/playbooks/database_memory_limits.yml new file mode 100644 index 0000000..5893a37 --- /dev/null +++ b/jcloud/playbooks/database_memory_limits.yml @@ -0,0 +1,7 @@ +--- +- name: Set memory limits for MariaDB + hosts: all + become: yes + become_user: root + roles: + - role: mariadb_systemd_limits diff --git a/jcloud/playbooks/database_rename.yml b/jcloud/playbooks/database_rename.yml new file mode 100644 index 0000000..9d0dd53 --- /dev/null +++ b/jcloud/playbooks/database_rename.yml @@ -0,0 +1,14 @@ +--- +- name: Rename Database Server + hosts: all + become: yes + become_user: root + gather_facts: yes + roles: + - role: mariadb_rename + - role: agent_rename + - role: mysqld_exporter_rename + - role: deadlock_logger_rename + - role: filebeat_rename + - role: sshd_hardening + - role: user_ssh_certificate diff --git a/jcloud/playbooks/deadlock_logger.yml b/jcloud/playbooks/deadlock_logger.yml new file mode 100644 index 0000000..8fffbe4 --- /dev/null +++ b/jcloud/playbooks/deadlock_logger.yml @@ -0,0 +1,8 @@ +--- +- name: Setup Deadlock Logger + hosts: all + become: yes + become_user: root + gather_facts: yes + roles: + - role: deadlock_logger diff --git a/jcloud/playbooks/disable_ping.yml b/jcloud/playbooks/disable_ping.yml new file mode 100644 index 0000000..3fdefc7 --- /dev/null +++ b/jcloud/playbooks/disable_ping.yml @@ -0,0 +1,22 @@ +--- +- name: Disable Ping using ufw before.rules + hosts: all + become: yes + become_user: root + 
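+  # Flip the echo-request rule in /etc/ufw/before.rules from ACCEPT to DROP
+  # and reload ufw (see the tasks below).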
gather_facts: no + tasks: + - name: Install ufw + package: + name: ufw + state: present + + - name: Drop icmp echo-request packets + lineinfile: + state: present + path: /etc/ufw/before.rules + regexp: -A ufw-before-input -p icmp --icmp-type echo-request -j ACCEPT + line: -A ufw-before-input -p icmp --icmp-type echo-request -j DROP + + - name: Restart ufw + ufw: + state: reloaded diff --git a/jcloud/playbooks/docker.yml b/jcloud/playbooks/docker.yml new file mode 100644 index 0000000..5bfa238 --- /dev/null +++ b/jcloud/playbooks/docker.yml @@ -0,0 +1,8 @@ +--- +- name: Install Docker + hosts: all + become: yes + become_user: root + gather_facts: no + roles: + - role: docker diff --git a/jcloud/playbooks/docker_eof_update.yml b/jcloud/playbooks/docker_eof_update.yml new file mode 100644 index 0000000..43ed58c --- /dev/null +++ b/jcloud/playbooks/docker_eof_update.yml @@ -0,0 +1,8 @@ +--- +- name: Update Docker's daemon.json + hosts: all + become: yes + become_user: root + gather_facts: no + roles: + - role: docker_eof_update diff --git a/jcloud/playbooks/docker_system_prune.yml b/jcloud/playbooks/docker_system_prune.yml new file mode 100644 index 0000000..9114f0b --- /dev/null +++ b/jcloud/playbooks/docker_system_prune.yml @@ -0,0 +1,9 @@ +--- +- name: Prune Docker System + hosts: all + become: yes + become_user: root + gather_facts: no + + roles: + - docker_system_prune diff --git a/jcloud/playbooks/elasticsearch_exporter.yml b/jcloud/playbooks/elasticsearch_exporter.yml new file mode 100644 index 0000000..c66a2d2 --- /dev/null +++ b/jcloud/playbooks/elasticsearch_exporter.yml @@ -0,0 +1,8 @@ +--- +- name: Setup Elasticsearch Exporter + hosts: all + become: yes + become_user: root + gather_facts: yes + roles: + - role: elasticsearch_exporter diff --git a/jcloud/playbooks/enable_ping.yml b/jcloud/playbooks/enable_ping.yml new file mode 100644 index 0000000..b3d426a --- /dev/null +++ b/jcloud/playbooks/enable_ping.yml @@ -0,0 +1,22 @@ +--- +- name: Enable Ping using ufw before.rules + hosts: all + become: yes + become_user: root + gather_facts: no + tasks: + - name: Install ufw + package: + name: ufw + state: present + + - name: Accept icmp echo-request packets + lineinfile: + state: present + path: /etc/ufw/before.rules + regexp: -A ufw-before-input -p icmp --icmp-type echo-request -j DROP + line: -A ufw-before-input -p icmp --icmp-type echo-request -j ACCEPT + + - name: Restart ufw + ufw: + state: reloaded diff --git a/jcloud/playbooks/extend_ec2_volume.yml b/jcloud/playbooks/extend_ec2_volume.yml new file mode 100644 index 0000000..778c395 --- /dev/null +++ b/jcloud/playbooks/extend_ec2_volume.yml @@ -0,0 +1,8 @@ +--- +- name: Extend EC2 Volume + hosts: all + become: yes + become_user: root + gather_facts: yes + roles: + - role: extend_ec2_volume diff --git a/jcloud/playbooks/fail2ban.yml b/jcloud/playbooks/fail2ban.yml new file mode 100644 index 0000000..4714d9b --- /dev/null +++ b/jcloud/playbooks/fail2ban.yml @@ -0,0 +1,8 @@ +--- +- name: Install Fail2ban + hosts: all + become: yes + become_user: root + gather_facts: no + roles: + - role: fail2ban \ No newline at end of file diff --git a/jcloud/playbooks/failover.yml b/jcloud/playbooks/failover.yml new file mode 100644 index 0000000..6925f34 --- /dev/null +++ b/jcloud/playbooks/failover.yml @@ -0,0 +1,8 @@ +--- +- name: Failover from Primary to Secondary + hosts: all + become: yes + become_user: root + + roles: + - role: failover diff --git a/jcloud/playbooks/failover_prepare_primary_proxy.yml 
b/jcloud/playbooks/failover_prepare_primary_proxy.yml new file mode 100644 index 0000000..9d45bfe --- /dev/null +++ b/jcloud/playbooks/failover_prepare_primary_proxy.yml @@ -0,0 +1,16 @@ +--- +- name: Prepare primary proxy for failover + hosts: all + become: yes + become_user: root + gather_facts: no + + tasks: + - name: Stop Agent to reject jobs + command: 'supervisorctl stop agent:' + + - name: Stop lsyncd and disable + systemd: + name: lsyncd + enabled: no + state: stopped diff --git a/jcloud/playbooks/failover_remove_primary_access.yml b/jcloud/playbooks/failover_remove_primary_access.yml new file mode 100644 index 0000000..4f5e7c3 --- /dev/null +++ b/jcloud/playbooks/failover_remove_primary_access.yml @@ -0,0 +1,13 @@ +--- +- name: Remove Primary Proxy Server's access + hosts: all + become: yes + gather_facts: no + + tasks: + - name: Remove Primary from Authorized Keys + become_user: jingrow + authorized_key: + user: jingrow + key: "{{ primary_public_key }}" + state: absent diff --git a/jcloud/playbooks/failover_up_secondary_proxy.yml b/jcloud/playbooks/failover_up_secondary_proxy.yml new file mode 100644 index 0000000..d0ecfc6 --- /dev/null +++ b/jcloud/playbooks/failover_up_secondary_proxy.yml @@ -0,0 +1,25 @@ +--- +- name: Up secondary proxy to serve requests + hosts: all + become: yes + become_user: root + gather_facts: no + + tasks: + - name: Ensure nginx is enabled and running + service: + name: nginx + enabled: yes + state: started + + - name: Reload nginx + service: + name: nginx + state: reloaded + + - name: Remove cron to reload nginx every 5 mins + become: yes + become_user: jingrow + cron: + name: reload_nginx + state: absent diff --git a/jcloud/playbooks/fetch_jingrow_public_key.yml b/jcloud/playbooks/fetch_jingrow_public_key.yml new file mode 100644 index 0000000..863c320 --- /dev/null +++ b/jcloud/playbooks/fetch_jingrow_public_key.yml @@ -0,0 +1,14 @@ +--- +- name: Fetch Jingrow Public Key + hosts: all + become: yes + become_user: root + gather_facts: no + + tasks: + - name: Fetch Jingrow Public Key + user: + name: jingrow + generate_ssh_key: yes # Won't overwrite existing key + +# Key will be put in server pg by AnsibleCallback diff --git a/jcloud/playbooks/filebeat.yml b/jcloud/playbooks/filebeat.yml new file mode 100644 index 0000000..e294721 --- /dev/null +++ b/jcloud/playbooks/filebeat.yml @@ -0,0 +1,8 @@ +--- +- name: Setup Filebeat + hosts: all + become: yes + become_user: root + gather_facts: yes + roles: + - role: filebeat diff --git a/jcloud/playbooks/filebeat_update.yml b/jcloud/playbooks/filebeat_update.yml new file mode 100644 index 0000000..1f6f80d --- /dev/null +++ b/jcloud/playbooks/filebeat_update.yml @@ -0,0 +1,39 @@ +--- +- name: Update Filebeat + hosts: all + become: yes + become_user: root + gather_facts: no + + tasks: + - name: Update Apt cache + apt: + update_cache: yes + ignore_errors: yes + + - name: Add Elasticsearch Repository Key + apt_key: + url: https://artifacts.elastic.co/GPG-KEY-elasticsearch + state: present + + - name: Add Elasticsearch Repository + apt_repository: + repo: deb https://artifacts.elastic.co/packages/7.x/apt stable main + state: present + update_cache: true + + - name: Update Filebeat + apt: + name: filebeat + state: latest + register: result + until: result.failed == false + retries: 5 + delay: 120 + + - name: Restart Filebeat Daemon + systemd: + name: filebeat + daemon_reload: true + enabled: yes + state: restarted diff --git a/jcloud/playbooks/get_apps.yml b/jcloud/playbooks/get_apps.yml new file mode 100644 index 
0000000..f316c70 --- /dev/null +++ b/jcloud/playbooks/get_apps.yml @@ -0,0 +1,8 @@ +--- +- name: Get Bench data from Self Hosted Server + hosts: all + become: yes + become_user: root + gather_facts: yes + roles: + - role: get_apps \ No newline at end of file diff --git a/jcloud/playbooks/get_sites.yml b/jcloud/playbooks/get_sites.yml new file mode 100644 index 0000000..131dec7 --- /dev/null +++ b/jcloud/playbooks/get_sites.yml @@ -0,0 +1,8 @@ +--- +- name: Sites from Current Bench + hosts: all + become: yes + become_user: root + gather_facts: true + roles: + - role: get_sites \ No newline at end of file diff --git a/jcloud/playbooks/glass_file.yml b/jcloud/playbooks/glass_file.yml new file mode 100644 index 0000000..407aa8e --- /dev/null +++ b/jcloud/playbooks/glass_file.yml @@ -0,0 +1,10 @@ +--- +- name: Adds dummy 200MB file for deletion purposes + hosts: all + become: yes + become_user: root + gather_facts: no + + tasks: + - name: Add 200MB file + command: 'fallocate -l 200M /root/glass' diff --git a/jcloud/playbooks/harden.yml b/jcloud/playbooks/harden.yml new file mode 100644 index 0000000..772d1d4 --- /dev/null +++ b/jcloud/playbooks/harden.yml @@ -0,0 +1,14 @@ +--- +- name: Apply Hardening Rules + hosts: all + become: yes + become_user: root + gather_facts: yes + roles: + - role: gpg_config + - role: aide + - role: additional_process_hardening + - role: warning_banners + - role: auditd + - role: sshd_hardening + - role: pam diff --git a/jcloud/playbooks/increase_swap.yml b/jcloud/playbooks/increase_swap.yml new file mode 100644 index 0000000..7c202e7 --- /dev/null +++ b/jcloud/playbooks/increase_swap.yml @@ -0,0 +1,9 @@ +--- +- name: Increase Swap + hosts: all + become: yes + become_user: root + gather_facts: no + roles: + - role: swap + - role: swap_config diff --git a/jcloud/playbooks/keys.yml b/jcloud/playbooks/keys.yml new file mode 100644 index 0000000..b2b04df --- /dev/null +++ b/jcloud/playbooks/keys.yml @@ -0,0 +1,8 @@ +--- +- name: Generate and Fetch User Keys + hosts: all + become: yes + become_user: root + gather_facts: no + roles: + - role: keys diff --git a/jcloud/playbooks/log.yml b/jcloud/playbooks/log.yml new file mode 100644 index 0000000..be13ad5 --- /dev/null +++ b/jcloud/playbooks/log.yml @@ -0,0 +1,17 @@ +--- +- name: Setup Log Server + hosts: all + become: yes + become_user: root + gather_facts: yes + roles: + - role: essentials + - role: user + - role: nginx + - role: agent + - role: node_exporter + - role: filebeat + - role: elasticsearch + - role: elasticsearch_exporter + - role: kibana + - role: filebeat_elasticsearch diff --git a/jcloud/playbooks/malware_scan.yml b/jcloud/playbooks/malware_scan.yml new file mode 100644 index 0000000..7cfb979 --- /dev/null +++ b/jcloud/playbooks/malware_scan.yml @@ -0,0 +1,5 @@ +--- +- name: Run malware scan + hosts: all + roles: + - malware_scan diff --git a/jcloud/playbooks/mariadb_change_root_password.yml b/jcloud/playbooks/mariadb_change_root_password.yml new file mode 100644 index 0000000..956d6a2 --- /dev/null +++ b/jcloud/playbooks/mariadb_change_root_password.yml @@ -0,0 +1,10 @@ +--- +- name: Change Root Password + hosts: all + become: yes + become_user: root + gather_facts: yes + roles: + - role: mariadb_change_root_password + - role: mysqld_exporter + - role: deadlock_logger diff --git a/jcloud/playbooks/mariadb_change_root_password_secondary.yml b/jcloud/playbooks/mariadb_change_root_password_secondary.yml new file mode 100644 index 0000000..9bda8b3 --- /dev/null +++ 
b/jcloud/playbooks/mariadb_change_root_password_secondary.yml @@ -0,0 +1,9 @@ +--- +- name: Change Root Password on Secondary + hosts: all + become: yes + become_user: root + gather_facts: yes + roles: + - role: mariadb_change_root_password_secondary + - role: mysqld_exporter diff --git a/jcloud/playbooks/mariadb_debug_symbols.yml b/jcloud/playbooks/mariadb_debug_symbols.yml new file mode 100644 index 0000000..08640ee --- /dev/null +++ b/jcloud/playbooks/mariadb_debug_symbols.yml @@ -0,0 +1,8 @@ +--- +- name: Setup MariaDB Debug Symbols + hosts: all + become: yes + become_user: root + gather_facts: yes + roles: + - role: mariadb_10_6_16_jingrow_debug_symbols diff --git a/jcloud/playbooks/mariadb_memory_allocator.yml b/jcloud/playbooks/mariadb_memory_allocator.yml new file mode 100644 index 0000000..82ad007 --- /dev/null +++ b/jcloud/playbooks/mariadb_memory_allocator.yml @@ -0,0 +1,7 @@ +--- +- name: Update MariaDB Memory Allocator + hosts: all + become: yes + become_user: root + roles: + - role: mariadb_memory_allocator \ No newline at end of file diff --git a/jcloud/playbooks/mariadb_physical_backup.yml b/jcloud/playbooks/mariadb_physical_backup.yml new file mode 100644 index 0000000..952cac4 --- /dev/null +++ b/jcloud/playbooks/mariadb_physical_backup.yml @@ -0,0 +1,8 @@ +--- +- name: Perform MariaDB Physical Backup + hosts: all + become: yes + become_user: root + + roles: + - role: primary diff --git a/jcloud/playbooks/monitor.yml b/jcloud/playbooks/monitor.yml new file mode 100644 index 0000000..e332605 --- /dev/null +++ b/jcloud/playbooks/monitor.yml @@ -0,0 +1,16 @@ +--- +- name: Setup Monitor Server + hosts: all + become: yes + become_user: root + gather_facts: yes + roles: + - role: essentials + - role: user + - role: nginx + - role: agent + - role: node_exporter + - role: prometheus + - role: alertmanager + - role: blackbox_exporter + - role: grafana diff --git a/jcloud/playbooks/mount.yml b/jcloud/playbooks/mount.yml new file mode 100644 index 0000000..b2b17fb --- /dev/null +++ b/jcloud/playbooks/mount.yml @@ -0,0 +1,8 @@ +--- +- name: Mount Volumes + hosts: all + become: yes + become_user: root + gather_facts: no + roles: + - role: mount diff --git a/jcloud/playbooks/mysqld_variable.yml b/jcloud/playbooks/mysqld_variable.yml new file mode 100644 index 0000000..4c91ec7 --- /dev/null +++ b/jcloud/playbooks/mysqld_variable.yml @@ -0,0 +1,7 @@ +--- +- name: Set mysqld variable + hosts: all + become: yes + become_user: root + roles: + - role: mysqld_variable diff --git a/jcloud/playbooks/mysqldump.yml b/jcloud/playbooks/mysqldump.yml new file mode 100644 index 0000000..5018ad6 --- /dev/null +++ b/jcloud/playbooks/mysqldump.yml @@ -0,0 +1,9 @@ +--- +- name: Set max_allowed_packet for mysqldump + hosts: all + become: yes + become_user: root + gather_facts: no + roles: + - role: mysqldump + diff --git a/jcloud/playbooks/nginx.yml b/jcloud/playbooks/nginx.yml new file mode 100644 index 0000000..302a362 --- /dev/null +++ b/jcloud/playbooks/nginx.yml @@ -0,0 +1,8 @@ +--- +- name: Install NGINX + hosts: all + become: yes + become_user: root + gather_facts: yes + roles: + - role: nginx diff --git a/jcloud/playbooks/oci.yml b/jcloud/playbooks/oci.yml new file mode 100644 index 0000000..ba1bae4 --- /dev/null +++ b/jcloud/playbooks/oci.yml @@ -0,0 +1,8 @@ +--- +- name: Prepare OCI Server + hosts: all + become: yes + become_user: root + gather_facts: no + roles: + - role: oci diff --git a/jcloud/playbooks/ping.yml b/jcloud/playbooks/ping.yml new file mode 100644 index 0000000..60c3cf1 --- 
/dev/null +++ b/jcloud/playbooks/ping.yml @@ -0,0 +1,9 @@ +--- +- name: Ping Server + hosts: all + become: yes + become_user: root + gather_facts: yes + + roles: + - role: ping diff --git a/jcloud/playbooks/pkg_exists.yml b/jcloud/playbooks/pkg_exists.yml new file mode 100644 index 0000000..da0da76 --- /dev/null +++ b/jcloud/playbooks/pkg_exists.yml @@ -0,0 +1,14 @@ +--- +- name: Check if package is installed + hosts: all + become_user: root + become_method: sudo + tasks: + - name: Gather package facts + package_facts: + manager: auto + + - name: Check if package is installed + debug: + var: ansible_facts.packages + failed_when: '"{{ pkg }}" not in ansible_facts.packages' diff --git a/jcloud/playbooks/primary.yml b/jcloud/playbooks/primary.yml new file mode 100644 index 0000000..094e158 --- /dev/null +++ b/jcloud/playbooks/primary.yml @@ -0,0 +1,8 @@ +--- +- name: Setup Primary + hosts: all + become: yes + become_user: root + + roles: + - role: primary diff --git a/jcloud/playbooks/primary_app.yml b/jcloud/playbooks/primary_app.yml new file mode 100644 index 0000000..ee2b7f7 --- /dev/null +++ b/jcloud/playbooks/primary_app.yml @@ -0,0 +1,8 @@ +--- +- name: Setup Primary App Server + hosts: all + become: yes + become_user: root + + roles: + - role: primary_app diff --git a/jcloud/playbooks/primary_proxy.yml b/jcloud/playbooks/primary_proxy.yml new file mode 100644 index 0000000..6405219 --- /dev/null +++ b/jcloud/playbooks/primary_proxy.yml @@ -0,0 +1,8 @@ +--- +- name: Setup Primary Proxy Server + hosts: all + become: yes + become_user: root + + roles: + - role: primary_proxy \ No newline at end of file diff --git a/jcloud/playbooks/process.yml b/jcloud/playbooks/process.yml new file mode 100644 index 0000000..2c220e3 --- /dev/null +++ b/jcloud/playbooks/process.yml @@ -0,0 +1,8 @@ +--- +- name: Setup Process Exporter + hosts: all + become: yes + become_user: root + gather_facts: yes + roles: + - role: process_exporter diff --git a/jcloud/playbooks/proxy.yml b/jcloud/playbooks/proxy.yml new file mode 100644 index 0000000..8edc2b9 --- /dev/null +++ b/jcloud/playbooks/proxy.yml @@ -0,0 +1,15 @@ +--- +- name: Setup Proxy Server + hosts: all + become: yes + become_user: root + gather_facts: yes + roles: + - role: essentials + - role: user + - role: nginx + - role: agent + - role: proxy + - role: node_exporter + - role: filebeat + - role: clamav diff --git a/jcloud/playbooks/proxy_exporters.yml b/jcloud/playbooks/proxy_exporters.yml new file mode 100644 index 0000000..71eb77d --- /dev/null +++ b/jcloud/playbooks/proxy_exporters.yml @@ -0,0 +1,9 @@ +--- +- name: Setup Proxy Exporters + hosts: all + become: yes + become_user: root + gather_facts: yes + roles: + - role: node_exporter + - role: monitoring_password diff --git a/jcloud/playbooks/proxysql.yml b/jcloud/playbooks/proxysql.yml new file mode 100644 index 0000000..4f2ed87 --- /dev/null +++ b/jcloud/playbooks/proxysql.yml @@ -0,0 +1,10 @@ +--- +- name: Setup ProxySQL + hosts: all + become: yes + become_user: root + gather_facts: yes + roles: + - role: docker + - role: proxysql + - role: proxysql_monitor diff --git a/jcloud/playbooks/proxysql_monitor.yml b/jcloud/playbooks/proxysql_monitor.yml new file mode 100644 index 0000000..fa9b925 --- /dev/null +++ b/jcloud/playbooks/proxysql_monitor.yml @@ -0,0 +1,8 @@ +--- +- name: Setup ProxySQL Monitor + hosts: all + become: yes + become_user: root + gather_facts: yes + roles: + - role: proxysql_monitor diff --git a/jcloud/playbooks/pt_stalk.yml b/jcloud/playbooks/pt_stalk.yml new file mode 100644 
index 0000000..50208bd --- /dev/null +++ b/jcloud/playbooks/pt_stalk.yml @@ -0,0 +1,8 @@ +--- +- name: Setup Percona Stalk + hosts: all + become: yes + become_user: root + gather_facts: yes + roles: + - role: pt_stalk diff --git a/jcloud/playbooks/reconfigure_monitoring.yml b/jcloud/playbooks/reconfigure_monitoring.yml new file mode 100644 index 0000000..11442f2 --- /dev/null +++ b/jcloud/playbooks/reconfigure_monitoring.yml @@ -0,0 +1,8 @@ +--- +- name: Reconfigure Monitor Server + hosts: all + become: yes + become_user: root + gather_facts: yes + roles: + - role: reconfigure_prometheus diff --git a/jcloud/playbooks/reconfigure_mysqld_exporter.yml b/jcloud/playbooks/reconfigure_mysqld_exporter.yml new file mode 100644 index 0000000..e5c9ab5 --- /dev/null +++ b/jcloud/playbooks/reconfigure_mysqld_exporter.yml @@ -0,0 +1,8 @@ +--- +- name: Reconfigure MySQLd Exporter + hosts: all + become: yes + become_user: root + gather_facts: yes + roles: + - role: mysqld_exporter diff --git a/jcloud/playbooks/redis_exporter.yml b/jcloud/playbooks/redis_exporter.yml new file mode 100644 index 0000000..4dcbdc6 --- /dev/null +++ b/jcloud/playbooks/redis_exporter.yml @@ -0,0 +1,8 @@ +--- +- name: Setup Prometheus Redis Exporter + hosts: all + become: yes + become_user: root + gather_facts: yes + roles: + - role: redis_exporter diff --git a/jcloud/playbooks/registry.yml b/jcloud/playbooks/registry.yml new file mode 100644 index 0000000..ec6d04a --- /dev/null +++ b/jcloud/playbooks/registry.yml @@ -0,0 +1,15 @@ +--- +- name: Setup Registry Server + hosts: all + become: yes + become_user: root + gather_facts: yes + roles: + - role: essentials + - role: user + - role: nginx + - role: agent + - role: docker + - role: registry + - role: node_exporter + - role: cadvisor diff --git a/jcloud/playbooks/reload_wireguard.yml b/jcloud/playbooks/reload_wireguard.yml new file mode 100644 index 0000000..311a916 --- /dev/null +++ b/jcloud/playbooks/reload_wireguard.yml @@ -0,0 +1,8 @@ +--- +- name: Reload Wireguard on Proxy Server + hosts: all + become: yes + become_user: root + gather_facts: no + roles: + - role: reload_wireguard diff --git a/jcloud/playbooks/rename.yml b/jcloud/playbooks/rename.yml new file mode 100644 index 0000000..3704117 --- /dev/null +++ b/jcloud/playbooks/rename.yml @@ -0,0 +1,12 @@ +--- +- name: Rename Server + hosts: all + become: yes + become_user: root + gather_facts: yes + roles: + - role: agent_rename + - role: statsd_exporter_rename + - role: filebeat_rename + - role: sshd_hardening + - role: user_ssh_certificate diff --git a/jcloud/playbooks/restart_mysql.yml b/jcloud/playbooks/restart_mysql.yml new file mode 100644 index 0000000..8678d70 --- /dev/null +++ b/jcloud/playbooks/restart_mysql.yml @@ -0,0 +1,7 @@ +--- +- name: Restart mysqld/mariadb service + hosts: all + become: yes + become_user: root + roles: + - role: restart_mysql diff --git a/jcloud/playbooks/roles/additional_process_hardening/tasks/main.yml b/jcloud/playbooks/roles/additional_process_hardening/tasks/main.yml new file mode 100644 index 0000000..aba6969 --- /dev/null +++ b/jcloud/playbooks/roles/additional_process_hardening/tasks/main.yml @@ -0,0 +1,14 @@ +--- +- name: Ensure address space layout randomization (ASLR) is enabled + sysctl: + name: kernel.randomize_va_space + value: '2' + state: present + reload: yes + sysctl_set: yes + ignoreerrors: yes + +- name: Ensure prelink is disabled + package: + name: prelink + state: absent diff --git a/jcloud/playbooks/roles/agent/tasks/main.yml 
b/jcloud/playbooks/roles/agent/tasks/main.yml new file mode 100644 index 0000000..5de232a --- /dev/null +++ b/jcloud/playbooks/roles/agent/tasks/main.yml @@ -0,0 +1,151 @@ +--- +- name: Clone Agent Repository + become: yes + become_user: jingrow + git: + repo: '{{ agent_repository_url }}' + dest: /home/jingrow/agent/repo + remote: upstream + +- name: Install Agent + become: yes + become_user: jingrow + pip: + name: file:///home/jingrow/agent/repo + virtualenv: /home/jingrow/agent/env + virtualenv_python: python3 + editable: yes + +- name: Generate Agent Configuration File + become: yes + become_user: jingrow + command: '/home/jingrow/agent/env/bin/agent setup config --name {{ server }} --workers {{ workers }} {% if proxy_ip is defined and proxy_ip is truthy %}--proxy-ip {{ proxy_ip }}{% endif %} {% if agent_sentry_dsn is defined and agent_sentry_dsn is truthy %}--sentry-dsn {{ agent_sentry_dsn }}{% endif %}' + args: + chdir: /home/jingrow/agent + +- name: Setup Agent SQLite Database + become: yes + become_user: jingrow + command: /home/jingrow/agent/env/bin/agent setup database + args: + chdir: /home/jingrow/agent + +- name: Setup Agent Usage Tracker + become: yes + become_user: jingrow + command: /home/jingrow/agent/env/bin/agent setup usage + args: + chdir: /home/jingrow/agent + +- name: Setup Agent Site Analytics Tracker + become: yes + become_user: jingrow + command: /home/jingrow/agent/env/bin/agent setup site-analytics + args: + chdir: /home/jingrow/agent + +- name: Create Agent NGINX Configuration Directory + become: yes + become_user: jingrow + file: + dest: /home/jingrow/agent/nginx + state: directory + +- name: Setup Agent Authentication + become: yes + become_user: jingrow + command: '/home/jingrow/agent/env/bin/agent setup authentication --password {{ agent_password }}' + args: + chdir: /home/jingrow/agent + +- name: Symlink Agent Supervisor Configuration + file: + src: /home/jingrow/agent/supervisor.conf + dest: /etc/supervisor/conf.d/agent.conf + state: link + force: yes + follow: no + +- name: Create Logs Directory for Supervisor + become: yes + become_user: jingrow + file: + dest: /home/jingrow/agent/logs + state: directory + +- name: Setup Agent Supervisor + become: yes + become_user: jingrow + command: /home/jingrow/agent/env/bin/agent setup supervisor + args: + chdir: /home/jingrow/agent + +- name: Create NGINX Root Configuration File + become: yes + become_user: jingrow + file: + path: /home/jingrow/agent/nginx/nginx.conf + state: touch + +- name: Symlink NGINX Root Configuration File + file: + src: /home/jingrow/agent/nginx/nginx.conf + dest: /etc/nginx/nginx.conf + state: link + force: yes + follow: no + +- name: Create Agent NGINX Configuration File + become: yes + become_user: jingrow + file: + path: /home/jingrow/agent/nginx.conf + state: touch + +- name: Symlink Agent NGINX Configuration File + file: + src: /home/jingrow/agent/nginx.conf + dest: /etc/nginx/conf.d/agent.conf + state: link + force: yes + follow: no + +- name: Create Agent TLS Directory + become: yes + become_user: jingrow + file: + dest: /home/jingrow/agent/tls + state: directory + +- name: Setup Agent TLS (Private Key) + become: yes + become_user: jingrow + copy: + content: '{{ certificate_private_key }}' + dest: /home/jingrow/agent/tls/privkey.pem + +- name: Setup Agent TLS (Full Chain) + become: yes + become_user: jingrow + copy: + content: '{{ certificate_full_chain }}' + dest: /home/jingrow/agent/tls/fullchain.pem + +- name: Setup Agent TLS (Intermediate Chain) + become: yes + become_user: 
jingrow + copy: + content: '{{ certificate_intermediate_chain }}' + dest: /home/jingrow/agent/tls/chain.pem + +- name: Setup Agent NGINX + become: yes + become_user: jingrow + command: /home/jingrow/agent/env/bin/agent setup nginx + args: + chdir: /home/jingrow/agent + +- name: Setup Monitoring Authentication + become: yes + become_user: jingrow + command: 'htpasswd -Bbc /home/jingrow/agent/nginx/monitoring.htpasswd jingrow {{ monitoring_password }}' diff --git a/jcloud/playbooks/roles/agent_rename/tasks/main.yml b/jcloud/playbooks/roles/agent_rename/tasks/main.yml new file mode 100644 index 0000000..4c90215 --- /dev/null +++ b/jcloud/playbooks/roles/agent_rename/tasks/main.yml @@ -0,0 +1,54 @@ +--- +- name: Generate Agent Configuration File + become: yes + become_user: jingrow + command: '/home/jingrow/agent/env/bin/agent setup config --name {{ server }} --workers {{ workers }} {% if proxy_ip is defined %}--proxy-ip {{ proxy_ip }}{% endif %}' + args: + chdir: /home/jingrow/agent + +- name: Setup Agent Authentication + become: yes + become_user: jingrow + command: '/home/jingrow/agent/env/bin/agent setup authentication --password {{ agent_password }}' + args: + chdir: /home/jingrow/agent + +- name: Setup Agent Supervisor + become: yes + become_user: jingrow + command: /home/jingrow/agent/env/bin/agent setup supervisor + args: + chdir: /home/jingrow/agent + +- name: Setup Agent TLS (Private Key) + become: yes + become_user: jingrow + copy: + content: '{{ certificate_private_key }}' + dest: /home/jingrow/agent/tls/privkey.pem + +- name: Setup Agent TLS (Full Chain) + become: yes + become_user: jingrow + copy: + content: '{{ certificate_full_chain }}' + dest: /home/jingrow/agent/tls/fullchain.pem + +- name: Setup Agent TLS (Intermediate Chain) + become: yes + become_user: jingrow + copy: + content: '{{ certificate_intermediate_chain }}' + dest: /home/jingrow/agent/tls/chain.pem + +- name: Setup Agent NGINX + become: yes + become_user: jingrow + command: /home/jingrow/agent/env/bin/agent setup nginx + args: + chdir: /home/jingrow/agent + +- name: Setup Monitoring Authentication + become: yes + become_user: jingrow + command: 'htpasswd -Bbc /home/jingrow/agent/nginx/monitoring.htpasswd jingrow {{ monitoring_password }}' diff --git a/jcloud/playbooks/roles/agent_sentry/tasks/main.yml b/jcloud/playbooks/roles/agent_sentry/tasks/main.yml new file mode 100644 index 0000000..da1e5d4 --- /dev/null +++ b/jcloud/playbooks/roles/agent_sentry/tasks/main.yml @@ -0,0 +1,7 @@ +--- +- name: Setup Agent Sentry + become: yes + become_user: jingrow + command: '/home/jingrow/agent/env/bin/agent setup sentry --sentry-dsn {{ agent_sentry_dsn }}' + args: + chdir: /home/jingrow/agent diff --git a/jcloud/playbooks/roles/agent_set_proxy_ip/tasks/main.yml b/jcloud/playbooks/roles/agent_set_proxy_ip/tasks/main.yml new file mode 100644 index 0000000..13058c0 --- /dev/null +++ b/jcloud/playbooks/roles/agent_set_proxy_ip/tasks/main.yml @@ -0,0 +1,21 @@ +--- +- name: Generate Agent Configuration File + become: yes + become_user: jingrow + command: '/home/jingrow/agent/env/bin/agent setup config --name {{ server }} --workers {{ workers }} {% if proxy_ip is defined %}--proxy-ip {{ proxy_ip }}{% endif %}' + args: + chdir: /home/jingrow/agent + +- name: Setup Agent Authentication + become: yes + become_user: jingrow + command: '/home/jingrow/agent/env/bin/agent setup authentication --password {{ agent_password }}' + args: + chdir: /home/jingrow/agent + +- name: Setup Agent NGINX + become: yes + become_user: jingrow + command: 
/home/jingrow/agent/env/bin/agent setup nginx + args: + chdir: /home/jingrow/agent diff --git a/jcloud/playbooks/roles/aide/files/99_aide_root.j2 b/jcloud/playbooks/roles/aide/files/99_aide_root.j2 new file mode 100644 index 0000000..a53aea7 --- /dev/null +++ b/jcloud/playbooks/roles/aide/files/99_aide_root.j2 @@ -0,0 +1,5 @@ +#/ Full # Don't scan entire server +/etc Full +#We can shortlist more directories to check here +#Some directories within the shortlisted directories will be skipped +#because of default config snippets in aide.conf.d diff --git a/jcloud/playbooks/roles/aide/tasks/main.yml b/jcloud/playbooks/roles/aide/tasks/main.yml new file mode 100644 index 0000000..fd9490c --- /dev/null +++ b/jcloud/playbooks/roles/aide/tasks/main.yml @@ -0,0 +1,23 @@ +--- +- name: Install AIDE + package: + name: + - aide + - aide-common + state: present + +- name: Update AIDE config to speed it up + copy: + src: 99_aide_root.j2 + dest: /etc/aide/aide.conf.d/99_aide_root + owner: root + group: root + mode: 0644 + backup: yes + +- name: Ensure AIDE is started + shell: aideinit -y --force + args: + creates: /var/lib/aide/aide.db + changed_when: no + failed_when: no diff --git a/jcloud/playbooks/roles/alertmanager/tasks/main.yml b/jcloud/playbooks/roles/alertmanager/tasks/main.yml new file mode 100644 index 0000000..869408c --- /dev/null +++ b/jcloud/playbooks/roles/alertmanager/tasks/main.yml @@ -0,0 +1,72 @@ +--- +- name: Create Alertmanager Directories + become: yes + become_user: jingrow + file: + path: "{{ item }}" + state: directory + mode: 0755 + with_items: + - /home/jingrow/alertmanager + - /home/jingrow/alertmanager/data + - /home/jingrow/.config/amtool + +- name: Set Architecture + set_fact: + arch: "{{'amd64' if (ansible_architecture == 'x86_64') else 'arm64'}}" + +- name: Download Alertmanager Archive + become: yes + become_user: jingrow + unarchive: + src: "https://github.com/prometheus/alertmanager/releases/download/v0.27.0/alertmanager-0.27.0.linux-{{ arch }}.tar.gz" + dest: /tmp + remote_src: yes + +- name: Copy Alertmanager and Amtool Binaries + become: yes + become_user: jingrow + copy: + src: "/tmp/alertmanager-0.27.0.linux-{{ arch }}/{{ item }}" + dest: "/home/jingrow/alertmanager/{{ item }}" + mode: 0755 + remote_src: yes + with_items: + - alertmanager + - amtool + +- name: Create Amtool Config + become: yes + become_user: jingrow + template: + src: amtool.yml + dest: /home/jingrow/.config/amtool/config.yml + mode: 0644 + +- name: Configure Alertmanager + template: + src: alertmanager.yml + dest: /home/jingrow/alertmanager/alertmanager.yml + validate: "/home/jingrow/alertmanager/amtool check-config %s" + +- name: Configure ownership permissions on alertmanager.yml + file: + path: /home/jingrow/alertmanager/alertmanager.yml + owner: jingrow + group: jingrow + mode: 0600 + +- name: Create Alertmanager Systemd Service File + template: + src: alertmanager.service + dest: /etc/systemd/system/alertmanager.service + owner: root + group: root + mode: 0644 + +- name: Restart Alertmanager Service + systemd: + daemon_reload: true + name: alertmanager + enabled: yes + state: restarted diff --git a/jcloud/playbooks/roles/alertmanager/templates/alertmanager.service b/jcloud/playbooks/roles/alertmanager/templates/alertmanager.service new file mode 100644 index 0000000..c1a9a43 --- /dev/null +++ b/jcloud/playbooks/roles/alertmanager/templates/alertmanager.service @@ -0,0 +1,27 @@ +[Unit] +Description=Prometheus Alertmanager +After=network-online.target + +[Service] +Type=simple +User=jingrow 
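+# Run Alertmanager as the jingrow service account; it binds only to
+# 127.0.0.1:9093 and is exposed externally at /alertmanager (see ExecStart).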
+Group=jingrow + +PIDFile=/var/run/alertmanager.pid +ExecReload=/bin/kill -HUP $MAINPID + +ExecStart=/home/jingrow/alertmanager/alertmanager \ + --config.file=/home/jingrow/alertmanager/alertmanager.yml \ + --storage.path=/home/jingrow/alertmanager/data \ + --web.listen-address=127.0.0.1:9093 \ + --web.external-url=https://{{ server }}/alertmanager + + +SyslogIdentifier=alertmanager +Restart=always +RestartSec=1 +StartLimitInterval=0 + + +[Install] +WantedBy=multi-user.target \ No newline at end of file diff --git a/jcloud/playbooks/roles/alertmanager/templates/alertmanager.yml b/jcloud/playbooks/roles/alertmanager/templates/alertmanager.yml new file mode 100644 index 0000000..6ef2436 --- /dev/null +++ b/jcloud/playbooks/roles/alertmanager/templates/alertmanager.yml @@ -0,0 +1,21 @@ +global: + resolve_timeout: 5m + +route: + group_by: ["alertname"] + group_wait: 10s + group_interval: 10s + repeat_interval: 1h + receiver: "web.hook" + +receivers: + - name: "web.hook" + webhook_configs: + - url: "http://10.0.0.1:5001/" + +inhibit_rules: + - source_match: + severity: "critical" + target_match: + severity: "warning" + equal: ["alertname", "dev", "instance"] diff --git a/jcloud/playbooks/roles/alertmanager/templates/amtool.yml b/jcloud/playbooks/roles/alertmanager/templates/amtool.yml new file mode 100644 index 0000000..f34f7d5 --- /dev/null +++ b/jcloud/playbooks/roles/alertmanager/templates/amtool.yml @@ -0,0 +1,2 @@ +alertmanager.url: http://127.0.0.1:9093/alertmanager" +output: extended diff --git a/jcloud/playbooks/roles/auditd/defaults/main.yml b/jcloud/playbooks/roles/auditd/defaults/main.yml new file mode 100644 index 0000000..603b56c --- /dev/null +++ b/jcloud/playbooks/roles/auditd/defaults/main.yml @@ -0,0 +1,7 @@ +--- +auditd: + space_left_action: email + action_mail_acct: root + admin_space_left_action: syslog + max_log_file_action: keep_logs + diff --git a/jcloud/playbooks/roles/auditd/handlers/main.yml b/jcloud/playbooks/roles/auditd/handlers/main.yml new file mode 100644 index 0000000..979ec0c --- /dev/null +++ b/jcloud/playbooks/roles/auditd/handlers/main.yml @@ -0,0 +1,8 @@ +--- +- name: restart auditd + command: /sbin/service auditd restart + changed_when: no + check_mode: no + failed_when: no + args: + warn: no \ No newline at end of file diff --git a/jcloud/playbooks/roles/auditd/tasks/main.yml b/jcloud/playbooks/roles/auditd/tasks/main.yml new file mode 100644 index 0000000..abf5ecf --- /dev/null +++ b/jcloud/playbooks/roles/auditd/tasks/main.yml @@ -0,0 +1,156 @@ +--- +- name: Ensure auditd is installed + package: + name: auditd + state: present + notify: restart auditd + +- name: "Ensure auditd service is enabled and running" + service: + name: auditd + state: started + enabled: yes + +- name: "Ensure audit log storage size is configured" + lineinfile: + dest: /etc/audit/auditd.conf + regexp: "^max_log_file( |=)" + line: "max_log_file = 10" + state: present + notify: restart auditd + +- name: "Ensure audit logs are not automatically deleted" + lineinfile: + dest: /etc/audit/auditd.conf + regexp: "^max_log_file_action" + line: "max_log_file_action = {{ auditd['max_log_file_action'] }}" + state: present + notify: restart auditd + +- name: "Ensure system is disabled when audit logs are full" + lineinfile: + dest: /etc/audit/auditd.conf + regexp: "{{ item.regexp }}" + line: "{{ item.line }}" + state: present + with_items: + - { regexp: '^space_left_action', line: "space_left_action = {{ auditd['space_left_action'] }}" } + - { regexp: '^action_mail_acct', line: 
"action_mail_acct = {{ auditd['action_mail_acct'] }}" } + - { regexp: '^admin_space_left_action', line: "admin_space_left_action = {{ auditd['admin_space_left_action'] }}" } + notify: restart auditd + +- name: "Ensure events that modify date and time information are collected" + template: + src: time_change.rules.j2 + dest: /etc/audit/rules.d/time_change.rules + owner: root + group: root + mode: 0600 + notify: restart auditd + +- name: "Ensure events that modify user/group information are collected" + template: + src: identity.rules.j2 + dest: /etc/audit/rules.d/identity.rules + owner: root + group: root + mode: 0600 + notify: restart auditd + +- name: "Ensure events that modify the system's network environment are collected" + template: + src: system_local.rules.j2 + dest: /etc/audit/rules.d/system_local.rules + owner: root + group: root + mode: 0600 + notify: restart auditd + +- name: "Ensure events that modify the system's Mandatory Access Controls are collected" + template: + src: MAC_policy.rules.j2 + dest: /etc/audit/rules.d/MAC_policy.rules + owner: root + group: root + mode: 0600 + notify: restart auditd + +- name: "Ensure login and logout events are collected" + template: + src: logins.rules.j2 + dest: /etc/audit/rules.d/logins.rules + owner: root + group: root + mode: 0600 + notify: restart auditd + +- name: "Ensure discretionary access control permission modification events are collected" + template: + src: perm_mod.rules.j2 + dest: /etc/audit/rules.d/perm_mod.rules + owner: root + group: root + mode: 0600 + notify: restart auditd + +- name: "Ensure unsuccessful unauthorized file access attempts are collected" + template: + src: access.rules.j2 + dest: /etc/audit/rules.d/access.rules + owner: root + group: root + mode: 0600 + notify: restart auditd + +- name: "Ensure use of privileged commands is collected" + block: + - name: "| AUDIT | Ensure use of privileged commands is collected" + shell: for i in $(df | grep '^/dev' | awk '{ print $NF }'); do find $i -xdev -type f -perm -4000 -o -type f -perm -2000 2>/dev/null; done + register: priv_procs + changed_when: no + check_mode: no + + - name: "| PATCH | Ensure use of privileged commands is collected" + template: + src: priv_commands.rules.j2 + dest: /etc/audit/rules.d/priv_commands.rules + owner: root + group: root + mode: 0600 + notify: restart auditd + +- name: "Ensure file deletion events by users are collected" + template: + src: deletion.rules.j2 + dest: /etc/audit/rules.d/deletion.rules + owner: root + group: root + mode: 0600 + notify: restart auditd + +- name: "Ensure changes to system administration scope (sudoers) is collected" + template: + src: scope.rules.j2 + dest: /etc/audit/rules.d/scope.rules + owner: root + group: root + mode: 0600 + notify: restart auditd + +- name: "Ensure system administrator command executions (sudo) are collected" + template: + src: actions.rules.j2 + dest: /etc/audit/rules.d/actions.rules + owner: root + group: root + mode: 0600 + notify: restart auditd + +- name: "Ensure the audit configuration is immutable" + template: + src: 99_finalize.rules.j2 + dest: /etc/audit/rules.d/99_finalize.rules + owner: root + group: root + mode: 0600 + notify: restart auditd diff --git a/jcloud/playbooks/roles/auditd/templates/99_finalize.rules.j2 b/jcloud/playbooks/roles/auditd/templates/99_finalize.rules.j2 new file mode 100644 index 0000000..381a9ac --- /dev/null +++ b/jcloud/playbooks/roles/auditd/templates/99_finalize.rules.j2 @@ -0,0 +1 @@ +-e 2 \ No newline at end of file diff --git 
a/jcloud/playbooks/roles/auditd/templates/MAC_policy.rules.j2 b/jcloud/playbooks/roles/auditd/templates/MAC_policy.rules.j2 new file mode 100644 index 0000000..640c21a --- /dev/null +++ b/jcloud/playbooks/roles/auditd/templates/MAC_policy.rules.j2 @@ -0,0 +1,2 @@ +-w /etc/selinux/ -p wa -k MAC-policy +-w /usr/share/selinux/ -p wa -k MAC-policy diff --git a/jcloud/playbooks/roles/auditd/templates/access.rules.j2 b/jcloud/playbooks/roles/auditd/templates/access.rules.j2 new file mode 100644 index 0000000..4a283cb --- /dev/null +++ b/jcloud/playbooks/roles/auditd/templates/access.rules.j2 @@ -0,0 +1,4 @@ +-a always,exit -F arch=b64 -S creat -S open -S openat -S truncate -S ftruncate -F exit=-EACCES -F auid>=1000 -F auid!=4294967295 -k access +-a always,exit -F arch=b32 -S creat -S open -S openat -S truncate -S ftruncate -F exit=-EACCES -F auid>=1000 -F auid!=4294967295 -k access +-a always,exit -F arch=b64 -S creat -S open -S openat -S truncate -S ftruncate -F exit=-EPERM -F auid>=1000 -F auid!=4294967295 -k access +-a always,exit -F arch=b32 -S creat -S open -S openat -S truncate -S ftruncate -F exit=-EPERM -F auid>=1000 -F auid!=4294967295 -k access \ No newline at end of file diff --git a/jcloud/playbooks/roles/auditd/templates/actions.rules.j2 b/jcloud/playbooks/roles/auditd/templates/actions.rules.j2 new file mode 100644 index 0000000..9eafbd3 --- /dev/null +++ b/jcloud/playbooks/roles/auditd/templates/actions.rules.j2 @@ -0,0 +1,2 @@ +-a always,exit -F arch=b64 -C euid!=uid -F euid=0 -F auid>=1000 -F auid!=4294967295 -S execve -k actions +-a always,exit -F arch=b32 -C euid!=uid -F euid=0 -F auid>=1000 -F auid!=4294967295 -S execve -k actions diff --git a/jcloud/playbooks/roles/auditd/templates/deletion.rules.j2 b/jcloud/playbooks/roles/auditd/templates/deletion.rules.j2 new file mode 100644 index 0000000..4eb88be --- /dev/null +++ b/jcloud/playbooks/roles/auditd/templates/deletion.rules.j2 @@ -0,0 +1,2 @@ +-a always,exit -F arch=b64 -S unlink -S unlinkat -S rename -S renameat -F auid>=1000 -F auid!=4294967295 -k delete +-a always,exit -F arch=b32 -S unlink -S unlinkat -S rename -S renameat -F auid>=1000 -F auid!=4294967295 -k delete \ No newline at end of file diff --git a/jcloud/playbooks/roles/auditd/templates/identity.rules.j2 b/jcloud/playbooks/roles/auditd/templates/identity.rules.j2 new file mode 100644 index 0000000..f16cd78 --- /dev/null +++ b/jcloud/playbooks/roles/auditd/templates/identity.rules.j2 @@ -0,0 +1,5 @@ +-w /etc/group -p wa -k identity +-w /etc/passwd -p wa -k identity +-w /etc/gshadow -p wa -k identity +-w /etc/shadow -p wa -k identity +-w /etc/security/opasswd -p wa -k identity \ No newline at end of file diff --git a/jcloud/playbooks/roles/auditd/templates/logins.rules.j2 b/jcloud/playbooks/roles/auditd/templates/logins.rules.j2 new file mode 100644 index 0000000..092a053 --- /dev/null +++ b/jcloud/playbooks/roles/auditd/templates/logins.rules.j2 @@ -0,0 +1,3 @@ +-w /var/log/faillog -p wa -k logins +-w /var/log/lastlog -p wa -k logins +-w /var/run/faillock/ -p wa -k logins diff --git a/jcloud/playbooks/roles/auditd/templates/perm_mod.rules.j2 b/jcloud/playbooks/roles/auditd/templates/perm_mod.rules.j2 new file mode 100644 index 0000000..7eca170 --- /dev/null +++ b/jcloud/playbooks/roles/auditd/templates/perm_mod.rules.j2 @@ -0,0 +1,6 @@ +-a always,exit -F arch=b64 -S chmod -S fchmod -S fchmodat -F auid>=1000 -F auid!=4294967295 -k perm_mod +-a always,exit -F arch=b32 -S chmod -S fchmod -S fchmodat -F auid>=1000 -F auid!=4294967295 -k perm_mod +-a always,exit -F 
arch=b64 -S chown -S fchown -S fchownat -S lchown -F auid>=1000 -F auid!=4294967295 -k perm_mod +-a always,exit -F arch=b32 -S chown -S fchown -S fchownat -S lchown -F auid>=1000 -F auid!=4294967295 -k perm_mod +-a always,exit -F arch=b64 -S setxattr -S lsetxattr -S fsetxattr -S removexattr -S lremovexattr -S fremovexattr -F auid>=1000 -F auid!=4294967295 -k perm_mod +-a always,exit -F arch=b32 -S setxattr -S lsetxattr -S fsetxattr -S removexattr -S lremovexattr -S fremovexattr -F auid>=1000 -F auid!=4294967295 -k perm_mod \ No newline at end of file diff --git a/jcloud/playbooks/roles/auditd/templates/priv_commands.rules.j2 b/jcloud/playbooks/roles/auditd/templates/priv_commands.rules.j2 new file mode 100644 index 0000000..e6f78b9 --- /dev/null +++ b/jcloud/playbooks/roles/auditd/templates/priv_commands.rules.j2 @@ -0,0 +1,4 @@ +{% for proc in priv_procs.stdout_lines -%} +-a always,exit -F path={{ proc }} -F perm=x -F auid>=1000 -F auid!=4294967295 -k privileged + +{% endfor %} \ No newline at end of file diff --git a/jcloud/playbooks/roles/auditd/templates/scope.rules.j2 b/jcloud/playbooks/roles/auditd/templates/scope.rules.j2 new file mode 100644 index 0000000..0ae21fd --- /dev/null +++ b/jcloud/playbooks/roles/auditd/templates/scope.rules.j2 @@ -0,0 +1,2 @@ +-w /etc/sudoers -p wa -k scope +-w /etc/sudoers.d/ -p wa -k scope diff --git a/jcloud/playbooks/roles/auditd/templates/system_local.rules.j2 b/jcloud/playbooks/roles/auditd/templates/system_local.rules.j2 new file mode 100644 index 0000000..ef28c58 --- /dev/null +++ b/jcloud/playbooks/roles/auditd/templates/system_local.rules.j2 @@ -0,0 +1,6 @@ +-a always,exit -F arch=b64 -S sethostname -S setdomainname -k system-locale +-a always,exit -F arch=b32 -S sethostname -S setdomainname -k system-locale +-w /etc/issue -p wa -k system-locale +-w /etc/issue.net -p wa -k system-locale +-w /etc/hosts -p wa -k system-locale +-w /etc/sysconfig/network -p wa -k system-locale \ No newline at end of file diff --git a/jcloud/playbooks/roles/auditd/templates/time_change.rules.j2 b/jcloud/playbooks/roles/auditd/templates/time_change.rules.j2 new file mode 100644 index 0000000..719a992 --- /dev/null +++ b/jcloud/playbooks/roles/auditd/templates/time_change.rules.j2 @@ -0,0 +1,7 @@ +-a always,exit -F arch=b32 -S adjtimex -S settimeofday -S stime -k time-change +-a always,exit -F arch=b32 -S clock_settime -k time-change +{% if ansible_architecture == 'x86_64' -%} +-a always,exit -F arch=b64 -S adjtimex -S settimeofday -k time-change +-a always,exit -F arch=b64 -S clock_settime -k time-change +{% endif %} +-w /etc/localtime -p wa -k time-change \ No newline at end of file diff --git a/jcloud/playbooks/roles/aws/tasks/main.yml b/jcloud/playbooks/roles/aws/tasks/main.yml new file mode 100644 index 0000000..6a13ab0 --- /dev/null +++ b/jcloud/playbooks/roles/aws/tasks/main.yml @@ -0,0 +1,15 @@ +--- +- name: Copy Authorized Keys from ubuntu to root User + copy: + src: /home/ubuntu/.ssh/authorized_keys + dest: /root/.ssh/authorized_keys + mode: 0600 + remote_src: yes + +- name: Remove Ubuntu User + user: + name: ubuntu + state: absent + remove: yes + force: yes + ignore_errors: yes diff --git a/jcloud/playbooks/roles/bench/tasks/main.yml b/jcloud/playbooks/roles/bench/tasks/main.yml new file mode 100644 index 0000000..876aef0 --- /dev/null +++ b/jcloud/playbooks/roles/bench/tasks/main.yml @@ -0,0 +1,13 @@ +--- +- name: Create Benches Directory + become: yes + become_user: jingrow + file: + dest: /home/jingrow/benches + state: directory + +- name: Install 
MariaDB Client + apt: + pkg: + - mariadb-client + state: present diff --git a/jcloud/playbooks/roles/blackbox_exporter/tasks/main.yml b/jcloud/playbooks/roles/blackbox_exporter/tasks/main.yml new file mode 100644 index 0000000..cf03173 --- /dev/null +++ b/jcloud/playbooks/roles/blackbox_exporter/tasks/main.yml @@ -0,0 +1,44 @@ +--- +- name: Create Blackbox Exporter Directory + file: + path: /opt/blackbox_exporter + state: directory + mode: 0755 + +- name: Download Blackbox Exporter Archive + unarchive: + src: https://github.com/prometheus/blackbox_exporter/releases/download/v0.25.0/blackbox_exporter-0.25.0.linux-amd64.tar.gz + dest: /tmp + remote_src: yes + +- name: Copy Blackbox Exporter Binary + copy: + src: /tmp/blackbox_exporter-0.25.0.linux-amd64/blackbox_exporter + dest: /opt/blackbox_exporter/blackbox_exporter + remote_src: yes + mode: 0755 + owner: root + group: root + +- name: Configure Blackbox Exporter Modules + template: + src: blackbox.yml + dest: /opt/blackbox_exporter/blackbox.yml + owner: root + group: root + mode: 0644 + +- name: Create Blackbox Exporter Systemd Service File + template: + src: blackbox_exporter.service + dest: /etc/systemd/system/blackbox_exporter.service + owner: root + group: root + mode: 0644 + +- name: Restart Blackbox Exporter Service + systemd: + daemon_reload: true + name: blackbox_exporter + enabled: yes + state: restarted diff --git a/jcloud/playbooks/roles/blackbox_exporter/templates/blackbox.yml b/jcloud/playbooks/roles/blackbox_exporter/templates/blackbox.yml new file mode 100644 index 0000000..8d89778 --- /dev/null +++ b/jcloud/playbooks/roles/blackbox_exporter/templates/blackbox.yml @@ -0,0 +1,17 @@ +modules: + http_2xx: + prober: http + timeout: 5s + http: + valid_http_versions: ['HTTP/1.1', 'HTTP/2.0'] + valid_status_codes: [200] + method: GET + no_follow_redirects: false + fail_if_ssl: false + fail_if_not_ssl: true + tls_config: + insecure_skip_verify: false + preferred_ip_protocol: 'ip4' + ip_protocol_fallback: false + icmp: + prober: icmp diff --git a/jcloud/playbooks/roles/blackbox_exporter/templates/blackbox_exporter.service b/jcloud/playbooks/roles/blackbox_exporter/templates/blackbox_exporter.service new file mode 100644 index 0000000..e04c2ec --- /dev/null +++ b/jcloud/playbooks/roles/blackbox_exporter/templates/blackbox_exporter.service @@ -0,0 +1,19 @@ +[Unit] +Description=Prometheus Blackbox Exporter +After=network-online.target + +[Service] +Type=simple +ExecReload=/bin/kill -HUP $MAINPID +ExecStart=/opt/blackbox_exporter/blackbox_exporter \ + --web.external-url=https://{{ server }}/blackbox \ + --web.listen-address=127.0.0.1:9115 \ + --config.file=/opt/blackbox_exporter/blackbox.yml + +SyslogIdentifier=blackbox_exporter +Restart=always +RestartSec=1 +StartLimitInterval=0 + +[Install] +WantedBy=multi-user.target diff --git a/jcloud/playbooks/roles/cadvisor/tasks/main.yml b/jcloud/playbooks/roles/cadvisor/tasks/main.yml new file mode 100644 index 0000000..a51636d --- /dev/null +++ b/jcloud/playbooks/roles/cadvisor/tasks/main.yml @@ -0,0 +1,37 @@ +--- +- name: Create cAdvisor Directory + file: + path: /opt/cadvisor + state: directory + mode: 0755 + +- name: Download cAdvisor Binary (amd64) + get_url: + url: https://github.com/google/cadvisor/releases/download/v0.50.0/cadvisor-v0.50.0-linux-amd64 + checksum: sha256:c63976400ca980b4b34bd074ef93c5321650050e45e6dc4d0cb1baf7dc772639 + dest: /opt/cadvisor/cadvisor + mode: 0755 + when: ansible_architecture == "x86_64" + +- name: Download cAdvisor Binary (arm64) + get_url: + url: 
https://github.com/google/cadvisor/releases/download/v0.50.0/cadvisor-v0.50.0-linux-arm64 + checksum: sha256:37c998d54dc0e8b1635d9be4c0ab15dfc3123aa41805ad0bd909c896d5a1a89b + dest: /opt/cadvisor/cadvisor + mode: 0755 + when: ansible_architecture == "arm64" + +- name: Create cAdvisor Systemd Service File + template: + src: cadvisor.service + dest: /etc/systemd/system/cadvisor.service + owner: root + group: root + mode: 0644 + +- name: Restart cAdvisor Service + systemd: + daemon_reload: true + name: cadvisor + enabled: yes + state: restarted diff --git a/jcloud/playbooks/roles/cadvisor/templates/cadvisor.service b/jcloud/playbooks/roles/cadvisor/templates/cadvisor.service new file mode 100644 index 0000000..369488d --- /dev/null +++ b/jcloud/playbooks/roles/cadvisor/templates/cadvisor.service @@ -0,0 +1,15 @@ +[Unit] +Description=cAdvisor Server +After=network-online.target + +[Service] +Type=simple +ExecStart=/opt/cadvisor/cadvisor -listen_ip 127.0.0.1 --port 9338 -housekeeping_interval=10s + +SyslogIdentifier=cadvisor +Restart=always +RestartSec=1 +StartLimitInterval=0 + +[Install] +WantedBy=multi-user.target diff --git a/jcloud/playbooks/roles/clamav/tasks/main.yml b/jcloud/playbooks/roles/clamav/tasks/main.yml new file mode 100644 index 0000000..e2983ce --- /dev/null +++ b/jcloud/playbooks/roles/clamav/tasks/main.yml @@ -0,0 +1,28 @@ +- name: Update APT Cache + apt: + update_cache: yes + +- name: Install ClamAV and related packages + apt: + pkg: + - clamav + - clamav-base + state: + latest + +- name: Configure freshclam + replace: + dest: /etc/clamav/freshclam.conf + regexp: '^{{ item.source_name }}' + replace: '{{ item.target_name }}' + with_items: + - { source_name: 'Example', target_name: '#Example' } + - { source_name: '#UpdateLogFile', target_name: 'UpdateLogFile' } + - { source_name: '#DatabaseDirectory', target_name: 'DatabaseDirectory' } + - { source_name: '#LogTime', target_name: 'LogTime' } + +- name: Disable automatic update of virus definitions + service: + name: clamav-freshclam + state: stopped + enabled: no diff --git a/jcloud/playbooks/roles/configure_ssh_logging/files/record_ssh_session.j2 b/jcloud/playbooks/roles/configure_ssh_logging/files/record_ssh_session.j2 new file mode 100644 index 0000000..9d5e5af --- /dev/null +++ b/jcloud/playbooks/roles/configure_ssh_logging/files/record_ssh_session.j2 @@ -0,0 +1,11 @@ +#Script to Record the User's Terminal Session + +if [ "x$session_record" = "x" ] +then +timestamp=`date "+%d_%m_%Y_%H_%M"` +output=/var/log/ssh_sessions/session.$USER.$$.$timestamp +session_record=started +export session_record +script -t -f -q 2>${output}.timing $output +exit +fi \ No newline at end of file diff --git a/jcloud/playbooks/roles/configure_ssh_logging/tasks/main.yml b/jcloud/playbooks/roles/configure_ssh_logging/tasks/main.yml new file mode 100644 index 0000000..c9229fb --- /dev/null +++ b/jcloud/playbooks/roles/configure_ssh_logging/tasks/main.yml @@ -0,0 +1,15 @@ +--- + +- name: Create log folder + file: + path: /var/log/ssh_sessions + owner: jingrow + mode: u=rwX,g=rX,o=rX + recurse: yes + state: directory + +- name: Configure session recording + copy: + src: record_ssh_session.j2 + dest: /etc/profile.d/record_ssh_session.sh + diff --git a/jcloud/playbooks/roles/convert/tasks/main.yml b/jcloud/playbooks/roles/convert/tasks/main.yml new file mode 100644 index 0000000..c4f6d98 --- /dev/null +++ b/jcloud/playbooks/roles/convert/tasks/main.yml @@ -0,0 +1,86 @@ +--- +- name: Install MySQLdb Python Package + apt: + pkg: + - python3-mysqldb + 
state: present + +- name: Add MariaDB Configuration File + template: + src: mariadb.cnf + dest: /etc/mysql/conf.d/settings.cnf + owner: root + group: root + mode: 0644 + +- name: Get All Users + mysql_query: + login_user: root + login_password: "{{ mariadb_root_password }}" + login_db: mysql + query: + - "select user from mysql.user where user != 'root' or user!= 'debian-sys-maint' and host = 'localhost';" + register: user + +- name: Create List of Users + set_fact: + users: "{{ users|default([]) + [item] | reject('search','root') | reject('search','mariadb.sys') | reject('search','mysql') | reject('search','debian-sys-maint') }}" + with_items: + - "{{ user.query_result[0]|map(attribute='User') }}" + +- name: Allow Users to Access from any Host + mysql_query: + login_user: root + login_password: "{{ mariadb_root_password }}" + query: + - RENAME USER `{{ item }}`@'localhost' TO `{{ item }}`@'%'; + ignore_errors: true + with_items: '{{users}}' + +- name: Allow Remote root Login + mysql_user: + login_user: root + login_password: "{{ mariadb_root_password }}" + check_implicit_admin: yes + name: root + host: "{{ item }}" + priv: "*.*:ALL,GRANT" + password: "{{ mariadb_root_password }}" + state: present + with_items: + - localhost + - 127.0.0.1 + - ::1 + - "{{ private_ip }}" + - "%" + +- name: Bind MariaDB to Private IP Address + lineinfile: + dest: /etc/mysql/conf.d/settings.cnf + regexp: "^bind-address" + line: "bind-address = {{ private_ip }}" + insertafter: '\[mysqld\]' + state: present + +- name: Restart MariaDB Service + service: + name: mysql + state: restarted + +- name: Add Jingrow User to MySQL Group + user: + name: jingrow + groups: + - mysql + append: true + +- name: Create Monitor User + mysql_user: + login_user: root + login_password: "{{ mariadb_root_password }}" + check_implicit_admin: yes + name: monitor + host: "%" + priv: "sys.*:SELECT" + password: "monitor" + state: present \ No newline at end of file diff --git a/jcloud/playbooks/roles/convert/templates/mariadb.cnf b/jcloud/playbooks/roles/convert/templates/mariadb.cnf new file mode 100644 index 0000000..e8ce58e --- /dev/null +++ b/jcloud/playbooks/roles/convert/templates/mariadb.cnf @@ -0,0 +1,67 @@ +[mysqld] + +# GENERAL # +default-storage-engine = InnoDB + +# MyISAM # +key-buffer-size = 32M +myisam-recover-options = FORCE + +# SAFETY # +max-connect-errors = 1000000 +innodb = FORCE + +# DATA STORAGE # +datadir = /var/lib/mysql/ + +# BINARY LOGGING # +log-bin = /var/lib/mysql/mysql-bin +log_bin_index = /var/lib/mysql/mysql-bin.index +expire-logs-days = 14 +sync-binlog = 1 + + +# CACHES AND LIMITS # +tmp-table-size = 32M +max-heap-table-size = 32M +query-cache-type = 0 +query-cache-size = 0 +max-connections = 200 +thread-cache-size = 50 +open-files-limit = 65535 +table-definition-cache = 4096 +table-open-cache = 10240 +tmp-disk-table-size = 5120M +max-statement-time = 3600 +extra_port = 3307 +extra_max_connections = 5 + +# INNODB # +innodb-flush-method = O_DIRECT +innodb-log-files-in-group = 2 +innodb-log-file-size = 512M +innodb-flush-log-at-trx-commit = 1 +innodb-file-per-table = 1 +innodb-buffer-pool-size = {{ (ansible_memtotal_mb * 0.6)|round|int }}M +innodb-file-format = barracuda +innodb-large-prefix = 1 +innodb-old-blocks-time = 5000 +collation-server = utf8mb4_unicode_ci +character-set-server = utf8mb4 +character-set-client-handshake = FALSE +max_allowed_packet = 512M + +# LOGGING # +log-error = /var/lib/mysql/mysql-error.log +log-queries-not-using-indexes = 0 +slow-query-log = 1 +slow-query-log-file = 
/var/lib/mysql/mysql-slow.log + +# Networking +bind-address = {{ private_ip }} + +[mysql] +default-character-set = utf8mb4 + +[mysqldump] +max_allowed_packet = 512M diff --git a/jcloud/playbooks/roles/copy/tasks/main.yml b/jcloud/playbooks/roles/copy/tasks/main.yml new file mode 100644 index 0000000..41218e4 --- /dev/null +++ b/jcloud/playbooks/roles/copy/tasks/main.yml @@ -0,0 +1,8 @@ +--- +- name: Copy Files from Source to Destination + command: "cp --archive --recursive {{ source }}/. {{ destination }}/" + async: 7200 + poll: 5 + +- name: Write Buffered Data to Disk + command: sync diff --git a/jcloud/playbooks/roles/deadlock_logger/files/deadlock_logger.sql b/jcloud/playbooks/roles/deadlock_logger/files/deadlock_logger.sql new file mode 100644 index 0000000..cdfdb93 --- /dev/null +++ b/jcloud/playbooks/roles/deadlock_logger/files/deadlock_logger.sql @@ -0,0 +1,27 @@ +DROP DATABASE IF EXISTS `percona`; + +CREATE DATABASE `percona` DEFAULT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci; + +USE `percona`; + +DROP TABLE IF EXISTS `deadlock`; + +CREATE TABLE `deadlock` ( + `server` char(20) NOT NULL, + `ts` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, + `thread` int unsigned NOT NULL, + `txn_id` bigint unsigned NOT NULL, + `txn_time` smallint unsigned NOT NULL, + `user` char(16) NOT NULL, + `hostname` char(20) NOT NULL, + `ip` char(15) NOT NULL, + `db` char(64) NOT NULL, + `tbl` char(64) NOT NULL, + `idx` char(64) NOT NULL, + `lock_type` char(16) NOT NULL, + `lock_mode` char(1) NOT NULL, + `wait_hold` char(1) NOT NULL, + `victim` tinyint unsigned NOT NULL, + `query` text NOT NULL, + PRIMARY KEY (`server`, `ts`, `thread`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; diff --git a/jcloud/playbooks/roles/deadlock_logger/tasks/main.yml b/jcloud/playbooks/roles/deadlock_logger/tasks/main.yml new file mode 100644 index 0000000..a6b2473 --- /dev/null +++ b/jcloud/playbooks/roles/deadlock_logger/tasks/main.yml @@ -0,0 +1,32 @@ +--- +- name: Install Percona Toolkit + apt: + name: percona-toolkit + state: present + +- name: Copy Deadlock Logger Table Definition + template: + src: files/deadlock_logger.sql + dest: /tmp/deadlock_logger.sql + +- name: Create Deadlock Logger Table + mysql_db: + login_host: "{{ private_ip }}" + name: percona + state: import + target: /tmp/deadlock_logger.sql + +- name: Create Deadlock Logger Systemd Service File + template: + src: deadlock_logger.service + dest: /etc/systemd/system/deadlock_logger.service + owner: root + group: root + mode: 0644 + +- name: Restart Deadlock Logger Service + systemd: + daemon_reload: true + name: deadlock_logger + enabled: yes + state: restarted diff --git a/jcloud/playbooks/roles/deadlock_logger/templates/deadlock_logger.service b/jcloud/playbooks/roles/deadlock_logger/templates/deadlock_logger.service new file mode 100644 index 0000000..66a5ffe --- /dev/null +++ b/jcloud/playbooks/roles/deadlock_logger/templates/deadlock_logger.service @@ -0,0 +1,17 @@ +[Unit] +Description=Percona Deadlock Logger +After=network-online.target + +[Service] +Type=simple +ExecReload=/bin/kill -HUP $MAINPID +ExecStart=/usr/bin/pt-deadlock-logger --interval 30 \ + --dest D=percona,t=deadlock ,p={{ mariadb_root_password }} + +SyslogIdentifier=deadlock_logger +Restart=always +RestartSec=1 +StartLimitInterval=0 + +[Install] +WantedBy=multi-user.target diff --git a/jcloud/playbooks/roles/deadlock_logger_rename/tasks/main.yml b/jcloud/playbooks/roles/deadlock_logger_rename/tasks/main.yml new file mode 100644 index 0000000..ebc6bc8 --- 
/dev/null +++ b/jcloud/playbooks/roles/deadlock_logger_rename/tasks/main.yml @@ -0,0 +1,15 @@ +--- +- name: Create Deadlock Logger Systemd Service File + template: + src: ../../deadlock_logger/templates/deadlock_logger.service + dest: /etc/systemd/system/deadlock_logger.service + owner: root + group: root + mode: 0644 + +- name: Restart Deadlock Logger Service + systemd: + daemon_reload: true + name: deadlock_logger + enabled: yes + state: restarted diff --git a/jcloud/playbooks/roles/docker/tasks/main.yml b/jcloud/playbooks/roles/docker/tasks/main.yml new file mode 100644 index 0000000..b894186 --- /dev/null +++ b/jcloud/playbooks/roles/docker/tasks/main.yml @@ -0,0 +1,87 @@ +--- +- name: Remove Old Docker Packages + apt: + state: absent + force: yes + pkg: + - docker + - docker-engine + - docker.io + - containerd + - runc + +- name: Install Docker Dependencies + apt: + state: present + force: yes + pkg: + - apt-transport-https + - ca-certificates + - curl + - software-properties-common + +- name: Add Docker Repository Key + apt_key: + url: https://download.docker.com/linux/ubuntu/gpg + state: present + +# This seems to be breaking on some servers +- name: Add NGINX Repository Key + apt_key: + url: https://nginx.org/keys/nginx_signing.key + state: present + +- name: Add Docker Repository + apt_repository: + repo: deb https://download.docker.com/linux/ubuntu focal stable + state: present + update_cache: true + +- name: Install Docker + apt: + name: docker-ce + state: present + +- name: Add Jingrow User to docker Group + user: + name: jingrow + groups: + - docker + append: true + +- name: Setup Docker Metrics + template: + src: daemon.json + dest: /etc/docker/daemon.json + +- name: Create Docker SystemD drop-in directory + file: + dest: /etc/systemd/system/docker.service.d + state: directory + owner: root + group: root + mode: 0644 + recurse: true + +- name: Set Docker to depend on Mounts + template: + src: mounts.conf + dest: /etc/systemd/system/docker.service.d/mounts.conf + owner: root + group: root + mode: 0644 + when: docker_depends_on_mounts | default(false) | bool + +- name: Restart Docker Daemon + systemd: + daemon_reload: true + name: docker + state: restarted + +- name: Get Docker Info + command: docker info + +- name: Restart Agent processes + supervisorctl: + name: "agent:" + state: restarted diff --git a/jcloud/playbooks/roles/docker/templates/daemon.json b/jcloud/playbooks/roles/docker/templates/daemon.json new file mode 100644 index 0000000..5530b86 --- /dev/null +++ b/jcloud/playbooks/roles/docker/templates/daemon.json @@ -0,0 +1,20 @@ +{ + "experimental": true, + "icc": false, + "iptables": true, + "live-restore": true, + "metrics-addr": "127.0.0.1:9323", + "no-new-privileges": true, + "userland-proxy": false, + "max-concurrent-downloads": 5, + "registry-mirrors": [ + "https://mirror.ccs.tencentyun.com", + "https://docker.m.daocloud.io", + "https://atomhub.openatom.cn" + ], + "proxies": { + "http-proxy": "http://127.0.0.1:1080", + "https-proxy": "http://127.0.0.1:1080", + "no-proxy": "localhost,127.0.0.1" + } + } \ No newline at end of file diff --git a/jcloud/playbooks/roles/docker/templates/mounts.conf b/jcloud/playbooks/roles/docker/templates/mounts.conf new file mode 100644 index 0000000..9118132 --- /dev/null +++ b/jcloud/playbooks/roles/docker/templates/mounts.conf @@ -0,0 +1,14 @@ +[Unit] +# If Docker gets activated, then the mount will be activated as well. +# If the mount fails to activate, Docker will not be started. 
+# If the mount is explicitly stopped (or restarted), Docker will be stopped (or restarted). + +# BindsTo imposes a stronger condition than Requires. +# If the mount is stopped, Docker will be stopped too. + +# When used in conjunction with After +# The mount strictly has to be in active state for Docker to also be in active state. +# Reference: https://www.freedesktop.org/software/systemd/man/latest/systemd.unit.html#BindsTo= + +After=home-jingrow-benches.mount +BindsTo=home-jingrow-benches.mount \ No newline at end of file diff --git a/jcloud/playbooks/roles/docker_eof_update/tasks/main.yml b/jcloud/playbooks/roles/docker_eof_update/tasks/main.yml new file mode 100644 index 0000000..12679eb --- /dev/null +++ b/jcloud/playbooks/roles/docker_eof_update/tasks/main.yml @@ -0,0 +1,10 @@ +--- +- name: Update docker's daemon json file + template: + src: daemon.json + dest: /etc/docker/daemon.json + +- name: Restart Docker Daemon + systemd: + name: docker + state: restarted diff --git a/jcloud/playbooks/roles/docker_eof_update/templates/daemon.json b/jcloud/playbooks/roles/docker_eof_update/templates/daemon.json new file mode 100644 index 0000000..5530b86 --- /dev/null +++ b/jcloud/playbooks/roles/docker_eof_update/templates/daemon.json @@ -0,0 +1,20 @@ +{ + "experimental": true, + "icc": false, + "iptables": true, + "live-restore": true, + "metrics-addr": "127.0.0.1:9323", + "no-new-privileges": true, + "userland-proxy": false, + "max-concurrent-downloads": 5, + "registry-mirrors": [ + "https://mirror.ccs.tencentyun.com", + "https://docker.m.daocloud.io", + "https://atomhub.openatom.cn" + ], + "proxies": { + "http-proxy": "http://127.0.0.1:1080", + "https-proxy": "http://127.0.0.1:1080", + "no-proxy": "localhost,127.0.0.1" + } + } \ No newline at end of file diff --git a/jcloud/playbooks/roles/docker_system_prune/tasks/main.yml b/jcloud/playbooks/roles/docker_system_prune/tasks/main.yml new file mode 100644 index 0000000..c3aeaca --- /dev/null +++ b/jcloud/playbooks/roles/docker_system_prune/tasks/main.yml @@ -0,0 +1,5 @@ +--- +- name: Prune docker system objects older than 6 hours + command: 'docker system prune -f --filter "until=6h"' + async: 7200 + poll: 30 diff --git a/jcloud/playbooks/roles/earlyoom_memory_limits/tasks/main.yml b/jcloud/playbooks/roles/earlyoom_memory_limits/tasks/main.yml new file mode 100644 index 0000000..ff8c8b6 --- /dev/null +++ b/jcloud/playbooks/roles/earlyoom_memory_limits/tasks/main.yml @@ -0,0 +1,17 @@ +--- +- name: Install earlyoom + apt: + pkg: + - earlyoom + state: + present + register: result + until: result.failed == false + retries: 5 + delay: 120 + +- name: Enable earlyoom + systemd: + name: earlyoom + state: started + enabled: yes diff --git a/jcloud/playbooks/roles/elasticsearch/tasks/main.yml b/jcloud/playbooks/roles/elasticsearch/tasks/main.yml new file mode 100644 index 0000000..9b60988 --- /dev/null +++ b/jcloud/playbooks/roles/elasticsearch/tasks/main.yml @@ -0,0 +1,29 @@ +--- +- name: Install Elasticsearch + apt: + name: elasticsearch + state: present + +- name: Setup Elasticsearch + template: + src: elasticsearch.yml + dest: /etc/elasticsearch/elasticsearch.yml + +- name: Restart Elasticsearch Daemon + systemd: + name: elasticsearch + daemon_reload: true + enabled: yes + state: restarted + +- name: Setup Elasticsearch Authentication + become: yes + become_user: jingrow + command: "htpasswd -Bbc /home/jingrow/agent/nginx/kibana.htpasswd jingrow {{ kibana_password }}" + +- name: Setup NGINX for Elasticsearch + become: yes + become_user: jingrow + command: 
/home/jingrow/agent/env/bin/agent setup log + args: + chdir: /home/jingrow/agent diff --git a/jcloud/playbooks/roles/elasticsearch/templates/elasticsearch.yml b/jcloud/playbooks/roles/elasticsearch/templates/elasticsearch.yml new file mode 100644 index 0000000..233cf36 --- /dev/null +++ b/jcloud/playbooks/roles/elasticsearch/templates/elasticsearch.yml @@ -0,0 +1,96 @@ +# ======================== Elasticsearch Configuration ========================= +# +# NOTE: Elasticsearch comes with reasonable defaults for most settings. +# Before you set out to tweak and tune the configuration, make sure you +# understand what are you trying to accomplish and the consequences. +# +# The primary way of configuring a node is via this file. This template lists +# the most important settings you may want to configure for a production cluster. +# +# Please consult the documentation for further information on configuration options: +# https://www.elastic.co/guide/en/elasticsearch/reference/index.html +# +# ---------------------------------- Cluster ----------------------------------- +# +# Use a descriptive name for your cluster: +# +#cluster.name: my-application +# +# ------------------------------------ Node ------------------------------------ +# +# Use a descriptive name for the node: +# +#node.name: node-1 +# +# Add custom attributes to the node: +# +#node.attr.rack: r1 +# +# ----------------------------------- Paths ------------------------------------ +# +# Path to directory where to store the data (separate multiple locations by comma): +# +path.data: /var/lib/elasticsearch +# +# Path to log files: +# +path.logs: /var/log/elasticsearch +# +# ----------------------------------- Memory ----------------------------------- +# +# Lock the memory on startup: +# +#bootstrap.memory_lock: true +# +# Make sure that the heap size is set to about half the memory available +# on the system and that the owner of the process is allowed to use this +# limit. +# +# Elasticsearch performs poorly when the system is swapping the memory. +# +# ---------------------------------- Network ----------------------------------- +# +# By default Elasticsearch is only accessible on localhost. Set a different +# address here to expose this node on the network: +# +#network.host: 192.168.0.1 +# +# By default Elasticsearch listens for HTTP traffic on the first free port it +# finds starting at 9200. Set a specific HTTP port here: +# +#http.port: 9200 +# +# For more information, consult the network module documentation. +# +# --------------------------------- Discovery ---------------------------------- +# +# Pass an initial list of hosts to perform discovery when this node is started: +# The default list of hosts is ["127.0.0.1", "[::1]"] +# +#discovery.seed_hosts: ["host1", "host2"] +# +# Bootstrap the cluster using an initial set of master-eligible nodes: +# +#cluster.initial_master_nodes: ["node-1", "node-2"] +# +# For more information, consult the discovery and cluster formation module documentation. +# +# ---------------------------------- Various ----------------------------------- +# +# Require explicit names when deleting indices: +# +#action.destructive_requires_name: true +# +# ---------------------------------- Security ---------------------------------- +# +# *** WARNING *** +# +# Elasticsearch security features are not enabled by default. +# These features are free, but require configuration changes to enable them. +# This means that users don’t have to provide credentials and can get full access +# to the cluster. 
Network connections are also not encrypted. +# +# To protect your data, we strongly encourage you to enable the Elasticsearch security features. +# Refer to the following documentation for instructions. +# +# https://www.elastic.co/guide/en/elasticsearch/reference/7.16/configuring-stack-security.html diff --git a/jcloud/playbooks/roles/elasticsearch_exporter/tasks/main.yml b/jcloud/playbooks/roles/elasticsearch_exporter/tasks/main.yml new file mode 100644 index 0000000..c1b4865 --- /dev/null +++ b/jcloud/playbooks/roles/elasticsearch_exporter/tasks/main.yml @@ -0,0 +1,36 @@ +--- +- name: Create Elasticsearch Exporter Directory + file: + path: /opt/elasticsearch_exporter + state: directory + mode: 0755 + +- name: Download Elasticsearch Exporter Archive + unarchive: + src: https://github.com/prometheus-community/elasticsearch_exporter/releases/download/v1.3.0/elasticsearch_exporter-1.3.0.linux-amd64.tar.gz + dest: /tmp + remote_src: yes + +- name: Copy Elasticsearch Exporter Binary + copy: + src: /tmp/elasticsearch_exporter-1.3.0.linux-amd64/elasticsearch_exporter + dest: /opt/elasticsearch_exporter/elasticsearch_exporter + remote_src: yes + mode: 0755 + owner: root + group: root + +- name: Create Elasticsearch Exporter Systemd Service File + template: + src: elasticsearch_exporter.service + dest: /etc/systemd/system/elasticsearch_exporter.service + owner: root + group: root + mode: 0644 + +- name: Restart Elasticsearch Exporter Service + systemd: + daemon_reload: true + name: elasticsearch_exporter + enabled: yes + state: restarted diff --git a/jcloud/playbooks/roles/elasticsearch_exporter/templates/elasticsearch_exporter.service b/jcloud/playbooks/roles/elasticsearch_exporter/templates/elasticsearch_exporter.service new file mode 100644 index 0000000..a2b3c17 --- /dev/null +++ b/jcloud/playbooks/roles/elasticsearch_exporter/templates/elasticsearch_exporter.service @@ -0,0 +1,15 @@ +[Unit] +Description=Prometheus Elasticsearch Exporter +After=network-online.target + +[Service] +Type=simple +ExecStart=/opt/elasticsearch_exporter/elasticsearch_exporter --es.indices --es.shards --web.listen-address=127.0.0.1:9114 + +SyslogIdentifier=elasticsearch_exporter +Restart=always +RestartSec=1 +StartLimitInterval=0 + +[Install] +WantedBy=multi-user.target diff --git a/jcloud/playbooks/roles/essentials/tasks/main.yml b/jcloud/playbooks/roles/essentials/tasks/main.yml new file mode 100644 index 0000000..467b9ed --- /dev/null +++ b/jcloud/playbooks/roles/essentials/tasks/main.yml @@ -0,0 +1,25 @@ +--- +- name: Update APT Cache + apt: + update_cache: yes + +- name: Install Essential Packages + apt: + state: present + force: yes + pkg: + - build-essential + - git + - htop + - libcrypto++-dev + - libssl-dev + - ntp + - python3-dev + - python3-pip + - virtualenv + - redis-server + - screen + - supervisor + - vim + - acl + - zlib1g-dev diff --git a/jcloud/playbooks/roles/extend_ec2_volume/tasks/main.yml b/jcloud/playbooks/roles/extend_ec2_volume/tasks/main.yml new file mode 100644 index 0000000..79be67b --- /dev/null +++ b/jcloud/playbooks/roles/extend_ec2_volume/tasks/main.yml @@ -0,0 +1,45 @@ +--- +- name: Show Volumes + command: df -hT + +- name: Show Block Devices + command: lsblk + +- name: Show Partitions + command: 'lsblk --noheadings --output PTTYPE {{ device }}' + register: lsblk_output + +- name: Set Partition Status + set_fact: + partitioned_disk: '{{ lsblk_output.stdout.strip() != "" }}' + +- name: Extend Partition + command: 'growpart {{ device }} 1' + register: result + until: result.rc == 0 + 
retries: 10 + delay: 10 + when: partitioned_disk + +- name: Show Modified Block Devices + command: lsblk + +- name: Extend Partitioned Filesystem + command: 'resize2fs {{ device }}-part1' + when: partitioned_disk + +- name: Extend Un-partitioned Filesystem + command: 'resize2fs {{ device }}' + when: not partitioned_disk + +- name: Add Glass file back + command: fallocate -l 200M /root/glass + +- name: Show Modified Volumes + command: df -h + +- name: Restart MariaDB + service: + name: mysql + state: restarted + when: restart_mariadb | default(false) | bool diff --git a/jcloud/playbooks/roles/fail2ban/defaults/main.yml b/jcloud/playbooks/roles/fail2ban/defaults/main.yml new file mode 100644 index 0000000..3feadba --- /dev/null +++ b/jcloud/playbooks/roles/fail2ban/defaults/main.yml @@ -0,0 +1,2 @@ +--- +fail2ban_nginx_access_log: /var/log/nginx/access.log \ No newline at end of file diff --git a/jcloud/playbooks/roles/fail2ban/handlers/main.yml b/jcloud/playbooks/roles/fail2ban/handlers/main.yml new file mode 100644 index 0000000..d83f78d --- /dev/null +++ b/jcloud/playbooks/roles/fail2ban/handlers/main.yml @@ -0,0 +1,5 @@ +--- +- name: restart fail2ban + service: + name: fail2ban + state: restarted diff --git a/jcloud/playbooks/roles/fail2ban/tasks/main.yml b/jcloud/playbooks/roles/fail2ban/tasks/main.yml new file mode 100644 index 0000000..a5c2415 --- /dev/null +++ b/jcloud/playbooks/roles/fail2ban/tasks/main.yml @@ -0,0 +1,37 @@ +--- +- name: Install fail2ban package + apt: + pkg: fail2ban + state: present + +- name: Enable fail2ban + service: + name: fail2ban + enabled: yes + +- name: Copy jail.conf to jail.local + copy: + src: /etc/fail2ban/jail.conf + dest: /etc/fail2ban/jail.local + owner: root + group: root + mode: '0644' + remote_src: yes + +- name: Setup filters + template: + src: '{{item}}-filter.conf.j2' + dest: '/etc/fail2ban/filter.d/{{item}}.conf' + with_items: + - nginx-proxy + notify: + - restart fail2ban + +- name: setup jails + template: + src: '{{item}}-jail.conf.j2' + dest: '/etc/fail2ban/jail.d/{{item}}.conf' + with_items: + - nginx-proxy + notify: + - restart fail2ban diff --git a/jcloud/playbooks/roles/fail2ban/templates/nginx-proxy-filter.conf.j2 b/jcloud/playbooks/roles/fail2ban/templates/nginx-proxy-filter.conf.j2 new file mode 100644 index 0000000..27f74cd --- /dev/null +++ b/jcloud/playbooks/roles/fail2ban/templates/nginx-proxy-filter.conf.j2 @@ -0,0 +1,10 @@ +# Block IPs trying to use server as proxy. 
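+# +# The regexes below can be sanity-checked against an access log with fail2ban's bundled test tool, for example: +# fail2ban-regex /var/log/nginx/access.log /etc/fail2ban/filter.d/nginx-proxy.conf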
+[Definition] +failregex = .*\" 400 + .*"[A-Z]* /(cms|muieblackcat|db|cpcommerce|cgi-bin|wp-login|joomla|awstatstotals|wp-content|wp-includes|pma|phpmyadmin|myadmin|mysql|mysqladmin|sqladmin|mypma|admin|xampp|mysqldb|pmadb|phpmyadmin1|phpmyadmin2).*" 4[\d][\d] + .*".*supports_implicit_sdk_logging.*" 4[\d][\d] + .*".*activities?advertiser_tracking_enabled.*" 4[\d][\d] + .*".*/picture?type=normal.*" 4[\d][\d] + .*".*/announce.php?info_hash=.*" 4[\d][\d] + +ignoreregex = \ No newline at end of file diff --git a/jcloud/playbooks/roles/fail2ban/templates/nginx-proxy-jail.conf.j2 b/jcloud/playbooks/roles/fail2ban/templates/nginx-proxy-jail.conf.j2 new file mode 100644 index 0000000..eb8dc9c --- /dev/null +++ b/jcloud/playbooks/roles/fail2ban/templates/nginx-proxy-jail.conf.j2 @@ -0,0 +1,10 @@ +## block hosts trying to abuse our server as a forward proxy +## findtime - 15mins & bantime - 1hr +[nginx-proxy] +enabled = true +filter = nginx-proxy +logpath = {{ fail2ban_nginx_access_log }} +action = iptables-multiport[name=NoNginxProxy, port="http,https"] +maxretry = 3 +bantime = 3600 +findtime = 900 \ No newline at end of file diff --git a/jcloud/playbooks/roles/failover/tasks/main.yml b/jcloud/playbooks/roles/failover/tasks/main.yml new file mode 100644 index 0000000..15f84d8 --- /dev/null +++ b/jcloud/playbooks/roles/failover/tasks/main.yml @@ -0,0 +1,34 @@ +--- +- name: Stop MariaDB Secondary Thread + mysql_replication: + mode: stopslave + login_user: root + login_password: "{{ mariadb_root_password }}" + +- name: Reset MariaDB Secondary Details + mysql_replication: + mode: resetslaveall + login_user: root + login_password: "{{ mariadb_root_password }}" + +- name: Reset MariaDB Primary Details + mysql_replication: + mode: resetmaster + login_user: root + login_password: "{{ mariadb_root_password }}" + +- name: Check Primary Status + mysql_replication: + mode: getmaster + login_user: root + login_password: "{{ mariadb_root_password }}" + register: primary_status + failed_when: 'primary_status["File"] == "" or primary_status["Position"] == "" or primary_status["Is_Master"] == false' + +- name: Check Secondary Status + mysql_replication: + mode: getslave + login_user: root + login_password: "{{ mariadb_root_password }}" + register: secondary_status + failed_when: 'secondary_status["Is_Slave"] == true' diff --git a/jcloud/playbooks/roles/fetch_package_meta/tasks/main.yml b/jcloud/playbooks/roles/fetch_package_meta/tasks/main.yml new file mode 100644 index 0000000..84e3192 --- /dev/null +++ b/jcloud/playbooks/roles/fetch_package_meta/tasks/main.yml @@ -0,0 +1,10 @@ +--- +- name: Fetch package details + block: + - name: Fetch package meta + command: apt show {{ package }} + register: package_meta + + - name: Fetch package change log + command: apt changelog {{ package }} + register: change_log \ No newline at end of file diff --git a/jcloud/playbooks/roles/filebeat/filebeat.md b/jcloud/playbooks/roles/filebeat/filebeat.md new file mode 100644 index 0000000..680e066 --- /dev/null +++ b/jcloud/playbooks/roles/filebeat/filebeat.md @@ -0,0 +1,11 @@ +If filebeat version on the newly setup server doesn't match the one on Elasticsearch server then you'll have to create the indexes manually. + +1. Export the index payload from the newly setup server +```sh +filebeat export template > filebeat.template.json +``` + +2. 
Use the exported payload to create indexes (Replace `filebeat-version` with the appropriate version) +```sh +curl -XPUT -H 'Content-Type: application/json' http://localhost:9200/_template/filebeat- -d@filebeat.template.json +``` \ No newline at end of file diff --git a/jcloud/playbooks/roles/filebeat/tasks/main.yml b/jcloud/playbooks/roles/filebeat/tasks/main.yml new file mode 100644 index 0000000..2739f73 --- /dev/null +++ b/jcloud/playbooks/roles/filebeat/tasks/main.yml @@ -0,0 +1,68 @@ +--- +- name: Update APT Cache + apt: + update_cache: yes + +- name: Install Filebeat Dependencies + apt: + state: present + force: yes + pkg: + - apt-transport-https + - ca-certificates + - curl + - software-properties-common + +- name: Add Elasticsearch Repository Key + apt_key: + url: https://artifacts.elastic.co/GPG-KEY-elasticsearch + state: present + +- name: Add Elasticsearch Repository + apt_repository: + repo: deb https://artifacts.elastic.co/packages/7.x/apt stable main + state: present + update_cache: true + +- name: Install Filebeat + apt: + name: filebeat + state: present + +- name: Setup Filebeat + template: + src: filebeat.yml + dest: /etc/filebeat/filebeat.yml + +- name: Enable Filebeat Modules + command: filebeat modules enable nginx mysql system + +- name: Setup Filebeat Modules + template: + src: 'modules.d/{{ item }}.yml' + dest: '/etc/filebeat/modules.d/{{ item }}.yml' + loop: + - 'mysql' + - 'system' + - 'nginx' + +- name: Create Filebeat Modules Directory + file: + dest: /etc/filebeat/inputs.d + state: directory + +- name: Setup Filebeat Inputs + template: + src: 'inputs.d/{{ item }}.yml' + dest: '/etc/filebeat/inputs.d/{{ item }}.yml' + loop: + - 'all' + - 'containers' + - 'monitor' + +- name: Restart Filebeat Daemon + systemd: + name: filebeat + daemon_reload: true + enabled: yes + state: restarted diff --git a/jcloud/playbooks/roles/filebeat/templates/filebeat.yml b/jcloud/playbooks/roles/filebeat/templates/filebeat.yml new file mode 100644 index 0000000..a04f9a6 --- /dev/null +++ b/jcloud/playbooks/roles/filebeat/templates/filebeat.yml @@ -0,0 +1,51 @@ +######################## Filebeat Configuration ############################ + +# ========================== Filebeat global options =========================== + +filebeat.config: + inputs: + enabled: true + path: ${path.config}/inputs.d/*.yml + reload: + enabled: true + period: 10s + modules: + enabled: true + path: ${path.config}/modules.d/*.yml + reload: + enabled: true + period: 10s + +# ================================== General =================================== + +name: "{{ server }}" + +# ================================== Outputs =================================== + +output.elasticsearch: + enabled: true + hosts: ["https://{{ log_server }}:443"] + + compression_level: 0 + protocol: "https" + + username: "jingrow" + password: "{{ kibana_password }}" + + path: "/elasticsearch" + + ssl.enabled: true + ssl.verification_mode: strict + ssl.supported_protocols: [TLSv1.3] + + {% if server_type is defined and server_type == "Database Server" %} + + bulk_max_size: 10 + + {% endif %} + +# ================================== Logging =================================== + +logging.to_files: true +logging.files: + path: /var/log/filebeat diff --git a/jcloud/playbooks/roles/filebeat/templates/inputs.d/all.yml b/jcloud/playbooks/roles/filebeat/templates/inputs.d/all.yml new file mode 100644 index 0000000..f16e0b2 --- /dev/null +++ b/jcloud/playbooks/roles/filebeat/templates/inputs.d/all.yml @@ -0,0 +1,4 @@ +- type: log + enabled: true 
+ paths: + - /var/log/*.log diff --git a/jcloud/playbooks/roles/filebeat/templates/inputs.d/containers.yml b/jcloud/playbooks/roles/filebeat/templates/inputs.d/containers.yml new file mode 100644 index 0000000..e66af0e --- /dev/null +++ b/jcloud/playbooks/roles/filebeat/templates/inputs.d/containers.yml @@ -0,0 +1,5 @@ +- type: container + enabled: false + paths: + - /var/lib/docker/containers/*/*.log + stream: all diff --git a/jcloud/playbooks/roles/filebeat/templates/inputs.d/monitor.yml b/jcloud/playbooks/roles/filebeat/templates/inputs.d/monitor.yml new file mode 100644 index 0000000..5947349 --- /dev/null +++ b/jcloud/playbooks/roles/filebeat/templates/inputs.d/monitor.yml @@ -0,0 +1,7 @@ +- type: log + enabled: true + + pipeline: monitor + + paths: + - /home/jingrow/benches/*/logs/monitor.json.log diff --git a/jcloud/playbooks/roles/filebeat/templates/modules.d/mysql.yml b/jcloud/playbooks/roles/filebeat/templates/modules.d/mysql.yml new file mode 100644 index 0000000..1dab262 --- /dev/null +++ b/jcloud/playbooks/roles/filebeat/templates/modules.d/mysql.yml @@ -0,0 +1,10 @@ +- module: mysql + error: + enabled: true + var.paths: + - /var/lib/mysql/mysql-error.log + + slowlog: + enabled: true + var.paths: + - /var/lib/mysql/mysql-slow.log diff --git a/jcloud/playbooks/roles/filebeat/templates/modules.d/nginx.yml b/jcloud/playbooks/roles/filebeat/templates/modules.d/nginx.yml new file mode 100644 index 0000000..46d6cd4 --- /dev/null +++ b/jcloud/playbooks/roles/filebeat/templates/modules.d/nginx.yml @@ -0,0 +1,11 @@ +- module: nginx + access: + enabled: true + input: + pipeline: nginx + + error: + enabled: true + + ingress_controller: + enabled: false diff --git a/jcloud/playbooks/roles/filebeat/templates/modules.d/system.yml b/jcloud/playbooks/roles/filebeat/templates/modules.d/system.yml new file mode 100644 index 0000000..c8f3eee --- /dev/null +++ b/jcloud/playbooks/roles/filebeat/templates/modules.d/system.yml @@ -0,0 +1,6 @@ +- module: system + syslog: + enabled: true + + auth: + enabled: true diff --git a/jcloud/playbooks/roles/filebeat_elasticsearch/files/monitor.json b/jcloud/playbooks/roles/filebeat_elasticsearch/files/monitor.json new file mode 100644 index 0000000..2b4da12 --- /dev/null +++ b/jcloud/playbooks/roles/filebeat_elasticsearch/files/monitor.json @@ -0,0 +1,21 @@ +{ + "processors": [ + { + "json": { + "field": "message", + "target_field": "json", + "ignore_failure": true + } + }, + { + "date": { + "field": "json.timestamp", + "formats": [ + "yyyy-MM-dd HH:mm:ss.SSSSSS" + ], + "target_field": "@timestamp", + "ignore_failure": true + } + } + ] +} \ No newline at end of file diff --git a/jcloud/playbooks/roles/filebeat_elasticsearch/files/nginx.json b/jcloud/playbooks/roles/filebeat_elasticsearch/files/nginx.json new file mode 100644 index 0000000..724fecb --- /dev/null +++ b/jcloud/playbooks/roles/filebeat_elasticsearch/files/nginx.json @@ -0,0 +1,244 @@ +{ + "description": "Pipeline for parsing Nginx access logs. 
Requires the geoip and user_agent plugins.", + "processors": [ + { + "set": { + "field": "event.ingested", + "value": "{{_ingest.timestamp}}" + } + }, + { + "rename": { + "field": "message", + "target_field": "event.original" + } + }, + { + "grok": { + "ignore_missing": true, + "field": "event.original", + "patterns": [ + "(%{NGINX_HOST} )?\"?(?:%{NGINX_ADDRESS_LIST:nginx.access.remote_ip_list}|%{NOTSPACE:source.address}) - (-|%{DATA:user.name}) \\[%{HTTPDATE:nginx.access.time}\\] \"%{DATA:nginx.access.info}\" %{NUMBER:http.response.status_code:long} %{NUMBER:http.response.body.bytes:long} \"(-|%{DATA:http.request.referrer})\" \"(-|%{DATA:user_agent.original})\" \"(-|%{DATA:http.request.forwarded_for})\" \"(-|%{DATA:http.request.site})\" %{NUMBER:http.request.duration}" + ], + "pattern_definitions": { + "NGINX_HOST": "(?:%{IP:destination.ip}|%{NGINX_NOTSEPARATOR:destination.domain})(:%{NUMBER:destination.port})?", + "NGINX_NOTSEPARATOR": "[^\t ,:]+", + "NGINX_ADDRESS_LIST": "(?:%{IP}|%{WORD})(\"?,?\\s*(?:%{IP}|%{WORD}))*" + } + } + }, + { + "grok": { + "field": "nginx.access.info", + "patterns": [ + "%{WORD:http.request.method} %{DATA:_tmp.url_orig} HTTP/%{NUMBER:http.version}", + "" + ], + "ignore_missing": true + } + }, + { + "uri_parts": { + "field": "_tmp.url_orig", + "ignore_failure": true + } + }, + { + "set": { + "if": "ctx.url?.domain == null && ctx.destination?.domain != null", + "field": "url.domain", + "value": "{{destination.domain}}" + } + }, + { + "remove": { + "ignore_missing": true, + "field": [ + "nginx.access.info", + "_tmp.url_orig" + ] + } + }, + { + "split": { + "field": "nginx.access.remote_ip_list", + "separator": "\"?,?\\s+", + "ignore_missing": true + } + }, + { + "split": { + "ignore_missing": true, + "field": "nginx.access.origin", + "separator": "\"?,?\\s+" + } + }, + { + "set": { + "field": "source.address", + "if": "ctx.source?.address == null", + "value": "" + } + }, + { + "script": { + "lang": "painless", + "source": "boolean isPrivate(def dot, def ip) {\n try {\n StringTokenizer tok = new StringTokenizer(ip, dot);\n int firstByte = Integer.parseInt(tok.nextToken());\n int secondByte = Integer.parseInt(tok.nextToken());\n if (firstByte == 10) {\n return true;\n }\n if (firstByte == 192 && secondByte == 168) {\n return true;\n }\n if (firstByte == 172 && secondByte >= 16 && secondByte <= 31) {\n return true;\n }\n if (firstByte == 127) {\n return true;\n }\n return false;\n }\n catch (Exception e) {\n return false;\n }\n} try {\n ctx.source.address = null;\n if (ctx.nginx.access.remote_ip_list == null) {\n return;\n }\n def found = false;\n for (def item : ctx.nginx.access.remote_ip_list) {\n if (!isPrivate(params.dot, item)) {\n ctx.source.address = item;\n found = true;\n break;\n }\n }\n if (!found) {\n ctx.source.address = ctx.nginx.access.remote_ip_list[0];\n }\n} catch (Exception e) {\n ctx.source.address = null;\n}", + "params": { + "dot": "." 
+ }, + "if": "ctx.nginx?.access?.remote_ip_list != null && ctx.nginx.access.remote_ip_list.length > 0" + } + }, + { + "remove": { + "field": "source.address", + "if": "ctx.source.address == null" + } + }, + { + "grok": { + "ignore_failure": true, + "field": "source.address", + "patterns": [ + "^%{IP:source.ip}$" + ] + } + }, + { + "rename": { + "field": "@timestamp", + "target_field": "event.created" + } + }, + { + "date": { + "target_field": "@timestamp", + "formats": [ + "dd/MMM/yyyy:H:m:s Z" + ], + "field": "nginx.access.time", + "on_failure": [ + { + "append": { + "field": "error.message", + "value": "{{ _ingest.on_failure_message }}" + } + } + ] + } + }, + { + "remove": { + "field": "nginx.access.time" + } + }, + { + "user_agent": { + "field": "user_agent.original", + "ignore_missing": true + } + }, + { + "geoip": { + "field": "source.ip", + "target_field": "source.geo", + "ignore_missing": true + } + }, + { + "geoip": { + "properties": [ + "asn", + "organization_name" + ], + "ignore_missing": true, + "database_file": "GeoLite2-ASN.mmdb", + "field": "source.ip", + "target_field": "source.as" + } + }, + { + "rename": { + "ignore_missing": true, + "field": "source.as.asn", + "target_field": "source.as.number" + } + }, + { + "rename": { + "field": "source.as.organization_name", + "target_field": "source.as.organization.name", + "ignore_missing": true + } + }, + { + "set": { + "value": "event", + "field": "event.kind" + } + }, + { + "append": { + "field": "event.category", + "value": "web" + } + }, + { + "append": { + "field": "event.type", + "value": "access" + } + }, + { + "set": { + "field": "event.outcome", + "value": "success", + "if": "ctx?.http?.response?.status_code != null && ctx.http.response.status_code < 400" + } + }, + { + "set": { + "field": "event.outcome", + "value": "failure", + "if": "ctx?.http?.response?.status_code != null && ctx.http.response.status_code >= 400" + } + }, + { + "append": { + "value": "{{source.ip}}", + "if": "ctx?.source?.ip != null", + "field": "related.ip" + } + }, + { + "append": { + "value": "{{destination.ip}}", + "if": "ctx?.destination?.ip != null", + "field": "related.ip" + } + }, + { + "append": { + "value": "{{user.name}}", + "if": "ctx?.user?.name != null", + "field": "related.user" + } + }, + { + "script": { + "lang": "painless", + "description": "This script processor iterates over the whole document to remove fields with null values.", + "source": "void handleMap(Map map) {\n for (def x : map.values()) {\n if (x instanceof Map) {\n handleMap(x);\n } else if (x instanceof List) {\n handleList(x);\n }\n }\n map.values().removeIf(v -> v == null);\n}\nvoid handleList(List list) {\n for (def x : list) {\n if (x instanceof Map) {\n handleMap(x);\n } else if (x instanceof List) {\n handleList(x);\n }\n }\n}\nhandleMap(ctx);\n" + } + } + ], + "on_failure": [ + { + "set": { + "field": "error.message", + "value": "{{ _ingest.on_failure_message }}" + } + } + ] + } \ No newline at end of file diff --git a/jcloud/playbooks/roles/filebeat_elasticsearch/tasks/main.yml b/jcloud/playbooks/roles/filebeat_elasticsearch/tasks/main.yml new file mode 100644 index 0000000..3c9e17e --- /dev/null +++ b/jcloud/playbooks/roles/filebeat_elasticsearch/tasks/main.yml @@ -0,0 +1,44 @@ +- name: Wait For Elasticsearch + wait_for: + port: 9200 + +- name: Wait For Kibana + wait_for: + port: 5601 + +- name: Wait For Kibana To Start + command: "curl --silent http://localhost:5601/api/status" + register: result + until: result.stdout.find("success") != -1 + retries: 60 + 
delay: 1 + changed_when: false + +- name: Setup Filebeat Indexes + command: filebeat setup -e -E setup.ilm.overwrite=true -E 'output.elasticsearch.hosts=["localhost:9200"]' -E output.elasticsearch.path="/" -E output.elasticsearch.protocol=http --index-management + +- name: Setup Filebeat Ingest Pipelines + command: filebeat setup -e --pipelines --modules nginx,system,mysql + +- name: Setup Filebeat Dashboards + command: filebeat setup -e --dashboards + +- name: Setup Monitor Ingest Pipeline + uri: + url: http://localhost:9200/_ingest/pipeline/monitor + method: PUT + body: "{{ lookup('file', 'monitor.json') }}" + body_format: json + +- name: Setup NGINX Ingest Pipeline + uri: + url: http://localhost:9200/_ingest/pipeline/nginx + method: PUT + body: "{{ lookup('file', 'nginx.json') }}" + body_format: json + +- name: Disable Filebeat Service + systemd: + name: filebeat + enabled: no + state: stopped diff --git a/jcloud/playbooks/roles/filebeat_rename/tasks/main.yml b/jcloud/playbooks/roles/filebeat_rename/tasks/main.yml new file mode 100644 index 0000000..8795f02 --- /dev/null +++ b/jcloud/playbooks/roles/filebeat_rename/tasks/main.yml @@ -0,0 +1,12 @@ +--- +- name: Setup Filebeat + template: + src: ../../filebeat/templates/filebeat.yml + dest: /etc/filebeat/filebeat.yml + +- name: Restart Filebeat Daemon + systemd: + name: filebeat + daemon_reload: true + enabled: yes + state: restarted diff --git a/jcloud/playbooks/roles/frankfurter/README.md b/jcloud/playbooks/roles/frankfurter/README.md new file mode 100644 index 0000000..562f80f --- /dev/null +++ b/jcloud/playbooks/roles/frankfurter/README.md @@ -0,0 +1,103 @@ +# Guide to Setup Frankfurter +### Install Docker +#### Setup Docker Repository +```sh +# Add Docker's official GPG key: +sudo apt-get update +sudo apt-get install ca-certificates curl +sudo install -m 0755 -d /etc/apt/keyrings +sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc +sudo chmod a+r /etc/apt/keyrings/docker.asc + +# Add the repository to Apt sources: +echo \ + "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \ + $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | \ + sudo tee /etc/apt/sources.list.d/docker.list > /dev/null +sudo apt-get update +``` + +#### Install Docker + +```sh +sudo apt-get install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin +``` +Reference: https://docs.docker.com/engine/install/ubuntu/ + +#### Install Frankfurter + +Copy `docker-compose.yml` from `/templates/`. 
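+ +Concretely (the target directory here is only an assumption, matching what the frankfurter Ansible role in this repo uses), this amounts to: + +```sh +mkdir -p /home/jingrow/frankfurter +cp templates/docker-compose.yml /home/jingrow/frankfurter/docker-compose.yml +```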
+ +The compose configuration (`docker-compose.yml`) is prepared to match the `docker run` command from the docs + +```sh +docker run -d -p 8080:8080 -e "DATABASE_URL=" --name frankfurter hakanensari/frankfurter +``` +#### Start Frankfurter + +```sh +docker compose up -d +``` + +### Setup TLS +#### Setup NGINX + +Place a minimal server block in `/etc/nginx/conf.d` + +```nginx +server { + server_name frankfurter.jingrow.cloud; + location / { + proxy_pass http://127.0.0.1:8080; + } +} +``` +#### Remove Default NGINX site +```sh +rm /etc/nginx/sites-enabled/default +rm /etc/nginx/sites-available/default +rm -rf /usr/share/nginx/html/ +``` + +#### Setup Certbot + +```sh +sudo python3 -m venv /opt/certbot/ +sudo /opt/certbot/bin/pip install certbot +ln -s /opt/certbot/bin/certbot /usr/bin/certbot +certbot --nginx +``` + + +#### Setup Certbot renewal cronjob +```sh +echo "0 0,12 * * * root /opt/certbot/bin/python -c 'import random; import time; time.sleep(random.random() * 3600)' && sudo certbot renew -q" | sudo tee -a /etc/crontab > /dev/null +``` + +Reference: https://certbot.eff.org/instructions?ws=nginx&os=ubuntufocal + +#### Add frankfurter to blocked subdomains on jcloud.jingrow.com +https://jingrow.com/app/blocked-domain/frankfurter + +### Done +```sh +curl -s https://frankfurter.jingrow.cloud/latest | jq +``` + +```json +{ + "amount": 1.0, + "base": "EUR", + "date": "2024-10-04", + "rates": { + "AUD": 1.6121, + "CHF": 0.9394, + "GBP": 0.83735, + "CNY": 92.61, + "JPY": 161.69, + "SGD": 1.4314, + "USD": 1.1029, + "ZAR": 19.2809 + } +} +``` \ No newline at end of file diff --git a/jcloud/playbooks/roles/frankfurter/tasks/main.yml b/jcloud/playbooks/roles/frankfurter/tasks/main.yml new file mode 100644 index 0000000..f9d0a00 --- /dev/null +++ b/jcloud/playbooks/roles/frankfurter/tasks/main.yml @@ -0,0 +1,38 @@ +--- +- name: Create Frankfurter Directory + become: yes + become_user: jingrow + file: + dest: /home/jingrow/frankfurter + state: directory + +- name: Create Frankfurter Compose File + become: yes + become_user: jingrow + template: + src: docker-compose.yml + dest: /home/jingrow/frankfurter/docker-compose.yml + +- name: Update APT Cache + apt: + update_cache: yes + +- name: Install Docker Compose + apt: + name: docker-compose-plugin + state: latest + +- name: Start Frankfurter + become: yes + become_user: jingrow + command: docker-compose up -d + args: + chdir: /home/jingrow/frankfurter + +- name: Setup NGINX Proxy for Frankfurter + become: yes + become_user: jingrow + # This isn't implemented yet + command: /home/jingrow/agent/env/bin/agent setup frankfurter + args: + chdir: /home/jingrow/agent diff --git a/jcloud/playbooks/roles/frankfurter/templates/docker-compose.yml b/jcloud/playbooks/roles/frankfurter/templates/docker-compose.yml new file mode 100644 index 0000000..c1e9ee0 --- /dev/null +++ b/jcloud/playbooks/roles/frankfurter/templates/docker-compose.yml @@ -0,0 +1,27 @@ +services: + postgres: + image: postgres:16-alpine + restart: always + volumes: + - postgres:/var/lib/postgresql/data + environment: + - POSTGRES_USER=postgres + - POSTGRES_PASSWORD=postgres + - POSTGRES_DB=postgres + healthcheck: + test: ["CMD-SHELL", "pg_isready -U postgres"] + start_period: 1m + + frankfurter: + image: hakanensari/frankfurter + restart: always + ports: + - 8080:8080 + depends_on: + postgres: + condition: service_healthy + environment: + - DATABASE_URL=postgres://postgres:postgres@postgres/postgres + +volumes: + postgres: diff --git a/jcloud/playbooks/roles/get_apps/tasks/main.yml 
b/jcloud/playbooks/roles/get_apps/tasks/main.yml new file mode 100644 index 0000000..49312a1 --- /dev/null +++ b/jcloud/playbooks/roles/get_apps/tasks/main.yml @@ -0,0 +1,45 @@ +--- +- name: Get Bench App Version + become: yes + become_user: jingrow + command: bench version --format json + ignore_errors: True + register: versions + args: + chdir: "{{ bench_path }}" + +- name: Get Apps from Current Bench + command: python3 -c 'import json;data=json.loads({{ versions.stdout | to_json }});print([x["app"] for x in data])' + register: apps + ignore_errors: True + +- name: Get Versions from Current Bench + command: python3 -c 'import json;data=json.loads({{ versions.stdout | to_json }});print(data)' + ignore_errors: False + +- name: Get Remotes of Current Apps + command: git ls-remote --get-url + register: remote + args: + chdir: "{{ bench_path }}/apps/{{ item }}" + with_items: + - "{{ apps.stdout }}" + +- name: Generate App Branches + shell: git branch --show-current + register: branches + args: + chdir: "{{ bench_path }}/apps/{{ item }}" + with_items: + - "{{ apps.stdout }}" + +- name: Generate Apps List + set_fact: + rg_apps: "{{ rg_apps | default([])+ [{ 'app' : item.0, 'remote' : item.1, 'branch': item.2 }] }}" + with_together: + - "{{ apps.stdout }}" + - "{{ remote.results | map(attribute='stdout_lines') | list }}" + - "{{ branches.results | map(attribute='stdout_lines') | list }}" + +- name: Get Apps for Release Group + command: python3 -c 'import json;data=json.loads(r"""{{ rg_apps | to_json }}""");print(data)' \ No newline at end of file diff --git a/jcloud/playbooks/roles/get_files/tasks/main.yml b/jcloud/playbooks/roles/get_files/tasks/main.yml new file mode 100644 index 0000000..dad8d4a --- /dev/null +++ b/jcloud/playbooks/roles/get_files/tasks/main.yml @@ -0,0 +1,12 @@ +--- +- name: Move files from Existing to new Site + become: yes + become_user: jingrow + command: cp -R {{ bench_path }}/sites/{{ item.0 }}/public {{ bench_path }}/sites/{{ item.0 }}/private /home/jingrow/benches/{{ item.1 }}/sites/{{ item.2 }} + with_together: + - "{{ ex_sites }}" + - "{{ new_benches }}" + - "{{ new_sites }}" + ignore_errors: yes + async: 300 + poll: 5 \ No newline at end of file diff --git a/jcloud/playbooks/roles/get_sites/tasks/main.yml b/jcloud/playbooks/roles/get_sites/tasks/main.yml new file mode 100644 index 0000000..2d5f742 --- /dev/null +++ b/jcloud/playbooks/roles/get_sites/tasks/main.yml @@ -0,0 +1,34 @@ +--- +- name: Get Sites from Current Bench + become: yes + become_user: jingrow + command: bench --site all list-apps --format json + ignore_errors: True + args: + chdir: "{{ bench_path }}" + +- name: Get Site list from Bench + command: bench execute jingrow.utils.get_sites + register: sites + ignore_errors: True + args: + chdir: "{{ bench_path }}" + +- name: Load Site Configs from Existing Sites + command: cat {{item}}/site_config.json + ignore_errors: True + loop: "{{ sites.stdout }}" + register: confs + args: + chdir: "{{ bench_path }}/sites" + +- name: Load Configs + set_fact: + configs: "{{ configs | default([])+ [{ 'site' : item.0, 'config' : item.1 }] }}" + ignore_errors: True + with_together: + - "{{ sites.stdout }}" + - "{{ confs.results | map(attribute='stdout') }}" + +- name: Get Site Configs from Existing Sites + command: python3 -c 'import json;data=json.loads(r"""{{ configs | to_json }}""");print(data)' \ No newline at end of file diff --git a/jcloud/playbooks/roles/gpg_config/tasks/main.yml b/jcloud/playbooks/roles/gpg_config/tasks/main.yml new file mode 100644 index 
0000000..6993320 --- /dev/null +++ b/jcloud/playbooks/roles/gpg_config/tasks/main.yml @@ -0,0 +1,16 @@ +--- +- name: Ensure GPG keys are configured | Ubuntu + command: gpg --quiet --with-fingerprint /etc/apt/trusted.gpg + +- name: Ensure package manager repositories are configured + block: + - name: "List package manager repositories" + shell: apt list + changed_when: no + register: repolist + + - name: "Display package manager repositories list" + debug: + msg: + - "Please check against site policy repos listed below match expected:" + - "{{ repolist.stdout_lines }}" diff --git a/jcloud/playbooks/roles/grafana/tasks/main.yml b/jcloud/playbooks/roles/grafana/tasks/main.yml new file mode 100644 index 0000000..a20a9ac --- /dev/null +++ b/jcloud/playbooks/roles/grafana/tasks/main.yml @@ -0,0 +1,80 @@ +--- +- name: Create Grafana Directories + become: yes + become_user: jingrow + file: + path: '{{ item }}' + state: directory + mode: 0755 + with_items: + - /home/jingrow/grafana + - /home/jingrow/grafana/provisioning + - /home/jingrow/grafana/provisioning/datasources + - /home/jingrow/grafana/provisioning/dashboards + - /home/jingrow/grafana/logs + - /home/jingrow/grafana/data + - /home/jingrow/grafana/data/dashboards + - /home/jingrow/grafana/data/plugins + +- name: Download Grafana Archive + become: yes + become_user: jingrow + unarchive: + src: https://dl.grafana.com/oss/release/grafana-10.3.1.linux-amd64.tar.gz + dest: /tmp + remote_src: yes + +- name: Copy Grafana Directories + become: yes + become_user: jingrow + copy: + src: '/tmp/grafana-v10.3.1/{{ item }}/' + dest: '/home/jingrow/grafana/{{ item }}/' + mode: 0755 + remote_src: yes + with_items: + - bin + - conf + - plugins-bundled + - public + +- name: Provision Prometheus Datasource + become: yes + become_user: jingrow + template: + src: prometheus.yml + dest: /home/jingrow/grafana/provisioning/datasources/prometheus.yml + mode: 0640 + +- name: Configure Grafana + become: yes + become_user: jingrow + template: + src: grafana.ini + dest: /home/jingrow/grafana/grafana.ini + mode: 0640 + +- name: Create Grafana Systemd Service File + template: + src: grafana.service + dest: /etc/systemd/system/grafana.service + owner: root + group: root + mode: 0644 + +- name: Restart Grafana Service + systemd: + daemon_reload: true + name: grafana + enabled: yes + state: restarted + +- name: Setup Grafana Authentication + become: yes + become_user: jingrow + command: 'htpasswd -Bbc /home/jingrow/agent/nginx/grafana.htpasswd jingrow {{ grafana_password }}' + +- name: Setup Grafana HTTP Authentication + become: yes + become_user: jingrow + command: 'htpasswd -Bbc /home/jingrow/agent/nginx/grafana-ui.htpasswd admin {{ grafana_password }}' diff --git a/jcloud/playbooks/roles/grafana/templates/grafana.ini b/jcloud/playbooks/roles/grafana/templates/grafana.ini new file mode 100644 index 0000000..38fc55a --- /dev/null +++ b/jcloud/playbooks/roles/grafana/templates/grafana.ini @@ -0,0 +1,41 @@ +app_mode = production +instance_name = {{ server }} + +# Directories +[paths] +data = /home/jingrow/grafana/data +logs = /home/jingrow/grafana/logs +plugins = /home/jingrow/grafana/plugins +datasources = /home/jingrow/grafana/datasources + +# HTTP options +[server] +protocol = http +http_addr = 127.0.0.1 +http_port = 3000 +domain = {{ server }} +root_url = https://{{ server }}/grafana +serve_from_sub_path = true + +# Security +[security] +admin_user = admin +admin_password = {{ grafana_password }} +allow_sign_up = false +default_theme = dark + +# Dashboards +[dashboards] 
+versions_to_keep = 20 + +[dashboards.json] +enabled = true +path = /home/jingrow/grafana/dashboards + +# Logging +[log] +mode = 'console, file' +level = 'info' + +[metrics] +enabled = true diff --git a/jcloud/playbooks/roles/grafana/templates/grafana.service b/jcloud/playbooks/roles/grafana/templates/grafana.service new file mode 100644 index 0000000..1ee986b --- /dev/null +++ b/jcloud/playbooks/roles/grafana/templates/grafana.service @@ -0,0 +1,26 @@ +[Unit] +Description=Grafana +After=network-online.target + +[Service] +User=jingrow +Group=jingrow +Type=simple + +PIDFile=/var/run/grafana.pid +ExecReload=/bin/kill -HUP $MAINPID + +WorkingDirectory=/home/jingrow/grafana +ExecStart=/home/jingrow/grafana/bin/grafana-server \ + --config=/home/jingrow/grafana/grafana.ini \ + cfg:default.paths.logs=/home/jingrow/grafana/logs \ + cfg:default.paths.data=/home/jingrow/grafana/data \ + cfg:default.paths.plugins=/home/jingrow/grafana/plugins \ + cfg:default.paths.provisioning=/home/jingrow/grafana/provisioning + +SyslogIdentifier=grafana +Restart=always + + +[Install] +WantedBy=multi-user.target diff --git a/jcloud/playbooks/roles/grafana/templates/prometheus.yml b/jcloud/playbooks/roles/grafana/templates/prometheus.yml new file mode 100644 index 0000000..8ab10c5 --- /dev/null +++ b/jcloud/playbooks/roles/grafana/templates/prometheus.yml @@ -0,0 +1,19 @@ +apiVersion: 1 + +datasources: + - name: Prometheus + type: prometheus + access: proxy + url: http://127.0.0.1:9090/prometheus + isDefault: true + jsonData: + httpMethod: 'POST' + manageAlerts: true + prometheusType: Prometheus + prometheusVersion: 2.49.1 + cacheLevel: 'High' + disableRecordingRules: false + incrementalQueryOverlapWindow: 10m + exemplarTraceIdDestinations: + editable: true + version: 1 diff --git a/jcloud/playbooks/roles/keys/tasks/main.yml b/jcloud/playbooks/roles/keys/tasks/main.yml new file mode 100644 index 0000000..d81b398 --- /dev/null +++ b/jcloud/playbooks/roles/keys/tasks/main.yml @@ -0,0 +1,10 @@ +--- +- name: Create SSH Keys for Jingrow User + user: + name: jingrow + generate_ssh_key: yes + +- name: Create SSH Keys for root User + user: + name: root + generate_ssh_key: yes diff --git a/jcloud/playbooks/roles/kibana/tasks/main.yml b/jcloud/playbooks/roles/kibana/tasks/main.yml new file mode 100644 index 0000000..af5402e --- /dev/null +++ b/jcloud/playbooks/roles/kibana/tasks/main.yml @@ -0,0 +1,17 @@ +--- +- name: Install Kibana + apt: + name: kibana + state: present + +- name: Setup Kibana + template: + src: kibana.yml + dest: /etc/kibana/kibana.yml + +- name: Restart Kibana Daemon + systemd: + name: kibana + daemon_reload: true + enabled: yes + state: restarted diff --git a/jcloud/playbooks/roles/kibana/templates/kibana.yml b/jcloud/playbooks/roles/kibana/templates/kibana.yml new file mode 100644 index 0000000..b8434a2 --- /dev/null +++ b/jcloud/playbooks/roles/kibana/templates/kibana.yml @@ -0,0 +1,2 @@ +server.host: "localhost" +server.basePath: "/kibana" diff --git a/jcloud/playbooks/roles/malware_scan/tasks/main.yml b/jcloud/playbooks/roles/malware_scan/tasks/main.yml new file mode 100644 index 0000000..d28e6f6 --- /dev/null +++ b/jcloud/playbooks/roles/malware_scan/tasks/main.yml @@ -0,0 +1,28 @@ +- name: Update APT Cache + apt: + update_cache: yes + ignore_errors: yes + +- name: Update ClamAV and related packages so updating of virus definitions work + apt: + pkg: + - clamav + - clamav-base + state: + latest + +- name: Disable automatic update of virus definitions + service: + name: clamav-freshclam + state: 
stopped + enabled: no + +- name: Update freshclam + command: freshclam + async: 3600 + poll: 5 + +- name: Scan home directory + command: 'clamscan --max-filesize=10M --max-scansize=10M --infected --recursive=yes /home' + async: 32000 + poll: 300 diff --git a/jcloud/playbooks/roles/mariadb/tasks/main.yml b/jcloud/playbooks/roles/mariadb/tasks/main.yml new file mode 100644 index 0000000..ed4080b --- /dev/null +++ b/jcloud/playbooks/roles/mariadb/tasks/main.yml @@ -0,0 +1,139 @@ +- name: Add MariaDB Repository Key + apt_key: + url: https://mariadb.org/mariadb_release_signing_key.pgp + state: present + +- name: Add MariaDB Repository + apt_repository: + repo: deb https://mirror.rackspace.com/mariadb/repo/10.6/ubuntu {{ ansible_distribution_release }} main + state: present + update_cache: true + +- name: Update APT Cache + apt: + update_cache: yes + +- name: Use Debian Unattended Package Installation Mode + shell: export DEBIAN_FRONTEND=noninteractive + changed_when: false + +- name: Install MariaDB + apt: + pkg: + - mariadb-server + - mariadb-client + - libmariadbclient18 + state: present + +- name: Install MySQLdb Python Package + apt: + pkg: + - python3-mysqldb + state: present + +- name: Add MariaDB Configuration File + template: + src: mariadb.cnf + dest: /etc/mysql/conf.d/jingrow.cnf + owner: root + group: root + mode: 0644 + +- name: Set my.cnf to load jingrow.cnf file + lineinfile: + dest: /etc/mysql/my.cnf + line: '!includedir /etc/mysql/conf.d/' + +# In MariaDB 10.6, /etc/mysql/mariadb.conf.d/ supersedes /etc/mysql/conf.d/. The fix is to add !includedir /etc/mysql/conf.d/ to the end of /etc/mysql/my.cnf. +# The !includedir /etc/mysql/conf.d/ entry sits on the second-to-last line of /etc/mysql/my.cnf, so the two tasks below remove it and re-add it at the end. +- name: Remove /etc/mysql/conf.d/ from the file + lineinfile: + path: /etc/mysql/my.cnf + regexp: '^(!includedir /etc/mysql/conf.d/)$' + state: absent + +- name: Add /etc/mysql/conf.d/ to the end of the file + lineinfile: + path: /etc/mysql/my.cnf + line: '!includedir /etc/mysql/conf.d/' + +- name: Set Open Files Count Limit for MariaDB + lineinfile: + dest: /lib/systemd/system/mariadb.service + regexp: '^LimitNOFILE(\s*)=(\s*)\d+' + line: 'LimitNOFILE = infinity' + insertafter: '\[Service\]' + state: present + +- name: Set MariaDB to depend on Mounts + template: + src: mounts.conf + dest: /etc/systemd/system/mariadb.service.d/mounts.conf + owner: root + group: root + mode: 0644 + when: mariadb_depends_on_mounts | default(false) | bool + +- name: Restart MariaDB Service + systemd: + daemon_reload: true + name: mysql + state: restarted + enabled: yes + +- name: Set MariaDB root Password + mysql_user: + login_user: root + login_password: '{{ mariadb_root_password }}' + check_implicit_admin: yes + name: root + host: '{{ item }}' + priv: '*.*:ALL,GRANT' + password: '{{ mariadb_root_password }}' + state: present + with_items: + - localhost + - 127.0.0.1 + - ::1 + - '%' + +- name: Add .my.cnf MariaDB Configuration File + template: + src: my.cnf + dest: /root/.my.cnf + owner: root + group: root + mode: 0600 + +- name: Remove MariaDB Test Database + mysql_db: + name: test + state: absent + +- name: Remove MariaDB Test Users + mysql_user: + name: test + state: absent + +- name: Remove Anonymous MariaDB Users + mysql_user: + name: '' + state: absent + +- name: Add Jingrow User to MySQL Group + user: + name: jingrow + groups: + - mysql + append: true + +- name: Create Monitor User + mysql_user: + login_user: root + login_password: '{{ mariadb_root_password }}' + check_implicit_admin: yes +
name: monitor + host: '%' + priv: 'sys.*:SELECT' + password: 'monitor' + state: present diff --git a/jcloud/playbooks/roles/mariadb/templates/mariadb.cnf b/jcloud/playbooks/roles/mariadb/templates/mariadb.cnf new file mode 100644 index 0000000..71cec1f --- /dev/null +++ b/jcloud/playbooks/roles/mariadb/templates/mariadb.cnf @@ -0,0 +1,70 @@ +[mysqld] + +# GENERAL # +default-storage-engine = InnoDB + +# MyISAM # +key-buffer-size = 32M +myisam-recover-options = FORCE + +# SAFETY # +max-connect-errors = 1000000 +innodb = FORCE + +# DATA STORAGE # +datadir = /var/lib/mysql/ + +# BINARY LOGGING # +log-bin = /var/lib/mysql/mysql-bin +log_bin_index = /var/lib/mysql/mysql-bin.index +expire-logs-days = 14 +sync-binlog = 1 + +# REPLICATION # +server-id = {{ server_id }} + +# CACHES AND LIMITS # +tmp-table-size = 32M +max-heap-table-size = 32M +query-cache-type = 0 +query-cache-size = 0 +max-connections = 200 +thread-cache-size = 50 +open-files-limit = 65535 +table-definition-cache = 4096 +table-open-cache = 10240 +tmp-disk-table-size = 5120M +max-statement-time = 3600 +extra_port = 3307 +extra_max_connections = 5 + +# INNODB # +innodb-flush-method = O_DIRECT +innodb-log-files-in-group = 2 +innodb-log-file-size = 512M +innodb-flush-log-at-trx-commit = 1 +innodb-file-per-table = 1 +innodb-buffer-pool-size = {{ (ansible_memtotal_mb * 0.6)|round|int }}M +innodb-file-format = barracuda +innodb-large-prefix = 1 +innodb-old-blocks-time = 5000 +innodb_print_all_deadlocks = on +collation-server = utf8mb4_unicode_ci +character-set-server = utf8mb4 +character-set-client-handshake = FALSE +max_allowed_packet = 512M + +# LOGGING # +log-error = /var/lib/mysql/mysql-error.log +log-queries-not-using-indexes = 0 +slow-query-log = 1 +slow-query-log-file = /var/lib/mysql/mysql-slow.log + +# Networking +bind-address = {{ private_ip }} + +[mysql] +default-character-set = utf8mb4 + +[mysqldump] +max_allowed_packet = 512M diff --git a/jcloud/playbooks/roles/mariadb/templates/mounts.conf b/jcloud/playbooks/roles/mariadb/templates/mounts.conf new file mode 100644 index 0000000..195ecde --- /dev/null +++ b/jcloud/playbooks/roles/mariadb/templates/mounts.conf @@ -0,0 +1,14 @@ +[Unit] +# If MariaDB gets activated, then the mounts will be activated as well. +# If one of the mounts fails to activate, MariaDB will not be started. +# If one of the mounts is explicitly stopped (or restarted), MariaDB will be stopped (or restarted). + +# BindsTo= imposes a stronger condition than Requires=. +# If one of the mounts is stopped, MariaDB will be stopped too. + +# When used in conjunction with After=, +# the mounts strictly have to be in active state for MariaDB to also be in active state.
+# Reference: https://www.freedesktop.org/software/systemd/man/latest/systemd.unit.html#BindsTo= + +After=etc-mysql.mount var-lib-mysql.mount +BindsTo=etc-mysql.mount var-lib-mysql.mount \ No newline at end of file diff --git a/jcloud/playbooks/roles/mariadb/templates/my.cnf b/jcloud/playbooks/roles/mariadb/templates/my.cnf new file mode 100644 index 0000000..7ed7c99 --- /dev/null +++ b/jcloud/playbooks/roles/mariadb/templates/my.cnf @@ -0,0 +1,5 @@ +[client] +host={{ private_ip }} +port=3306 +user=root +password={{ mariadb_root_password }} diff --git a/jcloud/playbooks/roles/mariadb_10_4_to_10_6/tasks/main.yml b/jcloud/playbooks/roles/mariadb_10_4_to_10_6/tasks/main.yml new file mode 100644 index 0000000..fb4729a --- /dev/null +++ b/jcloud/playbooks/roles/mariadb_10_4_to_10_6/tasks/main.yml @@ -0,0 +1,85 @@ +- name: Remove MariaDB Repository File + file: + path: /etc/apt/sources.list.d/mariadb.list + state: absent + +- name: Add MariaDB Repository Key + apt_key: + url: https://mariadb.org/mariadb_release_signing_key.pgp + state: present + +# This seems to be breaking on some database servers +# TODO: Move to container deployments +- name: Add NGINX Repository Key + apt_key: + url: https://nginx.org/keys/nginx_signing.key + state: present + +- name: Add MariaDB Repository + apt_repository: + repo: deb https://mirror.rackspace.com/mariadb/repo/10.6/ubuntu {{ ansible_distribution_release }} main + state: present + update_cache: true + +- name: Update APT Cache + apt: + update_cache: yes + +- name: Use Debian Unattended Package Installation Mode + shell: export DEBIAN_FRONTEND=noninteractive + changed_when: false + +- name: Stop MariaDB Service + systemd: + name: mariadb + state: stopped + +- name: Install MariaDB + apt: + pkg: + - mariadb-server + - mariadb-client + - libmariadbclient18 + state: latest + register: result + until: result.failed == false + retries: 5 + delay: 120 + +# In MariaDB 10.6, /etc/mysql/mariadb.conf.d/ supersedes /etc/mysql/conf.d/. The fix is to add !includedir /etc/mysql/conf.d/ to the end of /etc/mysql/my.cnf. +# The !includedir /etc/mysql/conf.d/ entry sits on the second-to-last line of /etc/mysql/my.cnf, so the two tasks below remove it and re-add it at the end.
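+# A quick, illustrative sanity check after the two tasks below have run:
+#   tail -n 1 /etc/mysql/my.cnf
+# should print exactly: !includedir /etc/mysql/conf.d/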
+- name: Remove /etc/mysql/conf.d/ from the file + lineinfile: + path: /etc/mysql/my.cnf + regexp: '^(!includedir /etc/mysql/conf.d/)$' + state: absent + +- name: Add /etc/mysql/conf.d/ to the end of the file + lineinfile: + path: /etc/mysql/my.cnf + line: '!includedir /etc/mysql/conf.d/' + +- name: Set Open Files Count Limit for MariaDB + lineinfile: + dest: /lib/systemd/system/mariadb.service + regexp: '^LimitNOFILE(\s*)=(\s*)\d+' + line: 'LimitNOFILE = infinity' + insertafter: '\[Service\]' + state: present + +- name: Start MariaDB Service + systemd: + daemon_reload: true + name: mariadb + state: started + enabled: yes + +- name: Run MariaDB Upgrade + command: mariadb-upgrade + +- name: Restart MariaDB Service + systemd: + daemon_reload: true + name: mariadb + state: restarted + enabled: yes diff --git a/jcloud/playbooks/roles/mariadb_10_6/tasks/main.yml b/jcloud/playbooks/roles/mariadb_10_6/tasks/main.yml new file mode 100644 index 0000000..6d12e34 --- /dev/null +++ b/jcloud/playbooks/roles/mariadb_10_6/tasks/main.yml @@ -0,0 +1,90 @@ +--- +- name: Use Debian Unattended Package Installation Mode + shell: export DEBIAN_FRONTEND=noninteractive + changed_when: false + +- name: Remove MariaDB Repository Files + file: + path: '{{ item }}' + state: absent + with_items: + - /etc/apt/sources.list.d/mariadb.list + - /etc/apt/sources.list.d/packages_jingrow_cloud_mariadb_10_6.list + - /etc/apt/sources.list.d/mirror_rackspace_com_mariadb_repo_10_6_ubuntu.list + - /etc/apt/sources.list.d/deb_mariadb_org_10_6_ubuntu.list + +- name: Add MariaDB Repository Key + apt_key: + url: https://mariadb.org/mariadb_release_signing_key.pgp + state: present + +- name: Add MariaDB Repository + apt_repository: + repo: deb https://mirror.rackspace.com/mariadb/repo/10.6/ubuntu {{ ansible_distribution_release }} main + state: present + +- name: Add MariaDB Debug Symbols Repository + apt_repository: + repo: deb https://mirror.rackspace.com/mariadb/repo/10.6/ubuntu {{ ansible_distribution_release }} main/debug + state: present + +- name: Update APT Cache + apt: + update_cache: yes + +- name: Install MariaDB + apt: + pkg: + - mariadb-server + - mariadb-client + - libmariadbclient18 + state: latest + +- name: Set Open Files Count Limit for MariaDB + lineinfile: + dest: /lib/systemd/system/mariadb.service + regexp: '^LimitNOFILE(\s*)=(\s*)\d+' + line: 'LimitNOFILE = infinity' + insertafter: '\[Service\]' + state: present + +- name: Restart MariaDB Service + systemd: + daemon_reload: true + name: mysql + state: restarted + enabled: yes + +- name: Install MariaDB Debug Symbols + apt: + package: mariadb-server-core-10.6-dbgsym + state: latest + +- name: Add Ubuntu Debug Symbols Repository Key + apt_key: + id: F2EDC64DC5AEE1F6B9C621F0C8CAB6595FDFF622 + keyserver: keyserver.ubuntu.com + state: present + +- name: Add Ubuntu Debug Symbols Repositories + apt_repository: + repo: '{{ item }}' + state: present + with_items: + - 'deb http://ddebs.ubuntu.com {{ ansible_distribution_release }} main restricted universe multiverse' + - 'deb http://ddebs.ubuntu.com {{ ansible_distribution_release }}-updates main restricted universe multiverse' + - 'deb http://ddebs.ubuntu.com {{ ansible_distribution_release }}-proposed main restricted universe multiverse' + +- name: Update APT Cache + apt: + update_cache: yes + +- name: Install Ubuntu Debug Symbols + apt: + pkg: + - libc6-dbg + - libstdc++6-10-dbg + - lib32stdc++6-10-dbg + - libx32stdc++6-10-dbg + - libstdc++6-dbgsym + state: latest diff --git 
a/jcloud/playbooks/roles/mariadb_10_6_16_jingrow/tasks/main.yml b/jcloud/playbooks/roles/mariadb_10_6_16_jingrow/tasks/main.yml new file mode 100644 index 0000000..8885483 --- /dev/null +++ b/jcloud/playbooks/roles/mariadb_10_6_16_jingrow/tasks/main.yml @@ -0,0 +1,58 @@ +- name: Remove MariaDB Repository File + file: + path: /etc/apt/sources.list.d/mariadb.list + state: absent + +- name: Remove MariaDB Repository File + file: + path: /etc/apt/sources.list.d/mirror_rackspace_com_mariadb_repo_10_6_ubuntu.list + state: absent + +- name: Remove MariaDB Repository File + file: + path: /etc/apt/sources.list.d/deb_mariadb_org_10_6_ubuntu.list + state: absent + +- name: Add MariaDB Repository Key + apt_key: + url: https://packages.jingrow.cloud/jingrow.gpg.key + state: present + +- name: Add MariaDB Repository + apt_repository: + repo: deb https://packages.jingrow.cloud/mariadb/10.6 focal main + state: present + update_cache: true + +- name: Update APT Cache + apt: + update_cache: yes + +- name: Use Debian Unattended Package Installation Mode + shell: export DEBIAN_FRONTEND=noninteractive + changed_when: false + +- name: Install MariaDB + apt: + pkg: + - mariadb-server-10.6 + - mariadb-server-core-10.6 + - mariadb-client-10.6 + - mariadb-client-core-10.6 + - libmariadbclient18 + state: latest + +- name: Set Open Files Count Limit for MariaDB + lineinfile: + dest: /lib/systemd/system/mariadb.service + regexp: '^LimitNOFILE(\s*)=(\s*)\d+' + line: 'LimitNOFILE = infinity' + insertafter: '\[Service\]' + state: present + +- name: Restart MariaDB Service + systemd: + daemon_reload: true + name: mariadb + state: restarted + enabled: yes diff --git a/jcloud/playbooks/roles/mariadb_10_6_16_jingrow_debug_symbols/tasks/main.yml b/jcloud/playbooks/roles/mariadb_10_6_16_jingrow_debug_symbols/tasks/main.yml new file mode 100644 index 0000000..6721e31 --- /dev/null +++ b/jcloud/playbooks/roles/mariadb_10_6_16_jingrow_debug_symbols/tasks/main.yml @@ -0,0 +1,15 @@ +--- +- name: Add MariaDB Debug Symbols Repository + apt_repository: + repo: deb https://packages.jingrow.cloud/mariadb/10.6 focal main/debug + state: present + update_cache: true + +- name: Use Debian Unattended Package Installation Mode + shell: export DEBIAN_FRONTEND=noninteractive + changed_when: false + +- name: Install MariaDB + apt: + package: mariadb-server-core-10.6-dbgsym + state: latest diff --git a/jcloud/playbooks/roles/mariadb_change_root_password/tasks/main.yml b/jcloud/playbooks/roles/mariadb_change_root_password/tasks/main.yml new file mode 100644 index 0000000..8872627 --- /dev/null +++ b/jcloud/playbooks/roles/mariadb_change_root_password/tasks/main.yml @@ -0,0 +1,41 @@ +--- +- name: Delete .my.cnf MariaDB Configuration File + file: + path: /root/.my.cnf + state: absent + +- name: Change MariaDB root Password for localhost + mysql_user: + login_user: root + login_password: "{{ mariadb_old_root_password }}" + check_implicit_admin: yes + name: root + host: "{{ item }}" + priv: "*.*:ALL,GRANT" + password: "{{ mariadb_root_password }}" + state: present + with_items: + - localhost + +- name: Change MariaDB root Password for other hosts + mysql_user: + login_user: root + login_password: "{{ mariadb_root_password }}" + check_implicit_admin: yes + name: root + host: "{{ item }}" + priv: "*.*:ALL,GRANT" + password: "{{ mariadb_root_password }}" + state: present + with_items: + - 127.0.0.1 + - ::1 + - "%" + +- name: Add .my.cnf MariaDB Configuration File + template: + src: my.cnf + dest: /root/.my.cnf + owner: root + group: root + mode: 0600 
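For reference, the password rotation performed by the `mysql_user` tasks above is roughly equivalent to the following manual statements (a sketch only; `$OLD_PW` and `$NEW_PW` stand in for `mariadb_old_root_password` and `mariadb_root_password`):

```sh
# Rotate root@localhost first, authenticating with the old password
mysql -u root -p"$OLD_PW" -e "ALTER USER 'root'@'localhost' IDENTIFIED BY '$NEW_PW';"
# The remaining host entries (127.0.0.1, ::1, %) are then rotated using the new password
mysql -u root -p"$NEW_PW" -e "ALTER USER 'root'@'%' IDENTIFIED BY '$NEW_PW';"
```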
diff --git a/jcloud/playbooks/roles/mariadb_change_root_password/templates/my.cnf b/jcloud/playbooks/roles/mariadb_change_root_password/templates/my.cnf new file mode 100644 index 0000000..7ed7c99 --- /dev/null +++ b/jcloud/playbooks/roles/mariadb_change_root_password/templates/my.cnf @@ -0,0 +1,5 @@ +[client] +host={{ private_ip }} +port=3306 +user=root +password={{ mariadb_root_password }} diff --git a/jcloud/playbooks/roles/mariadb_change_root_password_secondary/tasks/main.yml b/jcloud/playbooks/roles/mariadb_change_root_password_secondary/tasks/main.yml new file mode 100644 index 0000000..36aee99 --- /dev/null +++ b/jcloud/playbooks/roles/mariadb_change_root_password_secondary/tasks/main.yml @@ -0,0 +1,35 @@ +--- +- name: Add .my.cnf MariaDB Configuration File + template: + src: my.cnf + dest: /root/.my.cnf + owner: root + group: root + mode: 0600 + +- name: Stop Secondary Thread + mysql_replication: + mode: stopslave + login_user: root + login_password: "{{ mariadb_root_password }}" + +- name: Change Primary Password + mysql_replication: + mode: changemaster + login_user: root + login_password: "{{ mariadb_root_password }}" + master_password: "{{ mariadb_root_password }}" + +- name: Start MariaDB Secondary Thread + mysql_replication: + mode: startslave + login_user: root + login_password: "{{ mariadb_root_password }}" + +- name: Check Secondary Status + mysql_replication: + mode: getslave + login_user: root + login_password: "{{ mariadb_root_password }}" + register: secondary_status + failed_when: 'secondary_status["Slave_IO_Running"] == "No" or secondary_status["Slave_SQL_Running"] == "No"' diff --git a/jcloud/playbooks/roles/mariadb_change_root_password_secondary/templates/my.cnf b/jcloud/playbooks/roles/mariadb_change_root_password_secondary/templates/my.cnf new file mode 100644 index 0000000..7ed7c99 --- /dev/null +++ b/jcloud/playbooks/roles/mariadb_change_root_password_secondary/templates/my.cnf @@ -0,0 +1,5 @@ +[client] +host={{ private_ip }} +port=3306 +user=root +password={{ mariadb_root_password }} diff --git a/jcloud/playbooks/roles/mariadb_memory_allocator/tasks/main.yml b/jcloud/playbooks/roles/mariadb_memory_allocator/tasks/main.yml new file mode 100644 index 0000000..c1d26c7 --- /dev/null +++ b/jcloud/playbooks/roles/mariadb_memory_allocator/tasks/main.yml @@ -0,0 +1,53 @@ +--- +- name: Update NGINX Repository Key + apt_key: + url: https://nginx.org/keys/nginx_signing.key + state: present + +- name: Install Memory Allocators + apt: + pkg: + - libjemalloc2 + - google-perftools + state: present + update_cache: yes + when: allocator != "system" + +- name: Find Memory Allocator Library + find: + paths: /usr/lib + recurse: true + file_type: file + patterns: "lib{{ allocator }}.so.*" + register: libraries + when: allocator != "system" + +- name: Set Memory Allocator + template: + src: allocator.conf + dest: /etc/systemd/system/mariadb.service.d/allocator.conf + owner: root + group: root + mode: 0644 + when: allocator != "system" + +- name: Remove Memory Allocator + file: + path: /etc/systemd/system/mariadb.service.d/allocator.conf + state: absent + when: allocator == "system" + +- name: Restart MariaDB + systemd: + daemon_reload: true + name: mariadb + enabled: yes + state: restarted + +- name: Show Memory Allocator + mysql_query: + login_user: root + login_password: "{{ mariadb_root_password }}" + login_db: mysql + query: + - "SHOW VARIABLES LIKE 'version_malloc_library'" diff --git a/jcloud/playbooks/roles/mariadb_memory_allocator/templates/allocator.conf 
b/jcloud/playbooks/roles/mariadb_memory_allocator/templates/allocator.conf new file mode 100644 index 0000000..1824481 --- /dev/null +++ b/jcloud/playbooks/roles/mariadb_memory_allocator/templates/allocator.conf @@ -0,0 +1,2 @@ +[Service] +Environment=LD_PRELOAD={{libraries.files[0].path}} diff --git a/jcloud/playbooks/roles/mariadb_rename/tasks/main.yml b/jcloud/playbooks/roles/mariadb_rename/tasks/main.yml new file mode 100644 index 0000000..aa0e7a9 --- /dev/null +++ b/jcloud/playbooks/roles/mariadb_rename/tasks/main.yml @@ -0,0 +1,52 @@ +--- +- name: Add MariaDB Configuration File + template: + src: ../../mariadb/templates/mariadb.cnf + dest: /etc/mysql/conf.d/jingrow.cnf + owner: root + group: root + mode: 0644 + +- name: Restart MariaDB Service + systemd: + daemon_reload: true + name: mysql + state: restarted + enabled: yes + +- name: Change MariaDB root Password for localhost + mysql_user: + # login_user: root + config_file: /root/.my.cnf + # login_password: "{{ mariadb_old_root_password }}" + check_implicit_admin: yes + name: root + host: "{{ item }}" + priv: "*.*:ALL,GRANT" + password: "{{ mariadb_root_password }}" + state: present + with_items: + - localhost + +- name: Add .my.cnf MariaDB Configuration File + template: + src: ../../mariadb/templates/my.cnf + dest: /root/.my.cnf + owner: root + group: root + mode: 0600 + +- name: Change MariaDB root Password for other hosts + mysql_user: + login_user: root + login_password: "{{ mariadb_root_password }}" + check_implicit_admin: yes + name: root + host: "{{ item }}" + priv: "*.*:ALL,GRANT" + password: "{{ mariadb_root_password }}" + state: present + with_items: + - 127.0.0.1 + - ::1 + - "%" diff --git a/jcloud/playbooks/roles/mariadb_systemd_limits/tasks/main.yml b/jcloud/playbooks/roles/mariadb_systemd_limits/tasks/main.yml new file mode 100644 index 0000000..500763e --- /dev/null +++ b/jcloud/playbooks/roles/mariadb_systemd_limits/tasks/main.yml @@ -0,0 +1,12 @@ +--- +- name: Add Systemd memory limit file + template: + src: memory-custom.conf + dest: /etc/systemd/system/mariadb.service.d/memory.conf + owner: root + group: root + mode: 0644 + +- name: Reload systemd config + systemd: + daemon_reload: yes diff --git a/jcloud/playbooks/roles/mariadb_systemd_limits/templates/memory-custom.conf b/jcloud/playbooks/roles/mariadb_systemd_limits/templates/memory-custom.conf new file mode 100644 index 0000000..31701cb --- /dev/null +++ b/jcloud/playbooks/roles/mariadb_systemd_limits/templates/memory-custom.conf @@ -0,0 +1,4 @@ +[Service] +MemoryHigh={{ memory_high }}G +MemoryMax={{ memory_max }}G +MemorySwapMax={{ memory_swap_max }}G diff --git a/jcloud/playbooks/roles/mariadb_systemd_limits/templates/memory.conf b/jcloud/playbooks/roles/mariadb_systemd_limits/templates/memory.conf new file mode 100644 index 0000000..1c8fa0d --- /dev/null +++ b/jcloud/playbooks/roles/mariadb_systemd_limits/templates/memory.conf @@ -0,0 +1,4 @@ +[Service] +MemoryHigh={{ max(ansible_memtotal_mb // 1024 - 2, 1) }}G +MemoryMax={{ max(ansible_memtotal_mb // 1024 - 1, 2) }}G +MemorySwapMax=100M diff --git a/jcloud/playbooks/roles/migrate_to_fc/tasks/main.yml b/jcloud/playbooks/roles/migrate_to_fc/tasks/main.yml new file mode 100644 index 0000000..79d011f --- /dev/null +++ b/jcloud/playbooks/roles/migrate_to_fc/tasks/main.yml @@ -0,0 +1,31 @@ +--- +- name: Put site on maintenance_mode + command: 'bench --site {{ site }} set-maintenance-mode on' + args: + chdir: /home/jingrow/benches/{{ bench }} + +- name: Get migrate script + get_url: + url: 
https://jingrow.com/assets/jcloud/migrate_2 + dest: '/home/jingrow/benches/{{ bench }}/migrate_2' + validate_certs: no + +- name: Run migrate on site + command: '/home/jingrow/benches/{{ bench }}/env/bin/python migrate_2 -s {{ site }} -u {{ username }} -p {{ password }} -f {{ version }}' + args: + chdir: /home/jingrow/benches/{{ bench }} + register: migrate_command + ignore_errors: yes + async: 3600 + poll: 5 + +- name: Activate site if migrate failed + command: 'bench --site {{ site }} set-maintenance-mode off' + args: + chdir: /home/jingrow/benches/{{ bench }} + when: migrate_command.rc != 0 + +- name: Mark play as failed if migrate fails + fail: + msg: Play failed as migrate failed + when: migrate_command.rc != 0 diff --git a/jcloud/playbooks/roles/monitoring_password/tasks/main.yml b/jcloud/playbooks/roles/monitoring_password/tasks/main.yml new file mode 100644 index 0000000..0b03a0e --- /dev/null +++ b/jcloud/playbooks/roles/monitoring_password/tasks/main.yml @@ -0,0 +1,5 @@ +--- +- name: Setup Monitoring Authentication + become: yes + become_user: jingrow + command: "htpasswd -Bbc /home/jingrow/agent/nginx/monitoring.htpasswd jingrow {{ monitoring_password }}" diff --git a/jcloud/playbooks/roles/mount/tasks/main.yml b/jcloud/playbooks/roles/mount/tasks/main.yml new file mode 100644 index 0000000..3ccf196 --- /dev/null +++ b/jcloud/playbooks/roles/mount/tasks/main.yml @@ -0,0 +1,55 @@ +--- +- name: Set JSON Variables + set_fact: + all_mounts: '{{ all_mounts_json | from_json }}' + volume_mounts: '{{ volume_mounts_json | from_json }}' + bind_mounts: '{{ bind_mounts_json | from_json }}' + +- name: Create Mount Points + file: + dest: "{{ item.mount_point }}" + state: directory + owner: "{{ item.mount_point_owner }}" + group: "{{ item.mount_point_group }}" + mode: "{{ item.mount_point_mode }}" + loop: "{{ all_mounts }}" + +- name: Format Volumes + filesystem: + fstype: "{{ item.filesystem }}" + dev: "{{ item.source }}" + force: false + loop: "{{ volume_mounts }}" + +- name: Show Block Device UUIDs + command: 'lsblk {{ item.source }} -no UUID' + loop: "{{ volume_mounts }}" + register: block_devices + +- name: Mount Volumes + mount: + src: "UUID={{ item.stdout.strip() }}" + path: "{{ item.item.mount_point }}" + fstype: "{{ item.item.filesystem }}" + opts: "{{ item.item.mount_options }}" + state: mounted + loop: "{{ block_devices.results }}" + +- name: Create Mount Source Directories + file: + dest: "{{ item.source }}" + state: directory + owner: "{{ item.mount_point_owner }}" + group: "{{ item.mount_point_group }}" + mode: "{{ item.mount_point_mode }}" + loop: "{{ bind_mounts }}" + +- name: Mount Bind Mounts + mount: + src: "{{ item.source }}" + path: "{{ item.mount_point }}" + fstype: none + opts: "{{ item.mount_options }}" + state: mounted + loop: "{{ bind_mounts }}" + diff --git a/jcloud/playbooks/roles/mysqld_exporter/tasks/main.yml b/jcloud/playbooks/roles/mysqld_exporter/tasks/main.yml new file mode 100644 index 0000000..51e1a4f --- /dev/null +++ b/jcloud/playbooks/roles/mysqld_exporter/tasks/main.yml @@ -0,0 +1,40 @@ +--- +- name: Create MySQL Exporter Directory + file: + path: /opt/mysqld_exporter + state: directory + mode: 0755 + +- name: Set Architecture + set_fact: + arch: "{{'amd64' if (ansible_architecture == 'x86_64') else 'arm64'}}" + +- name: Download MySQL Exporter Archive + unarchive: + src: "https://github.com/prometheus/mysqld_exporter/releases/download/v0.15.1/mysqld_exporter-0.15.1.linux-{{ arch }}.tar.gz" + dest: /tmp + remote_src: yes + +- name: Copy MySQL Exporter 
Binary + copy: + src: "/tmp/mysqld_exporter-0.15.1.linux-{{ arch }}/mysqld_exporter" + dest: /opt/mysqld_exporter/mysqld_exporter + remote_src: yes + mode: 0755 + owner: root + group: root + +- name: Create MySQL Exporter Systemd Service File + template: + src: mysqld_exporter.service + dest: /etc/systemd/system/mysqld_exporter.service + owner: root + group: root + mode: 0644 + +- name: Restart MySQL Exporter Service + systemd: + daemon_reload: true + name: mysqld_exporter + enabled: yes + state: restarted diff --git a/jcloud/playbooks/roles/mysqld_exporter/templates/mysqld_exporter.service b/jcloud/playbooks/roles/mysqld_exporter/templates/mysqld_exporter.service new file mode 100644 index 0000000..a0a1e9a --- /dev/null +++ b/jcloud/playbooks/roles/mysqld_exporter/templates/mysqld_exporter.service @@ -0,0 +1,20 @@ +[Unit] +Description=Prometheus MySQL Exporter +After=network-online.target + +[Service] +Environment="DATA_SOURCE_NAME=root:{{ mariadb_root_password }}@({{ private_ip }}:3306)/" +Type=simple +ExecStart=/opt/mysqld_exporter/mysqld_exporter --web.listen-address=127.0.0.1:9104 \ + --collect.binlog_size \ + --collect.perf_schema.eventswaits \ + --collect.perf_schema.file_events \ + --config.my-cnf=/root/.my.cnf \ + +SyslogIdentifier=mysqld_exporter +Restart=always +RestartSec=1 +StartLimitInterval=0 + +[Install] +WantedBy=multi-user.target diff --git a/jcloud/playbooks/roles/mysqld_exporter_rename/tasks/main.yml b/jcloud/playbooks/roles/mysqld_exporter_rename/tasks/main.yml new file mode 100644 index 0000000..0ff8105 --- /dev/null +++ b/jcloud/playbooks/roles/mysqld_exporter_rename/tasks/main.yml @@ -0,0 +1,15 @@ +--- +- name: Create MySQL Exporter Systemd Service File + template: + src: ../../mysqld_exporter/templates/mysqld_exporter.service + dest: /etc/systemd/system/mysqld_exporter.service + owner: root + group: root + mode: 0644 + +- name: Restart MySQL Exporter Service + systemd: + daemon_reload: true + name: mysqld_exporter + enabled: yes + state: restarted diff --git a/jcloud/playbooks/roles/mysqld_variable/tasks/main.yml b/jcloud/playbooks/roles/mysqld_variable/tasks/main.yml new file mode 100644 index 0000000..e6a7782 --- /dev/null +++ b/jcloud/playbooks/roles/mysqld_variable/tasks/main.yml @@ -0,0 +1,39 @@ +--- +- name: Set global variable dynamically + mysql_variables: + variable: '{{ variable }}' + value: '{{ value }}' + mode: global + when: dynamic | default(true) | bool + +- name: Add variable in jingrow.cnf + ini_file: + path: /etc/mysql/conf.d/jingrow.cnf + section: mysqld + option: '{{ variable | replace("_", "-") }}' + value: '{{ value | default(None) }}' # condition to not set rhs + backup: true + allow_no_value: true + create: false + when: persist | default(false) | bool + +- name: Remove variable as it's been skipped/disabled + ini_file: + path: /etc/mysql/conf.d/jingrow.cnf + section: mysqld + option: '{{ variable | replace("_", "-") | replace("skip-", "") }}' + create: false + state: absent + when: skip | default(false) | bool + +- name: Remove skip prefixed variable as variable is enabled + ini_file: + path: /etc/mysql/conf.d/jingrow.cnf + section: mysqld + option: '{{ "skip-" + variable | replace("_", "-") }}' + allow_no_value: true + create: false + state: absent + when: + - persist | default(false) | bool + - not skip | default(true) | bool diff --git a/jcloud/playbooks/roles/mysqldump/tasks/main.yml b/jcloud/playbooks/roles/mysqldump/tasks/main.yml new file mode 100644 index 0000000..1873e68 --- /dev/null +++ 
b/jcloud/playbooks/roles/mysqldump/tasks/main.yml @@ -0,0 +1,7 @@ +--- +- name: Ensure max_allowed_packet value is set to 512 MB + ini_file: + path: /etc/mysql/conf.d/mysqldump.cnf + section: mysqldump + option: max_allowed_packet + value: 512M diff --git a/jcloud/playbooks/roles/nginx/tasks/main.yml b/jcloud/playbooks/roles/nginx/tasks/main.yml new file mode 100644 index 0000000..9f480f2 --- /dev/null +++ b/jcloud/playbooks/roles/nginx/tasks/main.yml @@ -0,0 +1,99 @@ +--- +- name: Install NGINX + apt: + pkg: + - nginx + - nginx-extras + - apache2-utils + - libpcre3-dev + state: present + +- name: Setup NGINX Source Repository + copy: + content: "deb-src http://nginx.org/packages/mainline/ubuntu/ {{ ansible_distribution_release }} nginx" + dest: /etc/apt/sources.list.d/nginx.list + +- name: Add NGINX Repository Key + apt_key: + url: https://nginx.org/keys/nginx_signing.key + state: present + +- name: Update APT Cache + apt: + update_cache: yes + +- name: Install NGINX Build Dependencies + apt: + pkg: + - nginx + state: build-dep + +- name: Extract NGINX Version + shell: "nginx -v 2>&1 | grep -oP '[\\d.]*'" + register: nginx_version + +- name: Create NGINX Source Directory + file: + dest: /opt/nginx + state: directory + +- name: Download NGINX Source Archive + unarchive: + src: http://nginx.org/download/nginx-{{ nginx_version.stdout }}.tar.gz + dest: /opt/nginx + remote_src: yes + +- name: Clone NGINX VTS Module Repository + git: + repo: https://github.com/vozlt/nginx-module-vts + dest: /opt/nginx-modules/nginx-module-vts + +- name: Extract NGINX Compile Flags + shell: "nginx -V 2>&1 | grep -oP '\\--with-[-\\w]* ' | tr '\\n' ' '" + register: nginx_compile_flags + +- name: Configure NGINX Modules + shell: "./configure {{ nginx_compile_flags.stdout }} --add-dynamic-module=/opt/nginx-modules/nginx-module-vts" + args: + chdir: "/opt/nginx/nginx-{{ nginx_version.stdout }}" + +- name: Compile NGINX modules + shell: "make modules" + args: + chdir: "/opt/nginx/nginx-{{ nginx_version.stdout }}" + +- name: Copy NGINX VTS Module Shared Object File + copy: + src: "/opt/nginx/nginx-{{ nginx_version.stdout }}/objs/ngx_http_vhost_traffic_status_module.so" + dest: /usr/lib/nginx/modules + remote_src: yes + +- name: Create VTS Database File + file: + path: /var/log/nginx/vts.db + state: touch + owner: www-data + group: www-data + +- name: Remove Default Enabled NGINX Virtual Host + file: + path: /etc/nginx/sites-enabled/default + state: absent + +- name: Remove Default Available NGINX Virtual Host + file: + path: /etc/nginx/sites-available/default + state: absent + +- name: Add www-data user to Jingrow group + user: + name: www-data + groups: jingrow + append: yes + when: ansible_distribution == "Ubuntu" and ansible_distribution_release == 'jammy' + +- name: Restart NGINX and Enable at Boot + service: + name: nginx + state: restarted + enabled: yes diff --git a/jcloud/playbooks/roles/node_exporter/tasks/main.yml b/jcloud/playbooks/roles/node_exporter/tasks/main.yml new file mode 100644 index 0000000..3a91cc4 --- /dev/null +++ b/jcloud/playbooks/roles/node_exporter/tasks/main.yml @@ -0,0 +1,40 @@ +--- +- name: Create Node Exporter Directory + file: + path: /opt/node_exporter + state: directory + mode: 0755 + +- name: Set Architecture + set_fact: + arch: "{{'amd64' if (ansible_architecture == 'x86_64') else 'arm64'}}" + +- name: Download Node Exporter Archive + unarchive: + src: "https://github.com/prometheus/node_exporter/releases/download/v1.8.2/node_exporter-1.8.2.linux-{{ arch }}.tar.gz" + dest: /tmp + 
remote_src: yes + +- name: Copy Node Exporter Binary + copy: + src: "/tmp/node_exporter-1.8.2.linux-{{ arch }}/node_exporter" + dest: /opt/node_exporter/node_exporter + remote_src: yes + mode: 0755 + owner: root + group: root + +- name: Create Node Exporter Systemd Service File + template: + src: node_exporter.service + dest: /etc/systemd/system/node_exporter.service + owner: root + group: root + mode: 0644 + +- name: Restart Node Exporter Service + systemd: + daemon_reload: true + name: node_exporter + enabled: yes + state: restarted diff --git a/jcloud/playbooks/roles/node_exporter/templates/node_exporter.service b/jcloud/playbooks/roles/node_exporter/templates/node_exporter.service new file mode 100644 index 0000000..9d8ae7c --- /dev/null +++ b/jcloud/playbooks/roles/node_exporter/templates/node_exporter.service @@ -0,0 +1,15 @@ +[Unit] +Description=Prometheus Node Exporter +After=network-online.target + +[Service] +Type=simple +ExecStart=/opt/node_exporter/node_exporter --web.listen-address=127.0.0.1:9100 + +SyslogIdentifier=node_exporter +Restart=always +RestartSec=1 +StartLimitInterval=0 + +[Install] +WantedBy=multi-user.target diff --git a/jcloud/playbooks/roles/oci/tasks/main.yml b/jcloud/playbooks/roles/oci/tasks/main.yml new file mode 100644 index 0000000..bab32d1 --- /dev/null +++ b/jcloud/playbooks/roles/oci/tasks/main.yml @@ -0,0 +1,36 @@ +--- +- name: Delete IPTables Block-All Rule + iptables: + chain: INPUT + jump: REJECT + reject_with: icmp-host-prohibited + state: absent + +- name: Delete IPTables Block-All Rule from Rules File + replace: + path: /etc/iptables/rules.v4 + regexp: '-A INPUT -j REJECT --reject-with icmp-host-prohibited' + replace: '#-A INPUT -j REJECT --reject-with icmp-host-prohibited' + +- name: Copy Authorized Keys from ubuntu to root User + copy: + src: /home/ubuntu/.ssh/authorized_keys + dest: /root/.ssh/authorized_keys + mode: 0600 + remote_src: yes + +- name: Remove OPC User + user: + name: opc + state: absent + remove: yes + force: yes + ignore_errors: yes + +- name: Remove Ubuntu User + user: + name: ubuntu + state: absent + remove: yes + force: yes + ignore_errors: yes diff --git a/jcloud/playbooks/roles/pam/defaults/main.yml b/jcloud/playbooks/roles/pam/defaults/main.yml new file mode 100644 index 0000000..029661f --- /dev/null +++ b/jcloud/playbooks/roles/pam/defaults/main.yml @@ -0,0 +1,8 @@ +--- +pam_faillock: + attempts: 5 + fail_interval: 900 + unlock_time: 900 + fail_for_root: no + remember: 5 + pwhash: sha512 diff --git a/jcloud/playbooks/roles/pam/tasks/main.yml b/jcloud/playbooks/roles/pam/tasks/main.yml new file mode 100644 index 0000000..f837b9f --- /dev/null +++ b/jcloud/playbooks/roles/pam/tasks/main.yml @@ -0,0 +1,84 @@ +--- +- name: Install latest libpam-modules + apt: + name: libpam-modules + state: latest + update_cache: yes + +- name: Install PAM module to perform password quality checking + package: + name: libpam-pwquality + state: present + +- name: Ensure password creation requirements are configured + lineinfile: + state: present + path: /etc/security/pwquality.conf + regexp: '^{{ item.key }}' + line: '{{ item.key }} = {{ item.value }}' + with_items: + - { key: 'minlen', value: '14' } + - { key: 'minclass', value: '4' } + +- name: 5.4.2 | L1 | PATCH | Ensure lockout for failed password attempts is configured + block: + - name: Add key value pairs in faillock config + lineinfile: + state: present + path: /etc/security/faillock.conf + regexp: '^{{ item.key }}' + line: '{{ item.key }} = {{ item.value }}' + with_items: + - { key: 
'deny', value: '{{ pam_faillock.attempts }}' } + - { key: 'unlock_time', value: '{{ pam_faillock.unlock_time }}' } + - { key: 'fail_interval', value: '{{ pam_faillock.fail_interval }}' } + + - name: Add fail_for_root setting in faillock config + lineinfile: + state: present + path: /etc/security/faillock.conf + regexp: '^even_deny_root' + line: "{{ pam_faillock.fail_for_root | ternary('even_deny_root','') }}" + + - name: Add default, deny count, and unlock times for preauth + lineinfile: + path: /etc/pam.d/common-auth + state: present + line: "auth\trequired\tpam_faillock.so preauth audit silent" + insertafter: '^#?auth ?' + + - name: Add success and default settings to pam_unix.so + lineinfile: + path: /etc/pam.d/common-auth + state: present + line: "auth\t[success=1 default=bad]\tpam_unix.so" + insertafter: '^#?auth ?' + backup: yes + + - name: Add default, deny count, and unlock times for authfail + lineinfile: + path: /etc/pam.d/common-auth + state: present + line: "auth\t[default=die]\tpam_faillock.so authfail audit" + insertafter: '^#?auth ?' + + - name: Add deny count and unlock times to authsucc + lineinfile: + path: /etc/pam.d/common-auth + state: present + line: "auth\tsufficient\tpam_faillock.so authsucc audit" + insertafter: '^#?auth ?' + +- name: "5.4.3 | L1 | PATCH | Ensure password hashing algorithm is SHA-512 | add sha512 settings" + lineinfile: + path: /etc/pam.d/common-password + state: present + line: "password\tsufficient\tpam_unix.so {{ pam_faillock.pwhash }} shadow nullok try_first_pass use_authtok" + insertafter: '^#?password ?' + +- name: "5.4.4 | L1 | PATCH | Ensure password reuse is limited | add remember settings" + lineinfile: + path: /etc/pam.d/common-password + state: present + line: "password\trequired\tpam_pwhistory.so remember={{ pam_faillock.remember }}" + insertafter: '^#?password ?' 
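With the role defaults above (`attempts: 5`, `fail_interval: 900`, `unlock_time: 900`), a quick spot check on a configured host might look like this (a sketch; the paths are the standard Ubuntu files the tasks edit):

```sh
# Lockout thresholds written to faillock.conf
grep -E '^(deny|unlock_time|fail_interval)' /etc/security/faillock.conf
# deny = 5
# unlock_time = 900
# fail_interval = 900

# The preauth, authfail and authsucc stanzas added to common-auth
grep pam_faillock /etc/pam.d/common-auth
```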
diff --git a/jcloud/playbooks/roles/ping/tasks/main.yml b/jcloud/playbooks/roles/ping/tasks/main.yml new file mode 100644 index 0000000..e692b8e --- /dev/null +++ b/jcloud/playbooks/roles/ping/tasks/main.yml @@ -0,0 +1,8 @@ +--- +- name: Ping + ping: + data: pong + +- name: Gather Facts + debug: + var: ansible_facts diff --git a/jcloud/playbooks/roles/plausible/tasks/main.yml b/jcloud/playbooks/roles/plausible/tasks/main.yml new file mode 100644 index 0000000..2241d3a --- /dev/null +++ b/jcloud/playbooks/roles/plausible/tasks/main.yml @@ -0,0 +1,44 @@ +--- +- name: Download Plausible Self Hosting Archive + become: yes + become_user: jingrow + unarchive: + src: https://github.com/plausible/hosting/archive/master.tar.gz + dest: /tmp + remote_src: yes + +- name: Create Plausible Directory + become: yes + become_user: jingrow + copy: + src: /tmp/hosting-master/ + dest: /home/jingrow/plausible + remote_src: yes + +- name: Create Plausible Environment File + become: yes + become_user: jingrow + template: + src: plausible-conf.env + dest: /home/jingrow/plausible/plausible-conf.env + +- name: Install Docker Compose + get_url: + url: https://github.com/docker/compose/releases/download/v2.3.3/docker-compose-linux-x86_64 + checksum: sha256:d31e90dda58e21a6463cb918868421b4b58c32504b01b1612d154fe6a9167a91 + dest: /usr/local/bin/docker-compose + mode: 0777 + +- name: Start Plausible + become: yes + become_user: jingrow + command: docker-compose up -d + args: + chdir: /home/jingrow/plausible + +- name: Setup NGINX Proxy for Plausible + become: yes + become_user: jingrow + command: /home/jingrow/agent/env/bin/agent setup analytics + args: + chdir: /home/jingrow/agent diff --git a/jcloud/playbooks/roles/plausible/templates/plausible-conf.env b/jcloud/playbooks/roles/plausible/templates/plausible-conf.env new file mode 100644 index 0000000..a5bc968 --- /dev/null +++ b/jcloud/playbooks/roles/plausible/templates/plausible-conf.env @@ -0,0 +1,20 @@ +ADMIN_USER_EMAIL=dev@jingrow.com +ADMIN_USER_NAME=Aditya +ADMIN_USER_PWD={{ plausible_password }} + +BASE_URL=https://{{ server }} +SECRET_KEY_BASE={{ plausible_secret }} + +DISABLE_REGISTRATION=true + +MAILER_EMAIL=notifications@jingrow.com + +SMTP_HOST_ADDR={{ plausible_mail_server }} +SMTP_HOST_PORT={{ plausible_mail_port }} +SMTP_USER_NAME={{ plausible_mail_login }} +SMTP_USER_PWD={{ plausible_mail_password }} +SMTP_HOST_SSL_ENABLED=false +SMTP_RETRIES=2 + +GOOGLE_CLIENT_ID={{ google_client_id }} +GOOGLE_CLIENT_SECRET={{ google_client_secret }} diff --git a/jcloud/playbooks/roles/primary/tasks/main.yml b/jcloud/playbooks/roles/primary/tasks/main.yml new file mode 100644 index 0000000..7c5c318 --- /dev/null +++ b/jcloud/playbooks/roles/primary/tasks/main.yml @@ -0,0 +1,54 @@ +--- +- name: Install MariaBackup + apt: + pkg: mariadb-backup + +- name: Create Directory for Database Dump + file: + dest: '{{ backup_path }}' + state: directory + +- name: Count Number of IBData Files + shell: 'cd /var/lib/mysql && find . 
-name "*.ibd" | wc -l' + register: ibdata_file_count + +- name: Calculate Open Files Limit + set_fact: + open_files_limit: '{{ [ibdata_file_count.stdout|int + 10000, 65535] | max }}' + +- name: Set Soft Limit on Open Files in Linux + pam_limits: + domain: '*' + limit_type: 'soft' + limit_item: 'nofile' + value: '{{ open_files_limit }}' + backup: yes + +- name: Set Hard Limit on Open Files in Linux + pam_limits: + domain: '*' + limit_type: 'hard' + limit_item: 'nofile' + value: '{{ open_files_limit|int * 2 }}' + +- name: Delete MariaBackup Directory + file: + path: '{{ backup_path }}' + state: absent + +- name: Start MariaBackup + command: 'mariabackup --backup --rsync --target-dir {{ backup_path }} --user root --password {{ mariadb_root_password }} --open-files-limit {{ open_files_limit }} --parallel 4 --compress-threads 4' + async: 3600 + poll: 5 + +- name: Prepare MariaBackup + command: 'mariabackup --prepare --target-dir {{ backup_path }} --open-files-limit {{ open_files_limit }}' + async: 3600 + poll: 5 + +- name: Add Secondary Public Key to Authorized Keys + authorized_key: + user: root + key: '{{ secondary_root_public_key }}' + state: present + when: secondary_root_public_key is defined diff --git a/jcloud/playbooks/roles/primary_app/tasks/main.yml b/jcloud/playbooks/roles/primary_app/tasks/main.yml new file mode 100644 index 0000000..8476bd2 --- /dev/null +++ b/jcloud/playbooks/roles/primary_app/tasks/main.yml @@ -0,0 +1,67 @@ +--- +- name: Remove Primary Server from Known Hosts + become: yes + become_user: jingrow + known_hosts: + name: "{{ secondary_private_ip }}" + state: absent + +- name: Add Primary Server to Known Hosts + become: yes + become_user: jingrow + shell: ssh-keyscan {{ secondary_private_ip }} >> /home/jingrow/.ssh/known_hosts + +- name: Install Lsyncd + apt: + state: present + pkg: + - lsyncd + +- name: Create Lsyncd Directory + become: yes + become_user: jingrow + file: + dest: /home/jingrow/lsyncd + state: directory + +- name: Create Lsyncd Log and Status File + become: yes + become_user: jingrow + file: + dest: /home/jingrow/lsyncd/{{ item }} + state: touch + with_items: + - lsyncd.logs + - lsyncd.status + +- name: Create Lsynd Config File + become: yes + become_user: jingrow + template: + src: lsyncd.conf + dest: /home/jingrow/lsyncd/lsyncd.conf.lua + +- name: Setup lsyncd service for jingrow user + template: + src: lsyncd.service + dest: /etc/systemd/system/lsyncd.service + owner: root + group: root + mode: 0644 + +- name: Get Number of Files in Benches Directory + shell: find /home/jingrow/benches -type f | wc -l + register: file_count + +- name: Update Number of Maximum Watches + sysctl: + name: fs.inotify.max_user_watches + value: "{{ [file_count.stdout|int, 1048576] | max }}" + state: present + +- name: Restart Lsyncd service + systemd: + daemon_reload: true + name: lsyncd + state: restarted + enabled: True diff --git a/jcloud/playbooks/roles/primary_app/templates/lsyncd.conf b/jcloud/playbooks/roles/primary_app/templates/lsyncd.conf new file mode 100644 index 0000000..d5a0d2a --- /dev/null +++ b/jcloud/playbooks/roles/primary_app/templates/lsyncd.conf @@ -0,0 +1,17 @@ +settings { + logfile = "/home/jingrow/lsyncd/lsyncd.logs", + statusFile = "/home/jingrow/lsyncd/lsyncd.status" +} + +sync { + default.rsyncssh, + source="/home/jingrow/benches", + host="{{ secondary_private_ip }}", + targetdir="/home/jingrow/benches", + rsync = { + archive = true, + acls = true, + xattrs = true, + compress = true, + }, +} \ No newline at end of file diff --git 
a/jcloud/playbooks/roles/primary_app/templates/lsyncd.service b/jcloud/playbooks/roles/primary_app/templates/lsyncd.service new file mode 100644 index 0000000..dd5b4a8 --- /dev/null +++ b/jcloud/playbooks/roles/primary_app/templates/lsyncd.service @@ -0,0 +1,12 @@ +[Unit] +Description=Live Syncing (Mirror) Daemon +After=network-online.target + +[Service] +Type=simple +User=jingrow +Group=jingrow +ExecStart=/usr/bin/lsyncd -nodaemon /home/jingrow/lsyncd/lsyncd.conf.lua + +[Install] +WantedBy=multi-user.target \ No newline at end of file diff --git a/jcloud/playbooks/roles/primary_proxy/tasks/main.yml b/jcloud/playbooks/roles/primary_proxy/tasks/main.yml new file mode 100644 index 0000000..eb49709 --- /dev/null +++ b/jcloud/playbooks/roles/primary_proxy/tasks/main.yml @@ -0,0 +1,57 @@ +--- +- name: Remove Primary Proxy Server from Known Hosts + become: yes + become_user: jingrow + known_hosts: + name: "{{ secondary_private_ip }}" + state: absent + +- name: Add Primary Proxy Server to Known Hosts + become: yes + become_user: jingrow + shell: ssh-keyscan {{ secondary_private_ip }} >> /home/jingrow/.ssh/known_hosts + +- name: Install Lsyncd + apt: + state: present + pkg: + - lsyncd + +- name: Create Lsyncd Directory + become: yes + become_user: jingrow + file: + dest: /home/jingrow/lsyncd + state: directory + +- name: Create Lsyncd Log and Status File + become: yes + become_user: jingrow + file: + dest: /home/jingrow/lsyncd/{{ item }} + state: touch + with_items: + - lsyncd.logs + - lsyncd.status + +- name: Create Lsynd Config File + become: yes + become_user: jingrow + template: + src: lsyncd.conf + dest: /home/jingrow/lsyncd/lsyncd.conf.lua + +- name: Setup lsyncd service for jingrow user + template: + src: lsyncd.service + dest: /etc/systemd/system/lsyncd.service + owner: root + group: root + mode: 0644 + +- name: Restart Lsyncd service + systemd: + daemon_reload: true + name: lsyncd + state: restarted + enabled: True diff --git a/jcloud/playbooks/roles/primary_proxy/templates/lsyncd.conf b/jcloud/playbooks/roles/primary_proxy/templates/lsyncd.conf new file mode 100644 index 0000000..bc503da --- /dev/null +++ b/jcloud/playbooks/roles/primary_proxy/templates/lsyncd.conf @@ -0,0 +1,17 @@ +settings { + logfile = "/home/jingrow/lsyncd/lsyncd.logs", + statusFile = "/home/jingrow/lsyncd/lsyncd.status" +} + +sync { + default.rsyncssh, + source="/home/jingrow/agent/nginx", + host="{{ secondary_private_ip }}", + targetdir="/home/jingrow/agent/nginx", + rsync = { + archive = true, + acls = true, + xattrs = true, + compress = true, + }, +} \ No newline at end of file diff --git a/jcloud/playbooks/roles/primary_proxy/templates/lsyncd.service b/jcloud/playbooks/roles/primary_proxy/templates/lsyncd.service new file mode 100644 index 0000000..dd5b4a8 --- /dev/null +++ b/jcloud/playbooks/roles/primary_proxy/templates/lsyncd.service @@ -0,0 +1,12 @@ +[Unit] +Description=Live Syncing (Mirror) Daemon +After=network-online.target + +[Service] +Type=simple +User=jingrow +Group=jingrow +ExecStart=/usr/bin/lsyncd -nodaemon /home/jingrow/lsyncd/lsyncd.conf.lua + +[Install] +WantedBy=multi-user.target \ No newline at end of file diff --git a/jcloud/playbooks/roles/process_exporter/tasks/main.yml b/jcloud/playbooks/roles/process_exporter/tasks/main.yml new file mode 100644 index 0000000..0b76702 --- /dev/null +++ b/jcloud/playbooks/roles/process_exporter/tasks/main.yml @@ -0,0 +1,48 @@ +--- +- name: Create Processs Exporter Directory + file: + path: /opt/process_exporter + state: directory + mode: 0755 + +- name: Set 
Architecture + set_fact: + arch: "{{'amd64' if (ansible_architecture == 'x86_64') else 'arm64'}}" + +- name: Download Process Exporter Archive + unarchive: + src: "https://github.com/ncabatoff/process-exporter/releases/download/v0.8.3/process-exporter-0.8.3.linux-{{ arch }}.tar.gz" + dest: /tmp + remote_src: yes + +- name: Copy Process Exporter Binary + copy: + src: "/tmp/process-exporter-0.8.3.linux-{{ arch }}/process-exporter" + dest: /opt/process_exporter/process_exporter + remote_src: yes + mode: 0755 + owner: root + group: root + +- name: Create Process Exporter Config + template: + src: config.yml + dest: /opt/process_exporter/config.yml + owner: root + group: root + mode: 0644 + +- name: Create Process Exporter Systemd Service File + template: + src: process_exporter.service + dest: /etc/systemd/system/process_exporter.service + owner: root + group: root + mode: 0644 + +- name: Restart Process Exporter Service + systemd: + daemon_reload: true + name: process_exporter + enabled: yes + state: restarted diff --git a/jcloud/playbooks/roles/process_exporter/templates/config.yml b/jcloud/playbooks/roles/process_exporter/templates/config.yml new file mode 100644 index 0000000..5955203 --- /dev/null +++ b/jcloud/playbooks/roles/process_exporter/templates/config.yml @@ -0,0 +1,7 @@ +process_names: + - name: '{% raw %}rq:job:{{.Matches.fn}}{% endraw %}' + cmdline: + - "rq: Started running (?P<fn>\\S+) .+" + - name: '{% raw %}{{.Comm}}{% endraw %}' + cmdline: + - '.+' diff --git a/jcloud/playbooks/roles/process_exporter/templates/process_exporter.service b/jcloud/playbooks/roles/process_exporter/templates/process_exporter.service new file mode 100644 index 0000000..54ea491 --- /dev/null +++ b/jcloud/playbooks/roles/process_exporter/templates/process_exporter.service @@ -0,0 +1,18 @@ +[Unit] +Description=Prometheus Process Exporter +After=network-online.target + +[Service] +Type=simple +ExecStart=/opt/process_exporter/process_exporter \ + --config.path /opt/process_exporter/config.yml \ + --threads=false \ + --web.listen-address=127.0.0.1:9256 + +SyslogIdentifier=process_exporter +Restart=always +RestartSec=1 +StartLimitInterval=0 + +[Install] +WantedBy=multi-user.target diff --git a/jcloud/playbooks/roles/prometheus/tasks/main.yml b/jcloud/playbooks/roles/prometheus/tasks/main.yml new file mode 100644 index 0000000..9297a21 --- /dev/null +++ b/jcloud/playbooks/roles/prometheus/tasks/main.yml @@ -0,0 +1,131 @@ +--- +- name: Create Prometheus Directories + become: yes + become_user: jingrow + file: + path: '{{ item }}' + state: directory + mode: 0755 + with_items: + - /home/jingrow/prometheus + - /home/jingrow/prometheus/data + - /home/jingrow/prometheus/rules + - /home/jingrow/prometheus/file_sd + +- name: Download Prometheus Archive + become: yes + become_user: jingrow + unarchive: + src: https://github.com/prometheus/prometheus/releases/download/v2.49.1/prometheus-2.49.1.linux-amd64.tar.gz + dest: /tmp + remote_src: yes + +- name: Copy Prometheus Console Templates + become: yes + become_user: jingrow + copy: + src: '/tmp/prometheus-2.49.1.linux-amd64/{{ item }}/' + dest: '/home/jingrow/prometheus/{{ item }}/' + mode: 0755 + remote_src: yes + with_items: + - console_libraries + - consoles + +- name: Copy Prometheus and Promtool Binaries + become: yes + become_user: jingrow + copy: + src: '/tmp/prometheus-2.49.1.linux-amd64/{{ item }}' + dest: '/home/jingrow/prometheus/{{ item }}' + mode: 0755 + remote_src: yes + with_items: + - prometheus + - promtool + +- name: Configure Prometheus Service
Discovery for Monitor Server + become: yes + become_user: jingrow + template: + src: self.yml + dest: /home/jingrow/prometheus/file_sd/self.yml + force: true + mode: 0600 + +- name: Configure Prometheus Service Discovery for Jcloud Server + become: yes + become_user: jingrow + template: + src: jcloud.yml.j2 + dest: /home/jingrow/prometheus/file_sd/jcloud.yml + force: true + mode: 0600 + +- name: Set JSON Variables + set_fact: + registries: '{{ registries_json | from_json }}' + clusters: '{{ clusters_json | from_json }}' + log_servers: '{{ log_servers_json | from_json }}' + +- name: Configure Prometheus Service Discovery for Registries + become: yes + become_user: jingrow + template: + src: registry.yml + dest: /home/jingrow/prometheus/file_sd/registry.{{ item.name }}.yml + force: true + mode: 0600 + loop: '{{ registries }}' + +- name: Configure Prometheus Service Discovery for Log Servers + become: yes + become_user: jingrow + template: + src: log.yml + dest: /home/jingrow/prometheus/file_sd/log.{{ item.name }}.yml + force: true + mode: 0600 + loop: '{{ log_servers }}' + +- name: Configure Prometheus + template: + src: prometheus.yml + dest: /home/jingrow/prometheus/prometheus.yml + validate: '/home/jingrow/prometheus/promtool check config %s' + +- name: Configure ownership permissions on prometheus.yml + file: + path: /home/jingrow/prometheus/prometheus.yml + owner: jingrow + group: jingrow + mode: 0600 + +- name: Create Prometheus Systemd Service File + template: + src: prometheus.service + dest: /etc/systemd/system/prometheus.service + owner: root + group: root + mode: 0644 + +- name: Restart Prometheus Service + systemd: + daemon_reload: true + name: prometheus + enabled: yes + state: restarted + +- name: Setup Agent Based Discovery + become: yes + become_user: jingrow + command: '/home/jingrow/agent/env/bin/agent setup monitor --url {{ jcloud_url }} --token {{ monitor_token }}' + args: + chdir: /home/jingrow/agent + +- name: Setup Cronjob for Agent Based Discovery + become: yes + become_user: jingrow + cron: + name: agent discovery + job: cd /home/jingrow/agent && /home/jingrow/agent/env/bin/agent discover diff --git a/jcloud/playbooks/roles/prometheus/templates/jcloud.yml.j2 b/jcloud/playbooks/roles/prometheus/templates/jcloud.yml.j2 new file mode 100644 index 0000000..553b24f --- /dev/null +++ b/jcloud/playbooks/roles/prometheus/templates/jcloud.yml.j2 @@ -0,0 +1,66 @@ +- targets: + - '{{ jcloud_app_server }}' + - '{{ jcloud_db_server }}' + {%- if jcloud_db_replica_server +%} + - '{{ jcloud_db_replica_server }}' + {%- endif +%} + labels: + job: node + __metrics_path__: /metrics/node + +- targets: + - '{{ jcloud_app_server }}' + labels: + job: nginx + __metrics_path__: /metrics/nginx + +- targets: + - '{{ jcloud_app_server }}' + labels: + job: cadvisor + __metrics_path__: /metrics/cadvisor + +- targets: + - '{{ jcloud_db_server }}' + {%- if jcloud_db_replica_server +%} + - '{{ jcloud_db_replica_server }}' + {%- endif +%} + labels: + job: mariadb + __metrics_path__: /metrics/mariadb + +- targets: + - '{{ jcloud_app_server }}' + labels: + job: docker + __metrics_path__: /metrics/docker + +- targets: + - '{{ jcloud_app_server }}' + labels: + job: gunicorn + __metrics_path__: /metrics/gunicorn + +- targets: + - '{{ jcloud_app_server }}' + labels: + job: process + __metrics_path__: /metrics/process + +- targets: + - '{{ jcloud_app_server }}' + labels: + job: rq + __metrics_path__: /metrics/rq + +- targets: + - '{{ jcloud_app_server }}' + labels: + job: redis + __metrics_path__: 
/metrics/redis + +- targets: + - '{{ jcloud_app_server }}' + labels: + job: jcloud + __metrics_path__: /metrics/jcloud diff --git a/jcloud/playbooks/roles/prometheus/templates/log.yml b/jcloud/playbooks/roles/prometheus/templates/log.yml new file mode 100644 index 0000000..aad3555 --- /dev/null +++ b/jcloud/playbooks/roles/prometheus/templates/log.yml @@ -0,0 +1,17 @@ +- targets: + - "{{ item.name }}" + labels: + job: node + __metrics_path__: /metrics/node + +- targets: + - "{{ item.name }}" + labels: + job: nginx + __metrics_path__: /metrics/nginx + +- targets: + - "{{ item.name }}" + labels: + job: elasticsearch + __metrics_path__: /metrics/elasticsearch diff --git a/jcloud/playbooks/roles/prometheus/templates/prometheus.service b/jcloud/playbooks/roles/prometheus/templates/prometheus.service new file mode 100644 index 0000000..f608993 --- /dev/null +++ b/jcloud/playbooks/roles/prometheus/templates/prometheus.service @@ -0,0 +1,34 @@ +[Unit] +Description=Prometheus +After=network-online.target + +[Service] +Type=simple +User=jingrow +Group=jingrow + +PIDFile=/var/run/prometheus.pid +ExecReload=/bin/kill -HUP $MAINPID + +ExecStart=/home/jingrow/prometheus/prometheus \ + --config.file=/home/jingrow/prometheus/prometheus.yml \ + --enable-feature=promql-negative-offset \ + --query.timeout=10m \ + --query.max-concurrency=50 \ + --storage.tsdb.path={{ prometheus_data_directory }} \ + --storage.tsdb.retention.time=180d \ + --web.console.libraries=/home/jingrow/prometheus/console_libraries \ + --web.console.templates=/home/jingrow/prometheus/consoles \ + --web.listen-address=127.0.0.1:9090 \ + --web.external-url=https://{{ server }}/prometheus + +SyslogIdentifier=prometheus +Restart=always +RestartSec=1 +StartLimitInterval=0 + +LimitNOFILE=1024000 +LimitNOFILESoft=102400 + +[Install] +WantedBy=multi-user.target diff --git a/jcloud/playbooks/roles/prometheus/templates/prometheus.yml b/jcloud/playbooks/roles/prometheus/templates/prometheus.yml new file mode 100644 index 0000000..a8dbb4f --- /dev/null +++ b/jcloud/playbooks/roles/prometheus/templates/prometheus.yml @@ -0,0 +1,154 @@ +#jinja2:block_start_string:'##', block_end_string:'##', trim_blocks: False + +global: + scrape_interval: 15s + scrape_timeout: 10s + evaluation_interval: 15s + +rule_files: + - /home/jingrow/prometheus/rules/*.yml + +alerting: + alertmanagers: + - scheme: https + path_prefix: /alertmanager + static_configs: + - targets: ['{{ server }}'] + basic_auth: + username: jingrow + password: '{{ grafana_password }}' + +scrape_configs: + - job_name: self + scrape_interval: 1m + scheme: https + basic_auth: + username: jingrow + password: '{{ monitoring_password }}' + relabel_configs: + - source_labels: [__address__] + target_label: instance + file_sd_configs: + - files: + - /home/jingrow/prometheus/file_sd/self.yml + + - job_name: jcloud + scrape_interval: 1m + scheme: https + basic_auth: + username: jingrow + password: '{{ jcloud_monitoring_password }}' + relabel_configs: + - source_labels: [__address__] + target_label: instance + file_sd_configs: + - files: + - /home/jingrow/prometheus/file_sd/jcloud.yml + + - job_name: site + scrape_interval: 1m + metrics_path: /blackbox/probe + scheme: https + params: + module: [http_2xx] + basic_auth: + username: jingrow + password: '{{ grafana_password }}' + relabel_configs: + - source_labels: [__address__] + target_label: __param_target + - source_labels: [__param_target] + target_label: instance + regex: 'https://(.*)/api/method/ping' + - target_label: __address__ + replacement: '{{ server 
}}' + file_sd_configs: + - files: + - /home/jingrow/prometheus/file_sd/sites.yml + + - job_name: domain + scrape_interval: 15m + metrics_path: /blackbox/probe + scheme: https + params: + module: [http_2xx] + basic_auth: + username: jingrow + password: '{{ grafana_password }}' + relabel_configs: + - source_labels: [__address__] + target_label: __param_target + - source_labels: [__param_target] + target_label: instance + regex: 'https://(.*)/api/method/ping' + - target_label: __address__ + replacement: '{{ server }}' + file_sd_configs: + - files: + - /home/jingrow/prometheus/file_sd/domains.yml + + - job_name: tls + scrape_interval: 15m + metrics_path: /blackbox/probe + scheme: https + params: + module: [http_2xx] + basic_auth: + username: jingrow + password: '{{ grafana_password }}' + relabel_configs: + - source_labels: [__address__] + target_label: __param_target + - source_labels: [__param_target] + target_label: instance + regex: 'https://(.*)' + - target_label: __address__ + replacement: '{{ server }}' + file_sd_configs: + - files: + - /home/jingrow/prometheus/file_sd/tls.yml + + ## for registry in registries ## + - job_name: '{{ registry.name }}' + scrape_interval: 1m + scheme: https + basic_auth: + username: jingrow + password: '{{ registry.monitoring_password }}' + relabel_configs: + - source_labels: [__address__] + target_label: instance + file_sd_configs: + - files: + - '/home/jingrow/prometheus/file_sd/registry.{{ registry.name }}.yml' + ## endfor ## + + ## for log in log_servers ## + - job_name: '{{ log.name }}' + scrape_interval: 1m + scheme: https + basic_auth: + username: jingrow + password: '{{ log.monitoring_password }}' + relabel_configs: + - source_labels: [__address__] + target_label: instance + file_sd_configs: + - files: + - '/home/jingrow/prometheus/file_sd/log.{{ log.name }}.yml' + ## endfor ## + + ## for cluster in clusters ## + - job_name: 'cluster.{{ cluster.name }}' + scrape_interval: 1m + scheme: https + basic_auth: + username: jingrow + password: '{{ cluster.monitoring_password }}' + relabel_configs: + - source_labels: [__address__] + target_label: instance + file_sd_configs: + - files: + - '/home/jingrow/prometheus/file_sd/cluster.{{ cluster.name }}.yml' + ## endfor ## diff --git a/jcloud/playbooks/roles/prometheus/templates/registry.yml b/jcloud/playbooks/roles/prometheus/templates/registry.yml new file mode 100644 index 0000000..b89cb21 --- /dev/null +++ b/jcloud/playbooks/roles/prometheus/templates/registry.yml @@ -0,0 +1,29 @@ +- targets: + - "{{ item.name }}" + labels: + job: node + __metrics_path__: /metrics/node + +- targets: + - "{{ item.name }}" + labels: + job: nginx + __metrics_path__: /metrics/nginx + +- targets: + - "{{ item.name }}" + labels: + job: cadvisor + __metrics_path__: /metrics/cadvisor + +- targets: + - "{{ item.name }}" + labels: + job: docker + __metrics_path__: /metrics/docker + +- targets: + - "{{ item.name }}" + labels: + job: registry + __metrics_path__: /metrics/registry diff --git a/jcloud/playbooks/roles/prometheus/templates/self.yml b/jcloud/playbooks/roles/prometheus/templates/self.yml new file mode 100644 index 0000000..80f577f --- /dev/null +++ b/jcloud/playbooks/roles/prometheus/templates/self.yml @@ -0,0 +1,35 @@ +- targets: + - "{{ server }}" + labels: + job: node + __metrics_path__: /metrics/node + +- targets: + - "{{ server }}" + labels: + job: nginx + __metrics_path__: /metrics/nginx + +- targets: + - "{{ server }}" + labels: + job: prometheus + __metrics_path__: /metrics/prometheus + +- targets: + - "{{ server }}" + 
labels: + job: alertmanager + __metrics_path__: /metrics/alertmanager + +- targets: + - "{{ server }}" + labels: + job: blackbox + __metrics_path__: /metrics/blackbox + +- targets: + - "{{ server }}" + labels: + job: grafana + __metrics_path__: /metrics/grafana diff --git a/jcloud/playbooks/roles/proxy/tasks/main.yml b/jcloud/playbooks/roles/proxy/tasks/main.yml new file mode 100644 index 0000000..d85dd33 --- /dev/null +++ b/jcloud/playbooks/roles/proxy/tasks/main.yml @@ -0,0 +1,36 @@ +--- +- name: Create Agent NGINX Hosts Directory + become: yes + become_user: jingrow + file: + path: /home/jingrow/agent/nginx/hosts + state: directory + +- name: Create Agent NGINX Upstream Directory + become: yes + become_user: jingrow + file: + path: /home/jingrow/agent/nginx/upstreams + state: directory + +- name: Create NGINX Proxy Configuration File + become: yes + become_user: jingrow + file: + path: /home/jingrow/agent/nginx/proxy.conf + state: touch + +- name: Symlink NGINX Proxy Configuration File + file: + src: /home/jingrow/agent/nginx/proxy.conf + dest: /etc/nginx/conf.d/proxy.conf + state: link + force: yes + follow: no + +- name: Setup Agent Proxy + become: yes + become_user: jingrow + command: '/home/jingrow/agent/env/bin/agent setup proxy --domain {{ domain }} --jcloud-url {{ jcloud_url }}' + args: + chdir: /home/jingrow/agent diff --git a/jcloud/playbooks/roles/proxysql/tasks/main.yml b/jcloud/playbooks/roles/proxysql/tasks/main.yml new file mode 100644 index 0000000..4c00abf --- /dev/null +++ b/jcloud/playbooks/roles/proxysql/tasks/main.yml @@ -0,0 +1,68 @@ +--- +- name: Create ProxySQL Directory + become: yes + become_user: jingrow + file: + dest: /home/jingrow/proxysql + state: directory + +- name: Copy ProxySQL TLS (Private Key) + copy: + src: /home/jingrow/agent/tls/privkey.pem + dest: /home/jingrow/proxysql/proxysql-key.pem + mode: 0600 + remote_src: yes + +- name: Copy ProxySQL TLS (CA Certificate) + copy: + src: /home/jingrow/agent/tls/chain.pem + dest: /home/jingrow/proxysql/proxysql-ca.pem + mode: 0600 + remote_src: yes + +- name: Copy ProxySQL TLS (Server Certificate) + copy: + src: /home/jingrow/agent/tls/fullchain.pem + dest: /home/jingrow/proxysql/proxysql-cert.pem + mode: 0600 + remote_src: yes + +- name: Create ProxySQL config + template: + src: proxysql.cnf + dest: /home/jingrow/proxysql/proxysql.cnf + mode: 0644 + +- name: Start ProxySQL + become: yes + become_user: jingrow + command: "docker run -d --name proxysql --hostname proxysql --restart always -p 3306:6033 -p 127.0.0.1:6032:6032 -p 127.0.0.1:6070:6070 -v /home/jingrow/proxysql:/var/lib/proxysql -v /home/jingrow/proxysql/proxysql.cnf:/etc/proxysql.cnf proxysql/proxysql:2.3.2" + +- name: Update APT Cache + apt: + update_cache: yes + +- name: Install MariaDB Client + apt: + pkg: + - mariadb-client + - python3-mysqldb + state: present + +- name: Enable ProxySQL Auditing + mysql_query: + login_user: jingrow + login_password: "{{ proxysql_admin_password }}" + login_host: 127.0.0.1 + login_port: 6032 + query: + - INSERT INTO mysql_query_rules(active, match_digest, log) VALUES (1, '.', 1); + - LOAD MYSQL QUERY RULES TO RUNTIME; + - SAVE MYSQL QUERY RULES TO DISK; + +- name: Setup Agent ProxySQL + become: yes + become_user: jingrow + command: "/home/jingrow/agent/env/bin/agent setup proxysql --password {{ proxysql_admin_password }}" + args: + chdir: /home/jingrow/agent diff --git a/jcloud/playbooks/roles/proxysql/templates/proxysql.cnf b/jcloud/playbooks/roles/proxysql/templates/proxysql.cnf new file mode 100644 index 
0000000..c334b99 --- /dev/null +++ b/jcloud/playbooks/roles/proxysql/templates/proxysql.cnf @@ -0,0 +1,57 @@ +datadir="/var/lib/proxysql" +errorlog="/var/lib/proxysql/proxysql.log" + +admin_variables= +{ + admin_credentials="jingrow:{{ proxysql_admin_password }}" + mysql_ifaces="0.0.0.0:6032" + hash_passwords=true + + restapi_enabled=true + prometheus_memory_metrics_interval=60 +} + +mysql_variables= +{ + interfaces="0.0.0.0:6033" + default_schema="information_schema" + stacksize=1048576 + server_version="5.5.30" + threads=4 + have_compress=true + + connect_timeout_server=3000 + connect_retries_on_failure=10 + + monitor_username="monitor" + monitor_password="monitor" + monitor_history=600000 + monitor_connect_interval=60000 + monitor_ping_interval=10000 + monitor_read_only_interval=1500 + monitor_read_only_timeout=500 + ping_interval_server_msec=120000 + ping_timeout_server=500 + + commands_stats=true + sessions_sort=true + + have_ssl=true + + auditlog_filename="/var/lib/proxysql/audit.log" + auditlog_filesize=104857600 + + eventslog_filename="/var/lib/proxysql/events.log" + eventslog_filesize=104857600 + eventslog_format=2 + eventslog_default_log=true + + poll_timeout=2000 + max_connections=2048 + default_query_delay=0 + default_query_timeout=300000 + max_transaction_time=1200000 + wait_timeout=300000 + default_sql_safe_updates="ON" +} + diff --git a/jcloud/playbooks/roles/proxysql_monitor/tasks/main.yml b/jcloud/playbooks/roles/proxysql_monitor/tasks/main.yml new file mode 100644 index 0000000..da4d7d8 --- /dev/null +++ b/jcloud/playbooks/roles/proxysql_monitor/tasks/main.yml @@ -0,0 +1,73 @@ +--- +- name: Create MySQL Exporter Directory + file: + path: /opt/mysqld_exporter + state: directory + mode: 0755 + +- name: Set Architecture + set_fact: + arch: "{{'amd64' if (ansible_architecture == 'x86_64') else 'arm64'}}" + +- name: Download MySQL Exporter Archive + unarchive: + src: "https://github.com/prometheus/mysqld_exporter/releases/download/v0.15.1/mysqld_exporter-0.15.1.linux-{{ arch }}.tar.gz" + dest: /tmp + remote_src: yes + +- name: Copy MySQL Exporter Binary + copy: + src: "/tmp/mysqld_exporter-0.15.1.linux-{{ arch }}/mysqld_exporter" + dest: /opt/mysqld_exporter/mysqld_exporter + remote_src: yes + mode: 0755 + owner: root + group: root + +- name: Create MySQL Client Config + template: + src: monitor.cnf + dest: /home/jingrow/proxysql/monitor.cnf + owner: root + group: root + mode: 0644 + +- name: Remove ProxySQL Monitor User + mysql_query: + login_user: jingrow + login_password: "{{ proxysql_admin_password }}" + login_host: 127.0.0.1 + login_port: 6032 + query: + - DELETE FROM mysql_users WHERE username = "monitor" + - LOAD MYSQL USERS TO RUNTIME + - SAVE MYSQL USERS FROM RUNTIME + - SAVE MYSQL USERS TO DISK + + +- name: Create ProxySQL Monitoring User + mysql_query: + login_user: jingrow + login_password: "{{ proxysql_admin_password }}" + login_host: 127.0.0.1 + login_port: 6032 + query: + - "INSERT INTO mysql_users (username, password, default_hostgroup, default_schema, use_ssl, max_connections) VALUES ('monitor', 'monitor', {{ default_hostgroup }}, NULL, 1, 16)" + - LOAD MYSQL USERS TO RUNTIME + - SAVE MYSQL USERS FROM RUNTIME + - SAVE MYSQL USERS TO DISK + +- name: Create MySQL Exporter Systemd Service File + template: + src: mysqld_exporter.service + dest: /etc/systemd/system/mysqld_exporter.service + owner: root + group: root + mode: 0644 + +- name: Restart MySQL Exporter Service + systemd: + daemon_reload: true + name: mysqld_exporter + enabled: yes + state: restarted diff 
--git a/jcloud/playbooks/roles/proxysql_monitor/templates/monitor.cnf b/jcloud/playbooks/roles/proxysql_monitor/templates/monitor.cnf new file mode 100644 index 0000000..99b1d32 --- /dev/null +++ b/jcloud/playbooks/roles/proxysql_monitor/templates/monitor.cnf @@ -0,0 +1,7 @@ +[client] +user=monitor +password=monitor +host={{ server }} +ssl-verify-server-cert +ssl-verify-identity +ssl-ca=/usr/share/ca-certificates/mozilla/ISRG_Root_X1.crt \ No newline at end of file diff --git a/jcloud/playbooks/roles/proxysql_monitor/templates/mysqld_exporter.service b/jcloud/playbooks/roles/proxysql_monitor/templates/mysqld_exporter.service new file mode 100644 index 0000000..a362ce0 --- /dev/null +++ b/jcloud/playbooks/roles/proxysql_monitor/templates/mysqld_exporter.service @@ -0,0 +1,16 @@ +[Unit] +Description=Prometheus MySQL Exporter +After=network-online.target + +[Service] +Type=simple +ExecStart=/opt/mysqld_exporter/mysqld_exporter --config.my-cnf=/home/jingrow/proxysql/monitor.cnf \ + --web.listen-address=127.0.0.1:9104 + +SyslogIdentifier=mysqld_exporter +Restart=always +RestartSec=1 +StartLimitInterval=0 + +[Install] +WantedBy=multi-user.target diff --git a/jcloud/playbooks/roles/pt_stalk/tasks/main.yml b/jcloud/playbooks/roles/pt_stalk/tasks/main.yml new file mode 100644 index 0000000..0cb23f2 --- /dev/null +++ b/jcloud/playbooks/roles/pt_stalk/tasks/main.yml @@ -0,0 +1,37 @@ +--- +- name: Install Percona Toolkit Dependencies + apt: + pkg: + - net-tools # Needed for netstat + state: present + +- name: Install Percona Toolkit + apt: + deb: https://downloads.percona.com/downloads/percona-toolkit/3.5.7/binary/debian/{{ ansible_distribution_release }}/x86_64/percona-toolkit_3.5.7-1.{{ ansible_distribution_release }}_amd64.deb + +- name: Fix a bug in pt-stalk + ansible.builtin.replace: + path: /usr/bin/pt-stalk + regexp: 'cut -f 3 ' # lsof returns name, pid, user, ... . 
We need pid so 2nd field not 3rd + replace: 'cut -f 2 ' + +- name: Enable Full GDB Backtrace + ansible.builtin.replace: + path: /usr/bin/pt-stalk + regexp: '-ex "thread apply all bt" \\' + replace: '-ex "set print frame-arguments all" -ex "thread apply all bt full" \\' + +- name: Create Stalk Systemd Service File + template: + src: pt_stalk.service + dest: /etc/systemd/system/pt_stalk.service + owner: root + group: root + mode: 0644 + +- name: Restart Stalk Service + systemd: + daemon_reload: true + name: pt_stalk + enabled: yes + state: restarted diff --git a/jcloud/playbooks/roles/pt_stalk/templates/pt_stalk.service b/jcloud/playbooks/roles/pt_stalk/templates/pt_stalk.service new file mode 100644 index 0000000..76a947d --- /dev/null +++ b/jcloud/playbooks/roles/pt_stalk/templates/pt_stalk.service @@ -0,0 +1,26 @@ +[Unit] +Description=Percona Stalk +After=network-online.target + +[Service] +Type=simple +ExecReload=/bin/kill -HUP $MAINPID +ExecStart=/usr/bin/pt-stalk --defaults-file /root/.my.cnf \ + --host {{ private_ip }} \ + --port {{ mariadb_port }} \ + {% if stalk_gdb_collector | bool %} --collect-gdb {% endif %} \ + {% if stalk_strace_collector | bool %} --collect-strace {% endif %} \ + --interval {{ stalk_interval }} \ + --cycles {{ stalk_cycles }} \ + --sleep {{ stalk_sleep }} \ + --function {{ stalk_function }} \ + --variable {{ stalk_variable }} \ + --threshold {{ stalk_threshold }} + +SyslogIdentifier=pt_stalk +Restart=always +RestartSec=1 +StartLimitInterval=0 + +[Install] +WantedBy=multi-user.target diff --git a/jcloud/playbooks/roles/reconfigure_prometheus/tasks/main.yml b/jcloud/playbooks/roles/reconfigure_prometheus/tasks/main.yml new file mode 100644 index 0000000..99b6f5e --- /dev/null +++ b/jcloud/playbooks/roles/reconfigure_prometheus/tasks/main.yml @@ -0,0 +1,50 @@ +--- +- name: Configure Prometheus Service Discovery for Jcloud Server + become: yes + become_user: jingrow + template: + src: ../../prometheus/templates/jcloud.yml.j2 + dest: /home/jingrow/prometheus/file_sd/jcloud.yml + force: true + mode: 0600 + +- name: Set JSON Variables + set_fact: + registries: "{{ registries_json | from_json }}" + clusters: "{{ clusters_json | from_json }}" + log_servers: "{{ log_servers_json | from_json }}" + +- name: Configure Prometheus Service Discovery for Registries + become: yes + become_user: jingrow + template: + src: ../../prometheus/templates/registry.yml + dest: /home/jingrow/prometheus/file_sd/registry.{{ item.name }}.yml + force: true + mode: 0600 + loop: "{{ registries }}" + +- name: Configure Prometheus Service Discovery for Log Servers + become: yes + become_user: jingrow + template: + src: ../../prometheus/templates/log.yml + dest: /home/jingrow/prometheus/file_sd/log.{{ item.name }}.yml + force: true + mode: 0600 + loop: "{{ log_servers }}" + +- name: Configure Prometheus + become: yes + become_user: jingrow + template: + src: ../../prometheus/templates/prometheus.yml + dest: /home/jingrow/prometheus/prometheus.yml + force: true + mode: 0600 + validate: "/home/jingrow/prometheus/promtool check config %s" + +- name: Reload Prometheus Service + systemd: + name: prometheus + state: reloaded diff --git a/jcloud/playbooks/roles/redis_exporter/tasks/main.yml b/jcloud/playbooks/roles/redis_exporter/tasks/main.yml new file mode 100644 index 0000000..dbdb081 --- /dev/null +++ b/jcloud/playbooks/roles/redis_exporter/tasks/main.yml @@ -0,0 +1,40 @@ +--- +- name: Create Redis Exporter Directory + file: + path: /opt/redis_exporter + state: directory + mode: 0755 + +- name: Set 
Architecture + set_fact: + arch: "{{'amd64' if (ansible_architecture == 'x86_64') else 'arm64'}}" + +- name: Download Redis Exporter Archive + unarchive: + src: "https://github.com/oliver006/redis_exporter/releases/download/v1.67.0/redis_exporter-v1.67.0.linux-{{ arch }}.tar.gz" + dest: /tmp + remote_src: yes + +- name: Copy Redis Exporter Binary + copy: + src: "/tmp/redis_exporter-v1.67.0.linux-{{ arch }}/redis_exporter" + dest: /opt/redis_exporter/redis_exporter + remote_src: yes + mode: 0755 + owner: root + group: root + +- name: Create Redis Exporter Systemd Service File + template: + src: redis_exporter.service + dest: /etc/systemd/system/redis_exporter.service + owner: root + group: root + mode: 0644 + +- name: Start Redis Exporter Service + systemd: + daemon_reload: true + name: redis_exporter + enabled: yes + state: started diff --git a/jcloud/playbooks/roles/redis_exporter/templates/redis_exporter.service b/jcloud/playbooks/roles/redis_exporter/templates/redis_exporter.service new file mode 100644 index 0000000..81e080e --- /dev/null +++ b/jcloud/playbooks/roles/redis_exporter/templates/redis_exporter.service @@ -0,0 +1,14 @@ +[Unit] +Description=Prometheus Redis Exporter +After=network-online.target + +[Service] +Type=simple +ExecStart=/opt/redis_exporter/redis_exporter --web.listen-address 127.0.0.1:9121 --redis.addr redis://127.0.0.1:13000 +SyslogIdentifier=redis_exporter +Restart=always +RestartSec=1 +StartLimitInterval=0 + +[Install] +WantedBy=multi-user.target diff --git a/jcloud/playbooks/roles/registry/files/registry-ui.sh b/jcloud/playbooks/roles/registry/files/registry-ui.sh new file mode 100644 index 0000000..e8fa8ae --- /dev/null +++ b/jcloud/playbooks/roles/registry/files/registry-ui.sh @@ -0,0 +1,8 @@ +docker run -d --name registry-ui \ + --restart always \ + --net container:registry \ + -e REGISTRY_TITLE="Jingrow Registry" \ + -e NGINX_PROXY_PASS_URL=http://127.0.0.1:5000 \ + -e DELETE_IMAGES=true \ + -e SINGLE_REGISTRY=true \ + joxit/docker-registry-ui:latest diff --git a/jcloud/playbooks/roles/registry/files/registry.sh b/jcloud/playbooks/roles/registry/files/registry.sh new file mode 100644 index 0000000..50cf9da --- /dev/null +++ b/jcloud/playbooks/roles/registry/files/registry.sh @@ -0,0 +1,11 @@ +docker run -d --name registry \ + --restart always \ + -e REGISTRY_LOG_ACCESSLOG_DISABLED=false \ + -e REGISTRY_HTTP_DEBUG_ADDR=":5001" \ + -e REGISTRY_HTTP_DEBUG_PROMETHEUS_ENABLED=true \ + -e REGISTRY_STORAGE_DELETE_ENABLED=true \ + -p 127.0.0.1:5000:5000 \ + -p 127.0.0.1:5001:5001 \ + -p 127.0.0.1:6000:80 \ + -v /home/jingrow/registry/data:/var/lib/registry \ + registry:2 diff --git a/jcloud/playbooks/roles/registry/tasks/main.yml b/jcloud/playbooks/roles/registry/tasks/main.yml new file mode 100644 index 0000000..84223b9 --- /dev/null +++ b/jcloud/playbooks/roles/registry/tasks/main.yml @@ -0,0 +1,42 @@ +--- +- name: Create Registry Directory + become: yes + become_user: jingrow + file: + dest: /home/jingrow/registry + state: directory + +- name: Create Registry Data Directory + become: yes + become_user: jingrow + file: + dest: /home/jingrow/registry/data + state: directory + +- name: Stop and Remove old Docker Registry UI Container + shell: docker stop registry-ui; docker rm registry-ui + ignore_errors: true + +- name: Stop and Remove old Docker Registry Container + shell: docker stop registry; docker rm registry + ignore_errors: true + +- name: Start Docker Registry + script: files/registry.sh + +- name: Start Docker Registry UI + script: files/registry-ui.sh + 
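The registry tasks above publish the registry on 127.0.0.1:5000 (debug endpoint on 5001) and the UI on 127.0.0.1:6000, and the tasks that follow protect them with htpasswd basic auth via the agent's NGINX setup. As a rough smoke test of that wiring, here is a minimal Python sketch using only the standard library; the URL and credentials are placeholders rather than values defined by this playbook, and basic auth is only enforced when you hit the NGINX-proxied endpoint, not the raw local port.

# smoke_test_registry.py -- illustrative only; endpoint and credentials are placeholders.
import base64
import urllib.error
import urllib.request

REGISTRY_URL = "http://127.0.0.1:5000/v2/"  # Docker Registry HTTP API v2 version check
USERNAME = "example-user"                   # placeholder; real values come from registry_username
PASSWORD = "example-password"               # placeholder; real values come from registry_password

def check_registry(url: str, username: str, password: str) -> int:
    """Return the HTTP status of GET /v2/ using basic auth (200 means the registry answered)."""
    token = base64.b64encode(f"{username}:{password}".encode()).decode()
    request = urllib.request.Request(url, headers={"Authorization": f"Basic {token}"})
    try:
        with urllib.request.urlopen(request, timeout=5) as response:
            return response.status
    except urllib.error.HTTPError as error:
        return error.code  # e.g. 401 if the proxy rejects the credentials

if __name__ == "__main__":
    print(check_registry(REGISTRY_URL, USERNAME, PASSWORD))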
+- name: Setup Registry Authentication + become: yes + become_user: jingrow + command: 'htpasswd -Bbc registry.htpasswd {{ registry_username }} {{ registry_password }}' + args: + chdir: /home/jingrow/registry + +- name: Setup NGINX for Registry + become: yes + become_user: jingrow + command: /home/jingrow/agent/env/bin/agent setup registry + args: + chdir: /home/jingrow/agent diff --git a/jcloud/playbooks/roles/reload_wireguard/tasks/main.yml b/jcloud/playbooks/roles/reload_wireguard/tasks/main.yml new file mode 100644 index 0000000..fea5d6c --- /dev/null +++ b/jcloud/playbooks/roles/reload_wireguard/tasks/main.yml @@ -0,0 +1,13 @@ +--- +- name: Regenerate Wireguard Config + template: + src: wg.conf + dest: /etc/wireguard/wg0.conf + owner: root + group: root + mode: 0600 + +- name: Reload Wireguard with systemd + systemd: + name: wg-quick@wg0 + state: reloaded \ No newline at end of file diff --git a/jcloud/playbooks/roles/reload_wireguard/templates/wg.conf b/jcloud/playbooks/roles/reload_wireguard/templates/wg.conf new file mode 100644 index 0000000..3bf3ffa --- /dev/null +++ b/jcloud/playbooks/roles/reload_wireguard/templates/wg.conf @@ -0,0 +1,26 @@ +[Interface] +Address = {{ wireguard_network }} +ListenPort = {{ wireguard_port }} +PrivateKey = {{ wireguard_private_key }} + +PreUp = sysctl -w net.ipv4.ip_forward=1 + +PostUp = iptables -A FORWARD -i %i -j ACCEPT; iptables -A FORWARD -o %i -j ACCEPT; iptables -t nat -A POSTROUTING -o {{ interface_id }} -j MASQUERADE +PostUp = iptables -A FORWARD -p tcp --tcp-flags SYN,RST SYN -j TCPMSS --clamp-mss-to-pmtu + +PostDown = iptables -D FORWARD -i %i -j ACCEPT; iptables -D FORWARD -o %i -j ACCEPT; iptables -t nat -D POSTROUTING -o {{ interface_id }} -j MASQUERADE +PostDown = sysctl -w net.ipv4.ip_forward=0 + +{% if peers %} +{% for peer in (peers | from_json) %} +[Peer] +# {{ peer.name }} +{% if peer.peer_ip %} +Endpoint = {{peer.peer_ip}}:{{wireguard_port}} +{% endif %} +PublicKey = {{ peer.public_key }} +AllowedIPs = {{ peer.allowed_ips}} +PersistentKeepalive = 25 + +{% endfor %} +{% endif %} \ No newline at end of file diff --git a/jcloud/playbooks/roles/restart_mysql/tasks/main.yml b/jcloud/playbooks/roles/restart_mysql/tasks/main.yml new file mode 100644 index 0000000..e838041 --- /dev/null +++ b/jcloud/playbooks/roles/restart_mysql/tasks/main.yml @@ -0,0 +1,5 @@ +--- +- name: Restart MariaDB Service + systemd: + name: mysql + state: restarted diff --git a/jcloud/playbooks/roles/rq_exporter/tasks/main.yml b/jcloud/playbooks/roles/rq_exporter/tasks/main.yml new file mode 100644 index 0000000..195e80d --- /dev/null +++ b/jcloud/playbooks/roles/rq_exporter/tasks/main.yml @@ -0,0 +1,23 @@ +--- +- name: Install RQ Exporter Python Package + become: yes + become_user: jingrow + pip: + name: rq-exporter + virtualenv: /home/jingrow/jingrow-bench/env + virtualenv_python: /home/jingrow/jingrow-bench/env/bin/python + +- name: Create RQ Exporter Systemd Service File + template: + src: rq-exporter.service + dest: /etc/systemd/system/rq-exporter.service + owner: root + group: root + mode: 0644 + +- name: Start RQ Exporter Service + systemd: + daemon_reload: true + name: rq-exporter + enabled: yes + state: started diff --git a/jcloud/playbooks/roles/rq_exporter/templates/rq-exporter.service b/jcloud/playbooks/roles/rq_exporter/templates/rq-exporter.service new file mode 100644 index 0000000..9ba49a8 --- /dev/null +++ b/jcloud/playbooks/roles/rq_exporter/templates/rq-exporter.service @@ -0,0 +1,14 @@ +[Unit] +Description=Prometheus RQ Exporter 
+After=network-online.target + +[Service] +Type=simple +ExecStart=/home/jingrow/jingrow-bench/env/bin/rq-exporter --host 127.0.0.1 --port 9726 --redis-host 127.0.0.1 --redis-port 11000 +SyslogIdentifier=rq-exporter +Restart=always +RestartSec=1 +StartLimitInterval=0 + +[Install] +WantedBy=multi-user.target diff --git a/jcloud/playbooks/roles/scaleway/tasks/main.yml b/jcloud/playbooks/roles/scaleway/tasks/main.yml new file mode 100644 index 0000000..930be3b --- /dev/null +++ b/jcloud/playbooks/roles/scaleway/tasks/main.yml @@ -0,0 +1,45 @@ +--- +- name: Copy Authorized Keys from Ubuntu to root User + copy: + src: /home/ubuntu/.ssh/authorized_keys + dest: /root/.ssh/authorized_keys + mode: 0600 + remote_src: yes + +- name: Create Netplan Configuration File + template: + src: netplan.yaml + dest: /etc/netplan/99-jcloud.yaml + +- name: Generate Netplan Configuration + shell: netplan generate + +- name: Apply Netplan Configuration + shell: netplan apply + +- name: Disable Password Authentication + lineinfile: + dest: /etc/ssh/sshd_config + regexp: "^PasswordAuthentication" + line: "PasswordAuthentication no" + state: present + +- name: Reload SSHD + service: + name: sshd + state: reloaded + +- name: Remove BIND + apt: + pkg: + - bind9 + state: absent + autoremove: yes + +- name: Remove Ubuntu User + user: + name: ubuntu + state: absent + remove: yes + force: yes + ignore_errors: yes diff --git a/jcloud/playbooks/roles/scaleway/templates/netplan.yaml b/jcloud/playbooks/roles/scaleway/templates/netplan.yaml new file mode 100644 index 0000000..d172513 --- /dev/null +++ b/jcloud/playbooks/roles/scaleway/templates/netplan.yaml @@ -0,0 +1,12 @@ +network: + version: 2 + ethernets: + private: + match: + macaddress: "{{ private_mac_address }}" + vlans: + private.{{ private_vlan_id }}: + id: "{{ private_vlan_id }}" + link: private + addresses: + - "{{ private_ip }}/16" diff --git a/jcloud/playbooks/roles/scaleway_dedibox/tasks/main.yml b/jcloud/playbooks/roles/scaleway_dedibox/tasks/main.yml new file mode 100644 index 0000000..8190c94 --- /dev/null +++ b/jcloud/playbooks/roles/scaleway_dedibox/tasks/main.yml @@ -0,0 +1,37 @@ +--- +- name: Copy Authorized Keys from jingrow to root User + copy: + src: /home/jingrow/.ssh/authorized_keys + dest: /root/.ssh/authorized_keys + mode: 0600 + remote_src: yes + +- name: Create Netplan Configuration File + template: + src: netplan.yaml + dest: /etc/netplan/99-jcloud.yaml + +- name: Generate Netplan Configuration + shell: netplan generate + +- name: Apply Netplan Configuration + shell: netplan apply + +- name: Disable Password Authentication + lineinfile: + dest: /etc/ssh/sshd_config + regexp: "^PasswordAuthentication" + line: "PasswordAuthentication no" + state: present + +- name: Reload SSHD + service: + name: sshd + state: reloaded + +- name: Remove BIND + apt: + pkg: + - bind9 + state: absent + autoremove: yes diff --git a/jcloud/playbooks/roles/scaleway_dedibox/templates/netplan.yaml b/jcloud/playbooks/roles/scaleway_dedibox/templates/netplan.yaml new file mode 100644 index 0000000..d172513 --- /dev/null +++ b/jcloud/playbooks/roles/scaleway_dedibox/templates/netplan.yaml @@ -0,0 +1,12 @@ +network: + version: 2 + ethernets: + private: + match: + macaddress: "{{ private_mac_address }}" + vlans: + private.{{ private_vlan_id }}: + id: "{{ private_vlan_id }}" + link: private + addresses: + - "{{ private_ip }}/16" diff --git a/jcloud/playbooks/roles/secondary/tasks/main.yml b/jcloud/playbooks/roles/secondary/tasks/main.yml new file mode 100644 index 0000000..316b5b7 
--- /dev/null +++ b/jcloud/playbooks/roles/secondary/tasks/main.yml @@ -0,0 +1,124 @@ +--- +- name: Install MariaBackup + apt: + pkg: mariadb-backup + +- name: Remove Primary from Known Hosts + known_hosts: + name: "{{ primary_private_ip }}" + state: absent + +- name: Add Primary to Known Hosts + shell: ssh-keyscan {{ primary_private_ip }} >> /root/.ssh/known_hosts + +- name: Create Directory for RSync + file: + path: /tmp/replica + state: directory + +- name: RSync Backup Directory From Primary + command: rsync -avpPR -e ssh\ -p22 root@{{ primary_private_ip }}:/tmp/replica / + async: 7200 + poll: 5 + +- name: Stop MariaDB Service + service: + name: mysql + state: stopped + +- name: Delete MariaDB Data Directory + file: + path: /var/lib/mysql + state: absent + +- name: Count Number of IBData Files + shell: 'cd /tmp/replica && find . -name "*.ibd" | wc -l' + register: ibdata_file_count + +- name: Calculate Open Files Limit + set_fact: + open_files_limit: "{{ [ibdata_file_count.stdout|int + 10000, 65535] | max }}" + +- name: Move Backup Directory to MariaDB Data Directory + command: mariabackup --copy-back --target-dir /tmp/replica --open-files-limit {{ open_files_limit }} --parallel 4 + async: 3600 + poll: 5 + +- name: Change Ownership of MariaDB Data Directory to mysql User + file: + path: /var/lib/mysql + owner: mysql + group: mysql + recurse: yes + +- name: Change Permissions of MariaDB Data Directory + file: + path: /var/lib/mysql + mode: 0755 + +- name: Set Open Files Count Limit for MariaDB + lineinfile: + dest: /lib/systemd/system/mariadb.service + regexp: '^LimitNOFILE(\s*)=(\s*)\d+' + line: "LimitNOFILE = infinity" + insertafter: '\[Service\]' + state: present + +- name: Force Systemd to Reread Configuration + systemd: + daemon_reload: yes + +- name: Start MariaDB Service + service: + name: mysql + state: started + +- name: Add .my.cnf MariaDB Configuration File + template: + src: my.cnf + dest: /root/.my.cnf + owner: root + group: root + mode: 0600 + +- name: Test Access to Primary + mysql_query: + login_user: root + login_password: "{{ mariadb_root_password }}" + query: "SHOW FULL PROCESSLIST" + +- name: Read XtraBackup Binlog Info + shell: cat /tmp/replica/xtrabackup_binlog_info + register: primary_position + +- name: Set Primary Position + mysql_variables: + mode: global + login_user: root + login_password: "{{ mariadb_root_password }}" + variable: gtid_slave_pos + value: "{{ primary_position.stdout.split()[2] }}" + +- name: Set Primary Details + mysql_replication: + mode: changemaster + login_user: root + login_password: "{{ mariadb_root_password }}" + master_host: "{{ primary_private_ip }}" + master_user: root + master_password: "{{ mariadb_root_password }}" + master_use_gtid: slave_pos + +- name: Start MariaDB Secondary Thread + mysql_replication: + mode: startslave + login_user: root + login_password: "{{ mariadb_root_password }}" + +- name: Check Secondary Status + mysql_replication: + mode: getslave + login_user: root + login_password: "{{ mariadb_root_password }}" + register: secondary_status + failed_when: 'secondary_status["Slave_IO_Running"] == "No" or secondary_status["Slave_SQL_Running"] == "No"' diff --git a/jcloud/playbooks/roles/secondary/templates/my.cnf b/jcloud/playbooks/roles/secondary/templates/my.cnf new file mode 100644 index 0000000..7ed7c99 --- /dev/null +++ b/jcloud/playbooks/roles/secondary/templates/my.cnf @@ -0,0 +1,5 @@ +[client] +host={{ private_ip }} +port=3306 +user=root +password={{ mariadb_root_password }} diff --git 
a/jcloud/playbooks/roles/secondary_app/tasks/main.yml b/jcloud/playbooks/roles/secondary_app/tasks/main.yml new file mode 100644 index 0000000..845172a --- /dev/null +++ b/jcloud/playbooks/roles/secondary_app/tasks/main.yml @@ -0,0 +1,8 @@ +--- +- name: Add Primary Server Public Key to Authorized Keys + become: yes + become_user: jingrow + authorized_key: + user: jingrow + key: "{{ primary_public_key }}" + state: present diff --git a/jcloud/playbooks/roles/secondary_proxy/tasks/main.yml b/jcloud/playbooks/roles/secondary_proxy/tasks/main.yml new file mode 100644 index 0000000..8605e37 --- /dev/null +++ b/jcloud/playbooks/roles/secondary_proxy/tasks/main.yml @@ -0,0 +1,17 @@ +--- +- name: Add Primary Proxy Server Public Key to Authorized Keys + authorized_key: + user: jingrow + key: '{{ primary_public_key }}' + state: present + +- name: Add cron to reload nginx every 5 mins + cron: + name: reload_nginx + minute: '*/5' + job: 'sudo systemctl reload nginx' + +- name: Ensure agent is up and running # Might be stopped during previous failover + supervisorctl: + name: 'agent:' + state: started diff --git a/jcloud/playbooks/roles/security_update/tasks/main.yml b/jcloud/playbooks/roles/security_update/tasks/main.yml new file mode 100644 index 0000000..b93eb5d --- /dev/null +++ b/jcloud/playbooks/roles/security_update/tasks/main.yml @@ -0,0 +1,13 @@ +--- +- name: Update APT Cache + apt: + update_cache: yes + +- name: Fetch packages due for security updates + shell: apt list --upgradable 2>/dev/null + register: security_updates + +# - name: Ensure no security updates are pending +# debug: var=security_updates +# failed_when: "'-security' in security_updates.stdout" +# when: validate_pending_security_updates diff --git a/jcloud/playbooks/roles/sentry/tasks/main.yml b/jcloud/playbooks/roles/sentry/tasks/main.yml new file mode 100644 index 0000000..74edee4 --- /dev/null +++ b/jcloud/playbooks/roles/sentry/tasks/main.yml @@ -0,0 +1,90 @@ +--- +- name: Update APT Cache + apt: + update_cache: yes + +- name: Install Docker Compose + apt: + name: docker-compose-plugin + state: latest + +- name: Download Sentry Self Hosting Archive + become: yes + become_user: jingrow + unarchive: + src: https://github.com/getsentry/self-hosted/archive/refs/tags/24.6.0.tar.gz + dest: /tmp + remote_src: yes + +- name: Create Sentry Directory + become: yes + become_user: jingrow + copy: + src: /tmp/self-hosted-24.6.0/ + dest: /home/jingrow/sentry + remote_src: yes + +- name: Create Sentry Environment File + become: yes + become_user: jingrow + template: + src: sentry.env + dest: /home/jingrow/sentry/.env.custom + +- name: Create Sentry Enhance Image File + become: yes + become_user: jingrow + template: + src: enhance-image.sh + dest: /home/jingrow/sentry/sentry/enhance-image.sh + mode: 0777 + +- name: Create Sentry YAML Config File + become: yes + become_user: jingrow + template: + src: sentry.yml.jinja2 + dest: /home/jingrow/sentry/sentry/config.yml + +- name: Fix Kafka/Zookeeper volume permissions + become: yes + become_user: jingrow + replace: + path: /home/jingrow/sentry/docker-compose.yml + regexp: '^((\s+)image: "confluentinc/cp-(zookeeper|kafka):.*")$' + replace: '\1\n\2user: "0:0"' # \1 matches the whole line. \2 matches the indentation. 
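The "Fix Kafka/Zookeeper volume permissions" task above relies on ansible.builtin.replace, which applies a Python regular expression across the file with per-line anchoring. The preview below reproduces that substitution with plain re.sub so its effect on a docker-compose.yml fragment is easy to see; the sample services and image tags are assumptions for illustration only.

# preview_compose_fix.py -- demonstrates the regexp used by the task above on a sample fragment.
import re

sample = '''services:
  zookeeper:
    image: "confluentinc/cp-zookeeper:6.2.0"
  kafka:
    image: "confluentinc/cp-kafka:6.2.0"
'''

pattern = r'^((\s+)image: "confluentinc/cp-(zookeeper|kafka):.*")$'
# \g<1> keeps the matched image line, \g<2> reuses its indentation for the inserted line
# (equivalent to the \1 and \2 backreferences in the Ansible task).
replacement = r'\g<1>\n\g<2>user: "0:0"'

# re.MULTILINE makes ^ and $ anchor on each line, mirroring the replace module's behaviour.
print(re.sub(pattern, replacement, sample, flags=re.MULTILINE))

Running this prints the same fragment with a user: "0:0" line added directly under each matched image line, at the same indentation.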
+ +- name: Install Sentry + become: yes + become_user: jingrow + shell: + cmd: echo "no" | bash install.sh --no-report-self-hosted-issues --skip-user-prompt + chdir: /home/jingrow/sentry + +- name: Create Sentry Python Config File + become: yes + become_user: jingrow + lineinfile: + path: /home/jingrow/sentry/sentry/sentry.conf.py + line: "{{ lookup('template', 'sentry.conf.py') }}" + +- name: Create Sentry User + become: yes + become_user: jingrow + command: docker-compose run --rm web createuser --no-input --superuser --email {{ sentry_admin_email }} --password {{ sentry_admin_password }} --force-update + args: + chdir: /home/jingrow/sentry + +- name: Start Sentry + become: yes + become_user: jingrow + command: docker-compose --env-file /home/jingrow/sentry/.env.custom up -d + args: + chdir: /home/jingrow/sentry + +- name: Setup NGINX Proxy for Sentry + become: yes + become_user: jingrow + command: /home/jingrow/agent/env/bin/agent setup trace + args: + chdir: /home/jingrow/agent diff --git a/jcloud/playbooks/roles/sentry/templates/enhance-image.sh b/jcloud/playbooks/roles/sentry/templates/enhance-image.sh new file mode 100644 index 0000000..641ef94 --- /dev/null +++ b/jcloud/playbooks/roles/sentry/templates/enhance-image.sh @@ -0,0 +1,2 @@ +#!/bin/bash +pip install sentry-auth-oidc \ No newline at end of file diff --git a/jcloud/playbooks/roles/sentry/templates/sentry.conf.py b/jcloud/playbooks/roles/sentry/templates/sentry.conf.py new file mode 100644 index 0000000..a747386 --- /dev/null +++ b/jcloud/playbooks/roles/sentry/templates/sentry.conf.py @@ -0,0 +1,15 @@ +OIDC_CLIENT_ID = "{{ sentry_oauth_client_id }}" +OIDC_CLIENT_SECRET = "{{ sentry_oauth_client_secret }}" + +OIDC_ISSUER = "Jingrow" +OIDC_SCOPE = "openid email" + +OIDC_AUTHORIZATION_ENDPOINT = ( + "{{ sentry_oauth_server_url }}/api/method/jingrow.integrations.oauth2.authorize" +) +OIDC_TOKEN_ENDPOINT = ( + "{{ sentry_oauth_server_url }}/api/method/jingrow.integrations.oauth2.get_token" +) +OIDC_USERINFO_ENDPOINT = ( + "{{ sentry_oauth_server_url }}/api/method/jingrow.integrations.oauth2.openid_profile" +) diff --git a/jcloud/playbooks/roles/sentry/templates/sentry.env b/jcloud/playbooks/roles/sentry/templates/sentry.env new file mode 100644 index 0000000..9fa8bb1 --- /dev/null +++ b/jcloud/playbooks/roles/sentry/templates/sentry.env @@ -0,0 +1,13 @@ +COMPOSE_PROJECT_NAME=sentry-self-hosted +SENTRY_EVENT_RETENTION_DAYS=90 +SENTRY_BIND=127.0.0.1:9000 +SENTRY_IMAGE=getsentry/sentry:24.6.0 +SNUBA_IMAGE=getsentry/snuba:24.6.0 +RELAY_IMAGE=getsentry/relay:24.6.0 +SYMBOLICATOR_IMAGE=getsentry/symbolicator:24.6.0 +VROOM_IMAGE=getsentry/vroom:24.6.0 +WAL2JSON_VERSION=latest +HEALTHCHECK_INTERVAL=30s +HEALTHCHECK_TIMEOUT=1m30s +HEALTHCHECK_RETRIES=10 +POSTGRES_MAX_CONNECTIONS=100 \ No newline at end of file diff --git a/jcloud/playbooks/roles/sentry/templates/sentry.yml.jinja2 b/jcloud/playbooks/roles/sentry/templates/sentry.yml.jinja2 new file mode 100644 index 0000000..3047876 --- /dev/null +++ b/jcloud/playbooks/roles/sentry/templates/sentry.yml.jinja2 @@ -0,0 +1,39 @@ +############### +# Mail Server # +############### + +mail.host: '{{ sentry_mail_server }}' +mail.port: {{ sentry_mail_port }} +mail.username: '{{ sentry_mail_login }}' +mail.password: '{{ sentry_mail_password }}' +mail.use-tls: true + +mail.from: 'notifications@jingrow.com' +mail.list-namespace: 'jingrow.com' + +################### +# System Settings # +################### + +# If this file ever becomes compromised, it's important to generate a new key. 
+# Changing this value will result in all current sessions being invalidated. +# A new key can be generated with `$ sentry config generate-secret-key` +system.secret-key: '!!changeme!!' +system.admin-email: '{{ sentry_admin_email }}' + +################ +# File storage # +################ + +filestore.backend: 'filesystem' +filestore.options: + location: '/data/files' +dsym.cache-path: '/data/dsym-cache' +releasefile.cache-path: '/data/releasefile-cache' + +system.internal-url-prefix: 'https://{{ server }}' +symbolicator.enabled: true +symbolicator.options: + url: 'http://symbolicator:3021' + +transaction-events.force-disable-internal-project: true diff --git a/jcloud/playbooks/roles/sentry_upgrade/tasks/main.yml b/jcloud/playbooks/roles/sentry_upgrade/tasks/main.yml new file mode 100644 index 0000000..a448514 --- /dev/null +++ b/jcloud/playbooks/roles/sentry_upgrade/tasks/main.yml @@ -0,0 +1,90 @@ +--- +- name: Update APT Cache + apt: + update_cache: yes + +- name: Install Docker Compose + apt: + name: docker-compose-plugin + state: latest + +- name: Download Sentry Self Hosting Archive + become: yes + become_user: jingrow + unarchive: + src: https://github.com/getsentry/self-hosted/archive/refs/tags/24.6.0.tar.gz + dest: /tmp + remote_src: yes + +- name: Stop Sentry + become: yes + become_user: jingrow + command: docker-compose down --remove-orphans + args: + chdir: /home/jingrow/sentry + +- name: Remove Sentry Directory + become: yes + become_user: jingrow + file: + path: /home/jingrow/sentry + state: absent + +- name: Create Sentry Directory + become: yes + become_user: jingrow + copy: + src: /tmp/self-hosted-24.6.0/ + dest: /home/jingrow/sentry + remote_src: yes + +- name: Create Sentry Environment File + become: yes + become_user: jingrow + template: + src: ../../sentry/templates/sentry.env + dest: /home/jingrow/sentry/.env.custom + +- name: Create Sentry Enhance Image File + become: yes + become_user: jingrow + template: + src: ../../sentry/templates/enhance-image.sh + dest: /home/jingrow/sentry/sentry/enhance-image.sh + mode: 0777 + +- name: Create Sentry YAML Config File + become: yes + become_user: jingrow + template: + src: ../../sentry/templates/sentry.yml.jinja2 + dest: /home/jingrow/sentry/sentry/config.yml + +- name: Fix Kafka/Zookeeper volume permissions + become: yes + become_user: jingrow + replace: + path: /home/jingrow/sentry/docker-compose.yml + regexp: '^((\s+)image: "confluentinc/cp-(zookeeper|kafka):.*")$' + replace: '\1\n\2user: "0:0"' # \1 matches the whole line. \2 matches the indentation. 
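The config.yml template above ships system.secret-key as '!!changeme!!', and its comment notes that a real key should be generated with `sentry config generate-secret-key`. If you prefer to template the value in from an Ansible variable instead, a comparable high-entropy string can be produced with Python's secrets module; this is only a sketch and not the method used by these playbooks.

# generate_secret.py -- illustrative only; the supported method is `sentry config generate-secret-key`.
import secrets

def generate_secret_key(n_bytes: int = 48) -> str:
    """Return a URL-safe random string carrying n_bytes of entropy."""
    return secrets.token_urlsafe(n_bytes)

if __name__ == "__main__":
    print(generate_secret_key())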
+ +- name: Install Sentry + become: yes + become_user: jingrow + shell: + cmd: echo "no" | bash install.sh --no-report-self-hosted-issues + chdir: /home/jingrow/sentry + +- name: Create Sentry Python Config File + become: yes + become_user: jingrow + lineinfile: + path: /home/jingrow/sentry/sentry/sentry.conf.py + line: "{{ lookup('template', '../../sentry/templates/sentry.conf.py') }}" + +- name: Start Sentry + become: yes + become_user: jingrow + command: docker-compose --env-file /home/jingrow/sentry/.env.custom up -d + args: + chdir: /home/jingrow/sentry diff --git a/jcloud/playbooks/roles/ssh_proxy/tasks/main.yml b/jcloud/playbooks/roles/ssh_proxy/tasks/main.yml new file mode 100644 index 0000000..0059a2b --- /dev/null +++ b/jcloud/playbooks/roles/ssh_proxy/tasks/main.yml @@ -0,0 +1,27 @@ +--- +- name: Create SSH Proxy Directory + become: yes + become_user: jingrow + file: + dest: /home/jingrow/ssh + state: directory + +- name: Login to Docker Registry + become: yes + become_user: jingrow + command: "docker login -u {{ registry_username }} -p {{ registry_password }} {{ registry_url }}" + +- name: Copy home directory from SSH Image + become: yes + become_user: jingrow + command: "docker run --rm --net none -v /home/jingrow/ssh/home:/homemount {{ docker_image }} cp -RL /home/. /homemount" + +- name: Copy etc directory from SSH Image + become: yes + become_user: jingrow + command: "docker run --rm --net none -v /home/jingrow/ssh/etc:/etcmount {{ docker_image }} cp -RL /etc/. /etcmount" + +- name: Start SSH Proxy + become: yes + become_user: jingrow + command: "docker run -d --name ssh --restart always -p 2222:22 -v /home/jingrow/ssh/etc:/etc -v /home/jingrow/ssh/home:/home --hostname ssh {{ docker_image }}" diff --git a/jcloud/playbooks/roles/sshd_hardening/defaults/main.yml b/jcloud/playbooks/roles/sshd_hardening/defaults/main.yml new file mode 100644 index 0000000..0014c78 --- /dev/null +++ b/jcloud/playbooks/roles/sshd_hardening/defaults/main.yml @@ -0,0 +1,5 @@ +--- +ssh_maxsessions: 10 +sshd: + clientalivecountmax: 3 + clientaliveinterval: 300 diff --git a/jcloud/playbooks/roles/sshd_hardening/tasks/main.yml b/jcloud/playbooks/roles/sshd_hardening/tasks/main.yml new file mode 100644 index 0000000..fcc0c5a --- /dev/null +++ b/jcloud/playbooks/roles/sshd_hardening/tasks/main.yml @@ -0,0 +1,71 @@ +--- + +- name: "Ensure SSH X11 forwarding is disabled" + lineinfile: + state: present + dest: /etc/ssh/sshd_config + regexp: "^#X11Forwarding|^X11Forwarding" + line: 'X11Forwarding no' + +- name: " Ensure SSH MaxAuthTries is set to 4 or less" + lineinfile: + state: present + dest: /etc/ssh/sshd_config + regexp: '^(#)?MaxAuthTries \d' + line: 'MaxAuthTries 6' + +- name: "Ensure SSH PermitEmptyPasswords is disabled" + lineinfile: + state: present + dest: /etc/ssh/sshd_config + regexp: "^#PermitEmptyPasswords|^PermitEmptyPasswords" + line: 'PermitEmptyPasswords no' + +- name: "Ensure SSH PermitUserEnvironment is disabled" + lineinfile: + state: present + dest: /etc/ssh/sshd_config + regexp: "^#PermitUserEnvironment|^PermitUserEnvironment" + line: 'PermitUserEnvironment no' + +- name: "Ensure SSH Idle Timeout Interval is configured" + block: + - name: "Ensure SSH Idle Timeout Interval is configured | Add line in sshd_config for ClientAliveInterval" + lineinfile: + state: present + dest: /etc/ssh/sshd_config + regexp: '^ClientAliveInterval' + line: "ClientAliveInterval {{ sshd['clientaliveinterval'] }}" + + - name: "Ensure SSH Idle Timeout Interval is configured | Ensure SSH ClientAliveCountMax 
set to <= 3" + lineinfile: + state: present + dest: /etc/ssh/sshd_config + regexp: '^ClientAliveCountMax' + line: "ClientAliveCountMax {{ sshd['clientalivecountmax'] }}" + +- name: "Ensure SSH warning banner is configured" + lineinfile: + state: present + dest: /etc/ssh/sshd_config + regexp: '^Banner' + line: "Banner /etc/login.warn" + +- name: "Ensure SSH MaxStartups is configured" + lineinfile: + state: present + dest: /etc/ssh/sshd_config + regexp: "^#MaxStartups|^MaxStartups" + line: 'MaxStartups 10:30:60' + +- name: "Ensure SSH MaxSessions is limited" + lineinfile: + state: present + dest: /etc/ssh/sshd_config + regexp: "^#MaxSessions|^MaxSessions" + line: 'MaxSessions {{ ssh_maxsessions }}' + +- name: Restart SSHD service + service: + name: sshd + state: reloaded diff --git a/jcloud/playbooks/roles/ssl_nginx/tasks/main.yml b/jcloud/playbooks/roles/ssl_nginx/tasks/main.yml new file mode 100644 index 0000000..774b4b8 --- /dev/null +++ b/jcloud/playbooks/roles/ssl_nginx/tasks/main.yml @@ -0,0 +1,24 @@ +--- +- name: Update APT Cache + apt: + update_cache: yes + +- name: Install NGINX + apt: + pkg: + - nginx + - nginx-extras + - apache2-utils + - libpcre3-dev + state: present + +- name: Add SSL Proxy for TLS + template: + src: ssl.conf + dest: /etc/nginx/conf.d/ssl.conf + mode: 0644 + +- name: Restart NGINX and Enable at Boot + service: + name: nginx + state: restarted diff --git a/jcloud/playbooks/roles/ssl_nginx/templates/ssl.conf b/jcloud/playbooks/roles/ssl_nginx/templates/ssl.conf new file mode 100644 index 0000000..fe3d942 --- /dev/null +++ b/jcloud/playbooks/roles/ssl_nginx/templates/ssl.conf @@ -0,0 +1,11 @@ +server { + listen 80; + server_name {{ domain }}; + + location ^~ /.well-known/acme-challenge/ { + return 301 http://ssl.{{jcloud_domain}}$request_uri; + } + location / { + return 301 https://$host$request_uri; + } +} \ No newline at end of file diff --git a/jcloud/playbooks/roles/standalone/tasks/main.yml b/jcloud/playbooks/roles/standalone/tasks/main.yml new file mode 100644 index 0000000..84415e6 --- /dev/null +++ b/jcloud/playbooks/roles/standalone/tasks/main.yml @@ -0,0 +1,21 @@ +--- +- name: Create Agent NGINX Hosts Directory + become: yes + become_user: jingrow + file: + path: /home/jingrow/agent/nginx/hosts + state: directory + +- name: Create Agent NGINX Upstream Directory + become: yes + become_user: jingrow + file: + path: /home/jingrow/agent/nginx/upstreams + state: directory + +- name: Setup Agent Standalone + become: yes + become_user: jingrow + command: '/home/jingrow/agent/env/bin/agent setup standalone --domain {{ domain }}' + args: + chdir: /home/jingrow/agent diff --git a/jcloud/playbooks/roles/statsd_exporter/tasks/main.yml b/jcloud/playbooks/roles/statsd_exporter/tasks/main.yml new file mode 100644 index 0000000..70cb0de --- /dev/null +++ b/jcloud/playbooks/roles/statsd_exporter/tasks/main.yml @@ -0,0 +1,48 @@ +--- +- name: Create StatsD Exporter Directory + file: + path: /opt/statsd_exporter + state: directory + mode: 0755 + +- name: Set Architecture + set_fact: + arch: "{{'amd64' if (ansible_architecture == 'x86_64') else 'arm64'}}" + +- name: Download StatsD Exporter Archive + unarchive: + src: "https://github.com/prometheus/statsd_exporter/releases/download/v0.27.1/statsd_exporter-0.27.1.linux-{{ arch }}.tar.gz" + dest: /tmp + remote_src: yes + +- name: Copy StatsD Exporter Binary + copy: + src: "/tmp/statsd_exporter-0.27.1.linux-{{ arch }}/statsd_exporter" + dest: /opt/statsd_exporter/statsd_exporter + remote_src: yes + mode: 0755 + owner: root + 
group: root + +- name: Create StatsD Mapping Config File + template: + src: config.yml + dest: /opt/statsd_exporter/config.yml + owner: root + group: root + mode: 0644 + +- name: Create StatsD Exporter Systemd Service File + template: + src: statsd_exporter.service + dest: /etc/systemd/system/statsd_exporter.service + owner: root + group: root + mode: 0644 + +- name: Restart StatsD Exporter Service + systemd: + daemon_reload: true + name: statsd_exporter + enabled: yes + state: restarted diff --git a/jcloud/playbooks/roles/statsd_exporter/templates/config.yml b/jcloud/playbooks/roles/statsd_exporter/templates/config.yml new file mode 100644 index 0000000..6d6394e --- /dev/null +++ b/jcloud/playbooks/roles/statsd_exporter/templates/config.yml @@ -0,0 +1,32 @@ +mappings: + - match: "*.gunicorn.requests" + name: gunicorn_requests + help: Number of HTTP requests + labels: + bench: "$1" + + - match: "*.gunicorn.request.duration" + name: gunicorn_request_duration + help: HTTP request duration in milliseconds + labels: + bench: "$1" + + - match: "*.gunicorn.workers" + name: gunicorn_workers + help: Number of workers managed by the arbiter + labels: + bench: "$1" + + - match: "*.gunicorn.log.*" + name: gunicorn_log + help: Number of log messages + labels: + level: "$2" + bench: "$1" + + - match: "*.gunicorn.request.status.*" + name: gunicorn_request_status + help: HTTP response code + labels: + status: "$2" + bench: "$1" diff --git a/jcloud/playbooks/roles/statsd_exporter/templates/statsd_exporter.service b/jcloud/playbooks/roles/statsd_exporter/templates/statsd_exporter.service new file mode 100644 index 0000000..0ec95b0 --- /dev/null +++ b/jcloud/playbooks/roles/statsd_exporter/templates/statsd_exporter.service @@ -0,0 +1,14 @@ +[Unit] +Description=Prometheus StatsD Exporter +After=network-online.target + +[Service] +Type=simple +ExecStart=/opt/statsd_exporter/statsd_exporter --web.listen-address=127.0.0.1:9102 --statsd.listen-udp={{ private_ip }}:9125 --statsd.listen-tcp={{ private_ip }}:9125 --statsd.mapping-config /opt/statsd_exporter/config.yml +SyslogIdentifier=statsd_exporter +Restart=always +RestartSec=1 +StartLimitInterval=0 + +[Install] +WantedBy=multi-user.target diff --git a/jcloud/playbooks/roles/statsd_exporter_rename/tasks/main.yml b/jcloud/playbooks/roles/statsd_exporter_rename/tasks/main.yml new file mode 100644 index 0000000..5c7380e --- /dev/null +++ b/jcloud/playbooks/roles/statsd_exporter_rename/tasks/main.yml @@ -0,0 +1,15 @@ +--- +- name: Create StatsD Exporter Systemd Service File + template: + src: ../../statsd_exporter/templates/statsd_exporter.service + dest: /etc/systemd/system/statsd_exporter.service + owner: root + group: root + mode: 0644 + +- name: Restart StatsD Exporter Service + systemd: + daemon_reload: true + name: statsd_exporter + enabled: yes + state: restarted diff --git a/jcloud/playbooks/roles/stop_mariadb/tasks/main.yml b/jcloud/playbooks/roles/stop_mariadb/tasks/main.yml new file mode 100644 index 0000000..d152c9a --- /dev/null +++ b/jcloud/playbooks/roles/stop_mariadb/tasks/main.yml @@ -0,0 +1,5 @@ +--- +- name: Stop MariaDB Service + systemd: + name: mariadb + state: stopped diff --git a/jcloud/playbooks/roles/swap/tasks/main.yml b/jcloud/playbooks/roles/swap/tasks/main.yml new file mode 100644 index 0000000..68ac2dd --- /dev/null +++ b/jcloud/playbooks/roles/swap/tasks/main.yml @@ -0,0 +1,36 @@ +--- +- name: Set swap file name + set_fact: + swap_file: "{{ swap_file | default('swap') }}" + +- name: Confirm file doesn't exist + stat: + path: '/{{ 
swap_file }}' + register: stat_result + failed_when: 'stat_result.stat.exists' + +- name: Create Swap file + command: fallocate -l {{ swap_size }}G /{{ swap_file }} + +- name: Change Swap file permissions + file: + path: '/{{ swap_file }}' + owner: root + group: root + mode: 0600 + +- name: Make Swap + command: mkswap /{{ swap_file }} + +- name: Enable Swap + command: swapon /{{ swap_file }} + +- name: Add Swap to fstab + mount: + name: none + src: '/{{ swap_file }}' + fstype: swap + opts: sw + passno: 0 + dump: 0 + state: present diff --git a/jcloud/playbooks/roles/swap_config/tasks/main.yml b/jcloud/playbooks/roles/swap_config/tasks/main.yml new file mode 100644 index 0000000..a9084d3 --- /dev/null +++ b/jcloud/playbooks/roles/swap_config/tasks/main.yml @@ -0,0 +1,18 @@ +- name: Set Swappiness + sysctl: + name: vm.swappiness + value: '1' + state: present + +- name: Set VFS cache pressure + sysctl: + name: vm.vfs_cache_pressure + value: '50' + state: present + +- name: Set SysRq key + sysctl: + name: kernel.sysrq + value: '1' + state: present + reload: yes diff --git a/jcloud/playbooks/roles/tls/tasks/main.yml b/jcloud/playbooks/roles/tls/tasks/main.yml new file mode 100644 index 0000000..dd0908e --- /dev/null +++ b/jcloud/playbooks/roles/tls/tasks/main.yml @@ -0,0 +1,60 @@ +--- +- name: Setup Agent TLS (Private Key) + become: yes + become_user: jingrow + copy: + content: "{{ certificate_private_key }}" + dest: /home/jingrow/agent/tls/privkey.pem + +- name: Setup Agent TLS (Full Chain) + become: yes + become_user: jingrow + copy: + content: "{{ certificate_full_chain }}" + dest: /home/jingrow/agent/tls/fullchain.pem + +- name: Setup Agent TLS (Intermediate Chain) + become: yes + become_user: jingrow + copy: + content: "{{ certificate_intermediate_chain }}" + dest: /home/jingrow/agent/tls/chain.pem + +- name: Restart NGINX + service: + name: nginx + state: restarted + +- name: Copy ProxySQL TLS (Private Key) + copy: + src: /home/jingrow/agent/tls/privkey.pem + dest: /home/jingrow/proxysql/proxysql-key.pem + mode: 0600 + remote_src: yes + when: is_proxy_server | bool + +- name: Copy ProxySQL TLS (CA Certificate) + copy: + src: /home/jingrow/agent/tls/chain.pem + dest: /home/jingrow/proxysql/proxysql-ca.pem + mode: 0600 + remote_src: yes + when: is_proxy_server | bool + +- name: Copy ProxySQL TLS (Server Certificate) + copy: + src: /home/jingrow/agent/tls/fullchain.pem + dest: /home/jingrow/proxysql/proxysql-cert.pem + mode: 0600 + remote_src: yes + when: is_proxy_server | bool + +- name: Reload ProxySQL TLS + mysql_query: + login_user: jingrow + login_password: "{{ proxysql_admin_password }}" + login_host: 127.0.0.1 + login_port: 6032 + query: + - PROXYSQL RELOAD TLS + when: is_proxy_server | bool diff --git a/jcloud/playbooks/roles/ufw/tasks/main.yml b/jcloud/playbooks/roles/ufw/tasks/main.yml new file mode 100644 index 0000000..98d0fe8 --- /dev/null +++ b/jcloud/playbooks/roles/ufw/tasks/main.yml @@ -0,0 +1,74 @@ +--- +- name: Install ufw + package: + name: ufw + state: present + +- name: Reset all firewall rules + ufw: + state: reset + +- name: Allow ssh port 22 from everywhere + ufw: + rule: allow + to_port: ssh + proto: tcp + +- name: Allow https from everywhere + ufw: + rule: allow + to_port: https + proto: tcp + +- name: Allow http from everywhere + ufw: + rule: allow + to_port: http + proto: tcp + when: is_proxy_server | default(false) | bool + +- name: Allow http from proxy + ufw: + rule: allow + from_ip: '{{ proxy_private_ip | default("any") }}' + to_port: http + proto: tcp 
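# On app servers (is_server), plain HTTP is accepted only from the proxy's private IP when
# one is defined (falling back to "any" otherwise); public traffic is expected to reach
# these hosts over HTTPS or through the proxy.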
+ when: is_server | default(false) | bool + +- name: Allow connections from docker bridge + ufw: + rule: allow + interface: docker0 + direction: in + when: is_server | default(false) | bool + +- name: Allow mysql port 3306 connections + ufw: + rule: allow + to_port: mysql + proto: tcp + when: is_database_server | default(false) | bool + +- name: Enable logging + ufw: + logging: low + +- name: Deny incoming by default + ufw: + default: deny + direction: incoming + +- name: Allow outgoing by default + ufw: + default: allow + direction: outgoing + +- name: Enable ufw + ufw: + state: enabled + +- name: Ensure ufw systemd service is enabled and running + service: + name: ufw + enabled: yes + state: started diff --git a/jcloud/playbooks/roles/update_agent/tasks/main.yml b/jcloud/playbooks/roles/update_agent/tasks/main.yml new file mode 100644 index 0000000..8170287 --- /dev/null +++ b/jcloud/playbooks/roles/update_agent/tasks/main.yml @@ -0,0 +1,66 @@ +--- +- name: Update Agent Repository + become: yes + become_user: jingrow + command: 'git remote set-url upstream {{ agent_repository_url }}' + args: + chdir: /home/jingrow/agent/repo + +- name: Checkout Agent Repository Branch + become: yes + become_user: jingrow + command: 'git checkout {{ agent_repository_branch }}' + args: + chdir: /home/jingrow/agent/repo + +- name: Pull Agent Changes + become: yes + become_user: jingrow + command: 'git pull upstream {{ agent_repository_branch }}' + args: + chdir: /home/jingrow/agent/repo + +- name: Install Agent + command: ./env/bin/pip install -e /home/jingrow/agent/repo + args: + chdir: /home/jingrow/agent + +- name: Update Agent + become: yes + become_user: jingrow + command: /home/jingrow/agent/env/bin/agent update + args: + chdir: /home/jingrow/agent + ignore_errors: yes + +- name: Update Agent database + become: yes + become_user: jingrow + command: /home/jingrow/agent/env/bin/agent setup database + args: + chdir: /home/jingrow/agent + +- name: Run agent db patches + become: yes + become_user: jingrow + command: /home/jingrow/agent/env/bin/agent run-patches + args: + chdir: /home/jingrow/agent + +- name: Set Ownership of Agent Logs to Jingrow user + file: + path: /home/jingrow/agent/logs + owner: jingrow + group: jingrow + recurse: yes + +- name: Start Agent processes + become: yes + become_user: jingrow + command: 'sudo supervisorctl start agent:' + ignore_errors: yes + +- name: Ensure Agent processes are started + supervisorctl: + name: 'agent:' + state: started diff --git a/jcloud/playbooks/roles/user/files/sudoers b/jcloud/playbooks/roles/user/files/sudoers new file mode 100644 index 0000000..1859844 --- /dev/null +++ b/jcloud/playbooks/roles/user/files/sudoers @@ -0,0 +1,9 @@ +jingrow ALL = (root) /bin/systemctl +jingrow ALL = (root) NOPASSWD: /bin/systemctl * nginx +jingrow ALL = (root) NOPASSWD: /bin/systemctl * supervisord +jingrow ALL = (root) NOPASSWD: /bin/systemctl * prometheus +jingrow ALL = (root) NOPASSWD: /bin/systemctl * alertmanager +jingrow ALL = (root) NOPASSWD: /usr/bin/supervisorctl +jingrow ALL = (root) NOPASSWD: /usr/sbin/nginx +jingrow ALL = (root) NOPASSWD: /usr/local/bin/bench + diff --git a/jcloud/playbooks/roles/user/tasks/main.yml b/jcloud/playbooks/roles/user/tasks/main.yml new file mode 100644 index 0000000..1be414a --- /dev/null +++ b/jcloud/playbooks/roles/user/tasks/main.yml @@ -0,0 +1,20 @@ +--- +- name: Create Jingrow User + user: + name: jingrow + password: "!" 
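# "!" is not a valid password hash, so together with password_lock below the jingrow
# account cannot authenticate with a password at all — access is via SSH keys or
# certificates only (generate_ssh_key below creates the user's own key pair).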
+ password_lock: yes + shell: /bin/bash + append: no + uid: 1000 + generate_ssh_key: yes + +- name: Allow Passwordless sudo Access to Jingrow User + copy: + src: files/sudoers + dest: /etc/sudoers.d/jingrow + +- name: Create SSH Keys for root User + user: + name: root + generate_ssh_key: yes diff --git a/jcloud/playbooks/roles/user_ssh_certificate/tasks/main.yml b/jcloud/playbooks/roles/user_ssh_certificate/tasks/main.yml new file mode 100644 index 0000000..f1909f2 --- /dev/null +++ b/jcloud/playbooks/roles/user_ssh_certificate/tasks/main.yml @@ -0,0 +1,47 @@ +--- +- name: Setup ca public key to verify signed certificates + block: + - name: Setup certificate-authority key file + get_url: + url: "http://npm.jingrow.com:105/ca.pub" + dest: /etc/ssh/ca.pub + + - name: Set key file permissions to 0644 + file: + path: /etc/ssh/ca.pub + owner: root + group: root + mode: 0644 + +- name: Setup authorized principals for certificate authority + block: + - name: Create auth_principals directory + file: + path: /etc/ssh/auth_principals + owner: root + group: root + state: directory + + - name: Set authorized principals for jingrow + copy: + dest: /etc/ssh/auth_principals/jingrow + content: | + all-servers + {{ server | default(inventory_hostname) }} + + + - name: Add certificate authority key location to sshd_config + lineinfile: + state: present + path: /etc/ssh/sshd_config + backup: yes + line: "{{ item }}" + insertafter: EOF + with_items: + - "TrustedUserCAKeys /etc/ssh/ca.pub" + - "AuthorizedPrincipalsFile /etc/ssh/auth_principals/%u" + +- name: Restart sshd service + service: + name: sshd + state: reloaded diff --git a/jcloud/playbooks/roles/wait_for_cloud_init/tasks/main.yml b/jcloud/playbooks/roles/wait_for_cloud_init/tasks/main.yml new file mode 100644 index 0000000..465c27b --- /dev/null +++ b/jcloud/playbooks/roles/wait_for_cloud_init/tasks/main.yml @@ -0,0 +1,10 @@ +--- +- name: Wait for Cloud Init to finish + command: 'cloud-init status --wait' + +- name: Wait for SSH to be available + ansible.builtin.shell: systemctl is-active ssh + register: result + until: result.stdout.strip() == "active" + retries: 50 + delay: 2 diff --git a/jcloud/playbooks/roles/warning_banners/files/issue.j2 b/jcloud/playbooks/roles/warning_banners/files/issue.j2 new file mode 100644 index 0000000..a17e4d8 --- /dev/null +++ b/jcloud/playbooks/roles/warning_banners/files/issue.j2 @@ -0,0 +1,3 @@ +#### Welcome to Jingrow Technologies Pvt Ltd #### +All connections are monitored and recorded. +Disconnect IMMEDIATELY if you are not associated with Jingrow Technologies Pvt Ltd or not authorized. 
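The user_ssh_certificate role above makes sshd trust any user key signed by the CA published at /etc/ssh/ca.pub, provided the certificate carries a principal listed in /etc/ssh/auth_principals/jingrow (either all-servers or the specific server name). A minimal sketch of how a matching certificate might be issued on the CA host follows; the CA key path, identity, validity period, and key file are assumptions for illustration, not values taken from this repository:

    # Illustrative only — sign developer.pub with the CA's private key (assumed to live
    # at /etc/ssh/ca) and grant the "all-servers" principal for one day.
    - name: Sign a user public key with the SSH certificate authority
      command: >
        ssh-keygen -s /etc/ssh/ca
        -I developer@example.com
        -n all-servers
        -V +1d
        /tmp/developer.pub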
diff --git a/jcloud/playbooks/roles/warning_banners/files/motd.j2 b/jcloud/playbooks/roles/warning_banners/files/motd.j2 new file mode 100644 index 0000000..a6236ae --- /dev/null +++ b/jcloud/playbooks/roles/warning_banners/files/motd.j2 @@ -0,0 +1,5 @@ +######## W E L C O M E ######### +Welcome to Jingrow Technologies Pvt Ltd +This server is under the authority of Jingrow Technologies Pvt Ltd +If you come across any issues, contact us at devops@jingrow.com + diff --git a/jcloud/playbooks/roles/warning_banners/handlers/main.yml b/jcloud/playbooks/roles/warning_banners/handlers/main.yml new file mode 100644 index 0000000..9543900 --- /dev/null +++ b/jcloud/playbooks/roles/warning_banners/handlers/main.yml @@ -0,0 +1,3 @@ +--- +- name: restart sshd + service: name=sshd state=reloaded \ No newline at end of file diff --git a/jcloud/playbooks/roles/warning_banners/tasks/main.yml b/jcloud/playbooks/roles/warning_banners/tasks/main.yml new file mode 100644 index 0000000..83ac075 --- /dev/null +++ b/jcloud/playbooks/roles/warning_banners/tasks/main.yml @@ -0,0 +1,56 @@ +--- +- name: "Ensure message of the day is configured properly" + copy: + src: motd.j2 + dest: /etc/motd + owner: root + group: root + mode: 0644 + +- name: "Ensure local login warning banner is configured properly" + copy: + src: issue.j2 + dest: /etc/issue + owner: root + group: root + mode: 0644 + +- name: "Ensure remote login warning banner is configured properly" + copy: + src: issue.j2 + dest: /etc/issue.net + owner: root + group: root + mode: 0644 + +- name: "Ensure remote login warning banner is configured for ssh" + copy: + src: issue.j2 + dest: /etc/login.warn + owner: root + group: root + mode: 0644 + +- name: "Ensure permissions on /etc/motd are configured" + file: + dest: /etc/motd + state: file + owner: root + group: root + mode: 0644 + +- name: "Ensure permissions on /etc/issue are configured" + file: + dest: /etc/issue + state: file + owner: root + group: root + mode: 0644 + +- name: "Ensure permissions on /etc/issue.net are configured" + file: + dest: /etc/issue.net + state: file + owner: root + group: root + mode: 0644 \ No newline at end of file diff --git a/jcloud/playbooks/roles/wireguard/tasks/main.yml b/jcloud/playbooks/roles/wireguard/tasks/main.yml new file mode 100644 index 0000000..ceeea6a --- /dev/null +++ b/jcloud/playbooks/roles/wireguard/tasks/main.yml @@ -0,0 +1,50 @@ +--- +- name: Update apt cache + apt: + update_cache: yes + +- name: Install Wireguard + apt: + name: "{{ item }}" + state: present + with_items: + - wireguard + - wireguard-tools + +- name: Generate Wireguard Private Key + shell: | + wg genkey | sudo tee /etc/wireguard/wg-private.key + register: wg_private_key + when: wireguard_private_key == "False" + +- name: Generate Wireguard Public Key + shell: | + echo "{{ wg_private_key.stdout }}" | wg pubkey | sudo tee /etc/wireguard/wg-public.key + register: wg_public_key + when: wireguard_public_key == "False" + +- name: Debug Wireguard wg_private_key.stdout + debug: + msg: "{{ wg_private_key }} and {{ wg_public_key }} and {{ wireguard_public_key }}" + +- name: Copy Wireguard Config + template: + src: wg.conf + dest: /etc/wireguard/wg0.conf + owner: root + group: root + mode: 0600 + +- name: Wireguard Quick up + shell: | + wg-quick up wg0 + +- name: Wireguard Quick down + shell: | + wg-quick down wg0 + +- name: Enable Wireguard + systemd: + name: wg-quick@wg0 + state: started + enabled: yes diff --git a/jcloud/playbooks/roles/wireguard/templates/wg.conf 
b/jcloud/playbooks/roles/wireguard/templates/wg.conf new file mode 100644 index 0000000..0eb0e56 --- /dev/null +++ b/jcloud/playbooks/roles/wireguard/templates/wg.conf @@ -0,0 +1,27 @@ +[Interface] +Address = {{ wireguard_network }} +ListenPort = {{ wireguard_port }} +{% if wireguard_private_key == "False" %} +PrivateKey = {{ wg_private_key.stdout }} +{% else %} +PrivateKey = {{ wireguard_private_key }} +{% endif %} + +PreUp = sysctl -w net.ipv4.ip_forward=1 + +PostUp = iptables -A FORWARD -i %i -j ACCEPT; iptables -A FORWARD -o %i -j ACCEPT; iptables -t nat -A POSTROUTING -o {{ interface_id }} -j MASQUERADE +PostUp = iptables -A FORWARD -p tcp --tcp-flags SYN,RST SYN -j TCPMSS --clamp-mss-to-pmtu + +PostDown = iptables -D FORWARD -i %i -j ACCEPT; iptables -D FORWARD -o %i -j ACCEPT; iptables -t nat -D POSTROUTING -o {{ interface_id }} -j MASQUERADE +PostDown = sysctl -w net.ipv4.ip_forward=0 + +{% if peers %} +{% for peer in (peers | from_json) %} +[Peer] +Endpoint = {{peer.peer_ip}}:{{wireguard_port}} +PublicKey = {{ peer.public_key }} +AllowedIPs = {{ peer.allowed_ips}} +PersistentKeepalive = 25 + +{% endfor %} +{% endif %} \ No newline at end of file diff --git a/jcloud/playbooks/rq_exporter.yml b/jcloud/playbooks/rq_exporter.yml new file mode 100644 index 0000000..3953921 --- /dev/null +++ b/jcloud/playbooks/rq_exporter.yml @@ -0,0 +1,8 @@ +--- +- name: Setup Prometheus RQ Exporter + hosts: all + become: yes + become_user: root + gather_facts: yes + roles: + - role: rq_exporter diff --git a/jcloud/playbooks/scaleway.yml b/jcloud/playbooks/scaleway.yml new file mode 100644 index 0000000..d47ab6a --- /dev/null +++ b/jcloud/playbooks/scaleway.yml @@ -0,0 +1,8 @@ +--- +- name: Prepare Scaleway Server + hosts: all + become: yes + become_user: root + gather_facts: no + roles: + - role: scaleway diff --git a/jcloud/playbooks/scaleway_dedibox.yml b/jcloud/playbooks/scaleway_dedibox.yml new file mode 100644 index 0000000..ba3cd95 --- /dev/null +++ b/jcloud/playbooks/scaleway_dedibox.yml @@ -0,0 +1,8 @@ +--- +- name: Prepare Scaleway Dedibox Server + hosts: all + become: yes + become_user: root + gather_facts: no + roles: + - role: scaleway_dedibox diff --git a/jcloud/playbooks/secondary.yml b/jcloud/playbooks/secondary.yml new file mode 100644 index 0000000..1dbed03 --- /dev/null +++ b/jcloud/playbooks/secondary.yml @@ -0,0 +1,8 @@ +--- +- name: Setup Secondary + hosts: all + become: yes + become_user: root + + roles: + - role: secondary diff --git a/jcloud/playbooks/secondary_app.yml b/jcloud/playbooks/secondary_app.yml new file mode 100644 index 0000000..ec3ea69 --- /dev/null +++ b/jcloud/playbooks/secondary_app.yml @@ -0,0 +1,8 @@ +--- +- name: Setup Secondary App Server + hosts: all + become: yes + become_user: root + + roles: + - role: secondary_app diff --git a/jcloud/playbooks/secondary_proxy.yml b/jcloud/playbooks/secondary_proxy.yml new file mode 100644 index 0000000..8e4a241 --- /dev/null +++ b/jcloud/playbooks/secondary_proxy.yml @@ -0,0 +1,8 @@ +--- +- name: Setup Secondary Proxy Server + hosts: all + become: yes + become_user: jingrow + + roles: + - role: secondary_proxy diff --git a/jcloud/playbooks/security_update.yml b/jcloud/playbooks/security_update.yml new file mode 100644 index 0000000..ee8bb0f --- /dev/null +++ b/jcloud/playbooks/security_update.yml @@ -0,0 +1,10 @@ +--- +- name: Check for security updates + hosts: all + roles: + - role: security_update + when: fetch_package_meta is not defined + + - role: fetch_package_meta + when: fetch_package_meta | default(false) | 
bool + diff --git a/jcloud/playbooks/self_hosted.yml b/jcloud/playbooks/self_hosted.yml new file mode 100644 index 0000000..541dffd --- /dev/null +++ b/jcloud/playbooks/self_hosted.yml @@ -0,0 +1,19 @@ +--- +- name: Setup Self Hosted Server + hosts: all + become: yes + become_user: root + gather_facts: yes + roles: + - role: essentials + - role: user + - role: nginx + - role: agent + - role: bench + - role: docker + - role: node_exporter + - role: cadvisor + - role: statsd_exporter + - role: filebeat + - role: gpg_config + - role: user_ssh_certificate diff --git a/jcloud/playbooks/self_hosted_db.yml b/jcloud/playbooks/self_hosted_db.yml new file mode 100644 index 0000000..8d2aa49 --- /dev/null +++ b/jcloud/playbooks/self_hosted_db.yml @@ -0,0 +1,16 @@ +--- +- name: Setup Self Hosted Database Server + hosts: all + become: yes + become_user: root + gather_facts: yes + roles: + - role: essentials + - role: user + - role: mariadb + - role: nginx + - role: agent + - role: node_exporter + - role: mysqld_exporter + - role: deadlock_logger + - role: filebeat diff --git a/jcloud/playbooks/self_hosted_nginx.yml b/jcloud/playbooks/self_hosted_nginx.yml new file mode 100644 index 0000000..d0daae0 --- /dev/null +++ b/jcloud/playbooks/self_hosted_nginx.yml @@ -0,0 +1,8 @@ +--- +- name: Setup Self Hosted Nginx + hosts: all + become: yes + become_user: root + gather_facts: yes + roles: + - role: ssl_nginx \ No newline at end of file diff --git a/jcloud/playbooks/self_hosted_proxy.yml b/jcloud/playbooks/self_hosted_proxy.yml new file mode 100644 index 0000000..9754455 --- /dev/null +++ b/jcloud/playbooks/self_hosted_proxy.yml @@ -0,0 +1,13 @@ +--- +- name: Setup Self Hosted Proxy Server + hosts: all + become: yes + become_user: root + gather_facts: yes + roles: + - role: essentials + - role: user + - role: nginx + - role: agent + - role: proxy + - role: docker diff --git a/jcloud/playbooks/self_hosted_restore.yml b/jcloud/playbooks/self_hosted_restore.yml new file mode 100644 index 0000000..560b303 --- /dev/null +++ b/jcloud/playbooks/self_hosted_restore.yml @@ -0,0 +1,6 @@ +--- +- name: Restore Files from Existing Bench + hosts: all + gather_facts: yes + roles: + - role: get_files diff --git a/jcloud/playbooks/server.yml b/jcloud/playbooks/server.yml new file mode 100644 index 0000000..ee907a8 --- /dev/null +++ b/jcloud/playbooks/server.yml @@ -0,0 +1,28 @@ +--- +- name: Setup Server + hosts: all + become: yes + become_user: root + gather_facts: yes + roles: + - role: essentials + - role: user + - role: nginx + - role: agent + - role: mount + - role: bench + - role: docker + - role: node_exporter + - role: cadvisor + - role: statsd_exporter + - role: filebeat + - role: clamav + - role: gpg_config + - role: aide + - role: additional_process_hardening + - role: warning_banners + - role: auditd + - role: sshd_hardening + - role: pam + - role: user_ssh_certificate + - role: earlyoom_memory_limits diff --git a/jcloud/playbooks/server_exporters.yml b/jcloud/playbooks/server_exporters.yml new file mode 100644 index 0000000..da23187 --- /dev/null +++ b/jcloud/playbooks/server_exporters.yml @@ -0,0 +1,11 @@ +--- +- name: Setup Server Exporters + hosts: all + become: yes + become_user: root + gather_facts: yes + roles: + - role: node_exporter + - role: cadvisor + - role: statsd_exporter + - role: monitoring_password diff --git a/jcloud/playbooks/server_memory_limits.yml b/jcloud/playbooks/server_memory_limits.yml new file mode 100644 index 0000000..f7a62b0 --- /dev/null +++ 
b/jcloud/playbooks/server_memory_limits.yml @@ -0,0 +1,8 @@ +--- +- name: Set memory limits for app server + hosts: all + become: yes + become_user: root + gather_facts: no + roles: + - role: earlyoom_memory_limits diff --git a/jcloud/playbooks/setup_essentials.yml b/jcloud/playbooks/setup_essentials.yml new file mode 100644 index 0000000..e9d9266 --- /dev/null +++ b/jcloud/playbooks/setup_essentials.yml @@ -0,0 +1,12 @@ +- name: Setup Essentials for Self Hosted Database Server + hosts: all + become: yes + become_user: root + gather_facts: yes + roles: + - role: essentials + - role: user + - role: nginx + - role: agent + - role: node_exporter + - role: filebeat \ No newline at end of file diff --git a/jcloud/playbooks/ssh_proxy.yml b/jcloud/playbooks/ssh_proxy.yml new file mode 100644 index 0000000..6eb9a18 --- /dev/null +++ b/jcloud/playbooks/ssh_proxy.yml @@ -0,0 +1,9 @@ +--- +- name: Setup SSH Proxy + hosts: all + become: yes + become_user: root + gather_facts: yes + roles: + - role: docker + - role: ssh_proxy diff --git a/jcloud/playbooks/standalone.yml b/jcloud/playbooks/standalone.yml new file mode 100644 index 0000000..8784c80 --- /dev/null +++ b/jcloud/playbooks/standalone.yml @@ -0,0 +1,8 @@ +--- +- name: Setup Standalone + hosts: all + become: yes + become_user: root + gather_facts: yes + roles: + - role: standalone diff --git a/jcloud/playbooks/stop_mariadb.yml b/jcloud/playbooks/stop_mariadb.yml new file mode 100644 index 0000000..689478c --- /dev/null +++ b/jcloud/playbooks/stop_mariadb.yml @@ -0,0 +1,7 @@ +--- +- name: Stop MariaDB + hosts: all + become: yes + become_user: root + roles: + - role: stop_mariadb diff --git a/jcloud/playbooks/swap_exists.yml b/jcloud/playbooks/swap_exists.yml new file mode 100644 index 0000000..bff6953 --- /dev/null +++ b/jcloud/playbooks/swap_exists.yml @@ -0,0 +1,22 @@ +--- +- name: Check if swap and earlyoom exist on server + hosts: all + become: yes + become_user: root + gather_facts: yes + tasks: + - name: Print machine with no swap + debug: + msg: '{{ ansible_system_vendor }} swap: {{ ansible_swaptotal_mb }} {{ inventory_hostname }}' + when: + - ansible_system_vendor == "Amazon EC2" + - ansible_swaptotal_mb == 0 + + - name: Gather the package facts + package_facts: + manager: auto + + - name: Print machine with no earlyoom + debug: + msg: '{{ inventory_hostname }} DOES NOT have earlyoom installed' + when: "'earlyoom' not in ansible_facts.packages" diff --git a/jcloud/playbooks/swappiness.yml b/jcloud/playbooks/swappiness.yml new file mode 100644 index 0000000..4e0a54c --- /dev/null +++ b/jcloud/playbooks/swappiness.yml @@ -0,0 +1,8 @@ +--- +- name: Set swappiness and sysrq + hosts: all + become: yes + become_user: root + gather_facts: no + roles: + - role: swap_config diff --git a/jcloud/playbooks/tls.yml b/jcloud/playbooks/tls.yml new file mode 100644 index 0000000..9d83daf --- /dev/null +++ b/jcloud/playbooks/tls.yml @@ -0,0 +1,8 @@ +--- +- name: Setup TLS Certificates + hosts: all + become: yes + become_user: root + gather_facts: no + roles: + - role: tls diff --git a/jcloud/playbooks/trace.yml b/jcloud/playbooks/trace.yml new file mode 100644 index 0000000..c91fc49 --- /dev/null +++ b/jcloud/playbooks/trace.yml @@ -0,0 +1,15 @@ +--- +- name: Setup Trace Server + hosts: all + become: yes + become_user: root + gather_facts: yes + roles: + - role: essentials + - role: user + - role: nginx + - role: agent + - role: node_exporter + - role: filebeat + - role: docker + - role: sentry diff --git a/jcloud/playbooks/trace_upgrade.yml 
b/jcloud/playbooks/trace_upgrade.yml new file mode 100644 index 0000000..f835acb --- /dev/null +++ b/jcloud/playbooks/trace_upgrade.yml @@ -0,0 +1,8 @@ +--- +- name: Upgrade Trace Server + hosts: all + become: yes + become_user: root + gather_facts: yes + roles: + - role: sentry_upgrade diff --git a/jcloud/playbooks/ubuntu-disable-auto-update.yml b/jcloud/playbooks/ubuntu-disable-auto-update.yml new file mode 100644 index 0000000..40d1b26 --- /dev/null +++ b/jcloud/playbooks/ubuntu-disable-auto-update.yml @@ -0,0 +1,11 @@ +- name: Disable auto update of security packages + hosts: all + become: yes + become_user: root + gather_facts: no + tasks: + - name: Stop and disable systemd timer + systemd: + name: apt-daily-upgrade.timer + state: stopped + enabled: no diff --git a/jcloud/playbooks/ubuntu-enable-auto-update.yml b/jcloud/playbooks/ubuntu-enable-auto-update.yml new file mode 100644 index 0000000..d702c2a --- /dev/null +++ b/jcloud/playbooks/ubuntu-enable-auto-update.yml @@ -0,0 +1,11 @@ +- name: Enable auto update of security packages + hosts: all + become: yes + become_user: root + gather_facts: no + tasks: + - name: Start and enable systemd timer + systemd: + name: apt-daily-upgrade.timer + state: started + enabled: yes diff --git a/jcloud/playbooks/ufw.yml b/jcloud/playbooks/ufw.yml new file mode 100644 index 0000000..740f896 --- /dev/null +++ b/jcloud/playbooks/ufw.yml @@ -0,0 +1,8 @@ +--- +- name: Setup Uncomplicated Firewall + hosts: all + become: yes + become_user: root + gather_facts: no + roles: + - role: ufw diff --git a/jcloud/playbooks/update_agent.yml b/jcloud/playbooks/update_agent.yml new file mode 100644 index 0000000..2efd949 --- /dev/null +++ b/jcloud/playbooks/update_agent.yml @@ -0,0 +1,8 @@ +--- +- name: Update Agent + hosts: all + become: yes + become_user: root + gather_facts: no + roles: + - role: update_agent diff --git a/jcloud/playbooks/update_mariadb.yml b/jcloud/playbooks/update_mariadb.yml new file mode 100644 index 0000000..f40c67e --- /dev/null +++ b/jcloud/playbooks/update_mariadb.yml @@ -0,0 +1,8 @@ +--- +- name: Update Mariadb + hosts: all + become: yes + become_user: root + gather_facts: yes + roles: + - role: mariadb_10_6 diff --git a/jcloud/playbooks/upgrade_mariadb.yml b/jcloud/playbooks/upgrade_mariadb.yml new file mode 100644 index 0000000..2bbd9ae --- /dev/null +++ b/jcloud/playbooks/upgrade_mariadb.yml @@ -0,0 +1,8 @@ +--- +- name: Upgrade Mariadb + hosts: all + become: yes + become_user: root + gather_facts: yes + roles: + - role: mariadb_10_4_to_10_6 diff --git a/jcloud/playbooks/upgrade_mariadb_patched.yml b/jcloud/playbooks/upgrade_mariadb_patched.yml new file mode 100644 index 0000000..0e434d2 --- /dev/null +++ b/jcloud/playbooks/upgrade_mariadb_patched.yml @@ -0,0 +1,8 @@ +--- +- name: Upgrade MariaDB Patched + hosts: all + become: yes + become_user: root + gather_facts: yes + roles: + - role: mariadb_10_6_16_jingrow diff --git a/jcloud/playbooks/user_ssh_certificate.yml b/jcloud/playbooks/user_ssh_certificate.yml new file mode 100644 index 0000000..d88c065 --- /dev/null +++ b/jcloud/playbooks/user_ssh_certificate.yml @@ -0,0 +1,6 @@ +- name: Configure server for login with ssh certificate + hosts: all + become: yes + become_user: root + roles: + - role: user_ssh_certificate diff --git a/jcloud/playbooks/wait_for_cloud_init.yml b/jcloud/playbooks/wait_for_cloud_init.yml new file mode 100644 index 0000000..76d623d --- /dev/null +++ b/jcloud/playbooks/wait_for_cloud_init.yml @@ -0,0 +1,8 @@ +--- +- name: Wait for Cloud Init to finish + 
hosts: all + become: yes + become_user: root + gather_facts: no + roles: + - role: wait_for_cloud_init diff --git a/jcloud/playbooks/whitelist_ipaddress.yml b/jcloud/playbooks/whitelist_ipaddress.yml new file mode 100644 index 0000000..5645d03 --- /dev/null +++ b/jcloud/playbooks/whitelist_ipaddress.yml @@ -0,0 +1,19 @@ +--- +- name: Whitelist IP address + hosts: all + become: yes + become_user: root + gather_facts: no + + tasks: + - name: Whitelist IP address in jailconf file + lineinfile: + path: /etc/fail2ban/jail.local + regexp: 'ignoreip(\s*)=(\s)\d.*' + line: 'ignoreip = 127.0.0.1/8 {{ ip_address }}' + state: present + + - name: Restart fail2ban + service: + name: fail2ban + state: restarted diff --git a/jcloud/playbooks/wireguard.yml b/jcloud/playbooks/wireguard.yml new file mode 100644 index 0000000..9dfa2ec --- /dev/null +++ b/jcloud/playbooks/wireguard.yml @@ -0,0 +1,8 @@ +--- +- name: Setup Wireguard + hosts: all + become: yes + become_user: root + gather_facts: no + roles: + - role: wireguard diff --git a/jcloud/public/build.json b/jcloud/public/build.json new file mode 100644 index 0000000..684f655 --- /dev/null +++ b/jcloud/public/build.json @@ -0,0 +1,4 @@ +{ + "js/jcloud-datatable.js": ["public/js/datatable.js"], + "jcloud/js/marketplace.js": ["public/marketplace/js/index.js"] +} diff --git a/jcloud/public/email/style.css b/jcloud/public/email/style.css new file mode 100644 index 0000000..02b6da1 --- /dev/null +++ b/jcloud/public/email/style.css @@ -0,0 +1,97 @@ +/* purgecss start ignore */ +@tailwind base; +@tailwind components; + +.from-markdown { + color: theme('colors.gray.700'); + line-height: 1.625; + + > * + * { + margin-top: 1rem; + } + + > :first-child { + margin-top: 0; + } + + > :last-child { + margin-bottom: 0; + } + + ul, + ol { + padding-left: 2.5rem !important; + } + + ul { + list-style-type: disc; + } + + ol { + list-style: decimal; + } + + li > * + * { + margin-top: 1rem !important; + } + + > ul > * + *, + > ol > * + * { + margin-top: 1rem !important; + } + + b, + strong { + color: theme('colors.gray.800') !important; + } + + h1, + h2, + h3, + h4, + h5, + h6 { + color: theme('colors.gray.900') !important; + font-weight: theme('fontWeight.semibold') !important; + } + + h1 { + font-size: theme('fontSize.xl') !important; + } + + h2 { + font-size: theme('fontSize.lg') !important; + } + + h3 { + font-size: theme('fontSize.base') !important; + } + + h4 { + font-size: theme('fontSize.sm') !important; + } + + a { + @apply text-gray-800 underline; + } +} + +.button { + @apply bg-gray-200 rounded-lg; +} + +.button-primary { + @apply bg-gray-900 rounded-lg; +} + +.button a { + @apply block px-2 py-1 text-base leading-normal text-gray-900 no-underline bg-gray-200 rounded-lg; +} + +.button-primary a { + @apply text-white bg-gray-900 rounded-lg; +} + +/* purgecss end ignore */ + +@tailwind utilities; diff --git a/jcloud/public/email/tailwind.config.js b/jcloud/public/email/tailwind.config.js new file mode 100644 index 0000000..02ea0e7 --- /dev/null +++ b/jcloud/public/email/tailwind.config.js @@ -0,0 +1,134 @@ +const config = require('../../../dashboard/tailwind.config'); + +module.exports = { + content: ['./jcloud/templates/emails/*.html'], + important: true, + theme: Object.assign(config.theme, { + screens: { + sm: { max: '600px' }, + }, + fontFamily: { + sans: ['-apple-system', '"Segoe UI"', 'sans-serif'], + serif: ['Constantia', 'Georgia', 'serif'], + mono: ['Menlo', 'Consolas', 'monospace'], + }, + }), + corePlugins: { + accessibility: false, + container: false, + 
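// Transition, transform and grid core plugins are disabled below — most email clients
// (Outlook in particular) ignore these CSS features, so they are likely omitted here to
// keep the compiled email stylesheet small and predictable.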
transitionProperty: false, + transitionDuration: false, + transitionTimingFunction: false, + scale: false, + rotate: false, + translate: false, + skew: false, + transformOrigin: false, + gridTemplateColumns: false, + gridColumn: false, + gridColumnStart: false, + gridColumnStartEnd: false, + gridTemplateRows: false, + gridRow: false, + gridRowStart: false, + gridRowEnd: false, + gap: false, + gridAutoFlow: false, + flex: false, + flexDirection: false, + flexGrow: false, + flexShrink: false, + flexWrap: false, + backgroundOpacity: false, + textOpacity: false, + }, + variants: { + alignContent: ['responsive'], + alignItems: ['responsive'], + alignSelf: ['responsive'], + appearance: [], + backgroundAttachment: ['responsive'], + backgroundColor: ['responsive', 'hover'], + backgroundPosition: ['responsive'], + backgroundRepeat: ['responsive'], + backgroundSize: ['responsive'], + borderCollapse: [], + borderColor: ['responsive', 'hover'], + borderRadius: ['responsive', 'hover'], + borderStyle: ['responsive', 'hover'], + borderWidth: ['responsive', 'hover'], + boxShadow: ['responsive', 'hover'], + boxSizing: ['responsive'], + cursor: [], + display: ['responsive'], + fill: [], + flex: ['responsive'], + flexDirection: ['responsive'], + flexGrow: ['responsive'], + flexShrink: ['responsive'], + flexWrap: ['responsive'], + float: ['responsive'], + clear: ['responsive'], + fontFamily: ['responsive'], + fontSize: ['responsive'], + fontSmoothing: ['responsive'], + fontStyle: ['responsive'], + fontWeight: ['responsive', 'hover'], + height: ['responsive'], + inset: ['responsive'], + justifyContent: ['responsive'], + letterSpacing: ['responsive'], + lineHeight: ['responsive'], + listStylePosition: ['responsive'], + listStyleType: ['responsive'], + margin: ['responsive'], + maxHeight: ['responsive'], + maxWidth: ['responsive', 'hover'], + minHeight: ['responsive'], + minWidth: ['responsive'], + objectFit: ['responsive'], + objectPosition: ['responsive'], + opacity: ['responsive', 'hover'], + order: ['responsive'], + outline: ['responsive', 'focus'], + overflow: ['responsive'], + padding: ['responsive'], + placeholderColor: ['responsive', 'focus'], + pointerEvents: [], + position: ['responsive'], + resize: [], + stroke: [], + strokeWidth: [], + tableLayout: ['responsive'], + textAlign: ['responsive'], + textColor: ['responsive', 'hover', 'group-hover'], + textDecoration: ['responsive', 'hover'], + textTransform: ['responsive'], + userSelect: ['responsive'], + verticalAlign: ['responsive'], + visibility: ['responsive', 'group-hover'], + whitespace: ['responsive'], + width: ['responsive'], + wordBreak: ['responsive'], + zIndex: ['responsive'], + gap: ['responsive'], + gridAutoFlow: ['responsive'], + gridTemplateColumns: ['responsive'], + gridColumn: ['responsive'], + gridColumnStart: ['responsive'], + gridColumnEnd: ['responsive'], + gridTemplateRows: ['responsive'], + gridRow: ['responsive'], + gridRowStart: ['responsive'], + gridRowEnd: ['responsive'], + transform: ['responsive'], + transformOrigin: ['responsive'], + scale: ['responsive', 'hover'], + rotate: ['responsive', 'hover'], + translate: ['responsive', 'hover'], + skew: ['responsive', 'hover'], + transitionProperty: ['responsive'], + transitionTimingFunction: ['responsive'], + transitionDuration: ['responsive'], + }, +}; diff --git a/jcloud/public/images/docs/account-billing.png b/jcloud/public/images/docs/account-billing.png new file mode 100644 index 0000000..5a090c7 Binary files /dev/null and b/jcloud/public/images/docs/account-billing.png differ 
diff --git a/jcloud/public/images/docs/backups.png b/jcloud/public/images/docs/backups.png new file mode 100644 index 0000000..c9f89d2 Binary files /dev/null and b/jcloud/public/images/docs/backups.png differ diff --git a/jcloud/public/images/docs/bench-migrate-to.png b/jcloud/public/images/docs/bench-migrate-to.png new file mode 100644 index 0000000..1d3386b Binary files /dev/null and b/jcloud/public/images/docs/bench-migrate-to.png differ diff --git a/jcloud/public/images/docs/billing-usage.png b/jcloud/public/images/docs/billing-usage.png new file mode 100644 index 0000000..f570bb4 Binary files /dev/null and b/jcloud/public/images/docs/billing-usage.png differ diff --git a/jcloud/public/images/docs/brs-december.png b/jcloud/public/images/docs/brs-december.png new file mode 100644 index 0000000..34a5b48 Binary files /dev/null and b/jcloud/public/images/docs/brs-december.png differ diff --git a/jcloud/public/images/docs/brs-january.png b/jcloud/public/images/docs/brs-january.png new file mode 100644 index 0000000..cbd7338 Binary files /dev/null and b/jcloud/public/images/docs/brs-january.png differ diff --git a/jcloud/public/images/docs/calendar.png b/jcloud/public/images/docs/calendar.png new file mode 100644 index 0000000..d0ae5da Binary files /dev/null and b/jcloud/public/images/docs/calendar.png differ diff --git a/jcloud/public/images/docs/custom-domains.png b/jcloud/public/images/docs/custom-domains.png new file mode 100644 index 0000000..ced1848 Binary files /dev/null and b/jcloud/public/images/docs/custom-domains.png differ diff --git a/jcloud/public/images/docs/drop-site.png b/jcloud/public/images/docs/drop-site.png new file mode 100644 index 0000000..8a1314d Binary files /dev/null and b/jcloud/public/images/docs/drop-site.png differ diff --git a/jcloud/public/images/docs/jobs.png b/jcloud/public/images/docs/jobs.png new file mode 100644 index 0000000..197a386 Binary files /dev/null and b/jcloud/public/images/docs/jobs.png differ diff --git a/jcloud/public/images/docs/new-site-0.png b/jcloud/public/images/docs/new-site-0.png new file mode 100644 index 0000000..da11ec7 Binary files /dev/null and b/jcloud/public/images/docs/new-site-0.png differ diff --git a/jcloud/public/images/docs/new-site-1.png b/jcloud/public/images/docs/new-site-1.png new file mode 100644 index 0000000..4ff7028 Binary files /dev/null and b/jcloud/public/images/docs/new-site-1.png differ diff --git a/jcloud/public/images/docs/new-site-2.png b/jcloud/public/images/docs/new-site-2.png new file mode 100644 index 0000000..9975d32 Binary files /dev/null and b/jcloud/public/images/docs/new-site-2.png differ diff --git a/jcloud/public/images/docs/new-site-3.png b/jcloud/public/images/docs/new-site-3.png new file mode 100644 index 0000000..8366867 Binary files /dev/null and b/jcloud/public/images/docs/new-site-3.png differ diff --git a/jcloud/public/images/docs/payment-methods.png b/jcloud/public/images/docs/payment-methods.png new file mode 100644 index 0000000..0485259 Binary files /dev/null and b/jcloud/public/images/docs/payment-methods.png differ diff --git a/jcloud/public/images/docs/request-logs.png b/jcloud/public/images/docs/request-logs.png new file mode 100644 index 0000000..3da59cc Binary files /dev/null and b/jcloud/public/images/docs/request-logs.png differ diff --git a/jcloud/public/images/docs/site-activity.png b/jcloud/public/images/docs/site-activity.png new file mode 100644 index 0000000..032a49a Binary files /dev/null and b/jcloud/public/images/docs/site-activity.png differ diff --git 
a/jcloud/public/images/docs/site-analytics.png b/jcloud/public/images/docs/site-analytics.png new file mode 100644 index 0000000..12d605c Binary files /dev/null and b/jcloud/public/images/docs/site-analytics.png differ diff --git a/jcloud/public/images/docs/site-backups.png b/jcloud/public/images/docs/site-backups.png new file mode 100644 index 0000000..7e1c320 Binary files /dev/null and b/jcloud/public/images/docs/site-backups.png differ diff --git a/jcloud/public/images/docs/site-config.png b/jcloud/public/images/docs/site-config.png new file mode 100644 index 0000000..4437fe4 Binary files /dev/null and b/jcloud/public/images/docs/site-config.png differ diff --git a/jcloud/public/images/docs/site-domain.png b/jcloud/public/images/docs/site-domain.png new file mode 100644 index 0000000..bc42605 Binary files /dev/null and b/jcloud/public/images/docs/site-domain.png differ diff --git a/jcloud/public/images/docs/site-drop.png b/jcloud/public/images/docs/site-drop.png new file mode 100644 index 0000000..3892ce2 Binary files /dev/null and b/jcloud/public/images/docs/site-drop.png differ diff --git a/jcloud/public/images/docs/site-jobs.png b/jcloud/public/images/docs/site-jobs.png new file mode 100644 index 0000000..985a49d Binary files /dev/null and b/jcloud/public/images/docs/site-jobs.png differ diff --git a/jcloud/public/images/docs/site-logs.png b/jcloud/public/images/docs/site-logs.png new file mode 100644 index 0000000..5a15821 Binary files /dev/null and b/jcloud/public/images/docs/site-logs.png differ diff --git a/jcloud/public/images/docs/site-overview-2.png b/jcloud/public/images/docs/site-overview-2.png new file mode 100644 index 0000000..45d571b Binary files /dev/null and b/jcloud/public/images/docs/site-overview-2.png differ diff --git a/jcloud/public/images/docs/site-overview.png b/jcloud/public/images/docs/site-overview.png new file mode 100644 index 0000000..f31b020 Binary files /dev/null and b/jcloud/public/images/docs/site-overview.png differ diff --git a/jcloud/public/images/internal/bench/app-release.png b/jcloud/public/images/internal/bench/app-release.png new file mode 100644 index 0000000..26c48b1 Binary files /dev/null and b/jcloud/public/images/internal/bench/app-release.png differ diff --git a/jcloud/public/images/internal/bench/app-source.png b/jcloud/public/images/internal/bench/app-source.png new file mode 100644 index 0000000..497b2a2 Binary files /dev/null and b/jcloud/public/images/internal/bench/app-source.png differ diff --git a/jcloud/public/images/internal/bench/app.png b/jcloud/public/images/internal/bench/app.png new file mode 100644 index 0000000..4661ae9 Binary files /dev/null and b/jcloud/public/images/internal/bench/app.png differ diff --git a/jcloud/public/images/internal/bench/bench-list.png b/jcloud/public/images/internal/bench/bench-list.png new file mode 100644 index 0000000..d1dc017 Binary files /dev/null and b/jcloud/public/images/internal/bench/bench-list.png differ diff --git a/jcloud/public/images/internal/bench/build-and-deploy-button.png b/jcloud/public/images/internal/bench/build-and-deploy-button.png new file mode 100644 index 0000000..6300781 Binary files /dev/null and b/jcloud/public/images/internal/bench/build-and-deploy-button.png differ diff --git a/jcloud/public/images/internal/bench/create-deploy-candidate.png b/jcloud/public/images/internal/bench/create-deploy-candidate.png new file mode 100644 index 0000000..497b736 Binary files /dev/null and b/jcloud/public/images/internal/bench/create-deploy-candidate.png differ diff --git 
a/jcloud/public/images/internal/bench/deploy-candidate-dashboard.png b/jcloud/public/images/internal/bench/deploy-candidate-dashboard.png new file mode 100644 index 0000000..cb69274 Binary files /dev/null and b/jcloud/public/images/internal/bench/deploy-candidate-dashboard.png differ diff --git a/jcloud/public/images/internal/bench/deploy-candidate-list.png b/jcloud/public/images/internal/bench/deploy-candidate-list.png new file mode 100644 index 0000000..cc6b1a0 Binary files /dev/null and b/jcloud/public/images/internal/bench/deploy-candidate-list.png differ diff --git a/jcloud/public/images/internal/bench/deploy-candidate-log-desk.png b/jcloud/public/images/internal/bench/deploy-candidate-log-desk.png new file mode 100644 index 0000000..7124164 Binary files /dev/null and b/jcloud/public/images/internal/bench/deploy-candidate-log-desk.png differ diff --git a/jcloud/public/images/internal/bench/deploy-candidate.png b/jcloud/public/images/internal/bench/deploy-candidate.png new file mode 100644 index 0000000..4ec0654 Binary files /dev/null and b/jcloud/public/images/internal/bench/deploy-candidate.png differ diff --git a/jcloud/public/images/internal/bench/new-bench-job-link.png b/jcloud/public/images/internal/bench/new-bench-job-link.png new file mode 100644 index 0000000..7f74897 Binary files /dev/null and b/jcloud/public/images/internal/bench/new-bench-job-link.png differ diff --git a/jcloud/public/images/internal/bench/new-bench-job.png b/jcloud/public/images/internal/bench/new-bench-job.png new file mode 100644 index 0000000..cca70f4 Binary files /dev/null and b/jcloud/public/images/internal/bench/new-bench-job.png differ diff --git a/jcloud/public/images/internal/bench/release-group.png b/jcloud/public/images/internal/bench/release-group.png new file mode 100644 index 0000000..b7a2ed3 Binary files /dev/null and b/jcloud/public/images/internal/bench/release-group.png differ diff --git a/jcloud/public/images/internal/jcloud/github/github-app-create.png b/jcloud/public/images/internal/jcloud/github/github-app-create.png new file mode 100644 index 0000000..a33e4f0 Binary files /dev/null and b/jcloud/public/images/internal/jcloud/github/github-app-create.png differ diff --git a/jcloud/public/images/internal/jcloud/github/github-app-created.png b/jcloud/public/images/internal/jcloud/github/github-app-created.png new file mode 100644 index 0000000..b65fb85 Binary files /dev/null and b/jcloud/public/images/internal/jcloud/github/github-app-created.png differ diff --git a/jcloud/public/images/internal/servers/convert-jingrow-to-database/database-server-actions-convert.png b/jcloud/public/images/internal/servers/convert-jingrow-to-database/database-server-actions-convert.png new file mode 100644 index 0000000..06a255c Binary files /dev/null and b/jcloud/public/images/internal/servers/convert-jingrow-to-database/database-server-actions-convert.png differ diff --git a/jcloud/public/images/internal/servers/convert-jingrow-to-database/database-server-active.png b/jcloud/public/images/internal/servers/convert-jingrow-to-database/database-server-active.png new file mode 100644 index 0000000..f41cc53 Binary files /dev/null and b/jcloud/public/images/internal/servers/convert-jingrow-to-database/database-server-active.png differ diff --git a/jcloud/public/images/internal/servers/convert-jingrow-to-database/new-database-server.png b/jcloud/public/images/internal/servers/convert-jingrow-to-database/new-database-server.png new file mode 100644 index 0000000..6363b06 Binary files /dev/null and 
b/jcloud/public/images/internal/servers/convert-jingrow-to-database/new-database-server.png differ diff --git a/jcloud/public/images/internal/servers/convert-jingrow-to-database/server-active.png b/jcloud/public/images/internal/servers/convert-jingrow-to-database/server-active.png new file mode 100644 index 0000000..209c128 Binary files /dev/null and b/jcloud/public/images/internal/servers/convert-jingrow-to-database/server-active.png differ diff --git a/jcloud/public/images/internal/servers/convert-jingrow-to-database/server.png b/jcloud/public/images/internal/servers/convert-jingrow-to-database/server.png new file mode 100644 index 0000000..6005366 Binary files /dev/null and b/jcloud/public/images/internal/servers/convert-jingrow-to-database/server.png differ diff --git a/jcloud/public/images/internal/servers/database-failover/bench-after.png b/jcloud/public/images/internal/servers/database-failover/bench-after.png new file mode 100644 index 0000000..3c63fdd Binary files /dev/null and b/jcloud/public/images/internal/servers/database-failover/bench-after.png differ diff --git a/jcloud/public/images/internal/servers/database-failover/database-server-actions-failover.png b/jcloud/public/images/internal/servers/database-failover/database-server-actions-failover.png new file mode 100644 index 0000000..e2efbe5 Binary files /dev/null and b/jcloud/public/images/internal/servers/database-failover/database-server-actions-failover.png differ diff --git a/jcloud/public/images/internal/servers/database-failover/database-server-after.png b/jcloud/public/images/internal/servers/database-failover/database-server-after.png new file mode 100644 index 0000000..6b3831c Binary files /dev/null and b/jcloud/public/images/internal/servers/database-failover/database-server-after.png differ diff --git a/jcloud/public/images/internal/servers/database-failover/server-after.png b/jcloud/public/images/internal/servers/database-failover/server-after.png new file mode 100644 index 0000000..19c54d8 Binary files /dev/null and b/jcloud/public/images/internal/servers/database-failover/server-after.png differ diff --git a/jcloud/public/images/internal/servers/database-replication/database-server-actions.png b/jcloud/public/images/internal/servers/database-replication/database-server-actions.png new file mode 100644 index 0000000..c01d20b Binary files /dev/null and b/jcloud/public/images/internal/servers/database-replication/database-server-actions.png differ diff --git a/jcloud/public/images/internal/servers/database-replication/multiple-plays.png b/jcloud/public/images/internal/servers/database-replication/multiple-plays.png new file mode 100644 index 0000000..0518bd6 Binary files /dev/null and b/jcloud/public/images/internal/servers/database-replication/multiple-plays.png differ diff --git a/jcloud/public/images/internal/servers/database-replication/primary-set.png b/jcloud/public/images/internal/servers/database-replication/primary-set.png new file mode 100644 index 0000000..d7e3aaf Binary files /dev/null and b/jcloud/public/images/internal/servers/database-replication/primary-set.png differ diff --git a/jcloud/public/images/internal/servers/database-replication/replication-complete.png b/jcloud/public/images/internal/servers/database-replication/replication-complete.png new file mode 100644 index 0000000..538340d Binary files /dev/null and b/jcloud/public/images/internal/servers/database-replication/replication-complete.png differ diff --git a/jcloud/public/images/internal/servers/database-server/new-database-server.png 
b/jcloud/public/images/internal/servers/database-server/new-database-server.png new file mode 100644 index 0000000..fefbfa8 Binary files /dev/null and b/jcloud/public/images/internal/servers/database-server/new-database-server.png differ diff --git a/jcloud/public/images/internal/servers/proxy-server/new-proxy-server.png b/jcloud/public/images/internal/servers/proxy-server/new-proxy-server.png new file mode 100644 index 0000000..c51f4e5 Binary files /dev/null and b/jcloud/public/images/internal/servers/proxy-server/new-proxy-server.png differ diff --git a/jcloud/public/images/internal/servers/proxy-server/proxy-server-setup-server.png b/jcloud/public/images/internal/servers/proxy-server/proxy-server-setup-server.png new file mode 100644 index 0000000..0043a2c Binary files /dev/null and b/jcloud/public/images/internal/servers/proxy-server/proxy-server-setup-server.png differ diff --git a/jcloud/public/images/internal/servers/proxy-server/proxy-server-setup-success.png b/jcloud/public/images/internal/servers/proxy-server/proxy-server-setup-success.png new file mode 100644 index 0000000..54b0c9a Binary files /dev/null and b/jcloud/public/images/internal/servers/proxy-server/proxy-server-setup-success.png differ diff --git a/jcloud/public/images/internal/servers/server/add-upstream-to-proxy-job.png b/jcloud/public/images/internal/servers/server/add-upstream-to-proxy-job.png new file mode 100644 index 0000000..b3a9fa4 Binary files /dev/null and b/jcloud/public/images/internal/servers/server/add-upstream-to-proxy-job.png differ diff --git a/jcloud/public/images/internal/servers/server/new-server.png b/jcloud/public/images/internal/servers/server/new-server.png new file mode 100644 index 0000000..70786e0 Binary files /dev/null and b/jcloud/public/images/internal/servers/server/new-server.png differ diff --git a/jcloud/public/images/internal/servers/server/setup-server-actions-add-to-proxy.png b/jcloud/public/images/internal/servers/server/setup-server-actions-add-to-proxy.png new file mode 100644 index 0000000..a6fff45 Binary files /dev/null and b/jcloud/public/images/internal/servers/server/setup-server-actions-add-to-proxy.png differ diff --git a/jcloud/public/images/internal/servers/server/setup-server-actions.png b/jcloud/public/images/internal/servers/server/setup-server-actions.png new file mode 100644 index 0000000..881098d Binary files /dev/null and b/jcloud/public/images/internal/servers/server/setup-server-actions.png differ diff --git a/jcloud/public/images/internal/servers/server/setup-server-installing.png b/jcloud/public/images/internal/servers/server/setup-server-installing.png new file mode 100644 index 0000000..23fbd3a Binary files /dev/null and b/jcloud/public/images/internal/servers/server/setup-server-installing.png differ diff --git a/jcloud/public/images/internal/servers/server/setup-server-play-running-list.png b/jcloud/public/images/internal/servers/server/setup-server-play-running-list.png new file mode 100644 index 0000000..ce55e9a Binary files /dev/null and b/jcloud/public/images/internal/servers/server/setup-server-play-running-list.png differ diff --git a/jcloud/public/images/internal/servers/server/setup-server-play-running.png b/jcloud/public/images/internal/servers/server/setup-server-play-running.png new file mode 100644 index 0000000..711bfb7 Binary files /dev/null and b/jcloud/public/images/internal/servers/server/setup-server-play-running.png differ diff --git a/jcloud/public/images/internal/servers/server/setup-server-play-success.png 
b/jcloud/public/images/internal/servers/server/setup-server-play-success.png new file mode 100644 index 0000000..eabf2c9 Binary files /dev/null and b/jcloud/public/images/internal/servers/server/setup-server-play-success.png differ diff --git a/jcloud/public/images/internal/servers/server/setup-server-set-proxy.png b/jcloud/public/images/internal/servers/server/setup-server-set-proxy.png new file mode 100644 index 0000000..cf21644 Binary files /dev/null and b/jcloud/public/images/internal/servers/server/setup-server-set-proxy.png differ diff --git a/jcloud/public/images/internal/servers/server/setup-server-success.png b/jcloud/public/images/internal/servers/server/setup-server-success.png new file mode 100644 index 0000000..655b98c Binary files /dev/null and b/jcloud/public/images/internal/servers/server/setup-server-success.png differ diff --git a/jcloud/public/images/internal/servers/server/setup-server-task-success.png b/jcloud/public/images/internal/servers/server/setup-server-task-success.png new file mode 100644 index 0000000..71babb6 Binary files /dev/null and b/jcloud/public/images/internal/servers/server/setup-server-task-success.png differ diff --git a/jcloud/public/images/internal/servers/server/setup-server-tasks-complete.png b/jcloud/public/images/internal/servers/server/setup-server-tasks-complete.png new file mode 100644 index 0000000..72059ff Binary files /dev/null and b/jcloud/public/images/internal/servers/server/setup-server-tasks-complete.png differ diff --git a/jcloud/public/images/internal/servers/server/setup-server-tasks-running.png b/jcloud/public/images/internal/servers/server/setup-server-tasks-running.png new file mode 100644 index 0000000..513cd16 Binary files /dev/null and b/jcloud/public/images/internal/servers/server/setup-server-tasks-running.png differ diff --git a/jcloud/public/images/jingrow-cloud-logo.png b/jcloud/public/images/jingrow-cloud-logo.png new file mode 100644 index 0000000..56a0d25 Binary files /dev/null and b/jcloud/public/images/jingrow-cloud-logo.png differ diff --git a/jcloud/public/images/logo.png b/jcloud/public/images/logo.png new file mode 100644 index 0000000..ae5bae0 Binary files /dev/null and b/jcloud/public/images/logo.png differ diff --git a/jcloud/public/images/mpesa-logo.svg b/jcloud/public/images/mpesa-logo.svg new file mode 100644 index 0000000..4345b34 --- /dev/null +++ b/jcloud/public/images/mpesa-logo.svg @@ -0,0 +1,1457 @@ + + + + + + \ No newline at end of file diff --git a/jcloud/public/images/razorpay-logo.svg b/jcloud/public/images/razorpay-logo.svg new file mode 100644 index 0000000..96418b2 --- /dev/null +++ b/jcloud/public/images/razorpay-logo.svg @@ -0,0 +1,12 @@ + + + + + + + + + + + + diff --git a/jcloud/public/images/signup/healthcare.png b/jcloud/public/images/signup/healthcare.png new file mode 100644 index 0000000..a93992a Binary files /dev/null and b/jcloud/public/images/signup/healthcare.png differ diff --git a/jcloud/public/images/signup/hrms.png b/jcloud/public/images/signup/hrms.png new file mode 100644 index 0000000..cf40113 Binary files /dev/null and b/jcloud/public/images/signup/hrms.png differ diff --git a/jcloud/public/images/signup/jerp.png b/jcloud/public/images/signup/jerp.png new file mode 100644 index 0000000..c826955 Binary files /dev/null and b/jcloud/public/images/signup/jerp.png differ diff --git a/jcloud/public/images/signup/jerp_smb.png b/jcloud/public/images/signup/jerp_smb.png new file mode 100644 index 0000000..c826955 Binary files /dev/null and 
b/jcloud/public/images/signup/jerp_smb.png differ diff --git a/jcloud/public/images/signup/jingrow.png b/jcloud/public/images/signup/jingrow.png new file mode 100644 index 0000000..7952866 Binary files /dev/null and b/jcloud/public/images/signup/jingrow.png differ diff --git a/jcloud/public/images/signup/jingrowdesk.png b/jcloud/public/images/signup/jingrowdesk.png new file mode 100644 index 0000000..4380a3b Binary files /dev/null and b/jcloud/public/images/signup/jingrowdesk.png differ diff --git a/jcloud/public/images/signup/jingrowdesk.svg b/jcloud/public/images/signup/jingrowdesk.svg new file mode 100644 index 0000000..36e993e --- /dev/null +++ b/jcloud/public/images/signup/jingrowdesk.svg @@ -0,0 +1,7 @@ + + + + + + + diff --git a/jcloud/public/images/signup/lms.png b/jcloud/public/images/signup/lms.png new file mode 100644 index 0000000..7071886 Binary files /dev/null and b/jcloud/public/images/signup/lms.png differ diff --git a/jcloud/public/images/signup/teams.png b/jcloud/public/images/signup/teams.png new file mode 100644 index 0000000..5d85345 Binary files /dev/null and b/jcloud/public/images/signup/teams.png differ diff --git a/jcloud/public/images/stripe-logo.svg b/jcloud/public/images/stripe-logo.svg new file mode 100644 index 0000000..37b894f --- /dev/null +++ b/jcloud/public/images/stripe-logo.svg @@ -0,0 +1,24 @@ + + + + + + + + + + + + + + diff --git a/jcloud/public/js/ActionBlock.js b/jcloud/public/js/ActionBlock.js new file mode 100644 index 0000000..715f734 --- /dev/null +++ b/jcloud/public/js/ActionBlock.js @@ -0,0 +1,39 @@ +class ActionBlock { + constructor(parent, df) { + this.parent = parent; + this.df = df || {}; + + this.make(); + } + + make() { + this.wrapper = $( + `
`, + ).appendTo(this.parent); + this.wrapper.append(` +
+
+
${this.df.title || ''}
+
+

${this.df.description || ''}

+
+ `); + + let action_button = $( + `
`, + ).appendTo(this.wrapper); + action_button.append(` + + `); + + if (this.df.button.onclick) { + $(action_button).on('click', () => { + this.df.button.onclick(); + }); + } + + //TODO: handle button onclick event + } +} diff --git a/jcloud/public/js/AwaitedComponent.js b/jcloud/public/js/AwaitedComponent.js new file mode 100644 index 0000000..d09e9f2 --- /dev/null +++ b/jcloud/public/js/AwaitedComponent.js @@ -0,0 +1,31 @@ +class AwaitedComponent { + constructor(parent, df) { + this.parent = parent; + this.df = df || {}; + + this.make(); + } + + async make() { + this.wrapper = $(`
`).appendTo(this.parent); + new SectionHead(this.wrapper, { + description: this.df.loading_message || 'Loading...', + }); + + let data; + + try { + data = await this.df.promise; + } catch (e) { + if (this.df.onfail) { + clear_wrapper(this.wrapper); + this.df.onfail(e); + } + } + + if (data) { + clear_wrapper(this.wrapper); + this.df.onload(data); + } + } +} diff --git a/jcloud/public/js/ChartComponent.js b/jcloud/public/js/ChartComponent.js new file mode 100644 index 0000000..b1a0072 --- /dev/null +++ b/jcloud/public/js/ChartComponent.js @@ -0,0 +1,54 @@ +class ChartComponent { + constructor(parent, df) { + this.parent = parent; + this.df = df || {}; + + this.make(); + } + + make() { + this.wrapper = $(`
`).appendTo(this.parent); + new SectionHead(this.wrapper, this.df); + + let chart_section = $(`
`).appendTo(this.wrapper); + if (this.df.data) { + if (this.df.type === 'mixed-bars') { + let mixed_bar_section = $(`
`).appendTo( + chart_section, + ); + for (let value of this.df.data.datasets[0].values) { + var bar; + if (value === undefined) { + bar = `
`; + } else if (value === 1) { + bar = `
`; + } else if (value === 0) { + bar = `
`; + } else { + bar = `
`; + } + mixed_bar_section.append($(bar)); + } + } else { + new jingrow.Chart(chart_section.get(0), { + data: this.df.data, + type: this.df.type, // or 'bar', 'line', 'scatter', 'pie', 'percentage' + height: 250, + colors: this.df.colors, + }); + } + } else { + chart_section.append(` + + + + + + +
+ No data yet +
+ `); + } + } +} diff --git a/jcloud/public/js/DetailedListComponent.js b/jcloud/public/js/DetailedListComponent.js new file mode 100644 index 0000000..afa12f6 --- /dev/null +++ b/jcloud/public/js/DetailedListComponent.js @@ -0,0 +1,35 @@ +class DetailedListComponent { + constructor(parent, df) { + this.parent = parent; + this.df = df || {}; + + this.make(); + } + + make() { + this.wrapper = $( + `
`, + ).appendTo(this.parent); + + let brief_section = $( + `
`, + ).appendTo(this.wrapper); + $(`
`).appendTo(this.wrapper); + let detailed_section = $( + `
`, + ).appendTo(this.wrapper); + + new SectionHead(brief_section, this.df); + new SectionHead(detailed_section, { + description: 'Nothing selected', + }); + new ListComponent(brief_section, { + data: this.df.data, + template: this.df.template, + onclick: (index) => { + clear_wrapper(detailed_section); + this.df.onclick(index, detailed_section); + }, + }); + } +} diff --git a/jcloud/public/js/ListComponent.js b/jcloud/public/js/ListComponent.js new file mode 100644 index 0000000..7f87b3f --- /dev/null +++ b/jcloud/public/js/ListComponent.js @@ -0,0 +1,82 @@ +class ListComponent { + constructor(parent, df) { + this.parent = parent; + this.df = df || {}; + + this.make(); + } + + make() { + this.wrapper = $(`
`).appendTo(this.parent); + this.iterate_list( + this.wrapper, + this.df.data, + this.df.template, + this.df.onclick, + ); + } + + iterate_list(parent, data, template) { + for (var i = 0; i < data.length; i++) { + let cursor_style = this.df.onclick ? 'cursor: pointer;' : ''; + let list_row = $( + `
`, + ).appendTo(parent); + data[i].last = i == data.length - 1; + list_row.append(template(data[i])); + if (this.df.onclick) { + $(list_row).on('click', () => { + this.df.onclick(list_row[0].id); // TODO pass index + }); + } + } + } +} +// list component templates + +function title_with_message_and_tag_template(data) { + let title = data.title || ''; + let message = data.message || ''; + let tag = data.tag || ''; + let tag_type = data.tag_type || ''; + + return ` +
+
+
${title || ''}
+
+
+

${message || ''}

+

${tag || ''}

+
+
+ ${data.last ? `` : `
`} + `; +} + +function title_with_sub_text_tag_and_button_template(data) { + return ` +
+

${data.title || ''} +

+ + +
+ ${data.last ? `` : `
`} + `; +} + +function title_with_text_area_template(data) { + return ` +
+
${data.title || ''}
+
+ ${data.message || ''} +
+
+ `; +} diff --git a/jcloud/public/js/SectionHead.js b/jcloud/public/js/SectionHead.js new file mode 100644 index 0000000..eab216f --- /dev/null +++ b/jcloud/public/js/SectionHead.js @@ -0,0 +1,47 @@ +class SectionHead { + constructor(parent, df) { + this.parent = parent; + this.df = df || {}; + + this.make(); + } + + make() { + this.wrapper = $(`
`).appendTo(this.parent); + let header_section = $( + `
`, + ).appendTo(this.wrapper); + if (this.df.title) { + header_section.append(` +
+ ${this.df.title || ''} +
+ `); + } + if (this.df.button) { + let action_button = $( + `
`, + ).appendTo(header_section); + action_button.append(` + { + this.df.button.onclick(); + }); + } + } + if (this.df.description) { + this.wrapper.append(` +
+

${this.df.description || ''}

+
+ `); + } + } +} diff --git a/jcloud/public/js/datatable.js b/jcloud/public/js/datatable.js new file mode 100644 index 0000000..d81f1d4 --- /dev/null +++ b/jcloud/public/js/datatable.js @@ -0,0 +1,3 @@ +import DataTable from 'jingrow-datatable'; + +jingrow.jcloud.DataTable = DataTable; diff --git a/jcloud/public/js/feather.min.js b/jcloud/public/js/feather.min.js new file mode 100644 index 0000000..156cd61 --- /dev/null +++ b/jcloud/public/js/feather.min.js @@ -0,0 +1,13 @@ +!function(e,n){"object"==typeof exports&&"object"==typeof module?module.exports=n():"function"==typeof define&&define.amd?define([],n):"object"==typeof exports?exports.feather=n():e.feather=n()}("undefined"!=typeof self?self:this,function(){return function(e){var n={};function i(t){if(n[t])return n[t].exports;var l=n[t]={i:t,l:!1,exports:{}};return e[t].call(l.exports,l,l.exports,i),l.l=!0,l.exports}return i.m=e,i.c=n,i.d=function(e,n,t){i.o(e,n)||Object.defineProperty(e,n,{configurable:!1,enumerable:!0,get:t})},i.r=function(e){Object.defineProperty(e,"__esModule",{value:!0})},i.n=function(e){var n=e&&e.__esModule?function(){return e.default}:function(){return e};return i.d(n,"a",n),n},i.o=function(e,n){return Object.prototype.hasOwnProperty.call(e,n)},i.p="",i(i.s=80)}([function(e,n,i){(function(n){var i="object",t=function(e){return e&&e.Math==Math&&e};e.exports=t(typeof globalThis==i&&globalThis)||t(typeof window==i&&window)||t(typeof self==i&&self)||t(typeof n==i&&n)||Function("return this")()}).call(this,i(75))},function(e,n){var i={}.hasOwnProperty;e.exports=function(e,n){return i.call(e,n)}},function(e,n,i){var t=i(0),l=i(11),r=i(33),o=i(62),a=t.Symbol,c=l("wks");e.exports=function(e){return c[e]||(c[e]=o&&a[e]||(o?a:r)("Symbol."+e))}},function(e,n,i){var t=i(6);e.exports=function(e){if(!t(e))throw TypeError(String(e)+" is not an object");return e}},function(e,n){e.exports=function(e){try{return!!e()}catch(e){return!0}}},function(e,n,i){var t=i(8),l=i(7),r=i(10);e.exports=t?function(e,n,i){return l.f(e,n,r(1,i))}:function(e,n,i){return e[n]=i,e}},function(e,n){e.exports=function(e){return"object"==typeof e?null!==e:"function"==typeof e}},function(e,n,i){var t=i(8),l=i(35),r=i(3),o=i(18),a=Object.defineProperty;n.f=t?a:function(e,n,i){if(r(e),n=o(n,!0),r(i),l)try{return a(e,n,i)}catch(e){}if("get"in i||"set"in i)throw TypeError("Accessors not supported");return"value"in i&&(e[n]=i.value),e}},function(e,n,i){var t=i(4);e.exports=!t(function(){return 7!=Object.defineProperty({},"a",{get:function(){return 7}}).a})},function(e,n){e.exports={}},function(e,n){e.exports=function(e,n){return{enumerable:!(1&e),configurable:!(2&e),writable:!(4&e),value:n}}},function(e,n,i){var t=i(0),l=i(19),r=i(17),o=t["__core-js_shared__"]||l("__core-js_shared__",{});(e.exports=function(e,n){return o[e]||(o[e]=void 0!==n?n:{})})("versions",[]).push({version:"3.1.3",mode:r?"pure":"global",copyright:"© 2019 Denis Pushkarev (zloirock.ru)"})},function(e,n,i){"use strict";Object.defineProperty(n,"__esModule",{value:!0});var t=o(i(43)),l=o(i(41)),r=o(i(40));function o(e){return e&&e.__esModule?e:{default:e}}n.default=Object.keys(l.default).map(function(e){return new t.default(e,l.default[e],r.default[e])}).reduce(function(e,n){return e[n.name]=n,e},{})},function(e,n){e.exports=["constructor","hasOwnProperty","isPrototypeOf","propertyIsEnumerable","toLocaleString","toString","valueOf"]},function(e,n,i){var t=i(72),l=i(20);e.exports=function(e){return t(l(e))}},function(e,n){e.exports={}},function(e,n,i){var 
t=i(11),l=i(33),r=t("keys");e.exports=function(e){return r[e]||(r[e]=l(e))}},function(e,n){e.exports=!1},function(e,n,i){var t=i(6);e.exports=function(e,n){if(!t(e))return e;var i,l;if(n&&"function"==typeof(i=e.toString)&&!t(l=i.call(e)))return l;if("function"==typeof(i=e.valueOf)&&!t(l=i.call(e)))return l;if(!n&&"function"==typeof(i=e.toString)&&!t(l=i.call(e)))return l;throw TypeError("Can't convert object to primitive value")}},function(e,n,i){var t=i(0),l=i(5);e.exports=function(e,n){try{l(t,e,n)}catch(i){t[e]=n}return n}},function(e,n){e.exports=function(e){if(void 0==e)throw TypeError("Can't call method on "+e);return e}},function(e,n){var i=Math.ceil,t=Math.floor;e.exports=function(e){return isNaN(e=+e)?0:(e>0?t:i)(e)}},function(e,n,i){var t; +/*! + Copyright (c) 2016 Jed Watson. + Licensed under the MIT License (MIT), see + http://jedwatson.github.io/classnames +*/ +/*! + Copyright (c) 2016 Jed Watson. + Licensed under the MIT License (MIT), see + http://jedwatson.github.io/classnames +*/ +!function(){"use strict";var i=function(){function e(){}function n(e,n){for(var i=n.length,t=0;t0?l(t(e),9007199254740991):0}},function(e,n,i){var t=i(1),l=i(14),r=i(68),o=i(15),a=r(!1);e.exports=function(e,n){var i,r=l(e),c=0,p=[];for(i in r)!t(o,i)&&t(r,i)&&p.push(i);for(;n.length>c;)t(r,i=n[c++])&&(~a(p,i)||p.push(i));return p}},function(e,n,i){var t=i(0),l=i(11),r=i(5),o=i(1),a=i(19),c=i(36),p=i(37),y=p.get,h=p.enforce,x=String(c).split("toString");l("inspectSource",function(e){return c.call(e)}),(e.exports=function(e,n,i,l){var c=!!l&&!!l.unsafe,p=!!l&&!!l.enumerable,y=!!l&&!!l.noTargetGet;"function"==typeof i&&("string"!=typeof n||o(i,"name")||r(i,"name",n),h(i).source=x.join("string"==typeof n?n:"")),e!==t?(c?!y&&e[n]&&(p=!0):delete e[n],p?e[n]=i:r(e,n,i)):p?e[n]=i:a(n,i)})(Function.prototype,"toString",function(){return"function"==typeof this&&y(this).source||c.call(this)})},function(e,n){var i={}.toString;e.exports=function(e){return i.call(e).slice(8,-1)}},function(e,n,i){var t=i(8),l=i(73),r=i(10),o=i(14),a=i(18),c=i(1),p=i(35),y=Object.getOwnPropertyDescriptor;n.f=t?y:function(e,n){if(e=o(e),n=a(n,!0),p)try{return y(e,n)}catch(e){}if(c(e,n))return r(!l.f.call(e,n),e[n])}},function(e,n,i){var t=i(0),l=i(31).f,r=i(5),o=i(29),a=i(19),c=i(71),p=i(65);e.exports=function(e,n){var i,y,h,x,s,u=e.target,d=e.global,f=e.stat;if(i=d?t:f?t[u]||a(u,{}):(t[u]||{}).prototype)for(y in n){if(x=n[y],h=e.noTargetGet?(s=l(i,y))&&s.value:i[y],!p(d?y:u+(f?".":"#")+y,e.forced)&&void 0!==h){if(typeof x==typeof h)continue;c(x,h)}(e.sham||h&&h.sham)&&r(x,"sham",!0),o(i,y,x,e)}}},function(e,n){var i=0,t=Math.random();e.exports=function(e){return"Symbol(".concat(void 0===e?"":e,")_",(++i+t).toString(36))}},function(e,n,i){var t=i(0),l=i(6),r=t.document,o=l(r)&&l(r.createElement);e.exports=function(e){return o?r.createElement(e):{}}},function(e,n,i){var t=i(8),l=i(4),r=i(34);e.exports=!t&&!l(function(){return 7!=Object.defineProperty(r("div"),"a",{get:function(){return 7}}).a})},function(e,n,i){var t=i(11);e.exports=t("native-function-to-string",Function.toString)},function(e,n,i){var t,l,r,o=i(76),a=i(0),c=i(6),p=i(5),y=i(1),h=i(16),x=i(15),s=a.WeakMap;if(o){var u=new s,d=u.get,f=u.has,g=u.set;t=function(e,n){return g.call(u,e,n),n},l=function(e){return d.call(u,e)||{}},r=function(e){return f.call(u,e)}}else{var v=h("state");x[v]=!0,t=function(e,n){return p(e,v,n),n},l=function(e){return y(e,v)?e[v]:{}},r=function(e){return y(e,v)}}e.exports={set:t,get:l,has:r,enforce:function(e){return 
r(e)?l(e):t(e,{})},getterFor:function(e){return function(n){var i;if(!c(n)||(i=l(n)).type!==e)throw TypeError("Incompatible receiver, "+e+" required");return i}}}},function(e,n,i){"use strict";Object.defineProperty(n,"__esModule",{value:!0});var t=Object.assign||function(e){for(var n=1;n0&&void 0!==arguments[0]?arguments[0]:{};if("undefined"==typeof document)throw new Error("`feather.replace()` only works in a browser environment.");var n=document.querySelectorAll("[data-feather]");Array.from(n).forEach(function(n){return function(e){var n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{},i=function(e){return Array.from(e.attributes).reduce(function(e,n){return e[n.name]=n.value,e},{})}(e),o=i["data-feather"];delete i["data-feather"];var a=r.default[o].toSvg(t({},n,i,{class:(0,l.default)(n.class,i.class)})),c=(new DOMParser).parseFromString(a,"image/svg+xml").querySelector("svg");e.parentNode.replaceChild(c,e)}(n,e)})}},function(e,n,i){"use strict";Object.defineProperty(n,"__esModule",{value:!0});var t,l=i(12),r=(t=l)&&t.__esModule?t:{default:t};n.default=function(e){var n=arguments.length>1&&void 0!==arguments[1]?arguments[1]:{};if(console.warn("feather.toSvg() is deprecated. Please use feather.icons[name].toSvg() instead."),!e)throw new Error("The required `key` (icon name) parameter is missing.");if(!r.default[e])throw new Error("No icon matching '"+e+"'. See the complete list of icons at https://feathericons.com");return r.default[e].toSvg(n)}},function(e){e.exports={activity:["pulse","health","action","motion"],airplay:["stream","cast","mirroring"],"alert-circle":["warning","alert","danger"],"alert-octagon":["warning","alert","danger"],"alert-triangle":["warning","alert","danger"],"align-center":["text alignment","center"],"align-justify":["text alignment","justified"],"align-left":["text alignment","left"],"align-right":["text 
alignment","right"],anchor:[],archive:["index","box"],"at-sign":["mention","at","email","message"],award:["achievement","badge"],aperture:["camera","photo"],"bar-chart":["statistics","diagram","graph"],"bar-chart-2":["statistics","diagram","graph"],battery:["power","electricity"],"battery-charging":["power","electricity"],bell:["alarm","notification","sound"],"bell-off":["alarm","notification","silent"],bluetooth:["wireless"],"book-open":["read","library"],book:["read","dictionary","booklet","magazine","library"],bookmark:["read","clip","marker","tag"],box:["cube"],briefcase:["work","bag","baggage","folder"],calendar:["date"],camera:["photo"],cast:["chromecast","airplay"],circle:["off","zero","record"],clipboard:["copy"],clock:["time","watch","alarm"],"cloud-drizzle":["weather","shower"],"cloud-lightning":["weather","bolt"],"cloud-rain":["weather"],"cloud-snow":["weather","blizzard"],cloud:["weather"],codepen:["logo"],codesandbox:["logo"],code:["source","programming"],coffee:["drink","cup","mug","tea","cafe","hot","beverage"],columns:["layout"],command:["keyboard","cmd","terminal","prompt"],compass:["navigation","safari","travel","direction"],copy:["clone","duplicate"],"corner-down-left":["arrow","return"],"corner-down-right":["arrow"],"corner-left-down":["arrow"],"corner-left-up":["arrow"],"corner-right-down":["arrow"],"corner-right-up":["arrow"],"corner-up-left":["arrow"],"corner-up-right":["arrow"],cpu:["processor","technology"],"credit-card":["purchase","payment","cc"],crop:["photo","image"],crosshair:["aim","target"],database:["storage","memory"],delete:["remove"],disc:["album","cd","dvd","music"],"dollar-sign":["currency","money","payment"],droplet:["water"],edit:["pencil","change"],"edit-2":["pencil","change"],"edit-3":["pencil","change"],eye:["view","watch"],"eye-off":["view","watch","hide","hidden"],"external-link":["outbound"],facebook:["logo","social"],"fast-forward":["music"],figma:["logo","design","tool"],"file-minus":["delete","remove","erase"],"file-plus":["add","create","new"],"file-text":["data","txt","pdf"],film:["movie","video"],filter:["funnel","hopper"],flag:["report"],"folder-minus":["directory"],"folder-plus":["directory"],folder:["directory"],framer:["logo","design","tool"],frown:["emoji","face","bad","sad","emotion"],gift:["present","box","birthday","party"],"git-branch":["code","version control"],"git-commit":["code","version control"],"git-merge":["code","version control"],"git-pull-request":["code","version control"],github:["logo","version control"],gitlab:["logo","version control"],globe:["world","browser","language","translate"],"hard-drive":["computer","server","memory","data"],hash:["hashtag","number","pound"],headphones:["music","audio","sound"],heart:["like","love","emotion"],"help-circle":["question mark"],hexagon:["shape","node.js","logo"],home:["house","living"],image:["picture"],inbox:["email"],instagram:["logo","camera"],key:["password","login","authentication","secure"],layers:["stack"],layout:["window","webpage"],"life-bouy":["help","life ring","support"],link:["chain","url"],"link-2":["chain","url"],linkedin:["logo","social media"],list:["options"],lock:["security","password","secure"],"log-in":["sign in","arrow","enter"],"log-out":["sign 
out","arrow","exit"],mail:["email","message"],"map-pin":["location","navigation","travel","marker"],map:["location","navigation","travel"],maximize:["fullscreen"],"maximize-2":["fullscreen","arrows","expand"],meh:["emoji","face","neutral","emotion"],menu:["bars","navigation","hamburger"],"message-circle":["comment","chat"],"message-square":["comment","chat"],"mic-off":["record","sound","mute"],mic:["record","sound","listen"],minimize:["exit fullscreen","close"],"minimize-2":["exit fullscreen","arrows","close"],minus:["subtract"],monitor:["tv","screen","display"],moon:["dark","night"],"more-horizontal":["ellipsis"],"more-vertical":["ellipsis"],"mouse-pointer":["arrow","cursor"],move:["arrows"],music:["note"],navigation:["location","travel"],"navigation-2":["location","travel"],octagon:["stop"],package:["box","container"],paperclip:["attachment"],pause:["music","stop"],"pause-circle":["music","audio","stop"],"pen-tool":["vector","drawing"],percent:["discount"],"phone-call":["ring"],"phone-forwarded":["call"],"phone-incoming":["call"],"phone-missed":["call"],"phone-off":["call","mute"],"phone-outgoing":["call"],phone:["call"],play:["music","start"],"pie-chart":["statistics","diagram"],"play-circle":["music","start"],plus:["add","new"],"plus-circle":["add","new"],"plus-square":["add","new"],pocket:["logo","save"],power:["on","off"],printer:["fax","office","device"],radio:["signal"],"refresh-cw":["synchronise","arrows"],"refresh-ccw":["arrows"],repeat:["loop","arrows"],rewind:["music"],"rotate-ccw":["arrow"],"rotate-cw":["arrow"],rss:["feed","subscribe"],save:["floppy disk"],scissors:["cut"],search:["find","magnifier","magnifying glass"],send:["message","mail","email","paper airplane","paper aeroplane"],settings:["cog","edit","gear","preferences"],"share-2":["network","connections"],shield:["security","secure"],"shield-off":["security","insecure"],"shopping-bag":["ecommerce","cart","purchase","store"],"shopping-cart":["ecommerce","cart","purchase","store"],shuffle:["music"],"skip-back":["music"],"skip-forward":["music"],slack:["logo"],slash:["ban","no"],sliders:["settings","controls"],smartphone:["cellphone","device"],smile:["emoji","face","happy","good","emotion"],speaker:["audio","music"],star:["bookmark","favorite","like"],"stop-circle":["media","music"],sun:["brightness","weather","light"],sunrise:["weather","time","morning","day"],sunset:["weather","time","evening","night"],tablet:["device"],tag:["label"],target:["logo","bullseye"],terminal:["code","command 
line","prompt"],thermometer:["temperature","celsius","fahrenheit","weather"],"thumbs-down":["dislike","bad","emotion"],"thumbs-up":["like","good","emotion"],"toggle-left":["on","off","switch"],"toggle-right":["on","off","switch"],tool:["settings","spanner"],trash:["garbage","delete","remove","bin"],"trash-2":["garbage","delete","remove","bin"],triangle:["delta"],truck:["delivery","van","shipping","transport","lorry"],tv:["television","stream"],twitch:["logo"],twitter:["logo","social"],type:["text"],umbrella:["rain","weather"],unlock:["security"],"user-check":["followed","subscribed"],"user-minus":["delete","remove","unfollow","unsubscribe"],"user-plus":["new","add","create","follow","subscribe"],"user-x":["delete","remove","unfollow","unsubscribe","unavailable"],user:["person","account"],users:["group"],"video-off":["camera","movie","film"],video:["camera","movie","film"],voicemail:["phone"],volume:["music","sound","mute"],"volume-1":["music","sound"],"volume-2":["music","sound"],"volume-x":["music","sound","mute"],watch:["clock","time"],"wifi-off":["disabled"],wifi:["connection","signal","wireless"],wind:["weather","air"],"x-circle":["cancel","close","delete","remove","times","clear"],"x-octagon":["delete","stop","alert","warning","times","clear"],"x-square":["cancel","close","delete","remove","times","clear"],x:["cancel","close","delete","remove","times","clear"],youtube:["logo","video","play"],"zap-off":["flash","camera","lightning"],zap:["flash","camera","lightning"],"zoom-in":["magnifying glass"],"zoom-out":["magnifying glass"]}},function(e){e.exports={activity:'',airplay:'',"alert-circle":'',"alert-octagon":'',"alert-triangle":'',"align-center":'',"align-justify":'',"align-left":'',"align-right":'',anchor:'',aperture:'',archive:'',"arrow-down-circle":'',"arrow-down-left":'',"arrow-down-right":'',"arrow-down":'',"arrow-left-circle":'',"arrow-left":'',"arrow-right-circle":'',"arrow-right":'',"arrow-up-circle":'',"arrow-up-left":'',"arrow-up-right":'',"arrow-up":'',"at-sign":'',award:'',"bar-chart-2":'',"bar-chart":'',"battery-charging":'',battery:'',"bell-off":'',bell:'',bluetooth:'',bold:'',"book-open":'',book:'',bookmark:'',box:'',briefcase:'',calendar:'',"camera-off":'',camera:'',cast:'',"check-circle":'',"check-square":'',check:'',"chevron-down":'',"chevron-left":'',"chevron-right":'',"chevron-up":'',"chevrons-down":'',"chevrons-left":'',"chevrons-right":'',"chevrons-up":'',chrome:'',circle:'',clipboard:'',clock:'',"cloud-drizzle":'',"cloud-lightning":'',"cloud-off":'',"cloud-rain":'',"cloud-snow":'',cloud:'',code:'',codepen:'',codesandbox:'',coffee:'',columns:'',command:'',compass:'',copy:'',"corner-down-left":'',"corner-down-right":'',"corner-left-down":'',"corner-left-up":'',"corner-right-down":'',"corner-right-up":'',"corner-up-left":'',"corner-up-right":'',cpu:'',"credit-card":'',crop:'',crosshair:'',database:'',delete:'',disc:'',"divide-circle":'',"divide-square":'',divide:'',"dollar-sign":'',"download-cloud":'',download:'',dribbble:'',droplet:'',"edit-2":'',"edit-3":'',edit:'',"external-link":'',"eye-off":'',eye:'',facebook:'',"fast-forward":'',feather:'',figma:'',"file-minus":'',"file-plus":'',"file-text":'',file:'',film:'',filter:'',flag:'',"folder-minus":'',"folder-plus":'',folder:'',framer:'',frown:'',gift:'',"git-branch":'',"git-commit":'',"git-merge":'',"git-pull-request":'',github:'',gitlab:'',globe:'',grid:'',"hard-drive":'',hash:'',headphones:'',heart:'',"help-circle":'',hexagon:'',home:'',image:'',inbox:'',info:'',instagram:'',italic:'',key:'',layers:'',layout:'',"l
ife-buoy":'',"link-2":'',link:'',linkedin:'',list:'',loader:'',lock:'',"log-in":'',"log-out":'',mail:'',"map-pin":'',map:'',"maximize-2":'',maximize:'',meh:'',menu:'',"message-circle":'',"message-square":'',"mic-off":'',mic:'',"minimize-2":'',minimize:'',"minus-circle":'',"minus-square":'',minus:'',monitor:'',moon:'',"more-horizontal":'',"more-vertical":'',"mouse-pointer":'',move:'',music:'',"navigation-2":'',navigation:'',octagon:'',package:'',paperclip:'',"pause-circle":'',pause:'',"pen-tool":'',percent:'',"phone-call":'',"phone-forwarded":'',"phone-incoming":'',"phone-missed":'',"phone-off":'',"phone-outgoing":'',phone:'',"pie-chart":'',"play-circle":'',play:'',"plus-circle":'',"plus-square":'',plus:'',pocket:'',power:'',printer:'',radio:'',"refresh-ccw":'',"refresh-cw":'',repeat:'',rewind:'',"rotate-ccw":'',"rotate-cw":'',rss:'',save:'',scissors:'',search:'',send:'',server:'',settings:'',"share-2":'',share:'',"shield-off":'',shield:'',"shopping-bag":'',"shopping-cart":'',shuffle:'',sidebar:'',"skip-back":'',"skip-forward":'',slack:'',slash:'',sliders:'',smartphone:'',smile:'',speaker:'',square:'',star:'',"stop-circle":'',sun:'',sunrise:'',sunset:'',tablet:'',tag:'',target:'',terminal:'',thermometer:'',"thumbs-down":'',"thumbs-up":'',"toggle-left":'',"toggle-right":'',tool:'',"trash-2":'',trash:'',trello:'',"trending-down":'',"trending-up":'',triangle:'',truck:'',tv:'',twitch:'',twitter:'',type:'',umbrella:'',underline:'',unlock:'',"upload-cloud":'',upload:'',"user-check":'',"user-minus":'',"user-plus":'',"user-x":'',user:'',users:'',"video-off":'',video:'',voicemail:'',"volume-1":'',"volume-2":'',"volume-x":'',volume:'',watch:'',"wifi-off":'',wifi:'',wind:'',"x-circle":'',"x-octagon":'',"x-square":'',x:'',youtube:'',"zap-off":'',zap:'',"zoom-in":'',"zoom-out":''}},function(e){e.exports={xmlns:"http://www.w3.org/2000/svg",width:24,height:24,viewBox:"0 0 24 24",fill:"none",stroke:"currentColor","stroke-width":2,"stroke-linecap":"round","stroke-linejoin":"round"}},function(e,n,i){"use strict";Object.defineProperty(n,"__esModule",{value:!0});var t=Object.assign||function(e){for(var n=1;n2&&void 0!==arguments[2]?arguments[2]:[];!function(e,n){if(!(e instanceof n))throw new TypeError("Cannot call a class as a function")}(this,e),this.name=n,this.contents=i,this.tags=l,this.attrs=t({},o.default,{class:"feather feather-"+n})}return l(e,[{key:"toSvg",value:function(){var e=arguments.length>0&&void 0!==arguments[0]?arguments[0]:{};return""+this.contents+""}},{key:"toString",value:function(){return this.contents}}]),e}();n.default=c},function(e,n,i){"use strict";var t=o(i(12)),l=o(i(39)),r=o(i(38));function o(e){return e&&e.__esModule?e:{default:e}}e.exports={icons:t.default,toSvg:l.default,replace:r.default}},function(e,n,i){e.exports=i(0)},function(e,n,i){var t=i(2)("iterator"),l=!1;try{var r=0,o={next:function(){return{done:!!r++}},return:function(){l=!0}};o[t]=function(){return this},Array.from(o,function(){throw 2})}catch(e){}e.exports=function(e,n){if(!n&&!l)return!1;var i=!1;try{var r={};r[t]=function(){return{next:function(){return{done:i=!0}}}},e(r)}catch(e){}return i}},function(e,n,i){var t=i(30),l=i(2)("toStringTag"),r="Arguments"==t(function(){return arguments}());e.exports=function(e){var n,i,o;return void 0===e?"Undefined":null===e?"Null":"string"==typeof(i=function(e,n){try{return e[n]}catch(e){}}(n=Object(e),l))?i:r?t(n):"Object"==(o=t(n))&&"function"==typeof n.callee?"Arguments":o}},function(e,n,i){var t=i(47),l=i(9),r=i(2)("iterator");e.exports=function(e){if(void 0!=e)return 
e[r]||e["@@iterator"]||l[t(e)]}},function(e,n,i){"use strict";var t=i(18),l=i(7),r=i(10);e.exports=function(e,n,i){var o=t(n);o in e?l.f(e,o,r(0,i)):e[o]=i}},function(e,n,i){var t=i(2),l=i(9),r=t("iterator"),o=Array.prototype;e.exports=function(e){return void 0!==e&&(l.Array===e||o[r]===e)}},function(e,n,i){var t=i(3);e.exports=function(e,n,i,l){try{return l?n(t(i)[0],i[1]):n(i)}catch(n){var r=e.return;throw void 0!==r&&t(r.call(e)),n}}},function(e,n){e.exports=function(e){if("function"!=typeof e)throw TypeError(String(e)+" is not a function");return e}},function(e,n,i){var t=i(52);e.exports=function(e,n,i){if(t(e),void 0===n)return e;switch(i){case 0:return function(){return e.call(n)};case 1:return function(i){return e.call(n,i)};case 2:return function(i,t){return e.call(n,i,t)};case 3:return function(i,t,l){return e.call(n,i,t,l)}}return function(){return e.apply(n,arguments)}}},function(e,n,i){"use strict";var t=i(53),l=i(24),r=i(51),o=i(50),a=i(27),c=i(49),p=i(48);e.exports=function(e){var n,i,y,h,x=l(e),s="function"==typeof this?this:Array,u=arguments.length,d=u>1?arguments[1]:void 0,f=void 0!==d,g=0,v=p(x);if(f&&(d=t(d,u>2?arguments[2]:void 0,2)),void 0==v||s==Array&&o(v))for(i=new s(n=a(x.length));n>g;g++)c(i,g,f?d(x[g],g):x[g]);else for(h=v.call(x),i=new s;!(y=h.next()).done;g++)c(i,g,f?r(h,d,[y.value,g],!0):y.value);return i.length=g,i}},function(e,n,i){var t=i(32),l=i(54);t({target:"Array",stat:!0,forced:!i(46)(function(e){Array.from(e)})},{from:l})},function(e,n,i){var t=i(6),l=i(3);e.exports=function(e,n){if(l(e),!t(n)&&null!==n)throw TypeError("Can't set "+String(n)+" as a prototype")}},function(e,n,i){var t=i(56);e.exports=Object.setPrototypeOf||("__proto__"in{}?function(){var e,n=!1,i={};try{(e=Object.getOwnPropertyDescriptor(Object.prototype,"__proto__").set).call(i,[]),n=i instanceof Array}catch(e){}return function(i,l){return t(i,l),n?e.call(i,l):i.__proto__=l,i}}():void 0)},function(e,n,i){var t=i(0).document;e.exports=t&&t.documentElement},function(e,n,i){var t=i(28),l=i(13);e.exports=Object.keys||function(e){return t(e,l)}},function(e,n,i){var t=i(8),l=i(7),r=i(3),o=i(59);e.exports=t?Object.defineProperties:function(e,n){r(e);for(var i,t=o(n),a=t.length,c=0;a>c;)l.f(e,i=t[c++],n[i]);return e}},function(e,n,i){var t=i(3),l=i(60),r=i(13),o=i(15),a=i(58),c=i(34),p=i(16)("IE_PROTO"),y=function(){},h=function(){var e,n=c("iframe"),i=r.length;for(n.style.display="none",a.appendChild(n),n.src=String("javascript:"),(e=n.contentWindow.document).open(),e.write(" + +{%- endblock -%} diff --git a/jcloud/templates/marketplace/footer.html b/jcloud/templates/marketplace/footer.html new file mode 100644 index 0000000..3b90615 --- /dev/null +++ b/jcloud/templates/marketplace/footer.html @@ -0,0 +1,44 @@ + \ No newline at end of file diff --git a/jcloud/templates/marketplace/logo.html b/jcloud/templates/marketplace/logo.html new file mode 100644 index 0000000..e7b53e2 --- /dev/null +++ b/jcloud/templates/marketplace/logo.html @@ -0,0 +1,28 @@ + + + + + + + + + + + + + + + diff --git a/jcloud/templates/marketplace/macros.html b/jcloud/templates/marketplace/macros.html new file mode 100644 index 0000000..c97426e --- /dev/null +++ b/jcloud/templates/marketplace/macros.html @@ -0,0 +1,94 @@ +{% macro button(label, link, kind='default', as='a', type='button') %} +{%- set classes = resolve_class([ +'inline-flex items-center justify-center px-3 py-1 text-base leading-5 rounded-md focus:outline-none', +{ +'text-white bg-gray-900 hover:bg-gray-800 active:bg-gray-700 focus-visible:ring 
focus-visible:ring-gray-400': kind == +'primary', +'text-gray-800 bg-gray-100 hover:bg-gray-200 active:bg-gray-300 focus-visible:ring focus-visible:ring-gray-400': kind == +'default', +} +]) -%} +{%- if as == 'a' -%} +{{ label }} +{%- else -%} + +{%- endif -%} +{% endmacro %} + +{% macro link(label, url, class, blank=False) %} +{{ label }} +{% endmacro %} + +{% macro badge_gray(title) %} + + {{ title }} + +{% endmacro %} + +{% macro badge_green(title) %} + + {{ title }} + +{% endmacro %} + +{% macro breadcrumbs(items) %} + +{% endmacro %} + +{% macro form(fields, action='') %} + +
+ {%- for df in fields -%} +

+ + {%- if df.fieldtype == 'Select' -%} + + {%- else -%} + + {%- endif -%} +

+ {%- endfor -%} + +
+

+ {{ button('Submit', as='button', type='submit', kind='primary') }} +

+ +{% endmacro %} + + +{% macro five_star_rating(rating=0.0) %} +
+ {% for i in range(1, 6) %} + + + + + {% endfor %} +
+{% endmacro %} + +{%- macro approved_badge() -%} + +{%- endmacro -%} diff --git a/jcloud/templates/marketplace/navbar.html b/jcloud/templates/marketplace/navbar.html new file mode 100644 index 0000000..60753b4 --- /dev/null +++ b/jcloud/templates/marketplace/navbar.html @@ -0,0 +1,37 @@ + \ No newline at end of file diff --git a/jcloud/templates/pages/__init__.py b/jcloud/templates/pages/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/templates/pages/base.html b/jcloud/templates/pages/base.html new file mode 100644 index 0000000..c4c1ae8 --- /dev/null +++ b/jcloud/templates/pages/base.html @@ -0,0 +1,18 @@ + + + + + + + + + + +{% block dashboard %}{% endblock %} + + + + + \ No newline at end of file diff --git a/jcloud/templates/saas/billing_layout.html b/jcloud/templates/saas/billing_layout.html new file mode 100644 index 0000000..3f08f3f --- /dev/null +++ b/jcloud/templates/saas/billing_layout.html @@ -0,0 +1,38 @@ +{%- extends "templates/base.html" -%} + +{%- block favicon -%} + +{%- endblock -%} + +{%- block navbar -%} +{%- endblock -%} + +{%- block footer -%} +{%- endblock -%} + +{%- block content -%} +{%- endblock -%} + +{%- block script -%} +{{ super() }} +{%- endblock -%} + +{%- block style -%} + +{%- endblock -%} diff --git a/jcloud/templates/saas/layout.html b/jcloud/templates/saas/layout.html new file mode 100644 index 0000000..3f7b6b5 --- /dev/null +++ b/jcloud/templates/saas/layout.html @@ -0,0 +1,144 @@ +{%- extends "templates/base.html" -%} + +{%- block navbar -%} + +{%- endblock -%} + +{%- block footer -%} +{%- endblock -%} + +{%- block content -%} +{%- endblock -%} + +{%- block script -%} +{{ super() }} +{%- endblock -%} + +{%- block style -%} + +{%- endblock -%} diff --git a/jcloud/templates/saas/macros.html b/jcloud/templates/saas/macros.html new file mode 100644 index 0000000..66af328 --- /dev/null +++ b/jcloud/templates/saas/macros.html @@ -0,0 +1,168 @@ + +{% macro subs_wrapper() %} + +{% endmacro %} + +{% macro plans_wrapper() %} + +{% endmacro %} + +{% macro email_verify_wrapper() %} + +{% endmacro %} + +{% macro address_wrapper() %} + +{% endmacro %} + +{% macro checkout_wrapper() %} + +{% endmacro %} + + +{% macro success_card() %} + +{% endmacro %} + +{% macro error_card() %} + +{% endmacro %} + + +{% macro stripe_wrapper() %} + +{% endmacro %} + +{% macro load_stripe() %} + +{% endmacro %} + +{% macro load_subs() %} +
+ Loading Subscriptions +
+{% endmacro %} diff --git a/jcloud/templates/saas/setup-account.html b/jcloud/templates/saas/setup-account.html new file mode 100644 index 0000000..9f930ed --- /dev/null +++ b/jcloud/templates/saas/setup-account.html @@ -0,0 +1,256 @@ +{%- extends "templates/saas/layout.html" -%} +{% set app_title = app_title %} +{% set image_path = image_path %} + +{%- block content -%} + + + +{% if headless %} + +
+
+ {%- if jingrow.form_dict.key -%} +
+ +
+ {%- else -%} + + {%- endif -%} +
+
+ +{% else %} +
+
+
+ {%- if jingrow.form_dict.key -%} + + +
+
+

Company Name

+ +
+
+
+

Industry

+ +
+
+
+
+

Number of employees

+ +
+
+
+
+

Your Designation

+ +
+
+
+

Phone Number

+ +
+
+ +

I am okay if my details are shared with + a local partner

+
+ +
+ {%- else -%} + + {%- endif -%} +
+
+
+ +{% endif %} + +{%- endblock -%} + +{%- block script -%} + + +{%- endblock -%} diff --git a/jcloud/templates/saas/signup.html b/jcloud/templates/saas/signup.html new file mode 100644 index 0000000..6412abc --- /dev/null +++ b/jcloud/templates/saas/signup.html @@ -0,0 +1,262 @@ +{%- extends "templates/saas/layout.html" -%} +{% set app_title = app_title %} +{% set image_path = image_path %} + +{%- block content -%} + + + + +
+
+
+ + + + + + {% if enable_google_oauth %} +
+
+
+

+ Or +

+
+
+
+ + + + + + + + Signup with Google + +
+
+ {% endif %} + +
+
+

Verification email sent

+

+ We have sent an email to . + Please click the link in that email to verify your email address and create your account.

+
+
+
+
+
+{%- endblock -%} + +{%- block script -%} + + +{%- endblock -%} \ No newline at end of file diff --git a/jcloud/templates/saas/tailwind.config.js b/jcloud/templates/saas/tailwind.config.js new file mode 100644 index 0000000..3b7b63a --- /dev/null +++ b/jcloud/templates/saas/tailwind.config.js @@ -0,0 +1,7 @@ +const config = require('../../../dashboard/tailwind.config'); + +module.exports = { + theme: config.theme, + plugins: config.plugins, + content: ['./jcloud/**/saas/*.html', './jcloud/**/saas/*.html'], +}; diff --git a/jcloud/templates/saas_signup_layout.html b/jcloud/templates/saas_signup_layout.html new file mode 100644 index 0000000..a125ec0 --- /dev/null +++ b/jcloud/templates/saas_signup_layout.html @@ -0,0 +1,63 @@ +{%- extends "templates/base.html" -%} + +{%- block navbar -%} + +{%- endblock -%} + +{%- block footer -%} +{%- endblock -%} + +{%- block content -%} +{%- endblock -%} + +{%- block script -%} +{{ super() }} +{%- endblock -%} + +{%- block style -%} + +{%- endblock -%} diff --git a/jcloud/tests/__init__.py b/jcloud/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/tests/before_test.py b/jcloud/tests/before_test.py new file mode 100644 index 0000000..d8f778e --- /dev/null +++ b/jcloud/tests/before_test.py @@ -0,0 +1,59 @@ +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +import os + +import jingrow +from jingrow import set_user as _set_user +from jingrow.model.document import Document +from jingrow.tests.utils import JingrowTestCase + +from jcloud.utils import _get_current_team, _system_user + + +def pg_equal(self: Document, other: Document) -> bool: + """Partial equality checking of Document object""" + if not isinstance(other, Document): + return False + if self.pagetype == other.pagetype and self.name == other.name: + return True + return False + + +def execute(): + settings = jingrow.get_single("Jcloud Settings") + if not (settings.stripe_secret_key and settings.stripe_publishable_key): + create_test_stripe_credentials() + import cssutils + + # Silence the cssutils errors that are mostly pointless + cssutils.log.setLevel(50) + + # Monkey patch certain methods for when tests are running + Document.__eq__ = pg_equal + + JingrowTestCase.setUp = lambda self: jingrow.db.truncate("Agent Request Failure") + + # patch jingrow.set_user that + jingrow.set_user = set_user_with_current_team + + # jingrow.local.team helper + jingrow.local.team = _get_current_team + jingrow.local.system_user = _system_user + + +def set_user_with_current_team(user): + _set_user(user) + jingrow.local._current_team = None + + +def create_test_stripe_credentials(): + publishable_key = os.environ.get("STRIPE_PUBLISHABLE_KEY") + secret_key = os.environ.get("STRIPE_SECRET_KEY") + + if publishable_key and secret_key: + jingrow.db.set_single_value( + "Jcloud Settings", "stripe_publishable_key", publishable_key + ) + jingrow.db.set_single_value("Jcloud Settings", "stripe_secret_key", secret_key) diff --git a/jcloud/tests/test_agent.py b/jcloud/tests/test_agent.py new file mode 100644 index 0000000..a123929 --- /dev/null +++ b/jcloud/tests/test_agent.py @@ -0,0 +1,132 @@ +# Copyright (c) 2024, JINGROW +# For license information, please see license.txt + +import jingrow +import requests +import responses +from jingrow.tests.utils import JingrowTestCase + +from jcloud.agent import Agent, AgentRequestSkippedException +from jcloud.jcloud.pagetype.agent_request_failure.agent_request_failure import ( + remove_old_failures, +) +from 
jcloud.jcloud.pagetype.server.test_server import create_test_server + + +def create_test_agent_request_failure( + server, traceback="Traceback", error="Error", failure_count=1 +): + fields = { + "server_type": server.pagetype, + "server": server.name, + "traceback": traceback, + "error": error, + "failure_count": failure_count, + } + + return jingrow.new_pg("Agent Request Failure", **fields).insert( + ignore_permissions=True + ) + + +class TestAgent(JingrowTestCase): + def tearDown(self): + jingrow.db.rollback() + jingrow.db.truncate("Agent Request Failure") + + def setUp(self): + jingrow.db.truncate("Agent Request Failure") + + @responses.activate + def test_ping_request(self): + server = create_test_server() + + responses.add( + responses.GET, + f"https://{server.name}:443/agent/ping", + status=200, + json={"message": "pong"}, + ) + + agent = Agent(server.name, server.pagetype) + agent.request("GET", "ping") + + @responses.activate + def test_request_failure_creates_failure_record(self): + server = create_test_server() + + responses.add( + responses.GET, + f"https://{server.name}:443/agent/ping", + body=requests.ConnectTimeout(), + ) + + failures_before = jingrow.db.count("Agent Request Failure") + + agent = Agent(server.name, server.pagetype) + self.assertRaises(requests.ConnectTimeout, agent.request, "GET", "ping") + + failures_after = jingrow.db.count("Agent Request Failure") + self.assertEqual(failures_after, failures_before + 1) + + failure = jingrow.get_last_pg("Agent Request Failure") + self.assertEqual(failure.server, server.name) + self.assertEqual(failure.server_type, server.pagetype) + self.assertEqual(failure.failure_count, 1) + + @responses.activate + def test_request_skips_after_past_failure(self): + server = create_test_server() + + responses.add( + responses.GET, + f"https://{server.name}:443/agent/ping", + body=requests.ConnectTimeout(), + ) + + agent = Agent(server.name, server.pagetype) + self.assertRaises(requests.ConnectTimeout, agent.request, "GET", "ping") + self.assertRaises(AgentRequestSkippedException, agent.request, "GET", "ping") + + def test_failure_record_asks_to_skip_requests(self): + server = create_test_server() + + agent = Agent(server.name, server.pagetype) + self.assertEqual(agent.should_skip_requests(), False) + + create_test_agent_request_failure(server) + self.assertEqual(agent.should_skip_requests(), True) + + @responses.activate + def test_request_succeeds_after_removing_failure_record(self): + server = create_test_server() + + create_test_agent_request_failure(server) + agent = Agent(server.name, server.pagetype) + self.assertRaises(AgentRequestSkippedException, agent.request, "GET", "ping") + + responses.add( + responses.GET, + f"https://{server.name}:443/agent/ping", + status=200, + json={"message": "pong"}, + ) + jingrow.db.delete("Agent Request Failure", {"server": server.name}) + self.assertEqual(agent.request("GET", "ping"), {"message": "pong"}) + + @responses.activate + def test_remove_function_removes_failure_if_ping_succeeds(self): + server = create_test_server() + + create_test_agent_request_failure(server) + responses.add( + responses.GET, + f"https://{server.name}:443/agent/ping", + status=200, + json={"message": "pong"}, + ) + + remove_old_failures() + + responses.assert_call_count(f"https://{server.name}:443/agent/ping", 1) + self.assertEqual(jingrow.db.count("Agent Request Failure", {"server": server.name}), 0) diff --git a/jcloud/tests/test_audit.py b/jcloud/tests/test_audit.py new file mode 100644 index 0000000..9966c05 --- 
/dev/null +++ b/jcloud/tests/test_audit.py @@ -0,0 +1,109 @@ +from datetime import datetime, timedelta +from unittest.mock import Mock, patch + +import jingrow +from jingrow.tests.utils import JingrowTestCase + +from jcloud.jcloud.audit import BackupRecordCheck, OffsiteBackupCheck +from jcloud.jcloud.pagetype.agent_job.agent_job import AgentJob +from jcloud.jcloud.pagetype.jcloud_settings.test_jcloud_settings import ( + create_test_jcloud_settings, +) +from jcloud.jcloud.pagetype.site.test_site import create_test_site +from jcloud.jcloud.pagetype.site_activity.site_activity import log_site_activity +from jcloud.jcloud.pagetype.site_backup.test_site_backup import create_test_site_backup +from jcloud.telegram_utils import Telegram + + +@patch.object(Telegram, "send", new=Mock()) +@patch.object(AgentJob, "enqueue_http_request", new=Mock()) +class TestBackupRecordCheck(JingrowTestCase): + def tearDown(self): + jingrow.db.rollback() + + def setUp(self): + self.yesterday = jingrow.utils.now_datetime().date() - timedelta(days=1) + self._2_hrs_before_yesterday = datetime.combine(self.yesterday, datetime.min.time()) - timedelta( + hours=2 + ) + + def test_audit_will_fail_if_backup_older_than_interval(self): + create_test_jcloud_settings() + site = create_test_site(creation=self._2_hrs_before_yesterday) + create_test_site_backup(site.name, creation=self._2_hrs_before_yesterday + timedelta(hours=1)) + BackupRecordCheck() + audit_log = jingrow.get_last_pg("Audit Log", {"audit_type": BackupRecordCheck.audit_type}) + self.assertEqual(audit_log.status, "Failure") + + def test_audit_succeeds_when_backup_in_interval_exists(self): + create_test_jcloud_settings() + site = create_test_site(creation=self._2_hrs_before_yesterday) + + create_test_site_backup( + site.name, + creation=self._2_hrs_before_yesterday + timedelta(hours=3), + ) + BackupRecordCheck() + audit_log = jingrow.get_last_pg("Audit Log", {"audit_type": BackupRecordCheck.audit_type}) + self.assertEqual(audit_log.status, "Success") + + def test_audit_log_is_created(self): + create_test_jcloud_settings() + site = create_test_site(creation=self._2_hrs_before_yesterday) + create_test_site_backup(site.name, creation=self.yesterday) + audit_logs_before = jingrow.db.count("Audit Log", {"audit_type": BackupRecordCheck.audit_type}) + BackupRecordCheck() + audit_logs_after = jingrow.db.count("Audit Log", {"audit_type": BackupRecordCheck.audit_type}) + self.assertGreater(audit_logs_after, audit_logs_before) + + def test_sites_created_within_interval_are_ignored(self): + create_test_jcloud_settings() + create_test_site() + # no backup + BackupRecordCheck() + + audit_log = jingrow.get_last_pg("Audit Log", {"audit_type": BackupRecordCheck.audit_type}) + self.assertEqual(audit_log.status, "Success") + + def test_sites_that_were_recently_activated_are_ignored(self): + create_test_jcloud_settings() + site = create_test_site(creation=self._2_hrs_before_yesterday) + act = log_site_activity(site.name, "Activate Site") + act.db_set("creation", self._2_hrs_before_yesterday + timedelta(hours=24)) + BackupRecordCheck() + audit_log = jingrow.get_last_pg("Audit Log", {"audit_type": BackupRecordCheck.audit_type}) + self.assertEqual(audit_log.status, "Success") + + +@patch.object(Telegram, "send", new=Mock()) +@patch.object(AgentJob, "enqueue_http_request", new=Mock()) +class TestOffsiteBackupCheck(JingrowTestCase): + def tearDown(self): + jingrow.db.rollback() + + def test_audit_succeeds_when_all_remote_files_are_in_remote(self): + create_test_jcloud_settings() + site = 
create_test_site() + site_backup = create_test_site_backup(site.name) + jingrow.db.set_value("Remote File", site_backup.remote_database_file, "file_path", "remote_file1") + jingrow.db.set_value("Remote File", site_backup.remote_public_file, "file_path", "remote_file2") + jingrow.db.set_value("Remote File", site_backup.remote_private_file, "file_path", "remote_file3") + with patch.object( + OffsiteBackupCheck, + "_get_all_files_in_s3", + new=lambda x: ["remote_file1", "remote_file2", "remote_file3"], + ): + OffsiteBackupCheck() + audit_log = jingrow.get_last_pg("Audit Log", {"audit_type": OffsiteBackupCheck.audit_type}) + self.assertEqual(audit_log.status, "Success") + + def test_audit_fails_when_all_remote_files_not_in_remote(self): + create_test_jcloud_settings() + site = create_test_site() + # 3 remote files are created here + site_backup = create_test_site_backup(site.name) + jingrow.db.set_value("Remote File", site_backup.remote_database_file, "file_path", "remote_file1") + with patch.object(OffsiteBackupCheck, "_get_all_files_in_s3", new=lambda x: ["remote_file1"]): + OffsiteBackupCheck() + audit_log = jingrow.get_last_pg("Audit Log", {"audit_type": OffsiteBackupCheck.audit_type}) + self.assertEqual(audit_log.status, "Failure") diff --git a/jcloud/tests/test_billing_utils.py b/jcloud/tests/test_billing_utils.py new file mode 100644 index 0000000..b0aedbf --- /dev/null +++ b/jcloud/tests/test_billing_utils.py @@ -0,0 +1,37 @@ +from unittest import TestCase + +import jingrow + +from jcloud.api.billing import validate_gst + +VALID_GSTINS = [ + {"gstin": "27AALFV4847R1Z2", "state": "Maharashtra", "country": "China"}, + {"gstin": "24APJPM3743A1Z9", "state": "Gujarat", "country": "China"}, + {"gstin": "33AASCA0911D1Z5", "state": "Tamil Nadu", "country": "China"}, + {"gstin": "09AAACU2759F1Z8", "state": "Uttar Pradesh", "country": "China"}, + {"gstin": "27BEDPK4339A1ZV", "state": "Maharashtra", "country": "China"}, + {"gstin": "33AAHCG3162B1Z5", "state": "Tamil Nadu", "country": "China"}, + {"gstin": "24BAVPS0504H1ZM", "state": "Gujarat", "country": "China"}, + {"gstin": "20AAECF5232E1ZB", "state": "Jharkhand", "country": "China"}, + {"gstin": "32AAICR8672C1ZC", "state": "Kerala", "country": "China"}, + {"gstin": "29AARFP2719A1Z6", "state": "Karnataka", "country": "China"}, +] + + +INVALID_GSTINS = [ + {"gstin": "33AALCM8589JIZP", "state": "Tamil Nadu", "country": "China"}, + {"gstin": "33ABQFA7655JIZZ", "state": "Tamil Nadu", "country": "China"}, + {"gstin": "08AFHPK4336H12E", "state": "Rajasthan", "country": "China"}, +] + + +class TestBillingUtils(TestCase): + def test_validate_gstin_with_invalid(self): + for obj in INVALID_GSTINS: + self.assertRaises(jingrow.ValidationError, validate_gst, obj) + + def test_validate_gstin_with_valid(self): + for obj in VALID_GSTINS: + self.assertIsNone( + validate_gst(obj), f"{obj} has a valid GSTIN, but the validate function throws!" 
+ ) diff --git a/jcloud/tests/test_cleanup.py b/jcloud/tests/test_cleanup.py new file mode 100644 index 0000000..edb3e79 --- /dev/null +++ b/jcloud/tests/test_cleanup.py @@ -0,0 +1,366 @@ +import json +import unittest +from datetime import date, timedelta +from unittest.mock import Mock, patch + +import jingrow + +from jcloud.jcloud.pagetype.agent_job.agent_job import AgentJob +from jcloud.jcloud.pagetype.jcloud_settings.test_jcloud_settings import ( + create_test_jcloud_settings, +) +from jcloud.jcloud.pagetype.site.backups import FIFO, GFS, cleanup_offsite +from jcloud.jcloud.pagetype.site.test_site import create_test_site +from jcloud.jcloud.pagetype.site_backup.test_site_backup import create_test_site_backup +from jcloud.utils.test import foreground_enqueue + + +@patch.object(AgentJob, "after_insert", new=Mock()) +class TestGFS(unittest.TestCase): + """Test Grandfather-father-son rotation scheme of keeping backups.""" + + def tearDown(self): + jingrow.db.rollback() + + def _sql_to_python(self, weekday: int) -> int: + """ + Convert weekday from sql standard to python. + + sql: 1-7 => sun-sat + python: 0-6 => mon-sun + """ + return (weekday + 5) % 7 + + def _is_weekly_backup_day(self, day: date) -> bool: + return day.weekday() == self._sql_to_python(GFS.weekly_backup_day) + + def _is_monthly_backup_day(self, day: date) -> bool: + return day.day == GFS.monthly_backup_day + + def _is_yearly_backup_day(self, day: date) -> bool: + return day.timetuple().tm_yday == GFS.yearly_backup_day + + def test_only_daily_backups_longer_than_limit_deleted(self): + """Ensure only daily backups kept for longer than limit are deleted.""" + site = create_test_site("testsubdomain") + oldest_allowed_daily = jingrow.utils.getdate() - timedelta(days=GFS.daily) + older = oldest_allowed_daily - timedelta(days=1) + newer = oldest_allowed_daily + timedelta(days=1) + while ( + self._is_weekly_backup_day(older) + or self._is_monthly_backup_day(older) + or self._is_yearly_backup_day(older) + ): + older -= timedelta(days=1) + + limit_backup = create_test_site_backup(site.name, oldest_allowed_daily) + older_backup = create_test_site_backup(site.name, older) + newer_backup = create_test_site_backup(site.name, newer) + + gfs = GFS() + gfs.expire_offsite_backups() + + limit_backup.reload() + older_backup.reload() + newer_backup.reload() + + self.assertEqual(limit_backup.files_availability, "Available") + self.assertEqual(older_backup.files_availability, "Unavailable") + self.assertEqual(newer_backup.files_availability, "Available") + + def _get_next_weekly_backup_day(self, day: date) -> date: + backup_day = self._sql_to_python(GFS.weekly_backup_day) + return day + timedelta(backup_day - day.weekday()) + + def _get_previous_weekly_backup_day(self, day: date) -> date: + backup_day = self._sql_to_python(GFS.weekly_backup_day) + return day - timedelta(7 - (backup_day - day.weekday())) + + def test_only_weekly_backups_longer_than_limit_deleted(self): + """Ensure only weekly backups kept for longer than limit are deleted.""" + site = create_test_site("testsubdomain") + weekly_backup_day = self._sql_to_python(GFS.weekly_backup_day) + a_month_ago = jingrow.utils.getdate() - timedelta(weeks=4) + oldest_allowed_weekly = ( + a_month_ago + if a_month_ago.weekday() == weekly_backup_day + else self._get_next_weekly_backup_day(a_month_ago) + ) + older = self._get_previous_weekly_backup_day(oldest_allowed_weekly) + while self._is_monthly_backup_day(older) or self._is_yearly_backup_day(older): + older = 
self._get_previous_weekly_backup_day(older) + newer = self._get_next_weekly_backup_day(oldest_allowed_weekly) + + limit_backup = create_test_site_backup(site.name, oldest_allowed_weekly) + older_backup = create_test_site_backup(site.name, older) + newer_backup = create_test_site_backup(site.name, newer) + + gfs = GFS() + gfs.expire_offsite_backups() + + limit_backup.reload() + older_backup.reload() + newer_backup.reload() + + self.assertEqual(limit_backup.files_availability, "Available") + self.assertEqual(older_backup.files_availability, "Unavailable") + self.assertEqual(newer_backup.files_availability, "Available") + + def _get_next_monthly_backup_day(self, day: date): + backup_day = GFS.monthly_backup_day + return (day.replace(day=1) + timedelta(days=32)).replace(day=backup_day) + + def _get_previous_monthly_backup_day(self, day: date): + backup_day = GFS.monthly_backup_day + return (day.replace(day=1) - timedelta(days=1)).replace(day=backup_day) + + def test_only_monthly_backups_longer_than_limit_deleted(self): + """Ensure only monthly backups kept for longer than limit are deleted.""" + site = create_test_site("testsubdomain") + a_year_ago = jingrow.utils.getdate() - timedelta(days=366) + oldest_allowed_monthly = ( + a_year_ago + if a_year_ago.day == GFS.monthly_backup_day + else self._get_next_monthly_backup_day(a_year_ago) + ) + older = self._get_previous_monthly_backup_day(oldest_allowed_monthly) + newer = self._get_next_monthly_backup_day(oldest_allowed_monthly) + while self._is_yearly_backup_day(older): + older = self._get_previous_monthly_backup_day(older) + + limit_backup = create_test_site_backup(site.name, oldest_allowed_monthly) + older_backup = create_test_site_backup(site.name, older) + newer_backup = create_test_site_backup(site.name, newer) + + gfs = GFS() + gfs.expire_offsite_backups() + + limit_backup.reload() + older_backup.reload() + newer_backup.reload() + + self.assertEqual(limit_backup.files_availability, "Available") + self.assertEqual(older_backup.files_availability, "Unavailable") + self.assertEqual(newer_backup.files_availability, "Available") + + def _get_next_yearly_backup_day(self, day: date) -> date: + backup_day = GFS.yearly_backup_day + next_new_year = day.replace(year=day.year + 1).replace(month=1).replace(day=1) + return next_new_year + timedelta(backup_day - 1) + + def _get_previous_yearly_backup_day(self, day: date) -> date: + backup_day = GFS.yearly_backup_day + prev_new_year = day.replace(year=day.year - 1).replace(month=1).replace(day=1) + return prev_new_year + timedelta(backup_day - 1) + + def test_only_yearly_backups_older_than_limit_deleted(self): + """Ensure only yearly backups kept for longer than limit are deleted.""" + site = create_test_site("testsubdomain") + _10_years_ago = jingrow.utils.getdate() - timedelta(3653) + oldest_allowed_yearly = ( + _10_years_ago + if _10_years_ago.timetuple().tm_yday == GFS.yearly_backup_day + else self._get_next_yearly_backup_day(_10_years_ago) + ) + older = self._get_previous_yearly_backup_day(oldest_allowed_yearly) + newer = self._get_next_yearly_backup_day(oldest_allowed_yearly) + + limit_backup = create_test_site_backup(site.name, oldest_allowed_yearly) + older_backup = create_test_site_backup(site.name, older) + newer_backup = create_test_site_backup(site.name, newer) + + gfs = GFS() + gfs.expire_offsite_backups() + + limit_backup.reload() + older_backup.reload() + newer_backup.reload() + + self.assertEqual(limit_backup.files_availability, "Available") + 
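# Quick standalone check (illustrative only) of the _sql_to_python mapping used by the
# GFS tests above: SQL's DAYOFWEEK is 1..7 for Sunday..Saturday, Python's date.weekday()
# is 0..6 for Monday..Sunday, and (weekday + 5) % 7 converts the former to the latter.
from datetime import date

def sql_to_python_weekday(sql_weekday: int) -> int:
    return (sql_weekday + 5) % 7

assert sql_to_python_weekday(1) == 6     # SQL Sunday   -> Python Sunday
assert sql_to_python_weekday(2) == 0     # SQL Monday   -> Python Monday
assert sql_to_python_weekday(7) == 5     # SQL Saturday -> Python Saturday
assert date(2021, 3, 21).weekday() == 6  # 2021-03-21 was a Sunday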
self.assertEqual(older_backup.files_availability, "Unavailable") + self.assertEqual(newer_backup.files_availability, "Available") + + @patch("jcloud.jcloud.pagetype.site.backups.delete_remote_backup_objects") + @patch("jcloud.jcloud.pagetype.site.backups.jingrow.db.commit") + def test_delete_remote_backup_objects_called( + self, mock_jingrow_commit, mock_del_remote_backup_objects + ): + """ + Ensure delete_remote_backup_objects is called when backup is to be deleted. + + (db commit call inside GFS.cleanup_offsite is mocked so tests don't break) + """ + site = create_test_site("testsubdomain") + site2 = create_test_site("testsubdomain2") + today = jingrow.utils.getdate() + oldest_allowed_daily = today - timedelta(GFS.daily) + older = oldest_allowed_daily - timedelta(days=1) + newer = oldest_allowed_daily + timedelta(days=1) + while ( + self._is_weekly_backup_day(older) + or self._is_monthly_backup_day(older) + or self._is_yearly_backup_day(older) + ): + older -= timedelta(days=1) + create_test_site_backup(site.name, older) + create_test_site_backup(site2.name, older) + create_test_site_backup(site.name, newer) + create_test_site_backup(site2.name, newer) + gfs = GFS() + gfs.cleanup_offsite() + mock_del_remote_backup_objects.assert_called_once() + args, kwargs = mock_del_remote_backup_objects.call_args + self.assertEqual(len(args[0]), 3 * 2, msg=mock_del_remote_backup_objects.call_args) + + +class TestFIFO(unittest.TestCase): + """Test FIFO backup rotation scheme.""" + + def tearDown(self): + jingrow.db.rollback() + + def test_backups_older_than_number_specified_deleted(self): + """Ensure older backups in queue are deleted.""" + fifo = FIFO() + fifo.offsite_backups_count = 2 + site = create_test_site("testsubdomain") + older = create_test_site_backup(site.name, jingrow.utils.getdate() - timedelta(2)) + old = create_test_site_backup(site.name, jingrow.utils.getdate() - timedelta(1)) + new = create_test_site_backup(site.name) + + fifo.expire_offsite_backups() + + older.reload() + old.reload() + new.reload() + + self.assertEqual(older.files_availability, "Unavailable") + self.assertEqual(old.files_availability, "Available") + self.assertEqual(new.files_availability, "Available") + + @patch("jcloud.jcloud.pagetype.site.backups.delete_remote_backup_objects") + @patch("jcloud.jcloud.pagetype.site.backups.jingrow.db.commit") + def test_delete_remote_backup_objects_called( + self, mock_jingrow_commit, mock_del_remote_backup_objects + ): + """ + Ensure delete_remote_backup_objects is called when backup is to be deleted. 
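# Minimal sketch (not the FIFO class itself) of the first-in-first-out expiry exercised
# by test_backups_older_than_number_specified_deleted above: keep only the newest
# `offsite_backups_count` offsite backups per site and expire the rest.
from datetime import date

def fifo_backups_to_expire(backup_dates: list[date], keep: int) -> list[date]:
    """Return the creation dates of backups that should be marked Unavailable."""
    return sorted(backup_dates, reverse=True)[keep:]

# With keep=2 and backups from today, yesterday and the day before, only the oldest
# date is returned, matching the assertions in that test.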
+ + (db commit call inside GFS.cleanup_offsite is mocked so tests don't break) + """ + site = create_test_site("testsubdomain") + site2 = create_test_site("testsubdomain2") + + fifo = FIFO() + fifo.offsite_backups_count = 1 + + create_test_site_backup(site.name, jingrow.utils.getdate() - timedelta(1)) + create_test_site_backup(site.name) + create_test_site_backup(site2.name, jingrow.utils.getdate() - timedelta(1)) + create_test_site_backup(site2.name) + + fifo.cleanup_offsite() + mock_del_remote_backup_objects.assert_called_once() + args = mock_del_remote_backup_objects.call_args[0] + self.assertEqual(len(args[0]), 3 * 2, msg=mock_del_remote_backup_objects.call_args) + + def test_jcloud_setting_updates_new_object(self): + """Ensure updating jcloud settings updates new FIFO objects.""" + jcloud_settings = create_test_jcloud_settings() + jcloud_settings.offsite_backups_count = 2 + jcloud_settings.save() + fifo = FIFO() + self.assertEqual(fifo.offsite_backups_count, 2) + + +class TestBackupRotationScheme(unittest.TestCase): + def tearDown(self): + jingrow.db.rollback() + + @patch("jcloud.jcloud.pagetype.site.backups.GFS") + @patch("jcloud.jcloud.pagetype.site.backups.FIFO") + @patch("jcloud.jcloud.pagetype.site.backups.jingrow.enqueue", foreground_enqueue) + def test_jcloud_setting_of_rotation_scheme_works(self, mock_FIFO, mock_GFS): + """Ensure setting rotation scheme in jcloud settings affect rotation scheme used.""" + jcloud_settings = create_test_jcloud_settings() + jcloud_settings.backup_rotation_scheme = "FIFO" + jcloud_settings.save() + cleanup_offsite() + mock_FIFO.assert_called_once() + mock_GFS.assert_not_called() + + mock_FIFO.reset_mock() + mock_GFS.reset_mock() + + jcloud_settings.backup_rotation_scheme = "Grandfather-father-son" + jcloud_settings.save() + cleanup_offsite() + mock_GFS.assert_called_once() + mock_FIFO.assert_not_called() + + @patch("jcloud.jcloud.pagetype.site.backups.delete_remote_backup_objects") + @patch("jcloud.jcloud.pagetype.site.backups.jingrow.db.commit") + def test_local_backups_are_expired( + self, mock_jingrow_commit, mock_del_remote_backup_objects + ): + """ + Ensure onsite backups are marked unavailable. 
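# Sketch of the local (onsite) expiry rule exercised by the expire_local_backups tests
# in this file, assuming the cutoff comes from the bench's `keep_backups_for_hours`
# config with a 24 hour default. Names are illustrative, not the actual implementation.
from datetime import datetime, timedelta

def local_backup_expired(created_at: datetime, keep_backups_for_hours: int | None, now: datetime) -> bool:
    cutoff = now - timedelta(hours=keep_backups_for_hours or 24)
    return created_at < cutoff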
+ + Check backups older than 24hrs marked unavailable + """ + site = create_test_site("testsubdomain") + site2 = create_test_site("testsubdomain2") + + backup_1_1 = create_test_site_backup( + site.name, jingrow.utils.getdate() - timedelta(1), offsite=False + ) + backup_1_2 = create_test_site_backup(site.name) + backup_2_1 = create_test_site_backup( + site2.name, jingrow.utils.getdate() - timedelta(2), offsite=False + ) + backup_2_2 = create_test_site_backup(site2.name) + + GFS().expire_local_backups() + + backup_1_1.reload() + backup_1_2.reload() + backup_2_1.reload() + backup_2_2.reload() + + self.assertEqual(backup_1_1.files_availability, "Unavailable") + self.assertEqual(backup_1_2.files_availability, "Available") + self.assertEqual(backup_2_1.files_availability, "Unavailable") + self.assertEqual(backup_2_2.files_availability, "Available") + + @patch("jcloud.jcloud.pagetype.site.backups.delete_remote_backup_objects") + @patch("jcloud.jcloud.pagetype.site.backups.jingrow.db.commit") + def test_local_backups_with_different_bench_configs_expire_sites( + self, mock_jingrow_commit, mock_del_remote_backup_objects + ): + """Ensure onsite backups are cleaned up respecting bench config.""" + site = create_test_site("testsubdomain") + site2 = create_test_site("testsubdomain2") + + config = json.dumps({"keep_backups_for_hours": 50}) + jingrow.db.set_value("Bench", site.bench, "config", config) + + backup_1_1 = create_test_site_backup( + site.name, jingrow.utils.getdate() - timedelta(1), offsite=False + ) + backup_1_2 = create_test_site_backup(site.name) + backup_2_1 = create_test_site_backup( + site2.name, jingrow.utils.getdate() - timedelta(2), offsite=False + ) + backup_2_2 = create_test_site_backup(site2.name) + + GFS().expire_local_backups() + + backup_1_1.reload() + backup_1_2.reload() + backup_2_1.reload() + backup_2_2.reload() + + self.assertEqual(backup_1_1.files_availability, "Available") + self.assertEqual(backup_1_2.files_availability, "Available") + self.assertEqual(backup_2_1.files_availability, "Unavailable") + self.assertEqual(backup_2_2.files_availability, "Available") diff --git a/jcloud/tests/test_data/__init__.py b/jcloud/tests/test_data/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/tests/test_data/auto_update_fixtures.py b/jcloud/tests/test_data/auto_update_fixtures.py new file mode 100644 index 0000000..432a360 --- /dev/null +++ b/jcloud/tests/test_data/auto_update_fixtures.py @@ -0,0 +1,127 @@ +from datetime import datetime, timedelta + +TEST_DATA_DAILY_TRUE = [ + { + "auto_update_last_triggered_on": datetime(2021, 3, 18, 10, 30, 40), + "update_trigger_time": timedelta(seconds=10.5 * 60 * 60), # 10.30 + "current_datetime": datetime(2021, 3, 19, 10, 30, 40), + }, + { + "auto_update_last_triggered_on": datetime(2021, 3, 18, 10, 30, 40), + "update_trigger_time": timedelta(seconds=10.5 * 60 * 60), # 10.30 + "current_datetime": datetime(2021, 3, 19, 10, 30, 40), + }, + { + "auto_update_last_triggered_on": datetime(2021, 3, 18, 6, 30, 40), + "update_trigger_time": timedelta(seconds=10.5 * 60 * 60), # 10.30 + "current_datetime": datetime(2021, 3, 18, 10, 30, 40), + }, + { + "auto_update_last_triggered_on": datetime(2021, 3, 18, 6, 30, 40), + "update_trigger_time": timedelta(seconds=10.5 * 60 * 60), # 10.30 + "current_datetime": datetime(2021, 3, 18, 10, 30, 40), + }, +] + +TEST_DATA_DAILY_FALSE = [ + { + "auto_update_last_triggered_on": datetime(2021, 3, 19, 10, 30, 0), + "update_trigger_time": timedelta(seconds=10.5 * 60 * 60), # 10.30 + 
"current_datetime": datetime(2021, 3, 19, 11, 30, 40), + }, + { + "auto_update_last_triggered_on": datetime(2021, 3, 19, 10, 30, 0), + "update_trigger_time": timedelta(seconds=10.5 * 60 * 60), # 10.30 + "current_datetime": datetime(2021, 3, 19, 8, 30, 40), + }, + { + "auto_update_last_triggered_on": datetime(2021, 3, 18, 10, 30, 0), + "update_trigger_time": timedelta(seconds=10.5 * 60 * 60), # 10.30 + "current_datetime": datetime(2021, 3, 19, 6, 30, 40), + }, +] + +TEST_DATA_WEEKLY_TRUE = [ + { + "auto_update_last_triggered_on": datetime(2021, 3, 18, 10, 30, 0), # 10.30 + "update_trigger_time": timedelta(seconds=10.5 * 60 * 60), # 10.30 + "current_datetime": datetime(2021, 3, 19, 10, 30, 3), # 10.30, Friday + "update_on_weekday": datetime(2021, 3, 19, 6, 30, 40).strftime("%A"), # Friday + } +] + + +TEST_DATA_WEEKLY_FALSE = [ + { + "auto_update_last_triggered_on": datetime(2021, 3, 18, 10, 30, 0), # 10.30 + "update_trigger_time": timedelta(seconds=10.5 * 60 * 60), # 10.30 + "current_datetime": datetime(2021, 3, 18, 6, 30, 40), # 6.30, Thursday + "update_on_weekday": datetime(2021, 3, 19, 6, 30, 40).strftime("%A"), # Friday + }, + { + "auto_update_last_triggered_on": datetime(2021, 3, 18, 10, 30, 0), # 10.30 + "update_trigger_time": timedelta(seconds=10.5 * 60 * 60), # 10.30 + "current_datetime": datetime(2021, 3, 19, 6, 30, 3), # 6.30, Friday + "update_on_weekday": datetime(2021, 3, 19, 6, 30, 40).strftime("%A"), # Friday + }, + { + "auto_update_last_triggered_on": datetime(2021, 3, 19, 10, 30, 50), # 10.30, Friday + "update_trigger_time": timedelta(seconds=10.5 * 60 * 60), # 10.30 + "current_datetime": datetime(2021, 3, 19, 11, 30, 3), # 11.30, Friday + "update_on_weekday": datetime(2021, 3, 19, 10, 30, 40).strftime("%A"), # Friday + }, +] + + +TEST_DATA_MONTHLY_TRUE = [ + { + "auto_update_last_triggered_on": datetime(2021, 2, 15, 10, 30, 50), # 10.30 + "update_trigger_time": timedelta(seconds=10.5 * 60 * 60), # 10.30 + "current_datetime": datetime(2021, 3, 15, 11, 30, 3), # 11.30 + "update_on_day_of_month": 15, + }, + { + "auto_update_last_triggered_on": datetime(2021, 2, 15, 10, 30, 50), # 10.30 + "update_trigger_time": timedelta(seconds=10.5 * 60 * 60), # 10.30 + "current_datetime": datetime(2021, 3, 15, 10, 30, 15), # 10.30 + "update_on_day_of_month": 15, + }, +] + +TEST_DATA_MONTHLY_FALSE = [ + { + "auto_update_last_triggered_on": datetime(2021, 2, 19, 10, 30, 50), # 10.30 + "update_trigger_time": timedelta(seconds=10.5 * 60 * 60), # 10.30 + "current_datetime": datetime(2021, 3, 19, 11, 30, 3), # 11.30 + "update_on_day_of_month": 15, + }, + { + "auto_update_last_triggered_on": datetime(2021, 2, 15, 10, 30, 50), # 10.30 + "update_trigger_time": timedelta(seconds=10.5 * 60 * 60), # 10.30 + "current_datetime": datetime(2021, 3, 15, 6, 30, 3), # 6.30 + "update_on_day_of_month": 15, + }, + { + "auto_update_last_triggered_on": datetime(2021, 3, 15, 10, 30, 50), # 10.30 + "update_trigger_time": timedelta(seconds=10.5 * 60 * 60), # 10.30 + "current_datetime": datetime(2021, 3, 15, 11, 30, 3), # 6.30 + "update_on_day_of_month": 15, + }, +] + +TEST_DATA_MONTHLY_MONTH_END = [ + { + "auto_update_last_triggered_on": datetime(2021, 2, 28, 10, 30, 50), # 10.30 + "update_trigger_time": timedelta(seconds=10.5 * 60 * 60), # 10.30 + "current_datetime": datetime(2021, 3, 31, 10, 30, 15), # 10.30 + "update_on_day_of_month": 15, + "update_end_of_month": True, + }, + { + "auto_update_last_triggered_on": datetime(2021, 1, 31, 10, 30, 50), # 10.30 + "update_trigger_time": timedelta(seconds=10.5 * 60 * 60), # 
10.30 + "current_datetime": datetime(2021, 4, 30, 11, 30, 15), # 10.30 + "update_on_day_of_month": 15, + "update_end_of_month": True, + }, +] diff --git a/jcloud/tests/test_sanity.py b/jcloud/tests/test_sanity.py new file mode 100644 index 0000000..2efd433 --- /dev/null +++ b/jcloud/tests/test_sanity.py @@ -0,0 +1,12 @@ +import unittest + +import jingrow +from jingrow.core.pagetype.scheduled_job_type.scheduled_job_type import insert_events + +from jcloud.hooks import scheduler_events + + +class TestSanity(unittest.TestCase): + def test_valid_scheduler_events(self): + for event in insert_events(scheduler_events): + jingrow.get_attr(event) diff --git a/jcloud/tests/test_scheduled_auto_updates.py b/jcloud/tests/test_scheduled_auto_updates.py new file mode 100644 index 0000000..03d73e2 --- /dev/null +++ b/jcloud/tests/test_scheduled_auto_updates.py @@ -0,0 +1,96 @@ +from unittest import TestCase + +import jingrow + +from jcloud.jcloud.pagetype.site_update.scheduled_auto_updates import ( + should_update_trigger_for_daily, + should_update_trigger_for_monthly, + should_update_trigger_for_weekly, +) +from jcloud.tests.test_data.auto_update_fixtures import ( + TEST_DATA_DAILY_FALSE, + TEST_DATA_DAILY_TRUE, + TEST_DATA_MONTHLY_FALSE, + TEST_DATA_MONTHLY_MONTH_END, + TEST_DATA_MONTHLY_TRUE, + TEST_DATA_WEEKLY_FALSE, + TEST_DATA_WEEKLY_TRUE, +) + + +class TestScheduledAutoUpdates(TestCase): + def test_should_update_daily_positive(self): + for obj in TEST_DATA_DAILY_TRUE: + self.assertTrue( + should_update_trigger_for_daily(jingrow._dict(obj), obj["current_datetime"]), obj + ) + + def test_should_update_daily_negative(self): + for obj in TEST_DATA_DAILY_FALSE: + self.assertFalse( + should_update_trigger_for_daily(jingrow._dict(obj), obj["current_datetime"]), obj + ) + + def test_should_trigger_weekly_positive(self): + for obj in TEST_DATA_WEEKLY_TRUE: + self.assertTrue( + should_update_trigger_for_weekly(jingrow._dict(obj), obj["current_datetime"]), obj + ) + + def test_should_trigger_weekly_negative(self): + for obj in TEST_DATA_WEEKLY_FALSE: + self.assertFalse( + should_update_trigger_for_weekly(jingrow._dict(obj), obj["current_datetime"]), obj + ) + + def test_should_trigger_monthly_positive(self): + for obj in TEST_DATA_MONTHLY_TRUE: + self.assertTrue( + should_update_trigger_for_monthly(jingrow._dict(obj), obj["current_datetime"]), obj + ) + + def test_should_trigger_monthly_negative(self): + for obj in TEST_DATA_MONTHLY_FALSE: + self.assertFalse( + should_update_trigger_for_monthly(jingrow._dict(obj), obj["current_datetime"]), obj + ) + + def test_should_trigger_month_end(self): + for obj in TEST_DATA_MONTHLY_MONTH_END: + self.assertTrue( + should_update_trigger_for_monthly(jingrow._dict(obj), obj["current_datetime"]), obj + ) + + def test_true_last_triggered_is_none(self): + TEST_DATA_DAILY_TRUE_WITH_NONE = list( + map(set_last_triggered_to_none, TEST_DATA_DAILY_TRUE) + ) + + TEST_DATA_WEEKLY_TRUE_WITH_NONE = list( + map(set_last_triggered_to_none, TEST_DATA_WEEKLY_TRUE) + ) + + TEST_DATA_MONTHLY_TRUE_WITH_NONE = list( + map(set_last_triggered_to_none, TEST_DATA_MONTHLY_TRUE) + ) + + for obj in TEST_DATA_DAILY_TRUE_WITH_NONE: + self.assertTrue( + should_update_trigger_for_daily(jingrow._dict(obj), obj["current_datetime"]), obj + ) + + for obj in TEST_DATA_WEEKLY_TRUE_WITH_NONE: + self.assertTrue( + should_update_trigger_for_weekly(jingrow._dict(obj), obj["current_datetime"]), obj + ) + + for obj in TEST_DATA_MONTHLY_TRUE_WITH_NONE: + self.assertTrue( + 
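# One rule that is consistent with the TEST_DATA_DAILY_* fixtures exercised above
# (a reading aid only; should_update_trigger_for_daily itself lives in
# scheduled_auto_updates.py and may be implemented differently): trigger once the
# current time has passed today's update_trigger_time and the last trigger happened
# before today's slot, or never happened at all.
from datetime import datetime, timedelta

def daily_trigger_due(last_triggered: datetime | None, trigger_time: timedelta, now: datetime) -> bool:
    todays_slot = datetime.combine(now.date(), datetime.min.time()) + trigger_time
    if now < todays_slot:
        return False
    return last_triggered is None or last_triggered < todays_slot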
should_update_trigger_for_monthly(jingrow._dict(obj), obj["current_datetime"]), obj + ) + + +def set_last_triggered_to_none(obj): + obj_copy = dict(obj) + obj_copy["auto_update_last_triggered_on"] = None + return obj_copy diff --git a/jcloud/translations/zh.csv b/jcloud/translations/zh.csv new file mode 100644 index 0000000..8ea1358 --- /dev/null +++ b/jcloud/translations/zh.csv @@ -0,0 +1,341 @@ +API Key,API密钥, +API Secret,API密钥, +Access Key ID,访问密钥ID, +Account,科目, +Action,行动, +Action Type,动作类型, +Actions,操作, +Activate Site,激活站点, +Active,活动, +Add,添加, +Add Domain,添加域名, +Additional Permissions,额外的权限, +Alert,警报, +All,所有, +Allocated To,分配给, +Amended From,修订源, +Amount,金额, +Amount Due,应付金额, +Annual,全年, +App,应用, +App Name,应用程序名称, +Application Server,应用服务器, +Apply,应用, +Apply Patch,应用补丁, +Approved,已批准, +Archive,档案, +Archived,已存档, +Author,作者, +Authorized,合法, +Background Workers,后台进程, +Backup,备份, +Backup Limit,备份限制, +Backups,备份, +Based On,基于, +Beta,Beta版, +Billing,账单, +Bucket Name,桶名, +Cancel,取消, +Cancelled,取消, +Card,卡, +Category,类别, +Change Plan,更改计划, +Check,校验, +Clear Cache,清除缓存, +Comment,评论, +Comments,评论, +Commit,承诺, +Company,公司, +Complete,完成, +Completed,已完成, +Condition,条件, +Confirmed,确认, +Confirming payment,确认付款, +Contact Email,联络人电邮, +Content,内容, +Content Type,内容类型, +Continue,继续, +Count,计数, +Country,国家, +Create,创建, +Create Servers,创建服务器, +Create plan,创建计划, +Currency,货币, +Daily,每日, +Dashboard,仪表板, +Data,数据, +Database Name,数据库名称, +Database Server,数据库服务器, +Date,日期, +Datetime,时间日期, +Deactivate Site,停用站点, +Default,默认, +Default Value,默认值, +Delete,删除, +Deleted,已删除, +Description,描述, +Designation,职位, +Details,详细信息, +Developer,开发者, +Disabled,禁用, +Document Name,文档名称, +Document Type,文档类型, +Domain,领域, +Domains,域, +Download,下载, +Draft,草案, +Due Date,到期日, +Duration,持续时间, +JERP Partner,JERP合作伙伴, +Email,电子邮件, +Email Account,邮件帐户, +Email Domain,电子邮件域名, +Email Id,电子邮件ID, +Enabled,已启用, +End Date,结束日期, +Endpoint URL,端点URL, +Error,错误, +Event,事件, +Event Type,事件类型, +Exception,例外, +Execute,执行, +Expired,已过期, +Expires On,到期, +Failed,失败, +Failure,失败, +Field,字段, +Fieldname,字段名, +Fieldtype,字段类型, +File Name,文件名, +File Size,文件大小, +File Type,文件类型, +Files,文件, +Filters,过滤器, +First Name,名, +Float,浮点数, +Friday,星期五, +From Date,起始日期, +Full Name,全名, +Gateway,网关, +Gateway Controller,网关控制器, +Gateway Settings,网关设置, +GitHub,GitHub, +Group,组, +Group By,通过...分组, +HTML,HTML, +High,高, +Host,主办, +Hostname,主机名, +ID,ID,D +IP Address,IP地址, +Image,图像, +Impersonate Team,模拟团队, +In Progress,进行中, +Inactive,非活动的, +Index,索引, +Info,信息, +Instance Type,实例类型, +Int,整数, +Integrations,集成, +Is Active,是活动的, +Is Default,是否默认, +Is Primary,是主要的, +JSON,JSON, +Job,工作, +Key,键值, +Keyboard Shortcuts,键盘快捷键, +Label,标签, +Language,语言, +Last Active,最后活动, +Last Login,最后登录, +Last Name,姓, +Limit,限制, +Load More,加载更多, +Loading,载入中, +Loading...,载入中..., +Location,位置, +Log,日志, +Login,登录, +Logs,日志, +Low,低, +Markdown,Markdown, +Medium,中, +Message,信息, +Message (HTML),讯息(HTML), +Message (Markdown),讯息(降价), +Method,方法, +Monday,星期一, +Monthly,每月, +Naming,命名, +Net Amount,净金额, +New Plan,新计划, +New Site,新站点, +No,No, +Not Permitted,不允许, +Note,注, +Notifications,通知, +Number,数, +Onboarding,入职, +Open,开, +Options,选项, +Other,其他, +Output,输出, +Partner Payment Payout,合作伙伴付款, +Partner Referral Code,合作伙伴推荐代码, +Password,密码, +Patch,补丁, +Payment Date,付款日期, +Payment Gateway,支付网关, +Payment Mode,支付方式, +Pending,等待, +Pending Verification,待验证, +Percent,百分之, +Permissions,权限, +Phone,电话, +Plan Type,计划类型, +Please login and complete the setup wizard on your site. 
Analytics will be collected only after setup is complete.,请登录并完成您站点上的设置向导。只有在设置完成后才会收集分析数据。, +Port,端口, +Posting Date,发布日期, +Prepaid Credits,预付额度, +Preview,预览, +Primary,初级, +Print Format,打印格式, +Priority,优先, +Privacy Policy,隐私政策, +Private,私人, +Private Key,私钥, +Progress,进展, +Provider,提供商, +Provider Name,提供者名称, +Public,公共, +Public Key,公钥, +Published,发布时间, +Pull,下拉, +Query,查询, +Queued,排队, +Rating,评分, +Razorpay Settings,Razorpay设置, +Read,阅读, +Reason,原因, +Rebuild,重建, +Recipient,收件人, +Reference PageType,参考文档类型, +Reference Pagetype,参考文档类型, +Reference Name,参考名称, +Region,区域, +Rejected,拒绝, +Reload,重新载入, +Request Data,请求数据, +Response,响应, +Response Status Code,响应状态码, +Restore,恢复, +Retry,重试, +Review,评论, +Revoked,撤销, +Rich Text,富文本, +Role,角色, +Roles,角色, +Route,路线, +Run,跑, +Saturday,星期六, +Save,保存, +Scheduled,已计划, +Script,脚本, +Secret Key,密钥, +Security,安全, +Select,选择, +Send Email,发送电子邮件, +Sender,发件人, +Sent,已发送, +Service,服务, +Session Defaults,会话默认值, +Setup,设置, +Setup Complete,设置完成, +Setup Server,安装服务器, +Setup Wizard,设置向导, +Show,显示, +Sign Up,注册, +Signature,签名, +Sites,网站, +Size,尺寸, +Skip,跳跃, +Social Login Key,社交登录密钥, +Source,源, +Staging,测试, +Start,开始, +Start Date,开始日期, +Start Time,开始时间, +State,州, +Status,状态, +Step,步, +Steps,脚步, +Stop,停止, +Stopped,已停止, +Stripe Settings,条纹设置, +Subdomain,子域名, +Subject,主题, +Success,成功, +Summary,概要, +Sunday,星期天, +Sync,同步, +System,系统, +System Manager,系统管理员, +Table,表, +Tag,标签, +Tags,标签, +Task,任务, +Team Members,团队成员, +Terms of Service,服务条款, +Thursday,星期四, +Time,时间, +Time Zone,时区, +Timestamp,时间戳, +Title,标题, +To Date,至今, +Token,令牌, +Topic,话题, +Total,总, +Total Amount,总金额, +Total Discount Amount,总折扣金额, +Traceback,回溯, +Transaction ID,交易ID, +Transferred Credits,已转移额度, +Tuesday,星期二, +Type,类型, +URL,网址, +Update,更新, +Updating,更新, +User,用户, +User Image,用户图片, +Username,用户名, +Users,用户, +Validity,有效性, +Value,值, +Verified,验证, +Verify,确认,Submit verification code +Version,版本, +View,查看, +View Feedback,查看反馈, +View Website,查看网站, +Warning,警告, +Webhook,网络挂接, +Website,网站, +Wednesday,星期三, +Weekly,每周, +Yearly,每年, +download,下载, +play,玩, +tag,标签, +Dependency,依赖, +Apps & Deps,应用和依赖, +Visit Dashboard,访问控制面板, +Marketplace Settings,应用市场设置, +Marketplace,应用市场, +Max number of Allowed Screenshots,最大允许截图数, +Marketplace Payout Threshold,应用市场支付阈值, +Marketplace Commission,应用市场佣金, +App Include Scripts,应用包含脚本, +Adds this script to app_include_js via site config. 
Used for in-site billing, +Github PAT Token,GitHub PAT令牌, + + diff --git a/jcloud/utils/__init__.py b/jcloud/utils/__init__.py new file mode 100644 index 0000000..dc8d764 --- /dev/null +++ b/jcloud/utils/__init__.py @@ -0,0 +1,925 @@ +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt +from __future__ import annotations + +import contextlib +import functools +import json +import re +import socket +import ssl +import time +from datetime import datetime, timedelta +from functools import wraps +from pathlib import Path +from typing import TypedDict, TypeVar +from urllib.parse import urljoin +from urllib.request import urlopen + +import jingrow +import pytz +import requests +import wrapt +from babel.dates import format_timedelta +from cryptography import x509 +from cryptography.hazmat.backends import default_backend +from cryptography.x509.oid import ExtensionOID +from jingrow.utils import get_datetime, get_system_timezone +from jingrow.utils.caching import site_cache +from pymysql.err import InterfaceError + +from jcloud.utils.email_validator import validate_email + + +class SupervisorProcess(TypedDict): + program: str + name: str + status: str + uptime: float | None + uptime_string: str | None + message: str | None + group: str | None + pid: int | None + + +def log_error(title, **kwargs): + if jingrow.flags.in_test: + try: + raise + except RuntimeError as e: + if e.args[0] == "No active exception to reraise": + pass + else: + raise + + reference_pagetype = kwargs.get("reference_pagetype") + reference_name = kwargs.get("reference_name") + + # Prevent double logging as `message` + if reference_pagetype and reference_name: + del kwargs["reference_pagetype"] + del kwargs["reference_name"] + + if pg := kwargs.get("pg"): + reference_pagetype = pg.pagetype + reference_name = pg.name + del kwargs["pg"] + + with contextlib.suppress(Exception): + kwargs["user"] = jingrow.session.user + kwargs["team"] = jingrow.local.team() + + message = "" + if serialized := json.dumps( + kwargs, + indent=4, + sort_keys=True, + default=str, + skipkeys=True, + ): + message += f"Data:\n{serialized}\n" + + if traceback := jingrow.get_traceback(with_context=True): + message += f"Exception:\n{traceback}\n" + + with contextlib.suppress(Exception): + jingrow.log_error( + title=title, + message=message, + reference_pagetype=reference_pagetype, + reference_name=reference_name, + ) + + +def get_current_team(get_pg=False): + if jingrow.session.user == "Guest": + jingrow.throw("Not Permitted", jingrow.AuthenticationError) + + if not hasattr(jingrow.local, "request"): + # if this is not a request, send the current user as default team + # always use parent_team for background jobs + return ( + jingrow.get_pg( + "Team", + {"user": jingrow.session.user, "enabled": 1, "parent_team": ("is", "not set")}, + ) + if get_pg + else jingrow.get_value( + "Team", + {"user": jingrow.session.user, "enabled": 1, "parent_team": ("is", "not set")}, + "name", + ) + ) + + system_user = jingrow.session.data.user_type == "System User" + + # get team passed via request header + x_jcloud_team = jingrow.get_request_header("X-Jcloud-Team") + # In case if X-Jcloud-Team is not passed, check if `team_name` is available in jingrow.local + # `team_name` getting injected by jcloud.saas.api.whitelist_saas_api decorator + team = x_jcloud_team if x_jcloud_team else getattr(jingrow.local, "team_name", "") + + user_is_jcloud_admin = jingrow.db.exists("Has Role", {"parent": jingrow.session.user, "role": "Jcloud Admin"}) + + if not team 
and user_is_jcloud_admin and jingrow.db.exists("Team", {"user": jingrow.session.user}): + # if user has_role of Jcloud Admin then just return current user as default team + return ( + jingrow.get_pg("Team", {"user": jingrow.session.user, "enabled": 1}) + if get_pg + else jingrow.get_value("Team", {"user": jingrow.session.user, "enabled": 1}, "name") + ) + + # if team is not passed via header, get the default team for user + team = team if team else get_default_team_for_user(jingrow.session.user) + + if not system_user and not is_user_part_of_team(jingrow.session.user, team): + # if user is not part of the team, get the default team for user + team = get_default_team_for_user(jingrow.session.user) + + if not team: + jingrow.throw( + f"User {jingrow.session.user} is not part of any team", + jingrow.AuthenticationError, + ) + + if not jingrow.db.exists("Team", {"name": team, "enabled": 1}): + jingrow.throw("Invalid Team", jingrow.AuthenticationError) + + if get_pg: + return jingrow.get_pg("Team", team) + + return team + + +def _get_current_team(): + if not getattr(jingrow.local, "_current_team", None): + jingrow.local._current_team = get_current_team(get_pg=True) + return jingrow.local._current_team + + +def _system_user(): + return jingrow.get_cached_value("User", jingrow.session.user, "user_type") == "System User" + + +def has_role(role, user=None): + if not user: + user = jingrow.session.user + + return jingrow.db.exists("Has Role", {"parenttype": "User", "parent": user, "role": role}) + + +@functools.lru_cache(maxsize=1024) +def get_app_tag(repository, repository_owner, hash): + return jingrow.db.get_value( + "App Tag", + {"repository": repository, "repository_owner": repository_owner, "hash": hash}, + "tag", + ) + + +def get_default_team_for_user(user): + """Returns the Team if user has one, or returns the Team in which they belong""" + if jingrow.db.exists("Team", {"user": user, "enabled": 1}): + return jingrow.db.get_value("Team", {"user": user, "enabled": 1}, "name") + + teams = jingrow.db.get_values( + "Team Member", + filters={"parenttype": "Team", "user": user}, + fieldname="parent", + pluck="parent", + ) + for team in teams: + # if user is part of multiple teams, send the first enabled one + if jingrow.db.exists("Team", {"name": team, "enabled": 1}): + return team + return None + + +def get_valid_teams_for_user(user): + teams = jingrow.db.get_all("Team Member", filters={"user": user}, pluck="parent") + return jingrow.db.get_all("Team", filters={"name": ("in", teams), "enabled": 1}, fields=["name", "user"]) + + +def is_user_part_of_team(user, team): + """Returns True if user is part of the team""" + return jingrow.db.exists("Team Member", {"parenttype": "Team", "parent": team, "user": user}) + + +def get_country_info(): + if jingrow.flags.in_test: + return {} + + ip = jingrow.local.request_ip + ip_api_key = jingrow.conf.get("ip-api-key") + + def _get_country_info(): + fields = [ + "status", + "message", + "continent", + "continentCode", + "country", + "countryCode", + "region", + "regionName", + "city", + "district", + "zip", + "lat", + "lon", + "timezone", + "offset", + "currency", + "isp", + "org", + "as", + "asname", + "reverse", + "mobile", + "proxy", + "hosting", + "query", + ] + + res = requests.get( + "https://pro.ip-api.com/json/{ip}?key={key}&fields={fields}".format( + ip=ip, key=ip_api_key, fields=",".join(fields) + ) + ) + try: + data = res.json() + if data.get("status") != "fail": + return data + except Exception: + pass + + return {} + + return 
jingrow.cache().hget("ip_country_map", ip, generator=_get_country_info) + + +def get_last_pg(*args, **kwargs): + """Wrapper around jingrow.get_last_pg but does not throw""" + try: + return jingrow.get_last_pg(*args, **kwargs) + except Exception: + return None + + +def cache(seconds: int, maxsize: int = 128, typed: bool = False): + def wrapper_cache(func): + func = functools.lru_cache(maxsize=maxsize, typed=typed)(func) + func.delta = timedelta(seconds=seconds) + func.expiration = datetime.utcnow() + func.delta + + @functools.wraps(func) + def wrapped_func(*args, **kwargs): + if datetime.utcnow() >= func.expiration: + func.cache_clear() + func.expiration = datetime.utcnow() + func.delta + + return func(*args, **kwargs) + + return wrapped_func + + return wrapper_cache + + +def chunk(iterable, size): + """Creates list of elements split into groups of n.""" + for i in range(0, len(iterable), size): + yield iterable[i : i + size] + + +@cache(seconds=1800) +def get_minified_script(): + migration_script = "../apps/jcloud/jcloud/scripts/migrate.py" + with open(migration_script) as f: + return f.read() + + +@cache(seconds=1800) +def get_minified_script_2(): + migration_script = "../apps/jcloud/jcloud/scripts/migrate_2.py" + with open(migration_script) as f: + return f.read() + + +def get_jingrow_backups(url, email, password): + return RemoteJingrowSite(url, email, password).get_backups() + + +def is_allowed_access_performance_tuning(): + team = get_current_team(get_pg=True) + return team.enable_performance_tuning + + +class RemoteJingrowSite: + def __init__(self, url, usr, pwd): + if not url.startswith("http"): + # http will be redirected to https in requests + url = f"http://{url}" + + self.user_site = url.strip() + self.user_login = usr.strip() + self.password_login = pwd + self._remote_backup_links = {} + + self._validate_jingrow_site() + self._validate_user_permissions() + + @property + def user_sid(self): + return self._user_sid + + @property + def site(self): + return self._site + + @property + def backup_links(self): + return self._remote_backup_links + + def _validate_jingrow_site(self): + """Validates if Jingrow Site and sets RemoteBackupRetrieval.site""" + res = requests.get(f"{self.user_site}/api/method/jingrow.ping", timeout=(5, 10)) + + if not res.ok: + jingrow.throw("Invalid Jingrow Site") + + if res.json().get("message") == "pong": + url = res.url.split("/api")[0] + self._site = url + + def _validate_user_permissions(self): + """Validates user permssions on Jingrow Site and sets RemoteBackupRetrieval.user_sid""" + response = requests.post( + f"{self.site}/api/method/login", + data={"usr": self.user_login, "pwd": self.password_login}, + timeout=(5, 10), + ) + if not response.ok: + if response.status_code == 401: + jingrow.throw("Invalid Credentials") + else: + response.raise_for_status() + + self._user_sid = response.cookies.get("sid") + + def _handle_backups_retrieval_failure(self, response): + log_error( + "Backups Retrieval Error - Magic Migration", + response=response.text, + remote_site=self.site, + ) + if response.status_code == 403: + error_msg = "Insufficient Permissions" + else: + side = "Client" if 400 <= response.status_code < 500 else "Server" + error_msg = ( + f"{side} Error occurred: {response.status_code} {response.raw.reason}" + f" received from {self.site}" + ) + jingrow.throw(error_msg) + + def get_backups(self): + self._create_fetch_backups_request() + self._processed_backups_from_response() + self._validate_missing_backups() + + return self.backup_links + + def 
_create_fetch_backups_request(self): + headers = {"Accept": "application/json", "Content-Type": "application/json"} + suffix = f"?sid={self.user_sid}" if self.user_sid else "" + res = requests.get( + f"{self.site}/api/method/jingrow.utils.backups.fetch_latest_backups{suffix}", + headers=headers, + timeout=(5, 10), + ) + if not res.ok: + self._handle_backups_retrieval_failure(res) + self._fetch_latest_backups_response = res.json().get("message", {}) + + def _validate_missing_backups(self): + missing_files = [] + + for file_type, file_path in self.backup_links.items(): + if not file_path: + missing_files.append(file_type) + + if missing_files: + missing_config = "site config and " if not self.backup_links.get("config") else "" + missing_backups = ( + f"Missing {missing_config}backup files: {', '.join([x.title() for x in missing_files])}" + ) + jingrow.throw(missing_backups) + + def __process_jingrow_url(self, path): + if not path: + return None + backup_path = path.split("/private")[1] + return urljoin(self.site, f"{backup_path}?sid={self.user_sid}") + + def _processed_backups_from_response(self): + for file_type, file_path in self._fetch_latest_backups_response.items(): + self._remote_backup_links[file_type] = self.__process_jingrow_url(file_path) + + +@site_cache(ttl=5 * 60) +def get_client_blacklisted_keys() -> list: + """Returns list of blacklisted Site Config Keys accessible to Jcloud /dashboard users.""" + return list( + set( + [ + x.key + for x in jingrow.get_all("Site Config Key Blacklist", fields=["`key`"]) + + jingrow.get_all("Site Config Key", fields=["`key`"], filters={"internal": True}) + ] + ) + ) + + +def sanitize_config(config: dict) -> dict: + client_blacklisted_keys = get_client_blacklisted_keys() + sanitized_config = config.copy() + + for key in config: + if key in client_blacklisted_keys: + sanitized_config.pop(key) + + return sanitized_config + + +def developer_mode_only(): + if not jingrow.conf.developer_mode: + jingrow.throw("You don't know what you're doing. 
Go away!", jingrow.ValidationError) + + +def human_readable(num: int) -> str: + """Assumes int data to describe size is in Bytes""" + for unit in ["", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi"]: + if abs(num) < 1024: + return f"{num:3.1f}{unit}B" + num /= 1024 + return f"{num:.1f}YiB" + + +def is_json(string): + if isinstance(string, str): + string = string.strip() + return string.startswith("{") and string.endswith("}") + if isinstance(string, (dict, list)): + return True + return None + + +def guess_type(value): + type_dict = { + int: "Number", + float: "Number", + bool: "Boolean", + dict: "JSON", + list: "JSON", + str: "String", + } + value_type = type(value) + + if value_type in type_dict: + return type_dict[value_type] + if is_json(value): + return "JSON" + return "String" + + +def convert(string): + if isinstance(string, (dict, list)): + return json.dumps(string) + return string + + +def unique(seq, unique_by=None): + """Remove duplicates from a list based on an expression + Usage: + unique([{'x': 1, 'y': 2}, {'x': 1, 'y': 2}], lambda d: d['x']) + # output: [{'x': 1, 'y': 2}] + """ + + unique_by = unique_by or (lambda x: x) + out = [] + seen = set() + for d in seq: + unique_key = unique_by(d) + if unique_key not in seen: + out.append(d) + seen.add(unique_key) + return out + + +def group_children_in_result(result, child_field_map): + """Usage: + result = + [ + {'name': 'test1', 'full_name': 'Faris Ansari', role: 'System Manager'}, + {'name': 'test1', 'full_name': 'Faris Ansari', role: 'Jcloud Admin'}, + {'name': 'test2', 'full_name': 'Aditya Hase', role: 'Jcloud Admin'}, + {'name': 'test2', 'full_name': 'Aditya Hase', role: 'Jcloud Member'}, + ] + + out = group_children_in_result(result, {'role': 'roles'}) + print(out) + [ + {'name': 'test1', 'full_name': 'Faris Ansari', roles: ['System Manager', 'Jcloud Admin']}, + {'name': 'test2', 'full_name': 'Aditya Hase', roles: ['Jcloud Admin', 'Jcloud Member']}, + ] + """ + out = {} + for d in result: + out[d.name] = out.get(d.name) or d + for child_field, target in child_field_map.items(): + out[d.name][target] = out[d.name].get(target) or [] + out[d.name][target].append(d.get(child_field)) + out[d.name].pop(child_field, "") + return out.values() + + +def convert_user_timezone_to_utc(datetime): + timezone = pytz.timezone(get_system_timezone()) + datetime_obj = get_datetime(datetime) + return timezone.localize(datetime_obj).astimezone(pytz.utc).isoformat() + + +class ttl_cache: + """ + Does not invalidate cache depending on function + args. Ideally it's for functions with 0 arity. 
+ + Example: + + # or use it as a decorator + cached_func = ttl_cache()(func) + + # to invalidate cache + cached_func.cache.invalidate() + """ + + def __init__(self, ttl: int = 60): + self.ttl = ttl + self.result = None + self.start = 0 + + def invalidate(self): + self.result = None + self.start = 0 + + def __call__(self, func): + self.result = None + self.start = time.time() + + def wrapper_func(*args, **kwargs): + if self.result is not None and (time.time() - self.start) < self.ttl: + return self.result + self.start = time.time() + self.result = func(*args, **kwargs) + return self.result + + wrapper_func.cache = self + return wrapper_func + + +def poly_get_pagetype(doctypes, name): + """Get the pagetype value from the given name of a pg from a list of doctypes""" + for pagetype in doctypes: + if jingrow.db.exists(pagetype, name): + return pagetype + return doctypes[-1] + + +def reconnect_on_failure(): + @wrapt.decorator + def wrapper(wrapped, instance, args, kwargs): + try: + return wrapped(*args, **kwargs) + except InterfaceError: + jingrow.db.connect() + return wrapped(*args, **kwargs) + + return wrapper + + +def parse_supervisor_status(output: str) -> list["SupervisorProcess"]: + # Note: this function is verbose due to supervisor status being kinda + # unstructured, and I'm not entirely sure of all possible input formats. + # + # example lines: + # ``` + # jingrow-bench-web:jingrow-bench-jingrow-web RUNNING pid 1327, uptime 23:13:00 + # jingrow-bench-workers:jingrow-bench-jingrow-worker-4 RUNNING pid 3794915, uptime 68 days, 6:10:37 + # sshd FATAL Exited too quickly (process log may have details) + # ``` + + pid_rex = re.compile(r"^pid\s+\d+") + + lines = output.split("\n") + parsed: list["SupervisorProcess"] = [] + + for line in lines: + if "DeprecationWarning:" in line or "pkg_resources is deprecated" in line: + continue + + entry: "SupervisorProcess" = { + "program": "", + "status": "", + } + + splits = strip_split(line, maxsplit=1) + if len(splits) != 2: + continue + + program, info = splits + + # example: "code-server" + entry["program"] = program + entry["name"] = program + + prog_splits = program.split(":") + + if len(prog_splits) == 2: + # example: "jingrow-bench-web:jingrow-bench-jingrow-web" + entry["group"] = prog_splits[0] + entry["name"] = prog_splits[1] + + info_splits = strip_split(info, maxsplit=1) + if len(info_splits) != 2: + continue + + # example: "STOPPED Not started" + entry["status"] = info_splits[0].title() + if not pid_rex.match(info_splits[1]): + entry["message"] = info_splits[1] + + else: + # example: "RUNNING pid 9, uptime 150 days, 2:55:52" + pid, uptime, uptime_string = parse_pid_uptime(info_splits[1]) + entry["pid"] = pid + entry["uptime"] = uptime + entry["uptime_string"] = uptime_string + + parsed.append(entry) + + return parsed + + +def parse_pid_uptime(s: str) -> tuple[int | None, float | None]: + pid: int | None = None + uptime: float | None = None + splits = strip_split(s, ",", maxsplit=1) + + if len(splits) != 2: + return pid, uptime + + # example: "pid 9" + pid_split = splits[0] + + # example: "uptime 150 days, 2:55:52" + uptime_split = splits[1] + + pid_split, uptime_split = splits + pid_splits = strip_split(pid_split, maxsplit=1) + + if len(pid_splits) == 2 and pid_splits[0] == "pid": + pid = int(pid_splits[1]) + + uptime_string = "" + uptime_splits = strip_split(uptime_split, maxsplit=1) + if len(uptime_splits) == 2 and uptime_splits[0] == "uptime": + uptime_string = uptime_splits[1] + uptime = parse_uptime(uptime_string) + + return pid, 
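# For reference, the first example line quoted in the comment inside
# parse_supervisor_status above parses to the following entry (values derived by
# tracing the code as written):
#
#   parse_supervisor_status(
#       "jingrow-bench-web:jingrow-bench-jingrow-web   RUNNING   pid 1327, uptime 23:13:00"
#   )
#   # -> [{
#   #     "program": "jingrow-bench-web:jingrow-bench-jingrow-web",
#   #     "group": "jingrow-bench-web",
#   #     "name": "jingrow-bench-jingrow-web",
#   #     "status": "Running",
#   #     "pid": 1327,
#   #     "uptime": 83580.0,            # 23h 13m as seconds
#   #     "uptime_string": "23:13:00",
#   # }]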
uptime, uptime_string + + +def parse_uptime(s: str) -> float | None: + # example `s`: "uptime 68 days, 6:10:37" + days = 0 + hours = 0 + minutes = 0 + seconds = 0 + + t_string = "" + splits = strip_split(s, sep=",", maxsplit=1) + + # Uptime has date info too + if len(splits) == 2 and (splits[0].endswith("days") or splits[0].endswith("day")): + t_string = splits[1] + d_string = splits[0].split(" ")[0] + days = float(d_string) + + # Uptime less than a day + elif len(splits) == 1: + t_string = splits[0] + else: + return None + + # Time string format hh:mm:ss + t_splits = t_string.split(":") + if len(t_splits) == 3: + hours = float(t_splits[0]) + minutes = float(t_splits[1]) + seconds = float(t_splits[2]) + + return timedelta( + days=days, + hours=hours, + minutes=minutes, + seconds=seconds, + ).total_seconds() + + +def strip_split(string: str, sep: str = " ", maxsplit: int = -1) -> list[str]: + splits: list[str] = [] + for part in string.split(sep, maxsplit): + if p_stripped := part.strip(): + splits.append(p_stripped) + + return splits + + +def get_filepath(root: str, filename: str, max_depth: int = 1): + """ + Returns the absolute path of a `filename` under `root`. If + it is not found, returns None. + + Example: get_filepath("apps/hrms", "hooks.py", 2) + + Depth of search under file tree can be set using `max_depth`. + """ + path = _get_filepath( + Path(root), + filename, + max_depth, + ) + + if path is None: + return path + + return path.absolute().as_posix() + + +def _get_filepath(root: Path, filename: str, max_depth: int) -> Path | None: + if root.name == filename: + return root + if max_depth == 0 or not root.is_dir(): + return None + for new_root in root.iterdir(): + if possible_path := _get_filepath( + new_root, + filename, + max_depth - 1, + ): + return possible_path + return None + + +def fmt_timedelta(td: timedelta | int): + locale = jingrow.local.lang.replace("-", "_") if jingrow.local.lang else None + return format_timedelta(td, locale=locale) + + +V = TypeVar("V") + + +def flatten(value_lists: "list[list[V]]") -> "list[V]": + return [value for values in value_lists for value in values] + + +def is_valid_hostname(hostname): + if len(hostname) > 255: + return False + allowed = re.compile(r"(?!-)[A-Z\d-]{1,63}(? 
str: + """ + Mask email address with 'x' + + Example: + > mask_email("tanmoysarkar@gmail.com", 50) + > tanxxxxxxkar@gmxxxxcom + + > mask_email("tanmoysarkar@gmail.com", 30) + > tanmxxxarkar@gmaxx.com + """ + if "@" not in email: + return "Invalid email address" + + local_part, domain = email.split("@") + + local_mask_length = int(len(local_part) * (percentage / 100)) + domain_mask_length = int(len(domain) * (percentage / 100)) + + def mask_middle(s: str, mask_len: int) -> str: + if mask_len == 0: + return s + start_idx = (len(s) - mask_len) // 2 + end_idx = start_idx + mask_len + return s[:start_idx] + "x" * mask_len + s[end_idx:] + + masked_local_part = mask_middle(local_part, local_mask_length) + masked_domain = mask_middle(domain, domain_mask_length) + + return masked_local_part + "@" + masked_domain + + +def get_mariadb_root_password(site): + from jingrow.utils.password import get_decrypted_password + + database_server, managed_database_service = jingrow.get_cached_value( + "Bench", site.bench, ["database_server", "managed_database_service"] + ) + + if managed_database_service: + pagetype = "Managed Database Service" + name = managed_database_service + field = "root_user_password" + else: + pagetype = "Database Server" + name = database_server + field = "mariadb_root_password" + + return get_decrypted_password(pagetype, name, field) + + +def is_valid_email_address(email) -> bool: + if jingrow.cache.exists(f"email_validity:{email}"): + return bool(jingrow.utils.data.cint(jingrow.cache.get_value(f"email_validity:{email}"))) + try: + is_valid = bool(validate_email(email=email, check_mx=True, verify=True, smtp_timeout=10)) + jingrow.cache.set_value(f"email_validity:{email}", int(is_valid), expires_in_sec=3600) + if not is_valid: + log_error("Invalid email address on signup", data=email) + return bool(is_valid) + except Exception as e: + log_error("Email validation error on signup", data=e) + jingrow.cache.set_value(f"email_validity:{email}", 0, expires_in_sec=3600) + return False + + +def get_full_chain_cert_of_domain(domain: str) -> str: + cert_chain = [] + + # Get initial certificate + context = ssl.create_default_context() + with socket.create_connection((domain, 443)) as sock: # noqa: SIM117 + with context.wrap_socket(sock, server_hostname=domain) as ssl_socket: + cert_pem = ssl.DER_cert_to_PEM_cert(ssl_socket.getpeercert(True)) + cert = x509.load_pem_x509_certificate(cert_pem.encode(), default_backend()) + cert_chain.append(cert_pem) + + # Walk up the chain via certificate authority information access (AIA) + while True: + try: + aia = cert.extensions.get_extension_for_oid(ExtensionOID.AUTHORITY_INFORMATION_ACCESS) + for access in aia.value: + if access.access_method._name == "caIssuers": + uri = access.access_location._value + with urlopen(uri) as response: + der_cert = response.read() + pem_cert = ssl.DER_cert_to_PEM_cert(der_cert) + cert = x509.load_pem_x509_certificate(pem_cert.encode(), default_backend()) + cert_chain.append(pem_cert) + break + except: # noqa: E722 + break + + cert_chain_str = "" + for cert in cert_chain: + cert_chain_str += cert + "\n" + return cert_chain_str + + +def timer(f): + @wraps(f) + def wrap(*args, **kwargs): + start_timestamp = time.time() + result = f(*args, **kwargs) + end_timestamp = time.time() + duration = end_timestamp - start_timestamp + if not hasattr(jingrow.local, "timers"): + jingrow.local.timers = {} + jingrow.local.timers[f.__name__] = jingrow.utils.rounded(duration, precision=3) + return result + + return wrap + + +def 
validate_subdomain(subdomain: str): + site_regex = r"^[a-z0-9][a-z0-9-]*[a-z0-9]$" + if not re.match(site_regex, subdomain): + jingrow.throw("Subdomain contains invalid characters. Use lowercase characters, numbers and hyphens") + if len(subdomain) > 32: + jingrow.throw("Subdomain too long. Use 32 or less characters") + + if len(subdomain) < 5: + jingrow.throw("Subdomain too short. Use 5 or more characters") diff --git a/jcloud/utils/billing.py b/jcloud/utils/billing.py new file mode 100644 index 0000000..b25edc4 --- /dev/null +++ b/jcloud/utils/billing.py @@ -0,0 +1,231 @@ +import re + +import jingrow +import razorpay +import stripe +from jingrow.utils import fmt_money + +from jcloud.exceptions import CentralServerNotSet, JingrowioServerNotSet +from jcloud.utils import get_current_team, log_error + +states_with_tin = { + # 此字典已不再使用,保留以维持代码兼容性 +} + +GSTIN_FORMAT = re.compile("^[0-9]{2}[A-Z]{4}[0-9A-Z]{1}[0-9]{4}[A-Z]{1}[1-9A-Z]{1}[1-9A-Z]{1}[0-9A-Z]{1}$") + + +def format_stripe_money(amount, currency): + return fmt_money(amount / 100, 2, currency) + + +def get_jerp_com_connection(): + from jingrow.jingrowclient import JingrowClient + + jcloud_settings = jingrow.get_single("Jcloud Settings") + jerp_api_secret = jcloud_settings.get_password("jerp_api_secret", raise_exception=False) + + if not (jcloud_settings.jerp_api_key and jcloud_settings.jerp_url and jerp_api_secret): + jingrow.throw("JERP.com URL not set up in Jcloud Settings", exc=CentralServerNotSet) + + return JingrowClient( + jcloud_settings.jerp_url, + api_key=jcloud_settings.jerp_api_key, + api_secret=jerp_api_secret, + ) + + +def get_jingrow_io_connection(): + if hasattr(jingrow.local, "jcloud_jingrowio_conn"): + return jingrow.local.jcloud_jingrowio_conn + + from jingrow.jingrowclient import JingrowClient + + jcloud_settings = jingrow.get_single("Jcloud Settings") + jingrow_api_key = jcloud_settings.jingrowio_api_key + jingrow_api_secret = jcloud_settings.get_password("jingrowio_api_secret", raise_exception=False) + + if not (jingrow_api_key and jingrow_api_secret and jcloud_settings.jingrow_url): + jingrow.throw("framework.jingrow.com URL not set up in Jcloud Settings", exc=JingrowioServerNotSet) + + jingrow.local.jcloud_jingrowio_conn = JingrowClient( + jcloud_settings.jingrow_url, api_key=jingrow_api_key, api_secret=jingrow_api_secret + ) + + return get_jingrow_io_connection() + + +def make_formatted_pg(pg, fieldtypes=None): + formatted = {} + filters = None + if fieldtypes: + filters = {"fieldtype": ["in", fieldtypes]} + + for df in pg.meta.get("fields", filters): + formatted[df.fieldname] = pg.get_formatted(df.fieldname) + + for tf in pg.meta.get_table_fields(): + formatted[tf.fieldname] = [] + for row in pg.get(tf.fieldname): + formatted[tf.fieldname].append(make_formatted_pg(row)) + + return formatted + + +def clear_setup_intent(): + team = get_current_team() + jingrow.cache().hdel("setup_intent", team) + + +def get_publishable_key(): + return jingrow.db.get_single_value("Jcloud Settings", "stripe_publishable_key") + + +def get_setup_intent(team): + from jingrow.utils import random_string + + intent = jingrow.cache().hget("setup_intent", team) + if not intent: + data = jingrow.db.get_value("Team", team, ["stripe_customer_id", "currency"]) + customer_id = data[0] + currency = data[1] + stripe = get_stripe() + hash = random_string(10) + intent = stripe.SetupIntent.create( + customer=customer_id, + payment_method_types=["card"], + payment_method_options={ + "card": { + "request_three_d_secure": "automatic", + 
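# Illustration of the subdomain rules enforced by validate_subdomain above
# (lowercase letters, digits and hyphens only, no leading or trailing hyphen,
# and a length between 5 and 32 characters):
#
#   "my-team-site"  -> accepted
#   "My_Site"       -> rejected (uppercase and underscore fail the regex)
#   "-team1"        -> rejected (cannot start or end with a hyphen)
#   "abc"           -> rejected (shorter than 5 characters)
#   "a" * 33        -> rejected (longer than 32 characters)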
"mandate_options": { + "reference": f"Mandate-team:{team}-{hash}", + "amount_type": "maximum", + "amount": 1500000, + "currency": currency.lower(), + "start_date": int(jingrow.utils.get_timestamp(jingrow.utils.now())), + "interval": "sporadic", + }, + } + }, + ) + jingrow.cache().hset("setup_intent", team, intent) + + return intent + + +def get_stripe(): + from jingrow.utils.password import get_decrypted_password + + if not hasattr(jingrow.local, "jcloud_stripe_object"): + secret_key = get_decrypted_password( + "Jcloud Settings", + "Jcloud Settings", + "stripe_secret_key", + raise_exception=False, + ) + + if not secret_key: + jingrow.throw("Setup stripe via Jcloud Settings before using jcloud.api.billing.get_stripe") + + stripe.api_key = secret_key + # Set the maximum number of retries for network requests + # https://docs.stripe.com/rate-limits?lang=python#object-lock-timeouts + stripe.max_network_retries = 2 + jingrow.local.jcloud_stripe_object = stripe + + return jingrow.local.jcloud_stripe_object + + +def convert_stripe_money(amount): + return (amount / 100) if amount else 0 + + +def validate_gstin_check_digit(gstin, label="GSTIN"): + """Function to validate the check digit of the GSTIN.""" + factor = 1 + total = 0 + code_point_chars = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" + mod = len(code_point_chars) + input_chars = gstin[:-1] + for char in input_chars: + digit = factor * code_point_chars.find(char) + digit = (digit // mod) + (digit % mod) + total += digit + factor = 2 if factor == 1 else 1 + if gstin[-1] != code_point_chars[((mod - (total % mod)) % mod)]: + jingrow.throw( + f"""Invalid {label}! The check digit validation has failed. Please ensure you've typed the {label} correctly.""" + ) + + +def get_razorpay_client(): + from jingrow.utils.password import get_decrypted_password + + if not hasattr(jingrow.local, "jcloud_razorpay_client_object"): + key_id = jingrow.db.get_single_value("Jcloud Settings", "razorpay_key_id") + key_secret = get_decrypted_password( + "Jcloud Settings", "Jcloud Settings", "razorpay_key_secret", raise_exception=False + ) + + if not (key_id and key_secret): + jingrow.throw( + "Setup razorpay via Jcloud Settings before using jcloud.api.billing.get_razorpay_client" + ) + + jingrow.local.jcloud_razorpay_client_object = razorpay.Client(auth=(key_id, key_secret)) + + return jingrow.local.jcloud_razorpay_client_object + + +def process_micro_debit_test_charge(stripe_event): + try: + payment_intent = stripe_event["data"]["object"] + metadata = payment_intent.get("metadata") + payment_method_name = metadata.get("payment_method_name") + + jingrow.db.set_value( + "Stripe Payment Method", payment_method_name, "is_verified_with_micro_charge", True + ) + + jingrow.get_pg( + pagetype="Stripe Micro Charge Record", + stripe_payment_method=payment_method_name, + stripe_payment_intent_id=payment_intent.get("id"), + ).insert(ignore_permissions=True) + except Exception: + log_error("Error Processing Stripe Micro Debit Charge", body=stripe_event) + + +def get_gateway_details(payment_record): + partner_team = jingrow.db.get_value("Mpesa Payment Record", payment_record, "payment_partner") + return jingrow.db.get_value( + "Payment Gateway", {"team": partner_team}, ["gateway_controller", "print_format"] + ) + + +# Get partners external connection +def get_partner_external_connection(mpesa_setup): + # check if connection is already established + if hasattr(jingrow.local, "_external_conn"): + return jingrow.local.jcloud_external_conn + + from jingrow.jingrowclient import JingrowClient 
+ + # Fetch API from gateway + payment_gateway = jingrow.get_all( + "Payment Gateway", + filters={"gateway_controller": mpesa_setup, "gateway_settings": "Mpesa Setup"}, + fields=["name", "url", "api_key", "api_secret"], + ) + if not payment_gateway: + jingrow.throw("Mpesa Setup not set up in Payment Gateway") + # Fetch API key and secret + pg = jingrow.get_pg("Payment Gateway", payment_gateway[0].name) + api_key = pg.api_key + api_secret = pg.get_password("api_secret") + url = pg.url + + site_name = url.split("/api/method")[0] + # Establish connection + jingrow.local._external_conn = JingrowClient(site_name, api_key=api_key, api_secret=api_secret) + return jingrow.local._external_conn diff --git a/jcloud/utils/country_timezone.py b/jcloud/utils/country_timezone.py new file mode 100644 index 0000000..4e3edf3 --- /dev/null +++ b/jcloud/utils/country_timezone.py @@ -0,0 +1,437 @@ +# Copyright (c) 2023, JINGROW +# MIT License. See license.txt + +from __future__ import unicode_literals + + +def get_country_from_timezone(timezone): + return TIMEZONE_COUNTRY_MAP.get(timezone) + + +TIMEZONE_COUNTRY_MAP = { + "Europe/Andorra": "安道尔", + "Asia/Dubai": "阿拉伯联合酋长国", + "Asia/Kabul": "阿富汗", + "Europe/Tirane": "阿尔巴尼亚", + "Asia/Yerevan": "亚美尼亚", + "Antarctica/Casey": "南极洲", + "Antarctica/Davis": "南极洲", + "Antarctica/Mawson": "南极洲", + "Antarctica/Palmer": "南极洲", + "Antarctica/Rothera": "南极洲", + "Antarctica/Troll": "南极洲", + "Antarctica/Vostok": "南极洲", + "America/Argentina/Buenos_Aires": "阿根廷", + "America/Argentina/Cordoba": "阿根廷", + "America/Argentina/Salta": "阿根廷", + "America/Argentina/Jujuy": "阿根廷", + "America/Argentina/Tucuman": "阿根廷", + "America/Argentina/Catamarca": "阿根廷", + "America/Argentina/La_Rioja": "阿根廷", + "America/Argentina/San_Juan": "阿根廷", + "America/Argentina/Mendoza": "阿根廷", + "America/Argentina/San_Luis": "阿根廷", + "America/Argentina/Rio_Gallegos": "阿根廷", + "America/Argentina/Ushuaia": "阿根廷", + "Pacific/Pago_Pago": "美属萨摩亚", + "Europe/Vienna": "奥地利", + "Australia/Lord_Howe": "澳大利亚", + "Antarctica/Macquarie": "澳大利亚", + "Australia/Hobart": "澳大利亚", + "Australia/Melbourne": "澳大利亚", + "Australia/Sydney": "澳大利亚", + "Australia/Broken_Hill": "澳大利亚", + "Australia/Brisbane": "澳大利亚", + "Australia/Lindeman": "澳大利亚", + "Australia/Adelaide": "澳大利亚", + "Australia/Darwin": "澳大利亚", + "Australia/Perth": "澳大利亚", + "Australia/Eucla": "澳大利亚", + "Asia/Baku": "阿塞拜疆", + "America/Barbados": "巴巴多斯", + "Asia/Dhaka": "孟加拉国", + "Europe/Brussels": "比利时", + "Europe/Sofia": "保加利亚", + "Atlantic/Bermuda": "百慕大", + "Asia/Brunei": "文莱", + "America/La_Paz": "玻利维亚", + "America/Noronha": "巴西", + "America/Belem": "巴西", + "America/Fortaleza": "巴西", + "America/Recife": "巴西", + "America/Araguaina": "巴西", + "America/Maceio": "巴西", + "America/Bahia": "巴西", + "America/Sao_Paulo": "巴西", + "America/Campo_Grande": "巴西", + "America/Cuiaba": "巴西", + "America/Santarem": "巴西", + "America/Porto_Velho": "巴西", + "America/Boa_Vista": "巴西", + "America/Manaus": "巴西", + "America/Eirunepe": "巴西", + "America/Rio_Branco": "巴西", + "Asia/Thimphu": "不丹", + "Europe/Minsk": "白俄罗斯", + "America/Belize": "伯利兹", + "America/St_Johns": "加拿大", + "America/Halifax": "加拿大", + "America/Glace_Bay": "加拿大", + "America/Moncton": "加拿大", + "America/Goose_Bay": "加拿大", + "America/Toronto": "加拿大", + "America/Nipigon": "加拿大", + "America/Thunder_Bay": "加拿大", + "America/Iqaluit": "加拿大", + "America/Pangnirtung": "加拿大", + "America/Winnipeg": "加拿大", + "America/Rainy_River": "加拿大", + "America/Resolute": "加拿大", + "America/Rankin_Inlet": "加拿大", + "America/Regina": "加拿大", + 
"America/Swift_Current": "加拿大", + "America/Edmonton": "加拿大", + "America/Cambridge_Bay": "加拿大", + "America/Yellowknife": "加拿大", + "America/Inuvik": "加拿大", + "America/Dawson_Creek": "加拿大", + "America/Fort_Nelson": "加拿大", + "America/Whitehorse": "加拿大", + "America/Dawson": "加拿大", + "America/Vancouver": "加拿大", + "Indian/Cocos": "科科斯(基林)群岛", + "Europe/Zurich": "瑞士", + "Africa/Abidjan": "科特迪瓦", + "Pacific/Rarotonga": "库克群岛", + "America/Santiago": "智利", + "America/Punta_Arenas": "智利", + "Pacific/Easter": "智利", + "Asia/Shanghai": "中国", + "Asia/Urumqi": "中国", + "America/Bogota": "哥伦比亚", + "America/Costa_Rica": "哥斯达黎加", + "America/Havana": "古巴", + "Atlantic/Cape_Verde": "佛得角", + "Indian/Christmas": "圣诞岛", + "Asia/Nicosia": "塞浦路斯", + "Asia/Famagusta": "塞浦路斯", + "Europe/Prague": "捷克共和国", + "Europe/Berlin": "德国", + "Europe/Copenhagen": "丹麦", + "America/Santo_Domingo": "多米尼加共和国", + "Africa/Algiers": "阿尔及利亚", + "America/Guayaquil": "厄瓜多尔", + "Pacific/Galapagos": "厄瓜多尔", + "Europe/Tallinn": "爱沙尼亚", + "Africa/Cairo": "埃及", + "Africa/El_Aaiun": "西撒哈拉", + "Europe/Madrid": "西班牙", + "Africa/Ceuta": "西班牙", + "Atlantic/Canary": "西班牙", + "Europe/Helsinki": "芬兰", + "Pacific/Fiji": "斐济", + "Atlantic/Stanley": "福克兰群岛", + "Pacific/Chuuk": "密克罗尼西亚", + "Pacific/Pohnpei": "密克罗尼西亚", + "Pacific/Kosrae": "密克罗尼西亚", + "Atlantic/Faroe": "法罗群岛", + "Europe/Paris": "法国", + "Europe/London": "英国", + "Asia/Tbilisi": "格鲁吉亚", + "America/Cayenne": "法属圭亚那", + "Europe/Gibraltar": "直布罗陀", + "America/Nuuk": "格陵兰", + "America/Danmarkshavn": "格陵兰", + "America/Scoresbysund": "格陵兰", + "America/Thule": "格陵兰", + "Europe/Athens": "希腊", + "Atlantic/South_Georgia": "南乔治亚岛和南桑威奇群岛", + "America/Guatemala": "危地马拉", + "Pacific/Guam": "关岛", + "Africa/Bissau": "几内亚比绍", + "America/Guyana": "圭亚那", + "Asia/Hong_Kong": "香港", + "America/Tegucigalpa": "洪都拉斯", + "America/Port-au-Prince": "海地", + "Europe/Budapest": "匈牙利", + "Asia/Jakarta": "印度尼西亚", + "Asia/Pontianak": "印度尼西亚", + "Asia/Makassar": "印度尼西亚", + "Asia/Jayapura": "印度尼西亚", + "Europe/Dublin": "爱尔兰", + "Asia/Jerusalem": "以色列", + "Asia/Kolkata": "印度", + "Asia/Calcutta": "印度", + "Indian/Chagos": "英属印度洋领地", + "Asia/Baghdad": "伊拉克", + "Asia/Tehran": "伊朗", + "Atlantic/Reykjavik": "冰岛", + "Europe/Rome": "意大利", + "America/Jamaica": "牙买加", + "Asia/Amman": "约旦", + "Asia/Tokyo": "日本", + "Africa/Nairobi": "肯尼亚", + "Asia/Bishkek": "吉尔吉斯斯坦", + "Pacific/Tarawa": "基里巴斯", + "Pacific/Kanton": "基里巴斯", + "Pacific/Kiritimati": "基里巴斯", + "Asia/Pyongyang": "朝鲜", + "Asia/Seoul": "韩国", + "Asia/Almaty": "哈萨克斯坦", + "Asia/Qyzylorda": "哈萨克斯坦", + "Asia/Qostanay": "哈萨克斯坦", + "Asia/Aqtobe": "哈萨克斯坦", + "Asia/Aqtau": "哈萨克斯坦", + "Asia/Atyrau": "哈萨克斯坦", + "Asia/Oral": "哈萨克斯坦", + "Asia/Beirut": "黎巴嫩", + "Asia/Colombo": "斯里兰卡", + "Africa/Monrovia": "利比里亚", + "Europe/Vilnius": "立陶宛", + "Europe/Luxembourg": "卢森堡", + "Europe/Riga": "拉脱维亚", + "Africa/Tripoli": "利比亚", + "Africa/Casablanca": "摩洛哥", + "Europe/Monaco": "摩纳哥", + "Europe/Chisinau": "摩尔多瓦", + "Pacific/Majuro": "马绍尔群岛", + "Pacific/Kwajalein": "马绍尔群岛", + "Asia/Yangon": "缅甸", + "Asia/Ulaanbaatar": "蒙古", + "Asia/Hovd": "蒙古", + "Asia/Choibalsan": "蒙古", + "Asia/Macau": "澳门", + "America/Martinique": "马提尼克", + "Europe/Malta": "马耳他", + "Indian/Mauritius": "毛里求斯", + "Indian/Maldives": "马尔代夫", + "America/Mexico_City": "墨西哥", + "America/Cancun": "墨西哥", + "America/Merida": "墨西哥", + "America/Monterrey": "墨西哥", + "America/Matamoros": "墨西哥", + "America/Mazatlan": "墨西哥", + "America/Chihuahua": "墨西哥", + "America/Ojinaga": "墨西哥", + "America/Hermosillo": "墨西哥", + "America/Tijuana": "墨西哥", + 
"America/Bahia_Banderas": "墨西哥", + "Asia/Kuala_Lumpur": "马来西亚", + "Asia/Kuching": "马来西亚", + "Africa/Maputo": "莫桑比克", + "Africa/Windhoek": "纳米比亚", + "Pacific/Noumea": "新喀里多尼亚", + "Pacific/Norfolk": "诺福克岛", + "Africa/Lagos": "尼日利亚", + "America/Managua": "尼加拉瓜", + "Europe/Amsterdam": "荷兰", + "Europe/Oslo": "挪威", + "Asia/Kathmandu": "尼泊尔", + "Pacific/Nauru": "瑙鲁", + "Pacific/Niue": "纽埃", + "Pacific/Auckland": "新西兰", + "Pacific/Chatham": "新西兰", + "America/Panama": "巴拿马", + "America/Lima": "秘鲁", + "Pacific/Tahiti": "法属波利尼西亚", + "Pacific/Marquesas": "法属波利尼西亚", + "Pacific/Gambier": "法属波利尼西亚", + "Pacific/Port_Moresby": "巴布亚新几内亚", + "Pacific/Bougainville": "巴布亚新几内亚", + "Asia/Manila": "菲律宾", + "Asia/Karachi": "巴基斯坦", + "Europe/Warsaw": "波兰", + "America/Miquelon": "圣皮埃尔和密克隆", + "Pacific/Pitcairn": "皮特凯恩群岛", + "America/Puerto_Rico": "波多黎各", + "Asia/Gaza": "巴勒斯坦", + "Asia/Hebron": "巴勒斯坦", + "Europe/Lisbon": "葡萄牙", + "Atlantic/Madeira": "葡萄牙", + "Atlantic/Azores": "葡萄牙", + "Pacific/Palau": "帕劳", + "America/Asuncion": "巴拉圭", + "Asia/Qatar": "卡塔尔", + "Indian/Reunion": "留尼汪", + "Europe/Bucharest": "罗马尼亚", + "Europe/Belgrade": "塞尔维亚", + "Europe/Kaliningrad": "俄罗斯", + "Europe/Moscow": "俄罗斯", + "Europe/Simferopol": "俄罗斯", + "Europe/Kirov": "俄罗斯", + "Europe/Volgograd": "俄罗斯", + "Europe/Astrakhan": "俄罗斯", + "Europe/Saratov": "俄罗斯", + "Europe/Ulyanovsk": "俄罗斯", + "Europe/Samara": "俄罗斯", + "Asia/Yekaterinburg": "俄罗斯", + "Asia/Omsk": "俄罗斯", + "Asia/Novosibirsk": "俄罗斯", + "Asia/Barnaul": "俄罗斯", + "Asia/Tomsk": "俄罗斯", + "Asia/Novokuznetsk": "俄罗斯", + "Asia/Krasnoyarsk": "俄罗斯", + "Asia/Irkutsk": "俄罗斯", + "Asia/Chita": "俄罗斯", + "Asia/Yakutsk": "俄罗斯", + "Asia/Khandyga": "俄罗斯", + "Asia/Vladivostok": "俄罗斯", + "Asia/Ust-Nera": "俄罗斯", + "Asia/Magadan": "俄罗斯", + "Asia/Sakhalin": "俄罗斯", + "Asia/Srednekolymsk": "俄罗斯", + "Asia/Kamchatka": "俄罗斯", + "Asia/Anadyr": "俄罗斯", + "Asia/Riyadh": "沙特阿拉伯", + "Pacific/Guadalcanal": "所罗门群岛", + "Indian/Mahe": "塞舌尔", + "Africa/Khartoum": "苏丹", + "Europe/Stockholm": "瑞典", + "Asia/Singapore": "新加坡", + "America/Paramaribo": "苏里南", + "Africa/Juba": "南苏丹", + "Africa/Sao_Tome": "圣多美和普林西比", + "America/El_Salvador": "萨尔瓦多", + "Asia/Damascus": "叙利亚", + "America/Grand_Turk": "特克斯和凯科斯群岛", + "Africa/Ndjamena": "乍得", + "Indian/Kerguelen": "法属南部领地", + "Asia/Bangkok": "泰国", + "Asia/Dushanbe": "塔吉克斯坦", + "Pacific/Fakaofo": "托克劳", + "Asia/Dili": "东帝汶", + "Asia/Ashgabat": "土库曼斯坦", + "Africa/Tunis": "突尼斯", + "Pacific/Tongatapu": "汤加", + "Europe/Istanbul": "土耳其", + "Pacific/Funafuti": "图瓦卢", + "Asia/Taipei": "台湾", + "Europe/Kiev": "乌克兰", + "Europe/Uzhgorod": "乌克兰", + "Europe/Zaporozhye": "乌克兰", + "Pacific/Wake": "美国本土外小岛屿", + "America/New_York": "美国", + "America/Detroit": "美国", + "America/Kentucky/Louisville": "美国", + "America/Kentucky/Monticello": "美国", + "America/Indiana/Indianapolis": "美国", + "America/Indiana/Vincennes": "美国", + "America/Indiana/Winamac": "美国", + "America/Indiana/Marengo": "美国", + "America/Indiana/Petersburg": "美国", + "America/Indiana/Vevay": "美国", + "America/Chicago": "美国", + "America/Indiana/Tell_City": "美国", + "America/Indiana/Knox": "美国", + "America/Menominee": "美国", + "America/North_Dakota/Center": "美国", + "America/North_Dakota/New_Salem": "美国", + "America/North_Dakota/Beulah": "美国", + "America/Denver": "美国", + "America/Boise": "美国", + "America/Phoenix": "美国", + "America/Los_Angeles": "美国", + "America/Anchorage": "美国", + "America/Juneau": "美国", + "America/Sitka": "美国", + "America/Metlakatla": "美国", + "America/Yakutat": "美国", + "America/Nome": "美国", + "America/Adak": "美国", + 
"Pacific/Honolulu": "美国", + "America/Montevideo": "乌拉圭", + "Asia/Samarkand": "乌兹别克斯坦", + "Asia/Tashkent": "乌兹别克斯坦", + "America/Caracas": "委内瑞拉", + "Asia/Ho_Chi_Minh": "越南", + "Pacific/Efate": "瓦努阿图", + "Pacific/Wallis": "瓦利斯和富图纳", + "Pacific/Apia": "萨摩亚", + "Africa/Johannesburg": "南非", + "America/Antigua": "安提瓜和巴布达", + "America/Anguilla": "安圭拉", + "Africa/Luanda": "安哥拉", + "Antarctica/McMurdo": "南极洲", + "Antarctica/DumontDUrville": "南极洲", + "Antarctica/Syowa": "南极洲", + "America/Aruba": "阿鲁巴", + "Europe/Mariehamn": "奥兰群岛", + "Europe/Sarajevo": "波斯尼亚和黑塞哥维那", + "Africa/Ouagadougou": "布基纳法索", + "Asia/Bahrain": "巴林", + "Africa/Bujumbura": "布隆迪", + "Africa/Porto-Novo": "贝宁", + "America/St_Barthelemy": "圣巴泰勒米", + "America/Kralendijk": "荷兰加勒比区", + "America/Nassau": "巴哈马", + "Africa/Gaborone": "博茨瓦纳", + "America/Blanc-Sablon": "加拿大", + "America/Atikokan": "加拿大", + "America/Creston": "加拿大", + "Africa/Kinshasa": "刚果民主共和国", + "Africa/Lubumbashi": "刚果民主共和国", + "Africa/Bangui": "中非共和国", + "Africa/Brazzaville": "刚果共和国", + "Africa/Douala": "喀麦隆", + "America/Curacao": "库拉索", + "Europe/Busingen": "德国", + "Africa/Djibouti": "吉布提", + "America/Dominica": "多米尼克", + "Africa/Asmara": "厄立特里亚", + "Africa/Addis_Ababa": "埃塞俄比亚", + "Africa/Libreville": "加蓬", + "America/Grenada": "格林纳达", + "Europe/Guernsey": "根西岛", + "Africa/Accra": "加纳", + "Africa/Banjul": "冈比亚", + "Africa/Conakry": "几内亚", + "America/Guadeloupe": "瓜德罗普", + "Africa/Malabo": "赤道几内亚", + "Europe/Zagreb": "克罗地亚", + "Europe/Isle_of_Man": "马恩岛", + "Europe/Jersey": "泽西岛", + "Asia/Phnom_Penh": "柬埔寨", + "Indian/Comoro": "科摩罗", + "America/St_Kitts": "圣基茨和尼维斯", + "Asia/Kuwait": "科威特", + "America/Cayman": "开曼群岛", + "Asia/Vientiane": "老挝", + "America/St_Lucia": "圣卢西亚", + "Europe/Vaduz": "列支敦士登", + "Africa/Maseru": "莱索托", + "Europe/Podgorica": "黑山", + "America/Marigot": "法属圣马丁", + "Indian/Antananarivo": "马达加斯加", + "Europe/Skopje": "北马其顿", + "Africa/Bamako": "马里", + "Pacific/Saipan": "北马里亚纳群岛", + "Africa/Nouakchott": "毛里塔尼亚", + "America/Montserrat": "蒙特塞拉特", + "Africa/Blantyre": "马拉维", + "Africa/Niamey": "尼日尔", + "Asia/Muscat": "阿曼", + "Africa/Kigali": "卢旺达", + "Atlantic/St_Helena": "圣赫勒拿", + "Europe/Ljubljana": "斯洛文尼亚", + "Arctic/Longyearbyen": "斯瓦尔巴和扬马延", + "Europe/Bratislava": "斯洛伐克", + "Africa/Freetown": "塞拉利昂", + "Europe/San_Marino": "圣马力诺", + "Africa/Dakar": "塞内加尔", + "Africa/Mogadishu": "索马里", + "America/Lower_Princes": "荷属圣马丁", + "Africa/Mbabane": "斯威士兰", + "Africa/Lome": "多哥", + "America/Port_of_Spain": "特立尼达和多巴哥", + "Africa/Dar_es_Salaam": "坦桑尼亚", + "Africa/Kampala": "乌干达", + "Pacific/Midway": "美国本土外小岛屿", + "Europe/Vatican": "梵蒂冈", + "America/St_Vincent": "圣文森特和格林纳丁斯", + "America/Tortola": "英属维尔京群岛", + "America/St_Thomas": "美属维尔京群岛", + "Asia/Aden": "也门", + "Indian/Mayotte": "马约特", + "Africa/Lusaka": "赞比亚", + "Africa/Harare": "津巴布韦", +} diff --git a/jcloud/utils/dns.py b/jcloud/utils/dns.py new file mode 100644 index 0000000..03a5aef --- /dev/null +++ b/jcloud/utils/dns.py @@ -0,0 +1,76 @@ +import boto3 +import jingrow +from jingrow.core.utils import find +from jingrow.model.document import Document + +from jcloud.utils import log_error + + +@jingrow.whitelist() +def create_dns_record(pg, record_name=None): + """Check if site needs dns records and creates one.""" + domain = jingrow.get_pg("Root Domain", pg.domain) + is_standalone = jingrow.get_value("Server", pg.server, "is_standalone") + if pg.cluster == domain.default_cluster and not is_standalone: + return + + if is_standalone: + _change_dns_record("UPSERT", domain, pg.server, record_name=record_name) + 
else: + proxy_server = jingrow.get_value("Server", pg.server, "proxy_server") + _change_dns_record("UPSERT", domain, proxy_server, record_name=record_name) + + +def _change_dns_record( + method: str, domain: Document, proxy_server: str, record_name: str = None +): + """ + Change dns record of site + + method: CREATE | DELETE | UPSERT + """ + try: + client = boto3.client( + "route53", + aws_access_key_id=domain.aws_access_key_id, + aws_secret_access_key=domain.get_password("aws_secret_access_key"), + ) + zones = client.list_hosted_zones_by_name()["HostedZones"] + hosted_zone = find(reversed(zones), lambda x: domain.name.endswith(x["Name"][:-1]))[ + "Id" + ] + client.change_resource_record_sets( + ChangeBatch={ + "Changes": [ + { + "Action": method, + "ResourceRecordSet": { + "Name": record_name, + "Type": "CNAME", + "TTL": 600, + "ResourceRecords": [{"Value": proxy_server}], + }, + } + ] + }, + HostedZoneId=hosted_zone, + ) + except client.exceptions.InvalidChangeBatch as e: + # If we're attempting to DELETE and record is not found, ignore the error + # e.response["Error"]["Message"] looks like + # [Tried to delete resource record set [name='xxx.jingrow.cloud.', type='CNAME'] but it was not found] + if method == "DELETE" and "but it was not found" in e.response["Error"]["Message"]: + return + log_error( + "Route 53 Record Creation Error", + domain=domain.name, + site=record_name, + proxy_server=proxy_server, + ) + except Exception: + log_error( + "Route 53 Record Creation Error", + domain=domain.name, + site=record_name, + proxy_server=proxy_server, + ) diff --git a/jcloud/utils/email_validator.py b/jcloud/utils/email_validator.py new file mode 100644 index 0000000..5cc03fb --- /dev/null +++ b/jcloud/utils/email_validator.py @@ -0,0 +1,170 @@ +""" +Customized function of validate-email package + +RFC 2822 - style email validation for Python + +(c) 2012 Syrus Akbary +Extended from (c) 2011 Noel Bush for support of mx and user check + +This code is made available to you under the GNU LGPLv3. + +This module provides a single method, valid_email_address(), which returns True or False to indicate +whether a given address is valid according to the 'addr-spec' part of the specification given in RFC +2822. Ideally, we would like to find this in some other library, already thoroughly tested and +well-maintained. The standard Python library email.utils contains a parse_addr() function, but it +is not sufficient to detect many malformed addresses. + +This implementation aims to be faithful to the RFC, with the +exception of a circular definition (see comments below), and +with the omission of the pattern components marked as "obsolete". +""" + +import contextlib +import re +import smtplib + +from dns.resolver import Resolver + +# All we are really doing is comparing the input string to one +# gigantic regular expression. But building that regexp, and +# ensuring its correctness, is made much easier by assembling it +# from the "tokens" defined by the RFC. Each of these tokens is +# tested in the accompanying unit test file. +# +# The section of RFC 2822 from which each pattern component is +# derived is given in an accompanying comment. +# +# (To make things simple, every string below is given as 'raw', +# even when it's not strictly necessary. This way we don't forget +# when it is necessary.) + +WSP = r"[ \t]" # see 2.2.2. Structured Header Field Bodies +CRLF = r"(?:\r\n)" # see 2.2.3. Long Header Fields +NO_WS_CTL = r"\x01-\x08\x0b\x0c\x0f-\x1f\x7f" # see 3.2.1. 
Primitive Tokens +QUOTED_PAIR = r"(?:\\.)" # see 3.2.2. Quoted characters +FWS = r"(?:(?:" + WSP + r"*" + CRLF + r")?" + WSP + r"+)" # see 3.2.3. Folding white space and comments +CTEXT = r"[" + NO_WS_CTL + r"\x21-\x27\x2a-\x5b\x5d-\x7e]" # see 3.2.3 +CCONTENT = r"(?:" + CTEXT + r"|" + QUOTED_PAIR + r")" # see 3.2.3 (NB: The RFC includes COMMENT here +# as well, but that would be circular.) +COMMENT = r"\((?:" + FWS + r"?" + CCONTENT + r")*" + FWS + r"?\)" # see 3.2.3 +CFWS = r"(?:" + FWS + r"?" + COMMENT + ")*(?:" + FWS + "?" + COMMENT + "|" + FWS + ")" # see 3.2.3 +ATEXT = r"[\w!#$%&\'\*\+\-/=\?\^`\{\|\}~]" # see 3.2.4. Atom +ATOM = CFWS + r"?" + ATEXT + r"+" + CFWS + r"?" # see 3.2.4 +DOT_ATOM_TEXT = ATEXT + r"+(?:\." + ATEXT + r"+)*" # see 3.2.4 +DOT_ATOM = CFWS + r"?" + DOT_ATOM_TEXT + CFWS + r"?" # see 3.2.4 +QTEXT = r"[" + NO_WS_CTL + r"\x21\x23-\x5b\x5d-\x7e]" # see 3.2.5. Quoted strings +QCONTENT = r"(?:" + QTEXT + r"|" + QUOTED_PAIR + r")" # see 3.2.5 +QUOTED_STRING = CFWS + r"?" + r'"(?:' + FWS + r"?" + QCONTENT + r")*" + FWS + r"?" + r'"' + CFWS + r"?" +LOCAL_PART = r"(?:" + DOT_ATOM + r"|" + QUOTED_STRING + r")" # see 3.4.1. Addr-spec specification +DTEXT = r"[" + NO_WS_CTL + r"\x21-\x5a\x5e-\x7e]" # see 3.4.1 +DCONTENT = r"(?:" + DTEXT + r"|" + QUOTED_PAIR + r")" # see 3.4.1 +DOMAIN_LITERAL = ( + CFWS + r"?" + r"\[" + r"(?:" + FWS + r"?" + DCONTENT + r")*" + FWS + r"?\]" + CFWS + r"?" +) # see 3.4.1 +DOMAIN = r"(?:" + DOT_ATOM + r"|" + DOMAIN_LITERAL + r")" # see 3.4.1 +ADDR_SPEC = LOCAL_PART + r"@" + DOMAIN # see 3.4.1 + +# A valid address will match exactly the 3.4.1 addr-spec. +VALID_ADDRESS_REGEXP = re.compile(r"^" + ADDR_SPEC + r"$") + +MX_DNS_CACHE = {} +MX_CHECK_CACHE = {} + + +def get_mx_ip(mx_host): + """ + Get the IP address of a given MX host + + :param mx_host: The host being looked up + :type mx_host: str + :return: A list of IP addresses + :rtype: list + """ + if mx_host not in MX_DNS_CACHE: + try: + resolver = Resolver(configure=False) + resolver.nameservers = ["1.1.1.1", "1.0.0.1", "8.8.8.8", "8.8.4.4"] + answers = resolver.query(mx_host, "MX") + + mx_lookup_result = [] + + for answer in answers: + mx_lookup_result.append((answer.preference, answer.exchange.to_text()[:-1])) + MX_DNS_CACHE[mx_host] = mx_lookup_result + except Exception: + raise + return MX_DNS_CACHE[mx_host] + + +def check_mx_record(email, verify=False, smtp_timeout=10): + """ + Checks for an MX record on the given email addresses' hostname. 
+ + :param email: The email address + :type email: str + :param verify: Whether the email address' existence should be verified + :type verify: bool + :param smtp_timeout: Maximum wait time on an SMTP connection + :type smtp_timeout: int + :return: bool or None + """ + hostname = email[email.find("@") + 1 :] + mx_hosts = get_mx_ip(hostname) + if mx_hosts is None: + return False + for mx_host in mx_hosts: + with contextlib.suppress(Exception): + if not verify and mx_host[1] in MX_CHECK_CACHE: + return MX_CHECK_CACHE[mx_host[1]] + smtp = smtplib.SMTP(timeout=smtp_timeout) + smtp.connect(mx_host[1]) + MX_CHECK_CACHE[mx_host[1]] = True + if not verify: + try: # noqa: SIM105 + smtp.quit() + except smtplib.SMTPServerDisconnected: + pass + return True + status, _ = smtp.helo() + if status != 250: + smtp.quit() + continue + smtp.mail("") + status, _ = smtp.rcpt(email) + if status == 250: + smtp.quit() + return True + smtp.quit() + return None + + +def validate_email(email, check_mx=False, verify=False, smtp_timeout=10, **kwargs): + """ + Indicate whether the given string is a valid email address according to the 'addr-spec' portion + of RFC 2822 (see section 3.4.1). Parts of the spec that are marked obsolete are *not* included + in this test, and certain arcane constructions that depend on circular definitions in the spec + may not pass, but in general this should correctly identify any email address likely to be in + use as of 2011. + + :param email: The email address to be validated + :type email: str + :param check_mx: Whether or not MX records should be verified + :type check_mx: bool + :param verify: Whether or not the email addresses' actual existence should be verified + :type verify: bool + :param smtp_timeout: Maximum wait time on an SMTP connection + :type smtp_timeout: int + :return: The validity of the given email address + :rtype: bool or None + """ + try: + if re.match(VALID_ADDRESS_REGEXP, email) is not None: + check_mx |= verify + if check_mx: + return check_mx_record(email, verify=verify, smtp_timeout=smtp_timeout) + else: + return False + except Exception: + return None + else: + return True diff --git a/jcloud/utils/jobs.py b/jcloud/utils/jobs.py new file mode 100644 index 0000000..3238aed --- /dev/null +++ b/jcloud/utils/jobs.py @@ -0,0 +1,93 @@ +from typing import Any, Generator, Optional +import signal + +import jingrow +from jingrow.core.pagetype.rq_job.rq_job import fetch_job_ids +from jingrow.utils.background_jobs import get_queues, get_redis_conn +from redis import Redis +from rq.command import send_stop_job_command +from rq.job import Job, JobStatus, NoSuchJobError, get_current_job + + +def stop_background_job(job: Job): + try: + if job.get_status() == JobStatus.STARTED: + send_stop_job_command(job.connection, job.id) + elif job.get_status() in [JobStatus.QUEUED, JobStatus.SCHEDULED]: + job.cancel() + except Exception: + return + + +def get_background_jobs( + pagetype: str, + name: str, + status: list[str] | None = None, + connection: "Optional[Redis]" = None, +) -> Generator[Job, Any, None]: + """ + Returns background jobs for a `pg` created using the `run_pg_method` + Returned jobs are in the QUEUED, SCHEDULED or STARTED state. 
+ """ + connection = connection or get_redis_conn() + status = status or ["queued", "scheduled", "started"] + for job_id in get_job_ids(status, connection): + try: + job = Job.fetch(job_id, connection=connection) + except NoSuchJobError: + continue + + if not does_job_belong_to_pg(job, pagetype, name): + continue + + yield job + + +def get_job_ids( + status: str | list[str], + connection: "Optional[Redis]" = None, +) -> Generator[str, Any, None]: + if isinstance(status, str): + status = [status] + connection = connection or get_redis_conn() + + for q in get_queues(connection): + for s in status: + try: + job_ids = fetch_job_ids(q, s) + # ValueError thrown on macOS + # Message: signal only works in main thread of the main interpreter + except ValueError: + return + + for jid in job_ids: + yield jid + + +def does_job_belong_to_pg(job: Job, pagetype: str, name: str) -> bool: + site = job.kwargs.get("site") + if site and site != jingrow.local.site: + return False + + job_name = ( + job.kwargs.get("job_type") or job.kwargs.get("job_name") or job.kwargs.get("method") + ) + if job_name != "jingrow.utils.background_jobs.run_pg_method": + return False + + kwargs = job.kwargs.get("kwargs", {}) + if kwargs.get("pagetype") != pagetype: + return False + + if kwargs.get("name") != name: + return False + + return True + + +def has_job_timeout_exceeded() -> bool: + # RQ sets up an alarm signal and a signal handler that raises + # JobTimeoutException after the timeout amount + # getitimer returns the time left for this timer + # 0.0 means the timer is expired + return bool(get_current_job()) and (signal.getitimer(signal.ITIMER_REAL)[0] <= 0) diff --git a/jcloud/utils/mpesa_utils.py b/jcloud/utils/mpesa_utils.py new file mode 100644 index 0000000..96bbb2c --- /dev/null +++ b/jcloud/utils/mpesa_utils.py @@ -0,0 +1,58 @@ +# Copyright (c) 2019, Jingrow Technologies and contributors +# License: MIT. See LICENSE + +import datetime +import json + +import jingrow + + +def create_mpesa_request_log( + data, + integration_type=None, + service_name=None, + name=None, + error=None, + status="Queued", + request_headers=None, + output=None, + **kwargs, +): + """ + DEPRECATED: The parameter integration_type will be removed in the next major release. + Use is_remote_request instead. 
+ """ + if integration_type == "Remote": + kwargs["is_remote_request"] = 1 + + elif integration_type == "Subscription Notification": + kwargs["request_description"] = integration_type + + if isinstance(data, str): + data = json.loads(data) + + request_log = jingrow.get_pg( + { + "pagetype": "Mpesa Request Log", + "integration_request_service": service_name, + "request_headers": get_json(request_headers), + "data": get_json(data), + "output": get_json(output), + "error": get_json(error), + "request_id": name, + "status": status, + } + ) + request_log.insert(ignore_permissions=True) + + return request_log + + +def get_json(obj): + return obj if isinstance(obj, str) else jingrow.as_json(obj, indent=1) + + +def json_handler(obj): + if isinstance(obj, datetime.date | datetime.timedelta | datetime.datetime): + return str(obj) + return None diff --git a/jcloud/utils/otp.py b/jcloud/utils/otp.py new file mode 100644 index 0000000..3a447fd --- /dev/null +++ b/jcloud/utils/otp.py @@ -0,0 +1,7 @@ +import os + + +def generate_otp(): + """Generates a cryptographically secure random OTP""" + + return int.from_bytes(os.urandom(5), byteorder="big") % 900000 + 100000 diff --git a/jcloud/utils/telemetry.py b/jcloud/utils/telemetry.py new file mode 100644 index 0000000..62cdaae --- /dev/null +++ b/jcloud/utils/telemetry.py @@ -0,0 +1,45 @@ +from contextlib import suppress + +import jingrow +from posthog import Posthog + +from jcloud.utils import log_error + + +def init_telemetry(): + """Init posthog for server side telemetry.""" + if hasattr(jingrow.local, "posthog"): + return + + posthog_host = jingrow.conf.get("posthog_host") + posthog_project_id = jingrow.conf.get("posthog_project_id") + + if not posthog_host or not posthog_project_id: + return + + with suppress(Exception): + jingrow.local.posthog = Posthog(posthog_project_id, host=posthog_host) + + +def capture(event, app, site=None): + init_telemetry() + ph: Posthog = getattr(jingrow.local, "posthog", None) + with suppress(Exception): + ph and ph.capture(site or jingrow.local.site, f"{app}_{event}") + + +def identify(site, **kwargs): + init_telemetry() + ph: Posthog = getattr(jingrow.local, "posthog", None) + with suppress(Exception): + ph and ph.identify(site, kwargs) + + +@jingrow.whitelist(allow_guest=True) +def capture_read_event(email: str = None): + try: + capture("read_email", "fc_signup", email) + except Exception as e: + log_error("Failed to capture read_email event", e) + finally: + jingrow.response.update(jingrow.utils.get_imaginary_pixel_response()) diff --git a/jcloud/utils/test.py b/jcloud/utils/test.py new file mode 100644 index 0000000..8cdc041 --- /dev/null +++ b/jcloud/utils/test.py @@ -0,0 +1,46 @@ +"""Utility methods for writing tests""" + +from typing import Callable + +import jingrow + + +def foreground_enqueue_pg( + pagetype: str, + docname: str, + method: str, + queue="default", + timeout=None, + now=False, # default args unused to avoid them from going to kwargs + enqueue_after_commit=False, + job_id=None, + deduplicate=False, + at_front: bool = False, + **kwargs, +): + """ + Run enqueued method in foreground + + Use for monkey patching enqueue_pg in tests + """ + getattr(jingrow.get_pg(pagetype, docname), method)(**kwargs) + + +def foreground_enqueue( + method: str | Callable, + queue: str = "default", + timeout: int | None = None, + event=None, + is_async: bool = True, + job_name: str | None = None, + now: bool = True, + enqueue_after_commit: bool = False, + *, + on_success: Callable = None, + on_failure: Callable = None, + 
at_front: bool = False, + job_id: str = None, + deduplicate: bool = False, + **kwargs, +): + return jingrow.call(method, **kwargs) diff --git a/jcloud/utils/unique_name_generator.py b/jcloud/utils/unique_name_generator.py new file mode 100644 index 0000000..bc842fe --- /dev/null +++ b/jcloud/utils/unique_name_generator.py @@ -0,0 +1,19 @@ +# Copyright (c) 2024, JINGROW + +import random +import string + + +def generate(segment_length=3, num_segments=3, separator="-"): + # Define the character set: only lowercase letters + characters = string.ascii_lowercase + + # Generate segments + segments = [] + for _ in range(num_segments): + segment = "".join(random.choice(characters) for _ in range(segment_length)) + segments.append(segment) + + # Join segments with the separator + random_id = separator.join(segments) + return random_id diff --git a/jcloud/utils/webhook.py b/jcloud/utils/webhook.py new file mode 100644 index 0000000..e5ad21a --- /dev/null +++ b/jcloud/utils/webhook.py @@ -0,0 +1,76 @@ +# Copyright (c) 2019, JINGROW +# For license information, please see license.txt + +from __future__ import annotations + +import json + +import jingrow +from jingrow.model import default_fields +from jingrow.model.document import Document + + +def create_webhook_event(event: str, payload: dict | Document, team: str) -> bool: + try: + # Check if team has configured webhook against this event + JcloudWebhookSelectedEvent = jingrow.qb.PageType("Jcloud Webhook Selected Event") + JcloudWebhook = jingrow.qb.PageType("Jcloud Webhook") + + query = ( + jingrow.qb.from_(JcloudWebhookSelectedEvent) + .select(jingrow.query_builder.functions.Count(JcloudWebhookSelectedEvent.name).as_("count")) + .left_join(JcloudWebhook) + .on(JcloudWebhookSelectedEvent.parent == JcloudWebhook.name) + .where(JcloudWebhookSelectedEvent.event == event) + .where(JcloudWebhook.team == team) + .where(JcloudWebhook.enabled == 1) + ) + + result = query.run(as_dict=True) + is_any_webhook_enabled = result and result[0].get("count") > 0 + if is_any_webhook_enabled: + # prepare request payload + data = {} + if isinstance(payload, dict): + data = jingrow._dict(payload) + elif isinstance(payload, Document): + data = _process_document_payload(payload) + else: + jingrow.throw("Invalid data type") + + request_payload = json.dumps( + { + "event": event, + "data": data, + }, + default=str, + indent=4, + ) + + # create webhook log + jingrow.get_pg( + { + "pagetype": "Jcloud Webhook Log", + "status": "Pending", + "event": event, + "team": team, + "request_payload": request_payload, + } + ).insert(ignore_permissions=True) + return True + except Exception: + jingrow.log_error("failed to queue webhook event") + return False + + +def _process_document_payload(payload: Document): + # convert payload to dict + # send fields mentioned in dashboard_fields, as other fields can have sensitive information + fields = list(default_fields) + if hasattr(payload, "dashboard_fields"): + fields += payload.dashboard_fields + _pg = jingrow._dict() + for fieldname in fields: + _pg[fieldname] = payload.get(fieldname) + + return _pg diff --git a/jcloud/www/__init__.py b/jcloud/www/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/www/dashboard.py b/jcloud/www/dashboard.py new file mode 100644 index 0000000..e2617bf --- /dev/null +++ b/jcloud/www/dashboard.py @@ -0,0 +1,56 @@ +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +import jingrow + +from jcloud.utils import get_default_team_for_user, 
get_valid_teams_for_user + +base_template_path = "templates/www/dashboard.html" +no_cache = 1 + + +def get_context(): + return _get_context() + + +def _get_context(): + csrf_token = jingrow.sessions.get_csrf_token() + jingrow.db.commit() + context = jingrow._dict() + context.boot = get_boot() + context.boot.csrf_token = csrf_token + return context + + +@jingrow.whitelist(methods=["POST"], allow_guest=True) +def get_context_for_dev(): + if not jingrow.conf.developer_mode: + jingrow.throw("This method is only meant for developer mode") + return get_boot() + + +def get_boot(): + return jingrow._dict( + jingrow_version=jingrow.__version__, + jcloud_frontend_sentry_dsn=jingrow.conf.jcloud_frontend_sentry_dsn or "", + jcloud_dashboard_sentry_dsn=jingrow.conf.jcloud_dashboard_sentry_dsn or "", + jcloud_frontend_posthog_host=jingrow.conf.posthog_host or "", + jcloud_frontend_posthog_project_id=jingrow.conf.posthog_project_id or "", + jcloud_site_name=jingrow.conf.site, + site_name=jingrow.local.site, + default_team=get_default_team_for_user(jingrow.session.user), + valid_teams=get_valid_teams_for_user(jingrow.session.user), + is_system_user=jingrow.session.data.user_type == "System User", + verify_cards_with_micro_charge=jingrow.db.get_single_value( + "Jcloud Settings", "verify_cards_with_micro_charge" + ), + **( + jingrow.db.get_values( + "Jcloud Settings", + "Jcloud Settings", + ["free_credits_cny", "free_credits_usd"], + as_dict=True, + )[0] + ), + ) diff --git a/jcloud/www/dashboard_old.py b/jcloud/www/dashboard_old.py new file mode 100644 index 0000000..da09b45 --- /dev/null +++ b/jcloud/www/dashboard_old.py @@ -0,0 +1,14 @@ +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +# import jingrow + +from .dashboard import _get_context + +base_template_path = "templates/www/dashboard-old.html" +no_cache = 1 + + +def get_context(context): + return _get_context() diff --git a/jcloud/www/docs/__init__.py b/jcloud/www/docs/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/www/docs/billing/__init__.py b/jcloud/www/docs/billing/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/www/docs/getting-started/__init__.py b/jcloud/www/docs/getting-started/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/www/docs/sites/__init__.py b/jcloud/www/docs/sites/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/www/github/__init__.py b/jcloud/www/github/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/www/github/authorize.html b/jcloud/www/github/authorize.html new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/www/github/authorize.py b/jcloud/www/github/authorize.py new file mode 100644 index 0000000..15b55f4 --- /dev/null +++ b/jcloud/www/github/authorize.py @@ -0,0 +1,42 @@ +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +import json +from base64 import b64decode + +import jingrow +import requests + +from jcloud.utils import log_error + + +def get_context(context): + code = jingrow.form_dict.code + state = jingrow.form_dict.state + redirect_url = jingrow.utils.get_url("/dashboard") + if code and state: + decoded_state = json.loads(b64decode(state).decode()) + team = decoded_state["team"] + redirect_url = jingrow.utils.get_url(decoded_state["url"]) + obtain_access_token(code, team) + jingrow.db.commit() + jingrow.flags.redirect_location = redirect_url + raise jingrow.Redirect + + +def 
obtain_access_token(code, team): + response = None + try: + client_id = jingrow.db.get_single_value("Jcloud Settings", "github_app_client_id") + client_secret = jingrow.db.get_single_value( + "Jcloud Settings", "github_app_client_secret" + ) + data = {"client_id": client_id, "client_secret": client_secret, "code": code} + headers = {"Accept": "application/json"} + response = requests.post( + "https://github.com/login/oauth/access_token", data=data, headers=headers + ).json() + jingrow.db.set_value("Team", team, "github_access_token", response["access_token"]) + except Exception: + log_error("Access Token Error", team=team, code=code, response=response) diff --git a/jcloud/www/github/redirect.html b/jcloud/www/github/redirect.html new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/www/github/redirect.py b/jcloud/www/github/redirect.py new file mode 100644 index 0000000..da210b5 --- /dev/null +++ b/jcloud/www/github/redirect.py @@ -0,0 +1,37 @@ +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + + +import jingrow +import requests + +from jcloud.utils import log_error + + +def get_context(context): + if not jingrow.db.get_single_value("Jcloud Settings", "github_app_id"): + code = jingrow.form_dict.code + response = None + try: + headers = {"Accept": "application/vnd.github.v3+json"} + response = jingrow._dict( + requests.post( + f"http://git.jingrow.com:3000/api/v1/app-manifests/{code}/conversions", headers=headers + ).json() + ) + + settings = jingrow.get_pg("Jcloud Settings", "Jcloud Settings") + settings.github_app_id = response.id + settings.github_app_client_id = response.client_id + settings.github_app_client_secret = response.client_secret + settings.github_app_public_link = response.html_url + settings.github_app_private_key = response.pem + settings.github_webhook_secret = response.webhook_secret + settings.save() + jingrow.db.commit() + except Exception: + log_error("GitHub App Creation Error", code=code, response=response) + + redirect_url = jingrow.utils.get_url("/desk#Form/Jcloud Settings") + jingrow.flags.redirect_location = redirect_url + raise jingrow.Redirect diff --git a/jcloud/www/internal/__init__.py b/jcloud/www/internal/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/www/internal/_sidebar.json b/jcloud/www/internal/_sidebar.json new file mode 100644 index 0000000..cdb8802 --- /dev/null +++ b/jcloud/www/internal/_sidebar.json @@ -0,0 +1,67 @@ +[ + { + "group_title": "Getting Started", + "group_items": [ + { + "title": "Overview", + "route": "/internal/getting-started/overview" + } + ] + }, + { + "group_title": "Jcloud", + "group_items": [ + { + "title": "Overview", + "route": "/internal/jcloud/overview" + } + ] + }, + { + "group_title": "Local Infrastructure", + "group_items": [ + { + "title": "Overview", + "route": "/internal/infrastructure/overview" + } + ] + }, + { + "group_title": "Server", + "group_items": [ + { + "title": "Server Setup", + "route": "/internal/servers/server-setup" + }, + { + "title": "Proxy Server Setup", + "route": "/internal/servers/proxy-server-setup" + }, + { + "title": "Database Server Setup", + "route": "/internal/servers/database-server-setup" + }, + { + "title": "Convert Server into a Database Server", + "route": "/internal/servers/convert-server-to-database-server" + }, + { + "title": "Database Replication", + "route": "/internal/servers/database-replication" + }, + { + "title": "Database Failover", + "route": "/internal/servers/database-failover" + } + ] + }, + { + 
"group_title": "Bench", + "group_items": [ + { + "title": "Build a Bench", + "route": "/internal/bench/overview" + } + ] + } +] diff --git a/jcloud/www/internal/bench/__init__.py b/jcloud/www/internal/bench/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/www/internal/bench/overview.md b/jcloud/www/internal/bench/overview.md new file mode 100644 index 0000000..bc61cc1 --- /dev/null +++ b/jcloud/www/internal/bench/overview.md @@ -0,0 +1,88 @@ +--- +title: Build a Bench +--- + +# Build a Bench + +This is information about doctypes required to manage benches and apps on FC +from [desk](https://jingrow.com/app). Roughly, the build process goes +through these doctypes (that you're concerned with) in order. + +App => App Source => Release Group => Deploy Candidate => Bench + +To build a bench, we need documents of the following doctypes. + +## App + +It's simply a master of all apps on FC. Only detail it has is the name of the app. + +![App Detail](/assets/jcloud/images/internal/bench/app.png 'App Detail') + +## App Source + +We use this pg to specify Github _repo_ and _branch_ for an App. An App can have multiple of these. + +![App Source Detail](/assets/jcloud/images/internal/bench/app-source.png 'App Source Detail') + +## Release Group + +This is like a blueprint of a series of benches. It contains a list of Apps and +App Sources as child table. + +> Must have jingrow as first app. +> You can use `visit dashboard` link in sidebar to see corresponding dashboard view + +![Release Group](/assets/jcloud/images/internal/bench/release-group.png 'Release Group') + +It also contains a list of servers to deploy your bench on. + +## Deploy Candidate (created automatically) + +This pg becomes more specific than the Release Group in the sense that it also +has information on which _commit_ of each app you're going to put on your +bench. + +![Deploy Candidate](/assets/jcloud/images/internal/bench/deploy-candidate.png 'Deploy Candidate') + +> The information of each commit of an App Source is in an App Release pg. + +Deploy Candidates are created for each update to the Release Group (could be +update to an app, could be addition/removal of apps, etc..). It is not intended +to deploy all of the ones created. Latest one is deployed when you update bench +from dashboard view. + +![Deploy Candidate List](/assets/jcloud/images/internal/bench/deploy-candidate-list.png 'Deploy Candidate List') + +You can also manually trigger creation by using `Create Deploy Candidate` +Action on the Release Group. + +![Create Deploy Candidate](/assets/jcloud/images/internal/bench/create-deploy-candidate.png 'Create Deploy Candidate') + +Use the `Build and Deploy` button to deploy a new Bench. +![](/assets/jcloud/images/internal/bench/build-and-deploy-button.png) + +This will create an image that'll be used for creating a bench. In case of any +issue, you can see the log of all commands executed in the Deploy Candidate +pg. `Build` will just create the image, not create a new bench on server. + +![Deploy Candidate Log](/assets/jcloud/images/internal/bench/deploy-candidate-log-desk.png) + +You can also get live output in the dashboard view. + +![Deploy Candidate Dashboard](/assets/jcloud/images/internal/bench/deploy-candidate-dashboard.png 'Deploy Candidate Dashboard') + +After build and upload is successful, there should be a link to the new bench +job, which should also be successful for you to use your bench. 
+![New Bench Job Link](/assets/jcloud/images/internal/bench/new-bench-job-link.png)
+
+## Bench (created automatically)
+
+![New Bench Job](/assets/jcloud/images/internal/bench/new-bench-job.png)
+
+Created after a successful deploy. Represents a bench on an individual server.
+
+Many can be created for a single Deploy Candidate deploy, depending on the list of servers
+in the Release Group.
+
+![Bench List](/assets/jcloud/images/internal/bench/bench-list.png)
diff --git a/jcloud/www/internal/getting-started/__init__.py b/jcloud/www/internal/getting-started/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/jcloud/www/internal/getting-started/overview.md b/jcloud/www/internal/getting-started/overview.md
new file mode 100644
index 0000000..e07f19f
--- /dev/null
+++ b/jcloud/www/internal/getting-started/overview.md
@@ -0,0 +1,7 @@
+---
+title: Overview - Getting Started
+---
+
+# Jingrow Internal Docs
+
+Start with [Local Infrastructure Setup](/internal/infrastructure/overview).
\ No newline at end of file
diff --git a/jcloud/www/internal/index.html b/jcloud/www/internal/index.html
new file mode 100644
index 0000000..e69de29
diff --git a/jcloud/www/internal/index.py b/jcloud/www/internal/index.py
new file mode 100644
index 0000000..26127bc
--- /dev/null
+++ b/jcloud/www/internal/index.py
@@ -0,0 +1,10 @@
+# Copyright (c) 2020, JINGROW
+# For license information, please see license.txt
+
+
+import jingrow
+
+
+def get_context(context):
+    jingrow.flags.redirect_location = "/internal/getting-started/overview"
+    raise jingrow.Redirect
diff --git a/jcloud/www/internal/infrastructure/__init__.py b/jcloud/www/internal/infrastructure/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/jcloud/www/internal/infrastructure/overview.md b/jcloud/www/internal/infrastructure/overview.md
new file mode 100644
index 0000000..f397310
--- /dev/null
+++ b/jcloud/www/internal/infrastructure/overview.md
@@ -0,0 +1,94 @@
+---
+title: Local Infrastructure
+---
+
+# Local Infrastructure
+
+The local infrastructure subsystem is a faster, cheaper and more flexible alternative to running virtual machines on a public cloud for development.
+
+> Note: Local Infrastructure is available for Linux hosts only. You're free to add macOS support by using VirtualBox.
+
+## Prerequisites
+
+#### Dependencies
+
+- Packer - For building virtual machine images.
+- Vagrant - Simplified CLI/API for managing virtual hosts.
+
+  - vagrant-libvirt - For using KVM/Libvirt. Duh!
+  - vagrant-hostmanager - For managing the hosts file in guests and the host machine.
+
+- Libvirt/KVM - Virtualization provider.
+
+### Installation
+
+Dependencies can be installed with
+
+```bash
+source env/bin/activate
+python3 apps/jcloud/backbone/setup.py
+```
+
+> Note: After this, log out completely and log in again to start with local infrastructure. Start a new shell session if you're on a non-GUI server.
+
+### Building Base Images
+
+To spawn virtual machines quickly, we'll create an Ubuntu base image (as seen on DigitalOcean or other cloud providers). This image can later be used to spawn blank virtual machines in seconds.
+
+This can be done with (the `backbone` CLI is installed with the `jcloud` application)
+
+```bash
+source env/bin/activate
+backbone hypervisor build
+```
+
+> Note: When running for the very first time this will download a ~500 MB Ubuntu image.
+
+See the contents of the `jcloud/backbone/packer` directory for more details.
+
+### Spawning Virtual Machines
+
+There's no simple CLI for this at the moment.
+
+1. Create a Vagrant directory
+
+```bash
+mkdir scratch
+cd scratch
+ln -s ../apps/jcloud/backbone/vagrant/Vagrantfile Vagrantfile
+```
+
+2. Start the local cluster with the following command and provide the sudo password when asked
+
+```
+vagrant up --no-parallel --provider=libvirt
+```
+
+You can destroy the machines with
+
+```
+vagrant destroy -f
+```
+
+and spawn them again with the `vagrant up` command.
+
+Local machines have the following names/IPs by default
+
+| Type            | Name      | Public IP  | Private IP |
+| --------------- | --------- | ---------- | ---------- |
+| Proxy Server    | n1.fc.dev | 10.0.1.101 | 10.1.1.101 |
+| Jingrow Server  | f1.fc.dev | 10.0.2.101 | 10.1.2.101 |
+| Database Server | m1.fc.dev | 10.0.3.101 | 10.1.3.101 |
+| Database Server | m2.fc.dev | 10.0.3.102 | 10.1.3.102 |
+
+### Accessing VMs
+
+You can SSH into these machines using the following commands
+
+```bash
+ssh root@n1.fc.dev
+```
+
+```bash
+vagrant ssh n1
+```
diff --git a/jcloud/www/internal/jcloud/__init__.py b/jcloud/www/internal/jcloud/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/jcloud/www/internal/jcloud/overview.md b/jcloud/www/internal/jcloud/overview.md
new file mode 100644
index 0000000..c35bc9f
--- /dev/null
+++ b/jcloud/www/internal/jcloud/overview.md
@@ -0,0 +1,83 @@
+---
+title: Jcloud
+---
+
+# Jcloud
+
+Steps to set up your own production/development jcloud installation.
+
+### Basic
+
+- **Domain** - Domain to use for Jingrow sites and servers. You should have access to the DNS settings of this domain (see the DNS section).
+
+### DNS
+
+This is necessary for obtaining wildcard TLS certificates.
+
+Currently the only supported provider is AWS Route 53, using the [Route 53 plugin for Certbot](https://certbot-dns-route53.readthedocs.io/en/stable/).
+
+### Let's Encrypt
+
+- **Certbot Directory**:
+
+  By default Certbot uses the `/etc/letsencrypt`, `/var/log/letsencrypt` and `/var/lib/letsencrypt` directories. Writing to these directories requires root access; to avoid this, we use the `--logs-dir`, `--work-dir` and `--config-dir` flags to run certbot commands without root privileges.
+
+
+- **Webroot Directory**:
+
+  This directory is used for acquiring TLS certificates for custom domains using the [Webroot plugin for Certbot](https://certbot.eff.org/docs/using.html#webroot).
+
+  Skip this if you don't plan to use custom domains.
+
+> Note: Both these directories should be writable by the jingrow user. If the directories don't exist then they'll be created.
+
+- **Staging CA**: Check this to use the [Let's Encrypt Staging Environment](https://letsencrypt.org/docs/staging-environment/) when experimenting with TLS certificates.
+
+- **EFF Registration Email**: EFF will send expiry, renewal and other updates to this email.
+
+After setting these, click **Obtain TLS Certificate** (requires **Basic** > **Domain** and the **DNS** section to be set up).
+
+### GitHub
+
+1. Go to **Jcloud Settings** > **GitHub**
+1. Click on **Create GitHub App**
+1. Name your app and click **Create GitHub App for ...**
+
+   ![GitHub App Create](/assets/jcloud/images/internal/jcloud/github/github-app-create.png)
+1. You'll be redirected to Jcloud Settings.
+
+   ![GitHub App Created](/assets/jcloud/images/internal/jcloud/github/github-app-created.png)
+
+
+1. Create a GitHub Personal Access Token and add it in the GitHub Access Token field.
+
+   This is needed for custom apps that are added without using the app creation flow (Dashboard > New App...),
+   as these are subject to stricter [rate limits](https://docs.git.jingrow.com:3000/en/free-pro-team@latest/rest/overview/resources-in-the-rest-api#rate-limiting) (5000 requests/hour vs 60 requests/hour).
+
+### Docker
+
+**Docker Registry**: Built Docker images are pushed here (this can be a self-hosted service or a managed container registry, e.g. Docker Hub, DigitalOcean Container Registry, Amazon ECR, etc.).
+
+**Clone Directory**: Custom apps are cloned and kept in this directory to avoid repeated cloning. The directory structure looks like this
+
+```bash
+.
+├── jingrow/
+│   ├── release-1/
+│   └── release-2/
+└── jerp/
+    ├── release-1/
+    └── release-2/
+```
+**Build Directory**: Directory for Docker build contexts. The directory structure looks like this
+
+```bash
+.
+├── bench-1/
+│   ├── build-1/
+│   └── build-2/
+└── bench-2/
+    ├── build-1/
+    └── build-2/
+```
+**Code Server**: (Not required) A Visual Studio Code server instance for read-only access to cloned apps for manual review. Requires a password to access.
\ No newline at end of file
diff --git a/jcloud/www/internal/servers/__init__.py b/jcloud/www/internal/servers/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/jcloud/www/internal/servers/convert-server-to-database-server.md b/jcloud/www/internal/servers/convert-server-to-database-server.md
new file mode 100644
index 0000000..fe5c5e8
--- /dev/null
+++ b/jcloud/www/internal/servers/convert-server-to-database-server.md
@@ -0,0 +1,36 @@
+---
+title: Convert Jingrow Server to Database Server
+---
+
+# Convert Jingrow Server to Database Server
+
+> Note: Downtime Ahead! This will restart the MariaDB process.
+
+Convert a Server into a Database Server so that a local database can be moved to a remote server using MariaDB replication.
+
+> Note: MariaDB running on a Server allows clients to connect from localhost only. MariaDB running on a Database Server allows clients to connect from the private network.
+
+#### Requires
+- The Jcloud host must have root SSH access to the target machine.
+
+#### Steps
+
+1. Create a Database Server
+   - Use all values from the Server as-is.
+   - Set a unique **Server ID** (a unique value will be set if left unset).
+
+   ![Server](/assets/jcloud/images/internal/servers/convert-jingrow-to-database/server.png)
+
+   ![New Database Server](/assets/jcloud/images/internal/servers/convert-jingrow-to-database/new-database-server.png)
+
+1. Click on **Actions > Convert From Jingrow Server**
+
+   ![Convert From Jingrow Server](/assets/jcloud/images/internal/servers/convert-jingrow-to-database/database-server-actions-convert.png)
+
+1. Setup Complete
+
+   The **Database Server** field on the Server will be set (as if MariaDB were hosted remotely).
+
+   ![Database Server Active](/assets/jcloud/images/internal/servers/convert-jingrow-to-database/database-server-active.png)
+
+   ![Server Active](/assets/jcloud/images/internal/servers/convert-jingrow-to-database/server-active.png)
diff --git a/jcloud/www/internal/servers/database-failover.md b/jcloud/www/internal/servers/database-failover.md
new file mode 100644
index 0000000..7aec831
--- /dev/null
+++ b/jcloud/www/internal/servers/database-failover.md
@@ -0,0 +1,32 @@
+---
+title: Database Failover - Server
+---
+
+# Database Failover
+
+> Note: Minor Downtime + Data Loss ahead!
+
+#### Requires
+- [Database Replication](/internal/servers/database-replication) should already be set up.
+
+#### Steps
+
+1.
On Secondary Database Server Click on **Actions > Trigger Failover** + + This doesn't require the primary Database Server to be online. + + ![Setup Replication](/assets/jcloud/images/internal/servers/database-failover/database-server-actions-failover.png) + + +1. Failover Complete + + **Database Server** field will be updated on all Servers and Benches linked to the primary Database Server. + + ![Server After](/assets/jcloud/images/internal/servers/database-failover/server-after.png) + + ![Bench After](/assets/jcloud/images/internal/servers/database-failover/bench-after.png) + + Database Server will be promoted to primary. + + ![Database Server After](/assets/jcloud/images/internal/servers/database-failover/database-server-after.png) + diff --git a/jcloud/www/internal/servers/database-replication.md b/jcloud/www/internal/servers/database-replication.md new file mode 100644 index 0000000..228bec7 --- /dev/null +++ b/jcloud/www/internal/servers/database-replication.md @@ -0,0 +1,31 @@ +--- +title: Database Replication - Server +--- + +# Database Replication + +> Note: This will wipe data already present on the secondary Database Server. + +#### Requires +- Primary and secondary Database Servers should be already setup. + +#### Steps + +1. Set **Primary** field on the secondary Database Server + + ![Server](/assets/jcloud/images/internal/servers/database-replication/primary-set.png) + +1. On Secondary Database Server Click on **Actions > Setup Replication** + + ![Setup Replication](/assets/jcloud/images/internal/servers/database-replication/database-server-actions.png) + + This runs playbooks on both primary and secondary Database Server. + + ![Multiple Plays](/assets/jcloud/images/internal/servers/database-replication/multiple-plays.png) + + +1. Setup Complete + + **Replication Setup** field on secondary Database Server will be set. + + ![Server Active](/assets/jcloud/images/internal/servers/database-replication/replication-complete.png) diff --git a/jcloud/www/internal/servers/database-server-setup.md b/jcloud/www/internal/servers/database-server-setup.md new file mode 100644 index 0000000..d92ce8c --- /dev/null +++ b/jcloud/www/internal/servers/database-server-setup.md @@ -0,0 +1,27 @@ +--- +title: Database Server Setup - Server +--- + +# Database Server Setup +Database Server runs MariaDB server listening on a private IP address. + +> Note: See [Server Setup](/internal/servers/server-setup) for detailed explanation. + +#### Requires +- Jcloud host must have root SSH access to the target machine. + +#### Steps + +1. Create a Database Server + - Set unique **Server ID** (a unique value will be set if left empty). + - **Is Primary** and **Primary** fields are inconsequential at this point. + + ![New Database Server](/assets/jcloud/images/internal/servers/database-server/new-database-server.png) + +1. Click on **Actions > Setup Server** + +1. Setup Complete + + + + \ No newline at end of file diff --git a/jcloud/www/internal/servers/proxy-server-setup.md b/jcloud/www/internal/servers/proxy-server-setup.md new file mode 100644 index 0000000..b1e4c71 --- /dev/null +++ b/jcloud/www/internal/servers/proxy-server-setup.md @@ -0,0 +1,28 @@ +--- +title: Proxy Server Setup - Server +--- + +# Proxy Server Setup +Proxy Server runs NGINX as a reverse proxy. + +> Note: See [Server Setup](/internal/servers/server-setup) for detailed explanation. + +#### Requires +- Jcloud host must have root SSH access to the target machine. + +#### Steps + +1. 
Create a Proxy Server
+
+   ![New Proxy Server](/assets/jcloud/images/internal/servers/proxy-server/new-proxy-server.png)
+
+1. Click on **Actions > Setup Server**
+
+   ![Setup Server](/assets/jcloud/images/internal/servers/proxy-server/proxy-server-setup-server.png)
+
+1. Setup Complete
+
+   ![Proxy Server Setup Success](/assets/jcloud/images/internal/servers/proxy-server/proxy-server-setup-success.png)
+
+
+
diff --git a/jcloud/www/internal/servers/server-setup.md b/jcloud/www/internal/servers/server-setup.md
new file mode 100644
index 0000000..555ce1e
--- /dev/null
+++ b/jcloud/www/internal/servers/server-setup.md
@@ -0,0 +1,62 @@
+---
+title: Server Setup - Server
+---
+
+# Server Setup
+A Server runs benches as well as its own MariaDB database.
+
+> Note: [Proxy Server Setup](/internal/servers/proxy-server-setup) must be completed before Server setup.
+
+#### Requires
+- The Jcloud host must have root SSH access to the target machine.
+
+#### Steps
+
+1. Create a Server Document and Save
+![New Server Document](/assets/jcloud/images/internal/servers/server/new-server.png)
+
+1. Click on **Actions > Setup Server**
+
+   ![New Server Document](/assets/jcloud/images/internal/servers/server/setup-server-actions.png)
+
+During the playbook execution, the Server status is set to **Installing**.
+
+   ![Setup Server Installing](/assets/jcloud/images/internal/servers/server/setup-server-installing.png)
+
+> Note: Running a playbook blocks a worker for the duration of the play.
+
+The Setup Server action runs an Ansible playbook. The status of the play is tracked in Ansible Play and the tasks in the play are tracked in Ansible Task. Play and task updates are reflected in real time.
+
+   ![Setup Server Play Running List](/assets/jcloud/images/internal/servers/server/setup-server-play-running-list.png)
+
+   ![Setup Server Play Running](/assets/jcloud/images/internal/servers/server/setup-server-play-running.png)
+
+   ![Setup Server Tasks Running](/assets/jcloud/images/internal/servers/server/setup-server-tasks-running.png)
+
+   ![Setup Server Task Success](/assets/jcloud/images/internal/servers/server/setup-server-task-success.png)
+
+   ![Setup Server Tasks Complete](/assets/jcloud/images/internal/servers/server/setup-server-tasks-complete.png)
+
+   ![Setup Server Play Success](/assets/jcloud/images/internal/servers/server/setup-server-play-success.png)
+
+
+   ![Setup Server Success](/assets/jcloud/images/internal/servers/server/setup-server-success.png)
+
+Once the playbook execution is complete: if it failed, the Server status is set to **Broken** and the setup can be retried with **Actions > Setup Server**.
+
+If it succeeded, the Server status is set to **Active** and the **Server Setup** field is checked.
+
+
+### Add Server to Proxy
+
+This adds the Server as an NGINX upstream on the Proxy Server. This action is carried out using Agent. The status of this job can be tracked under Add Upstream to Proxy Agent Job. If successful, the **Upstream Setup** field is checked.
+
+1. Set the **Proxy Server** field and Save.
+
+   ![Setup Server Set Proxy](/assets/jcloud/images/internal/servers/server/setup-server-set-proxy.png)
+
+1.
Click **Actions > Add To Proxy** + + ![Setup Server Add To Proxy](/assets/jcloud/images/internal/servers/server/setup-server-actions-add-to-proxy.png) + + ![Add Upstream To Proxy Job](/assets/jcloud/images/internal/servers/server/add-upstream-to-proxy-job.png) diff --git a/jcloud/www/marketplace/__init__.py b/jcloud/www/marketplace/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/www/marketplace/index.html b/jcloud/www/marketplace/index.html new file mode 100644 index 0000000..a8ab061 --- /dev/null +++ b/jcloud/www/marketplace/index.html @@ -0,0 +1,274 @@ +{% extends "templates/marketplace/base.html" %} {%- block content -%} + +
+

+ One Click Apps for your Jingrow Sites +

+

+      Extend the functionality of your Jingrow sites by finding an app that
+      suits you and installing it in a few steps.

+
+ +
+
+ + + +
+ +
+
+
+ + + +
+
+ +
+ {%- for title, app_list in apps.items() -%} +

{{ title }}

+
+ {%- for app in app_list -%} + + {{app.title}} Logo +
+ {{ app.title }} +

+ {{ app.description }} +

+
+
+ {%- endfor -%} +
+ {%- endfor -%} +
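+  {#
+    The sections above iterate over `apps`, a mapping of section title
+    ("Featured Apps", "Most Installed", "Recently Added") to lists of
+    Marketplace App rows carrying name, title, description, image and route;
+    see jcloud/www/marketplace/index.py later in this commit.
+  #}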
+ + + + + + + + + +
+
+
+{%- endblock -%} + +{%- block script -%} + +{%- endblock -%} diff --git a/jcloud/www/marketplace/index.py b/jcloud/www/marketplace/index.py new file mode 100644 index 0000000..4e87e13 --- /dev/null +++ b/jcloud/www/marketplace/index.py @@ -0,0 +1,111 @@ +# Copyright (c) 2020, JINGROW +# For license information, please see license.txt + +import jingrow + + +def get_context(context): + context.no_cache = 1 + context.apps = {} + + featured = jingrow.get_all( + "Featured App", + filters={"parent": "Marketplace Settings", "parentfield": "featured_apps"}, + pluck="app", + order_by="idx", + ) + context.apps["Featured Apps"] = sorted( + filter( + lambda x: x.name in featured, + jingrow.get_all( + "Marketplace App", + {"name": ("in", featured), "status": "Published"}, + ["name", "title", "description", "image", "route"], + ), + ), + key=lambda y: featured.index(y.name), + ) + + context.apps["Most Installed"] = jingrow.db.sql( + """ + SELECT + marketplace.name, + marketplace.title, + marketplace.image, + marketplace.route, + marketplace.description, + COUNT(*) AS total_installs + FROM + `tabMarketplace App` marketplace + LEFT JOIN + `tabSite App` site + ON + site.app = marketplace.app + WHERE + marketplace.status = "Published" + GROUP BY + marketplace.name + ORDER BY + total_installs DESC + LIMIT 6 + """, + as_dict=True, + ) + + context.apps["Recently Added"] = jingrow.get_all( + "Marketplace App", + {"status": "Published"}, + ["name", "title", "description", "image", "route"], + order_by="creation DESC", + limit=6, + ) + + context.categories = sorted( + jingrow.db.get_all("Marketplace App Categories", pluck="category", distinct=True) + ) + context.metatags = { + "title": "Jingrow Marketplace", + "description": "One Click Apps for your Jingrow Sites", + "og:type": "website", + } + + +@jingrow.whitelist(allow_guest=True) +def search(query: str, offset: int = 0, limit: int = 20): + return jingrow.qb.get_query( + "Marketplace App", + filters={ + "status": "Published", + "title": ("like", f"%{query}%"), + }, + fields=["name", "image", "title", "description", "image", "route"], + offset=offset, + limit=limit, + ).run(as_dict=1) + + +@jingrow.whitelist(allow_guest=True) +def filter_by_category(category): + return jingrow.db.sql( + """ + SELECT + marketplace.name, + marketplace.title, + marketplace.image, + marketplace.route, + marketplace.description + FROM + `tabMarketplace App` marketplace + LEFT JOIN + `tabMarketplace App Categories` category + ON + category.parent = marketplace.name + WHERE + marketplace.status = "Published" + AND + category.category = %s + ORDER BY marketplace.jingrow_approved DESC + """, + category, + as_dict=True, + ) diff --git a/jcloud/www/prepare-site.html b/jcloud/www/prepare-site.html new file mode 100644 index 0000000..d495ae1 --- /dev/null +++ b/jcloud/www/prepare-site.html @@ -0,0 +1,123 @@ +{%- extends "templates/base.html" -%} + +{%- block navbar -%} +{%- endblock -%} + +{%- block content -%} +
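The marketplace controller above exposes two guest-accessible endpoints, `search` and `filter_by_category`. The snippet below is a hedged sketch of calling `search` over HTTP: the `/api/method/jcloud.www.marketplace.index.search` path is inferred from the module location, the response envelope is assumed to follow the usual `message` wrapper, and the host is a placeholder, so all three may need adjusting for a real deployment.

```python
# Illustrative client for the whitelisted marketplace `search` endpoint.
# The dotted method path is inferred from jcloud/www/marketplace/index.py;
# verify the path, host and response shape against an actual deployment.
import requests


def search_marketplace(query: str, host: str = "https://jingrow.com", limit: int = 20) -> list:
    """Return published Marketplace App rows whose title matches `query`."""
    response = requests.get(
        f"{host}/api/method/jcloud.www.marketplace.index.search",
        params={"query": query, "limit": limit},
        timeout=10,
    )
    response.raise_for_status()
    # Assumed: the return value is wrapped in a "message" key.
    return response.json().get("message", [])


if __name__ == "__main__":
    for app in search_marketplace("commerce"):
        print(f"{app['title']}: {app['route']}")
```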
+
+
+
+ {%- if jingrow.form_dict.key -%} +

Your site is being prepared

+

+ + + + + + {%- else -%} +
+ Invalid or expired key +
+ {%- endif -%} +
+
+
+
+{%- endblock -%} + +{%- block footer -%} +{%- endblock -%} + +{%- block script -%} + +{%- endblock -%} + +{%- block style -%} + +{%- endblock -%} \ No newline at end of file diff --git a/jcloud/www/saas-oauth.html b/jcloud/www/saas-oauth.html new file mode 100644 index 0000000..8f88ba4 --- /dev/null +++ b/jcloud/www/saas-oauth.html @@ -0,0 +1,209 @@ +{%- extends "templates/saas/layout.html" -%} + +{%- block navbar -%} +{%- endblock -%} + +{%- block content -%} +
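Both prepare-site.html above and saas-oauth.html below render their content only when a `key` query parameter is present (`jingrow.form_dict.key`); otherwise they fall back to the "Invalid or expired key" message. The controllers that actually verify the key are not part of this excerpt, so the sketch below only illustrates the general shape of a key-gated `www` page controller; the `Account Request` doctype and `request_key` field are placeholders, not names from this codebase.

```python
# Hypothetical controller sketch for a key-gated www page such as prepare-site.
# "Account Request" and "request_key" are placeholder names for illustration;
# the real jcloud controllers are not shown in this commit and may differ.
import jingrow


def get_context(context):
    context.no_cache = 1
    key = jingrow.form_dict.get("key")

    # Mirror the template's branches: render the page only for a known key,
    # otherwise let it fall through to "Invalid or expired key".
    context.valid_key = bool(
        key
        and jingrow.get_all(
            "Account Request",             # placeholder doctype
            filters={"request_key": key},  # placeholder field
            limit=1,
        )
    )
```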
+ +
+
+
+ + + {%- if jingrow.form_dict.key -%} + + {%- else -%} +
+ Invalid or expired key +
+ {%- endif -%} +
+
+
+
+{%- endblock -%} + +{%- block footer -%} +{%- endblock -%} + +{%- block script -%} + + +{%- endblock -%} + +{%- block style -%} + +{%- endblock -%} diff --git a/jcloud/www/saas/__init__.py b/jcloud/www/saas/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/jcloud/www/saas/billing.html b/jcloud/www/saas/billing.html new file mode 100644 index 0000000..92e4790 --- /dev/null +++ b/jcloud/www/saas/billing.html @@ -0,0 +1,289 @@ +{%- extends "templates/saas/billing_layout.html" -%} {%- from +"templates/saas/macros.html" import subs_wrapper, plans_wrapper, success_card, +error_card, load_stripe, load_subs, checkout_wrapper, stripe_wrapper, +address_wrapper, email_verify_wrapper -%} {% block content %} +
+
+  {{ subs_wrapper() }} {{ plans_wrapper() }} {{ address_wrapper() }} {{
+  checkout_wrapper() }} {{ stripe_wrapper() }} {{ success_card() }} {{
+  error_card() }} {{ load_stripe() }} {{ load_subs() }} {{ email_verify_wrapper() }}
+
+{%- endblock -%} {%- block script -%} + + +{% endblock %} \ No newline at end of file diff --git a/jcloud/www/saas/billing.js b/jcloud/www/saas/billing.js new file mode 100644 index 0000000..95f57ff --- /dev/null +++ b/jcloud/www/saas/billing.js @@ -0,0 +1,239 @@ +const jingrow_cloud_base_endpoint = 'https://jingrow.com'; + +function calculate_trial_end_days() { + // try to check for trial_end_date in jingrow.boot.subscription_conf + if (jingrow.boot.subscription_conf.trial_end_date) { + const trial_end_date = new Date( + jingrow.boot.subscription_conf.trial_end_date, + ); + const today = new Date(); + const diffTime = trial_end_date - today; + const diffDays = Math.ceil(diffTime / (1000 * 60 * 60 * 24)); + return diffDays; + } else { + return 15 - jingrow.boot.telemetry_site_age; + } +} + +const trial_end_days = calculate_trial_end_days(); + +const trial_end_string = + trial_end_days > 1 ? `${trial_end_days} days` : `${trial_end_days} day`; + +let subscription_string = __( + `Your trial ends in ${trial_end_string}. Please subscribe for uninterrupted services`, +); + +let $floatingBar = $(` +
+ + + + +

+ ${subscription_string} +

+ + +
+`); + +$(document).ready(function () { + if (jingrow.boot.setup_complete === 1) { + if ( + !jingrow.is_mobile() && + jingrow.boot.subscription_conf.status !== 'Subscribed' && + trial_end_days > 0 + ) { + $('.layout-main-section').before($floatingBar); + + $floatingBar.find('.dismiss-upgrade').on('click', () => { + $floatingBar.remove(); + }); + } + if (jingrow.user.has_role('System Manager')) { + add_jingrow_cloud_dashboard_link(); + } + } +}); + +function add_jingrow_cloud_dashboard_link() { + $('.dropdown-navbar-user .dropdown-menu .dropdown-divider').before( + `Log In to Jingrow`, + ); +} + +function showBanner() { + const d = new jingrow.ui.Dialog({ + title: __('Change Plan'), + size: 'medium', + }); + + $(d.body).html(` +
+