Update jingrow repository address
Some checks failed
Agent Tests / Lint and Format (push) Has been cancelled
Agent Tests / Unit Tests (push) Has been cancelled

jingrow 2025-08-21 18:08:53 +08:00
parent f9ac97e7fc
commit 9ca8014177
8 changed files with 2979 additions and 2979 deletions
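The substance of this commit is mechanical: every URL that pointed at `git.jingrow.com:3000` now drops the explicit port. Existing clones that still reference the old address can be repointed; a minimal sketch, where the remote name `origin` is an assumption and the URL is taken from the diff below:

```python
# Repoint an existing clone at the new address. The remote name "origin"
# is an assumption (the usual default); the URL comes from the diff below.
import subprocess

subprocess.run(
    ["git", "remote", "set-url", "origin", "http://git.jingrow.com/jingrow/agent"],
    check=True,
)
```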

.gitignore

@@ -1,140 +1,140 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
.python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
-# PEP 582; used by e.g. git.jingrow.com:3000/David-OConnor/pyflow
+# PEP 582; used by e.g. git.jingrow.com/David-OConnor/pyflow
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# VSCode
.vscode/
# PyCharm
.idea/
# Vim
.vim/
.DS_Store

.pre-commit-config.yaml

@@ -1,27 +1,27 @@
exclude: 'node_modules|.git'
default_stages: [commit]
fail_fast: false

repos:
-  - repo: http://git.jingrow.com:3000/pre-commit/pre-commit-hooks
+  - repo: http://git.jingrow.com/pre-commit/pre-commit-hooks
    rev: v4.4.0
    hooks:
      - id: debug-statements
      - id: trailing-whitespace
        files: 'agent.*'
        exclude: '.*json$|.*txt$|.*csv|.*md|.*svg'
      - id: check-merge-conflict
      - id: check-ast
      - id: check-json
      - id: check-toml
      - id: check-yaml

-  - repo: http://git.jingrow.com:3000/astral-sh/ruff-pre-commit
+  - repo: http://git.jingrow.com/astral-sh/ruff-pre-commit
    # Ruff version.
    rev: v0.6.5
    hooks:
      # Run the linter.
      - id: ruff
        args: [--fix]
      # Run the formatter.
      - id: ruff-format
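To exercise this hook configuration locally, you register it as a git hook and run it once across the tree. A sketch via Python's `subprocess`, assuming the `pre-commit` CLI is installed (e.g. via `pip install pre-commit`):

```python
# Install the configured hooks into .git/hooks, then run them on all files.
# Assumes the pre-commit CLI is on PATH; this is not part of the commit itself.
import subprocess

subprocess.run(["pre-commit", "install"], check=True)
subprocess.run(["pre-commit", "run", "--all-files"], check=False)
```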

README.md

@@ -1,41 +1,41 @@
# Agent

## Installation

```
mkdir agent && cd agent
-git clone http://git.jingrow.com:3000/jingrow/agent repo
+git clone http://git.jingrow.com/jingrow/agent repo
virtualenv env
source env/bin/activate
pip install -e ./repo
cp repo/redis.conf .
cp repo/Procfile .
```

## Running

```
honcho start
```

## CLI

Agent has a CLI
-([ref](http://git.jingrow.com:3000/jingrow/agent/blob/master/agent/cli.py)). You can
+([ref](http://git.jingrow.com/jingrow/agent/blob/master/agent/cli.py)). You can
access this by activating the env:
```bash
# Path to your agent's Python env might be different
source ./agent/env/bin/activate
agent --help
```
Once you have activated the env, you can access the IPython console:
```bash
agent console
```
This should have the server object instantiated if it was able to find the
`config.json` file. If not, you can specify the path (check `agent console --help`).
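Beyond the shell, the same CLI can be driven from Python, which is convenient for tests. This sketch assumes `agent.cli:cli` is a Click command group, which the `console_scripts` entry point in setup.py below suggests but this diff does not confirm:

```python
# Assumes agent.cli:cli is a Click group (not confirmed by this diff).
from click.testing import CliRunner

from agent.cli import cli

runner = CliRunner()
result = runner.invoke(cli, ["--help"])
print(result.output)  # same text as running `agent --help` in the activated env
```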

File diff suppressed because it is too large.


@@ -1,259 +1,259 @@
from __future__ import annotations

# Code below copied mostly verbatim from jcloud, this is tentative and
# will be removed once build code has been moved out of jcloud.
#
# Primary source:
-# http://git.jingrow.com:3000/jingrow/jcloud/blob/40859becf2976a3b6a5ac0ff79e2dff8cd2c46af/jcloud/jcloud/doctype/deploy_candidate/cache_utils.py
+# http://git.jingrow.com/jingrow/jcloud/blob/40859becf2976a3b6a5ac0ff79e2dff8cd2c46af/jcloud/jcloud/doctype/deploy_candidate/cache_utils.py

import os
import platform
import random
import re
import shlex
import shutil
import subprocess
from datetime import datetime
from pathlib import Path
from textwrap import dedent
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from typing import TypedDict

    class CommandOutput(TypedDict):
        cwd: str
        image_tag: str
        returncode: int
        output: str


def copy_file_from_docker_cache(
    container_source: str,
    host_dest: str = ".",
    cache_target: str = "/home/jingrow/.cache",
) -> CommandOutput:
    """
    Copy files from the docker cache, i.e. `cache_target/container_source`,
    to the host system, i.e. `host_dest`.

    This function is required because cache files may be available only
    during a docker build.

    This works by:
    - copying the file from the mount cache (image) to another folder (image)
    - creating a container from the image
    - copying the file from that folder (container) to the host system (using docker cp)
    - removing the container and then the image
    """
    filename = Path(container_source).name
    container_dest_dirpath = Path(cache_target).parent / "container_dest"
    container_dest_filepath = container_dest_dirpath / filename
    command = f"mkdir -p {container_dest_dirpath} && " + f"cp {container_source} {container_dest_filepath}"
    output = run_command_in_docker_cache(
        command,
        cache_target,
        False,
    )
    if output["returncode"] == 0:
        container_id = create_container(output["image_tag"])
        copy_file_from_container(
            container_id,
            container_dest_filepath,
            Path(host_dest),
        )
        remove_container(container_id)
        run_image_rm(output["image_tag"])
    return output


def run_command_in_docker_cache(
    command: str = "ls -A",
    cache_target: str = "/home/jingrow/.cache",
    remove_image: bool = True,
) -> CommandOutput:
    """
    This function works by capturing the output of the given `command`
    by running it in the cache dir (`cache_target`) while building a
    dummy image.

    The primary purpose is to check the contents of the mounted cache. It's
    an incredibly hacky way to achieve this, but afaik the only one.

    Note: The `ARG CACHE_BUST=1` line is used to cause a layer cache miss
    while running `command` at `cache_target`. This is achieved by changing
    the `CACHE_BUST` value every run.

    Warning: Takes time to run, use judiciously.
    """
    dockerfile = get_cache_check_dockerfile(
        command,
        cache_target,
    )
    df_path = prep_dockerfile_path(dockerfile)
    return run_build_command(df_path, remove_image)


def get_cache_check_dockerfile(command: str, cache_target: str) -> str:
    """
    Note: A mount cache is identified by several attributes, hence they should
    be the same as in the original Dockerfile, else it will always result in a
    cache miss.

    Ref: https://docs.docker.com/engine/reference/builder/#run---mounttypecache
    """
    df = f"""
    FROM ubuntu:20.04
    ARG CACHE_BUST=1
    WORKDIR {cache_target}
    RUN --mount=type=cache,target={cache_target},uid=1000,gid=1000 {command}
    """
    return dedent(df).strip()


def create_container(image_tag: str) -> str:
    args = shlex.split(f"docker create --platform linux/amd64 {image_tag}")
    return subprocess.run(
        args,
        env=os.environ.copy(),
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        text=True,
    ).stdout.strip()


def copy_file_from_container(
    container_id: str,
    container_filepath: Path,
    host_dest: Path,
):
    container_source = f"{container_id}:{container_filepath}"
    args = ["docker", "cp", container_source, host_dest.as_posix()]
    proc = subprocess.run(
        args,
        env=os.environ.copy(),
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        text=True,
    )
    if not proc.returncode:
        print(f"file copied:\n- from {container_source}\n- to {host_dest.absolute().as_posix()}")
    else:
        print(proc.stdout)


def remove_container(container_id: str) -> str:
    args = shlex.split(f"docker rm -v {container_id}")
    return subprocess.run(
        args,
        env=os.environ.copy(),
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        text=True,
    ).stdout


def prep_dockerfile_path(dockerfile: str) -> Path:
    dir = Path("cache_check_dockerfile_dir")
    if dir.is_dir():
        shutil.rmtree(dir)
    dir.mkdir()
    df_path = dir / "Dockerfile"
    with open(df_path, "w") as df:
        df.write(dockerfile)
    return df_path


def run_build_command(df_path: Path, remove_image: bool) -> CommandOutput:
    command, image_tag = get_cache_check_build_command()
    env = os.environ.copy()
    env["DOCKER_BUILDKIT"] = "1"
    env["BUILDKIT_PROGRESS"] = "plain"
    output = subprocess.run(
        shlex.split(command),
        env=env,
        cwd=df_path.parent,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        text=True,
    )
    if remove_image:
        run_image_rm(image_tag)
    return dict(
        cwd=df_path.parent.absolute().as_posix(),
        image_tag=image_tag,
        returncode=output.returncode,
        output=strip_build_output(output.stdout),
    )


def get_cache_check_build_command() -> tuple[str, str]:
    command = "docker build"
    if platform.machine() == "arm64" and platform.system() == "Darwin" and platform.processor() == "arm":
        command += "x build --platform linux/amd64"
    now_ts = datetime.timestamp(datetime.today())
    command += f" --build-arg CACHE_BUST={now_ts}"
    image_tag = f"cache_check:id-{random.getrandbits(40):x}"
    command += f" --tag {image_tag} ."
    return command, image_tag


def run_image_rm(image_tag: str):
    command = f"docker image rm {image_tag}"
    subprocess.run(
        shlex.split(command),
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
    )


def strip_build_output(stdout: str) -> str:
    output = []
    is_output = False
    line_rx = re.compile(r"^#\d+\s\d+\.\d+\s")
    done_rx = re.compile(r"^#\d+\sDONE\s\d+\.\d+s$")
    for line in stdout.split("\n"):
        if is_output and (m := line_rx.match(line)):
            start = m.end()
            output.append(line[start:])
        elif is_output and done_rx.search(line):
            break
        elif "--mount=type=cache,target=" in line:
            is_output = True
    return "\n".join(output)


def get_cached_apps() -> dict[str, list[str]]:
    result = run_command_in_docker_cache(
        command="ls -A bench/apps",
        cache_target="/home/jingrow/.cache",
    )
    apps = dict()
    if result["returncode"] != 0:
        return apps
    for line in result["output"].split("\n"):
        # File name: app_name-cache_key.ext
        splits = line.split("-", 1)
        if len(splits) != 2:
            continue
        app_name, suffix = splits
        suffix_splits = suffix.split(".", 1)
        if len(suffix_splits) != 2 or suffix_splits[1] not in ["tar", "tgz"]:
            continue
        if app_name not in apps:
            apps[app_name] = []
        app_hash = suffix_splits[0]
        apps[app_name].append(app_hash)
    return apps
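A minimal usage sketch for the module above. The import path `cache_utils` is hypothetical (this page does not show where the file lives), and Docker with BuildKit must be available on the host:

```python
# Usage sketch; `cache_utils` as an import path is an assumption.
from cache_utils import get_cached_apps, run_command_in_docker_cache

# Builds a throwaway image whose only job is to run `ls -A` inside the
# mounted cache, then scrapes the command's output out of the BuildKit
# build log (see strip_build_output above).
result = run_command_in_docker_cache(command="ls -A", cache_target="/home/jingrow/.cache")
if result["returncode"] == 0:
    print(result["output"])

# Parses cache entries named app_name-cache_key.tar/.tgz into
# {app_name: [cache_key, ...]}.
print(get_cached_apps())
```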


@@ -1,301 +1,301 @@
## Set a variable to help us decide if we need to add the
## 'Docker-Distribution-Api-Version' header.
## The registry always sets this header.
## In the case of nginx performing auth, the header is unset
## since nginx is auth-ing before proxying.
map $upstream_http_docker_distribution_api_version $docker_distribution_api_version {
    '' 'registry/2.0';
}

## this is required to proxy Grafana Live WebSocket connections
map $http_upgrade $connection_upgrade {
    default upgrade;
    '' close;
}

server {
    listen 443 ssl http2;
    server_name {{ name }};

    ssl_certificate {{ tls_directory }}/fullchain.pem;
    ssl_certificate_key {{ tls_directory }}/privkey.pem;
    ssl_trusted_certificate {{ tls_directory }}/chain.pem;

    ssl_session_timeout 1d;
    ssl_session_cache shared:MozSSL:10m;  # about 40000 sessions
    ssl_session_tickets off;

    ssl_protocols {{ tls_protocols or 'TLSv1.3' }};
    ssl_prefer_server_ciphers off;

    ssl_stapling on;
    ssl_stapling_verify on;

    resolver 1.1.1.1 1.0.0.1 8.8.8.8 8.8.4.4 208.67.222.222 208.67.220.220 valid=60s;
    resolver_timeout 2s;

    # disable any limits to avoid HTTP 413 for large image uploads
    client_max_body_size 0;

-   # required to avoid HTTP 411: see Issue #1486 (http://git.jingrow.com:3000/moby/moby/issues/1486)
+   # required to avoid HTTP 411: see Issue #1486 (http://git.jingrow.com/moby/moby/issues/1486)
    chunked_transfer_encoding on;

    # Allow jcloud signup pages to check browser-proxy latency
    {% if jcloud_url -%}
    more_set_headers "Access-Control-Allow-Origin: {{ jcloud_url }}";
    {%- endif %}

    location /agent/ {
        proxy_http_version 1.1;
        proxy_cache_bypass $http_upgrade;

        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        proxy_set_header host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $remote_addr;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header X-Forwarded-Host $host;
        proxy_set_header X-Forwarded-Port $server_port;

        location /agent/benches/metrics {
            return 301 /metrics/rq;
        }

        proxy_pass http://127.0.0.1:{{ web_port }}/;
    }

    {% if nginx_vts_module_enabled %}
    location /status {
        auth_basic "NGINX VTS";
        auth_basic_user_file {{ nginx_directory }}/monitoring.htpasswd;
        vhost_traffic_status_display;
        vhost_traffic_status_display_format html;
    }
    {% endif %}

    location /metrics {
        auth_basic "Prometheus";
        auth_basic_user_file {{ nginx_directory }}/monitoring.htpasswd;

        location /metrics/node {
            proxy_pass http://127.0.0.1:9100/metrics;
        }

        location /metrics/docker {
            proxy_pass http://127.0.0.1:9323/metrics;
        }

        location /metrics/cadvisor {
            proxy_pass http://127.0.0.1:9338/metrics;
        }

        {% if nginx_vts_module_enabled %}
        location /metrics/nginx {
            vhost_traffic_status_display;
            vhost_traffic_status_display_format prometheus;
        }
        {% endif %}

        location /metrics/mariadb {
            proxy_pass http://127.0.0.1:9104/metrics;
        }

        location /metrics/mariadb_proxy {
            proxy_pass http://127.0.0.1:9104/metrics;
        }

        location /metrics/gunicorn {
            proxy_pass http://127.0.0.1:9102/metrics;
        }

        location /metrics/registry {
            proxy_pass http://127.0.0.1:5001/metrics;
        }

        location /metrics/prometheus {
            proxy_pass http://127.0.0.1:9090/prometheus/metrics;
        }

        location /metrics/alertmanager {
            proxy_pass http://127.0.0.1:9093/alertmanager/metrics;
        }

        location /metrics/blackbox {
            proxy_pass http://127.0.0.1:9115/blackbox/metrics;
        }

        location /metrics/grafana {
            proxy_pass http://127.0.0.1:3000/grafana/metrics;
        }

        location /metrics/proxysql {
            proxy_pass http://127.0.0.1:6070/metrics;
        }

        location /metrics/elasticsearch {
            proxy_pass http://127.0.0.1:9114/metrics;
        }

        location /metrics/rq {
            proxy_pass http://127.0.0.1:{{ web_port }}/benches/metrics;
        }
    }

    {% if registry %}
    location /v2/ {
        # Do not allow connections from docker 1.5 and earlier
        # docker pre-1.6.0 did not properly set the user agent on ping, catch "Go *" user agents
        if ($http_user_agent ~ "^(docker\/1\.(3|4|5(?!\.[0-9]-dev))|Go ).*$" ) {
            return 404;
        }

        # To add basic authentication to v2 use auth_basic setting.
        auth_basic "Registry realm";
        auth_basic_user_file /home/jingrow/registry/registry.htpasswd;

        ## If $docker_distribution_api_version is empty, the header is not added.
        ## See the map directive above where this variable is defined.
        add_header 'Docker-Distribution-Api-Version' $docker_distribution_api_version always;
        add_header Access-Control-Allow-Origin '*';
        add_header Access-Control-Allow-Credentials 'true';
        add_header Access-Control-Allow-Headers 'Authorization, Accept';
        add_header Access-Control-Allow-Methods 'HEAD, GET, OPTIONS, DELETE';
        add_header Access-Control-Expose-Headers 'Docker-Content-Digest';

        proxy_pass http://127.0.0.1:5000;
        proxy_set_header Host $http_host;  # required for docker client's sake
        proxy_set_header X-Real-IP $remote_addr;  # pass on real client's IP
        proxy_set_header X-Forwarded-For $remote_addr;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_read_timeout 900;
    }

    location / {
        # To add basic authentication to v2 use auth_basic setting.
        auth_basic "Registry realm";
        auth_basic_user_file /home/jingrow/registry/registry.htpasswd;

        proxy_pass http://127.0.0.1:6000;
        proxy_set_header Host $http_host;  # required for docker client's sake
        proxy_set_header X-Real-IP $remote_addr;  # pass on real client's IP
        proxy_set_header X-Forwarded-For $remote_addr;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_read_timeout 900;
    }
    {% elif monitor %}
    location /prometheus {
        auth_basic "Monitoring";
        auth_basic_user_file /home/jingrow/agent/nginx/grafana.htpasswd;
        proxy_pass http://127.0.0.1:9090/prometheus;
        proxy_read_timeout 1500;
    }

    location /alertmanager {
        auth_basic "Monitoring";
        auth_basic_user_file /home/jingrow/agent/nginx/grafana.htpasswd;
        proxy_pass http://127.0.0.1:9093/alertmanager;
    }

    location /blackbox {
        auth_basic "Monitoring";
        auth_basic_user_file /home/jingrow/agent/nginx/grafana.htpasswd;
        proxy_pass http://127.0.0.1:9115/blackbox;
    }

    location /grafana {
        auth_basic "Grafana UI";
        auth_basic_user_file /home/jingrow/agent/nginx/grafana-ui.htpasswd;
        proxy_pass http://127.0.0.1:3000/grafana;

        location /grafana/metrics {
            return 307 https://$host/metrics/grafana;
        }

        # Proxy Grafana Live WebSocket connections.
        location /grafana/api/live/ {
            rewrite ^/grafana/(.*) /$1 break;
            proxy_http_version 1.1;
            proxy_set_header Upgrade $http_upgrade;
            proxy_set_header Connection $connection_upgrade;
            proxy_set_header Host $http_host;
            proxy_pass http://127.0.0.1:3000/grafana;
        }
    }

    location / {
        return 307 https://$host/grafana;
    }
    {% elif log %}
    location /kibana/ {
        auth_basic "Kibana";
        auth_basic_user_file /home/jingrow/agent/nginx/kibana.htpasswd;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        proxy_set_header host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $remote_addr;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header X-Forwarded-Host $host;
        proxy_set_header X-Forwarded-Port $server_port;
        proxy_pass http://127.0.0.1:5601/;
    }

    location /elasticsearch/ {
        auth_basic "Elasticsearch";
        auth_basic_user_file /home/jingrow/agent/nginx/kibana.htpasswd;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
        proxy_set_header host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $remote_addr;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header X-Forwarded-Host $host;
        proxy_set_header X-Forwarded-Port $server_port;
        proxy_pass http://127.0.0.1:9200/;
    }

    location / {
        return 307 https://$host/kibana;
    }
    {% elif analytics %}
    location / {
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_pass http://127.0.0.1:8000/;
    }
    {% elif trace %}
    location / {
        proxy_buffer_size 32k;
        proxy_buffers 8 16k;
        proxy_set_header X-Forwarded-For $remote_addr;
        proxy_pass http://127.0.0.1:9000/;
    }
    {% else %}
    location / {
        root {{ pages_directory }};
        try_files /home.html /dev/null;
    }
    {% endif %}
}
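For context, the `map` directive at the top of this config exists so clients always receive the `Docker-Distribution-Api-Version` header even when nginx answers the auth challenge itself and the upstream header is unset. A hypothetical smoke test against the `{% if registry %}` branch, where the host name and credentials are assumptions, not values from this commit:

```python
# Hypothetical smoke test of the registry endpoint behind this proxy.
import requests

resp = requests.get(
    "https://registry.example.com/v2/",
    auth=("user", "password"),  # whatever registry.htpasswd actually contains
    timeout=5,
)
print(resp.status_code)
# The map falls back to 'registry/2.0' when the upstream header was unset.
print(resp.headers.get("Docker-Distribution-Api-Version"))
```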

File diff suppressed because it is too large.

setup.py

@@ -1,22 +1,22 @@
from setuptools import find_packages, setup

with open("requirements.txt") as f:
    install_requires = f.read().strip().split("\n")

setup(
    name="agent",
    version="0.0.0",
    description="Jingrow Jcloud Agent",
-   url="http://git.jingrow.com:3000/jingrow/agent",
+   url="http://git.jingrow.com/jingrow/agent",
    author="Jingrow Technologies",
    author_email="developers@framework.jingrow.com",
    packages=find_packages(),
    zip_safe=False,
    install_requires=install_requires,
    entry_points={
        "console_scripts": [
            "agent = agent.cli:cli",
        ],
    },
)
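The `console_scripts` entry point above is what makes the `agent` command from the README available: on install, pip generates a small launcher that imports `agent.cli` and calls `cli`. Roughly equivalent to this sketch:

```python
# Rough equivalent of the `agent` executable that pip generates from the
# console_scripts entry point declared above.
import sys

from agent.cli import cli

if __name__ == "__main__":
    sys.exit(cli())
```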