更新jingrow仓库地址
Some checks failed
Agent Tests / Lint and Format (push) Has been cancelled
Agent Tests / Unit Tests (push) Has been cancelled

This commit is contained in:
jingrow 2025-08-21 18:08:53 +08:00
parent f9ac97e7fc
commit 9ca8014177
8 changed files with 2979 additions and 2979 deletions

278
.gitignore vendored
View File

@@ -1,140 +1,140 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
.python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# PEP 582; used by e.g. git.jingrow.com:3000/David-OConnor/pyflow
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# VSCode
.vscode/
# PyCharm
.idea/
# Vim
.vim/
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
.python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# PEP 582; used by e.g. git.jingrow.com/David-OConnor/pyflow
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# VSCode
.vscode/
# PyCharm
.idea/
# Vim
.vim/
.DS_Store

View File

@@ -1,27 +1,27 @@
exclude: 'node_modules|.git'
default_stages: [commit]
fail_fast: false
repos:
- repo: http://git.jingrow.com:3000/pre-commit/pre-commit-hooks
rev: v4.4.0
hooks:
- id: debug-statements
- id: trailing-whitespace
files: 'agent.*'
exclude: '.*json$|.*txt$|.*csv|.*md|.*svg'
- id: check-merge-conflict
- id: check-ast
- id: check-json
- id: check-toml
- id: check-yaml
- repo: http://git.jingrow.com:3000/astral-sh/ruff-pre-commit
# Ruff version.
rev: v0.6.5
hooks:
# Run the linter.
- id: ruff
args: [--fix]
# Run the formatter.
- id: ruff-format
exclude: 'node_modules|.git'
default_stages: [commit]
fail_fast: false
repos:
- repo: http://git.jingrow.com/pre-commit/pre-commit-hooks
rev: v4.4.0
hooks:
- id: debug-statements
- id: trailing-whitespace
files: 'agent.*'
exclude: '.*json$|.*txt$|.*csv|.*md|.*svg'
- id: check-merge-conflict
- id: check-ast
- id: check-json
- id: check-toml
- id: check-yaml
- repo: http://git.jingrow.com/astral-sh/ruff-pre-commit
# Ruff version.
rev: v0.6.5
hooks:
# Run the linter.
- id: ruff
args: [--fix]
# Run the formatter.
- id: ruff-format

View File

@@ -1,41 +1,41 @@
# Agent
## Installation
```
mkdir agent && cd agent
git clone http://git.jingrow.com:3000/jingrow/agent repo
virtualenv env
source env/bin/activate
pip install -e ./repo
cp repo/redis.conf .
cp repo/Procfile .
```
## Running
```
honcho start
```
## CLI
Agent has a CLI
([ref](http://git.jingrow.com:3000/jingrow/agent/blob/master/agent/cli.py)). You can
access this by activating the env:
```bash
# Path to your agent's Python env might be different
source ./agent/env/bin/activate
agent --help
```
Once you have activated the env, you can access the iPython console:
```bash
agent console
```
This should have the server object instantiated if it was able to find the
`config.json` file. If not you can specify the path (check `agent console --help`).
# Agent
## Installation
```
mkdir agent && cd agent
git clone http://git.jingrow.com/jingrow/agent repo
virtualenv env
source env/bin/activate
pip install -e ./repo
cp repo/redis.conf .
cp repo/Procfile .
```
## Running
```
honcho start
```
## CLI
Agent has a CLI
([ref](http://git.jingrow.com/jingrow/agent/blob/master/agent/cli.py)). You can
access this by activating the env:
```bash
# Path to your agent's Python env might be different
source ./agent/env/bin/activate
agent --help
```
Once you have activated the env, you can access the iPython console:
```bash
agent console
```
This should have the server object instantiated if it was able to find the
`config.json` file. If not you can specify the path (check `agent console --help`).

File diff suppressed because it is too large Load Diff

View File

@@ -1,259 +1,259 @@
from __future__ import annotations
# Code below copied mostly verbatim from jcloud, this is tentative and
# will be removed once build code has been moved out of jcloud.
#
# Primary source:
# http://git.jingrow.com:3000/jingrow/jcloud/blob/40859becf2976a3b6a5ac0ff79e2dff8cd2c46af/jcloud/jcloud/doctype/deploy_candidate/cache_utils.py
import os
import platform
import random
import re
import shlex
import shutil
import subprocess
from datetime import datetime
from pathlib import Path
from textwrap import dedent
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import TypedDict
class CommandOutput(TypedDict):
cwd: str
image_tag: str
returncode: int
output: str
def copy_file_from_docker_cache(
container_source: str,
host_dest: str = ".",
cache_target: str = "/home/jingrow/.cache",
) -> CommandOutput:
"""
Function is used to copy files from docker cache i.e. `cache_target/container_source`
to the host system i.e `host_dest`.
This function is required cause cache files may be available only during docker build.
This works by:
- copy the file from mount cache (image) to another_folder (image)
- create a container from image
- copy file from another_folder (container) to host system (using docker cp)
- remove container and then image
"""
filename = Path(container_source).name
container_dest_dirpath = Path(cache_target).parent / "container_dest"
container_dest_filepath = container_dest_dirpath / filename
command = f"mkdir -p {container_dest_dirpath} && " + f"cp {container_source} {container_dest_filepath}"
output = run_command_in_docker_cache(
command,
cache_target,
False,
)
if output["returncode"] == 0:
container_id = create_container(output["image_tag"])
copy_file_from_container(
container_id,
container_dest_filepath,
Path(host_dest),
)
remove_container(container_id)
run_image_rm(output["image_tag"])
return output
def run_command_in_docker_cache(
command: str = "ls -A",
cache_target: str = "/home/jingrow/.cache",
remove_image: bool = True,
) -> CommandOutput:
"""
This function works by capturing the output of the given `command`
by running it in the cache dir (`cache_target`) while building a
dummy image.
The primary purpose is to check the contents of the mounted cache. It's
an incredibly hacky way to achieve this, but afaik the only one.
Note: The `ARG CACHE_BUST=1` line is used to cause layer cache miss
while running `command` at `cache_target`. This is achieved by changing
`CACHE_BUST` value every run.
Warning: Takes time to run, use judiciously.
"""
dockerfile = get_cache_check_dockerfile(
command,
cache_target,
)
df_path = prep_dockerfile_path(dockerfile)
return run_build_command(df_path, remove_image)
def get_cache_check_dockerfile(command: str, cache_target: str) -> str:
"""
Note: Mount cache is identified by different attributes, hence it should
be the same as the Dockerfile else it will always result in a cache miss.
Ref: https://docs.docker.com/engine/reference/builder/#run---mounttypecache
"""
df = f"""
FROM ubuntu:20.04
ARG CACHE_BUST=1
WORKDIR {cache_target}
RUN --mount=type=cache,target={cache_target},uid=1000,gid=1000 {command}
"""
return dedent(df).strip()
def create_container(image_tag: str) -> str:
args = shlex.split(f"docker create --platform linux/amd64 {image_tag}")
return subprocess.run(
args,
env=os.environ.copy(),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
text=True,
).stdout.strip()
def copy_file_from_container(
container_id: str,
container_filepath: Path,
host_dest: Path,
):
container_source = f"{container_id}:{container_filepath}"
args = ["docker", "cp", container_source, host_dest.as_posix()]
proc = subprocess.run(
args,
env=os.environ.copy(),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
text=True,
)
if not proc.returncode:
print(f"file copied:\n- from {container_source}\n- to {host_dest.absolute().as_posix()}")
else:
print(proc.stdout)
def remove_container(container_id: str) -> str:
args = shlex.split(f"docker rm -v {container_id}")
return subprocess.run(
args,
env=os.environ.copy(),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
text=True,
).stdout
def prep_dockerfile_path(dockerfile: str) -> Path:
dir = Path("cache_check_dockerfile_dir")
if dir.is_dir():
shutil.rmtree(dir)
dir.mkdir()
df_path = dir / "Dockerfile"
with open(df_path, "w") as df:
df.write(dockerfile)
return df_path
def run_build_command(df_path: Path, remove_image: bool) -> CommandOutput:
command, image_tag = get_cache_check_build_command()
env = os.environ.copy()
env["DOCKER_BUILDKIT"] = "1"
env["BUILDKIT_PROGRESS"] = "plain"
output = subprocess.run(
shlex.split(command),
env=env,
cwd=df_path.parent,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
text=True,
)
if remove_image:
run_image_rm(image_tag)
return dict(
cwd=df_path.parent.absolute().as_posix(),
image_tag=image_tag,
returncode=output.returncode,
output=strip_build_output(output.stdout),
)
def get_cache_check_build_command() -> tuple[str, str]:
command = "docker build"
if platform.machine() == "arm64" and platform.system() == "Darwin" and platform.processor() == "arm":
command += "x build --platform linux/amd64"
now_ts = datetime.timestamp(datetime.today())
command += f" --build-arg CACHE_BUST={now_ts}"
image_tag = f"cache_check:id-{random.getrandbits(40):x}"
command += f" --tag {image_tag} ."
return command, image_tag
def run_image_rm(image_tag: str):
command = f"docker image rm {image_tag}"
subprocess.run(
shlex.split(command),
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
)
def strip_build_output(stdout: str) -> str:
output = []
is_output = False
line_rx = re.compile(r"^#\d+\s\d+\.\d+\s")
done_rx = re.compile(r"^#\d+\sDONE\s\d+\.\d+s$")
for line in stdout.split("\n"):
if is_output and (m := line_rx.match(line)):
start = m.end()
output.append(line[start:])
elif is_output and done_rx.search(line):
break
elif "--mount=type=cache,target=" in line:
is_output = True
return "\n".join(output)
def get_cached_apps() -> dict[str, list[str]]:
result = run_command_in_docker_cache(
command="ls -A bench/apps",
cache_target="/home/jingrow/.cache",
)
apps = dict()
if result["returncode"] != 0:
return apps
for line in result["output"].split("\n"):
# File Name: app_name-cache_key.ext
splits = line.split("-", 1)
if len(splits) != 2:
continue
app_name, suffix = splits
suffix_splits = suffix.split(".", 1)
if len(suffix_splits) != 2 or suffix_splits[1] not in ["tar", "tgz"]:
continue
if app_name not in apps:
apps[app_name] = []
app_hash = suffix_splits[0]
apps[app_name].append(app_hash)
return apps
from __future__ import annotations
# Code below copied mostly verbatim from jcloud, this is tentative and
# will be removed once build code has been moved out of jcloud.
#
# Primary source:
# http://git.jingrow.com/jingrow/jcloud/blob/40859becf2976a3b6a5ac0ff79e2dff8cd2c46af/jcloud/jcloud/doctype/deploy_candidate/cache_utils.py
import os
import platform
import random
import re
import shlex
import shutil
import subprocess
from datetime import datetime
from pathlib import Path
from textwrap import dedent
from typing import TYPE_CHECKING
if TYPE_CHECKING:
    from typing import TypedDict

    # Shape of the result returned by the docker-cache helpers below:
    # where the build ran, which throwaway image was used, the build's
    # exit code, and the captured command output.
    class CommandOutput(TypedDict):
        cwd: str
        image_tag: str
        returncode: int
        output: str
def copy_file_from_docker_cache(
    container_source: str,
    host_dest: str = ".",
    cache_target: str = "/home/jingrow/.cache",
) -> CommandOutput:
    """Copy a file from the docker mount cache to the host system.

    Copies `cache_target/container_source` out of the docker build cache
    into `host_dest` on the host. This indirection is required because
    cache files may be available only during a docker build.

    Works by:
    - copying the file from the mount cache (image) to another folder (image)
    - creating a container from that image
    - copying the file from the container to the host (`docker cp`)
    - removing the container and then the image

    Returns the `CommandOutput` of the intermediate build step.
    """
    filename = Path(container_source).name
    container_dest_dirpath = Path(cache_target).parent / "container_dest"
    container_dest_filepath = container_dest_dirpath / filename
    # Runs during the dummy build: move the file out of the cache mount so
    # it persists into the built image's filesystem.
    command = f"mkdir -p {container_dest_dirpath} && " + f"cp {container_source} {container_dest_filepath}"
    output = run_command_in_docker_cache(
        command,
        cache_target,
        False,  # keep the image: a container is created from it below
    )
    if output["returncode"] == 0:
        container_id = create_container(output["image_tag"])
        copy_file_from_container(
            container_id,
            container_dest_filepath,
            Path(host_dest),
        )
        # Clean up the scratch container and image once the file is out.
        remove_container(container_id)
        run_image_rm(output["image_tag"])
    return output
def run_command_in_docker_cache(
    command: str = "ls -A",
    cache_target: str = "/home/jingrow/.cache",
    remove_image: bool = True,
) -> CommandOutput:
    """Run `command` inside the docker mount cache and capture its output.

    Works by building a dummy image whose single RUN step mounts the cache
    at `cache_target` and executes `command` there, then scraping the
    command's output out of the build log. The primary purpose is to check
    the contents of the mounted cache. It's an incredibly hacky way to
    achieve this, but afaik the only one.

    Note: the `ARG CACHE_BUST=1` line in the dummy Dockerfile is given a
    fresh value on every run to force a layer-cache miss, so `command`
    actually re-runs at `cache_target` each time.

    Pass `remove_image=False` to keep the built image (needed when files
    must later be copied out of it).

    Warning: takes time to run, use judiciously.
    """
    dockerfile = get_cache_check_dockerfile(
        command,
        cache_target,
    )
    df_path = prep_dockerfile_path(dockerfile)
    return run_build_command(df_path, remove_image)
def get_cache_check_dockerfile(command: str, cache_target: str) -> str:
    """Render the dummy Dockerfile that runs `command` inside the cache mount.

    Note: a mount cache is identified by its attributes, so they must be
    the same as in the real Dockerfile, else the lookup always results in
    a cache miss.
    Ref: https://docs.docker.com/engine/reference/builder/#run---mounttypecache
    """
    instructions = [
        "FROM ubuntu:20.04",
        "ARG CACHE_BUST=1",
        f"WORKDIR {cache_target}",
        f"RUN --mount=type=cache,target={cache_target},uid=1000,gid=1000 {command}",
    ]
    return "\n".join(instructions)
def create_container(image_tag: str) -> str:
    """Create (without starting) a container from `image_tag`.

    Returns `docker create`'s stdout stripped, i.e. the new container id on
    success; stderr is merged into stdout, so on failure this is docker's
    error text instead.
    """
    args = shlex.split(f"docker create --platform linux/amd64 {image_tag}")
    return subprocess.run(
        args,
        env=os.environ.copy(),
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        text=True,
    ).stdout.strip()
def copy_file_from_container(
    container_id: str,
    container_filepath: Path,
    host_dest: Path,
):
    """Copy `container_filepath` out of container `container_id` into `host_dest`.

    Uses `docker cp`. On success prints a confirmation; on failure prints
    docker's combined stdout/stderr — errors are reported, never raised.
    """
    container_source = f"{container_id}:{container_filepath}"
    args = ["docker", "cp", container_source, host_dest.as_posix()]
    proc = subprocess.run(
        args,
        env=os.environ.copy(),
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        text=True,
    )
    # returncode is falsy (0) only when `docker cp` succeeded.
    if not proc.returncode:
        print(f"file copied:\n- from {container_source}\n- to {host_dest.absolute().as_posix()}")
    else:
        print(proc.stdout)
def remove_container(container_id: str) -> str:
    """Remove container `container_id` and its anonymous volumes (`-v`).

    Returns `docker rm`'s combined stdout/stderr text.
    """
    args = shlex.split(f"docker rm -v {container_id}")
    return subprocess.run(
        args,
        env=os.environ.copy(),
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        text=True,
    ).stdout
def prep_dockerfile_path(dockerfile: str) -> Path:
    """Write `dockerfile` into a fresh scratch directory and return its path.

    Recreates `cache_check_dockerfile_dir` (relative to the current working
    directory) from scratch on every call so a stale Dockerfile can never
    leak into a build.
    """
    # Renamed from `dir`, which shadowed the `dir` builtin.
    df_dir = Path("cache_check_dockerfile_dir")
    if df_dir.is_dir():
        shutil.rmtree(df_dir)
    df_dir.mkdir()
    df_path = df_dir / "Dockerfile"
    df_path.write_text(dockerfile)
    return df_path
def run_build_command(df_path: Path, remove_image: bool) -> CommandOutput:
    """Build the dummy image at `df_path` and capture the cached step's output.

    Runs the build, optionally removes the image afterwards, and returns
    the build directory, image tag, exit code, and the stripped output of
    the cache-mounted RUN step.
    """
    command, image_tag = get_cache_check_build_command()
    env = os.environ.copy()
    # BuildKit with plain progress is required so the RUN step's output
    # appears in stdout where strip_build_output() can scrape it.
    env["DOCKER_BUILDKIT"] = "1"
    env["BUILDKIT_PROGRESS"] = "plain"
    output = subprocess.run(
        shlex.split(command),
        env=env,
        cwd=df_path.parent,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        text=True,
    )
    if remove_image:
        run_image_rm(image_tag)
    return dict(
        cwd=df_path.parent.absolute().as_posix(),
        image_tag=image_tag,
        returncode=output.returncode,
        output=strip_build_output(output.stdout),
    )
def get_cache_check_build_command() -> tuple[str, str]:
    """Compose the `docker build` command line and a throwaway image tag.

    On Apple Silicon the command becomes `docker buildx build --platform
    linux/amd64` so the image architecture matches deployment. CACHE_BUST
    is set to the current timestamp to force a cache miss on the RUN layer.
    """
    on_apple_silicon = (
        platform.machine() == "arm64"
        and platform.system() == "Darwin"
        and platform.processor() == "arm"
    )
    if on_apple_silicon:
        base = "docker buildx build --platform linux/amd64"
    else:
        base = "docker build"
    now_ts = datetime.timestamp(datetime.today())
    image_tag = f"cache_check:id-{random.getrandbits(40):x}"
    command = f"{base} --build-arg CACHE_BUST={now_ts} --tag {image_tag} ."
    return command, image_tag
def run_image_rm(image_tag: str):
    """Remove the image `image_tag`, best effort; all output is discarded."""
    command = f"docker image rm {image_tag}"
    subprocess.run(
        shlex.split(command),
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
    )
def strip_build_output(stdout: str) -> str:
    """Extract the cached command's output from a plain-progress build log.

    Skips lines until the cache-mount RUN step appears, then collects the
    payload of each `#N <secs> <text>` line (progress prefix stripped)
    until that step's `#N DONE <secs>s` marker is reached.
    """
    line_rx = re.compile(r"^#\d+\s\d+\.\d+\s")
    done_rx = re.compile(r"^#\d+\sDONE\s\d+\.\d+s$")
    collected = []
    in_cache_step = False
    for line in stdout.split("\n"):
        if not in_cache_step:
            # The RUN instruction that mounts the cache marks the start.
            if "--mount=type=cache,target=" in line:
                in_cache_step = True
            continue
        prefix = line_rx.match(line)
        if prefix:
            collected.append(line[prefix.end():])
        elif done_rx.search(line):
            break
    return "\n".join(collected)
def get_cached_apps() -> dict[str, list[str]]:
    """List app archives present in the `bench/apps` mount cache.

    Returns a mapping of app name to the list of cache-key hashes found,
    parsed from file names of the form `app_name-cache_key.ext` where ext
    is `tar` or `tgz`. Returns an empty dict if listing the cache fails.
    """
    result = run_command_in_docker_cache(
        command="ls -A bench/apps",
        cache_target="/home/jingrow/.cache",
    )
    apps = dict()
    if result["returncode"] != 0:
        return apps
    for line in result["output"].split("\n"):
        # File Name: app_name-cache_key.ext
        splits = line.split("-", 1)
        if len(splits) != 2:
            continue
        app_name, suffix = splits
        suffix_splits = suffix.split(".", 1)
        # Only plain `.tar` / `.tgz` archives count as cached apps; anything
        # else (including e.g. `.tar.gz`) is skipped.
        if len(suffix_splits) != 2 or suffix_splits[1] not in ["tar", "tgz"]:
            continue
        if app_name not in apps:
            apps[app_name] = []
        app_hash = suffix_splits[0]
        apps[app_name].append(app_hash)
    return apps

View File

@@ -1,301 +1,301 @@
## Set a variable to help us decide if we need to add the
## 'Docker-Distribution-Api-Version' header.
## The registry always sets this header.
## In the case of nginx performing auth, the header is unset
## since nginx is auth-ing before proxying.
map $upstream_http_docker_distribution_api_version $docker_distribution_api_version {
'' 'registry/2.0';
}
## this is required to proxy Grafana Live WebSocket connections
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
server {
listen 443 ssl http2;
server_name {{ name }};
ssl_certificate {{ tls_directory }}/fullchain.pem;
ssl_certificate_key {{ tls_directory }}/privkey.pem;
ssl_trusted_certificate {{ tls_directory }}/chain.pem;
ssl_session_timeout 1d;
ssl_session_cache shared:MozSSL:10m; # about 40000 sessions
ssl_session_tickets off;
ssl_protocols {{ tls_protocols or 'TLSv1.3' }};
ssl_prefer_server_ciphers off;
ssl_stapling on;
ssl_stapling_verify on;
resolver 1.1.1.1 1.0.0.1 8.8.8.8 8.8.4.4 208.67.222.222 208.67.220.220 valid=60s;
resolver_timeout 2s;
# disable any limits to avoid HTTP 413 for large image uploads
client_max_body_size 0;
# required to avoid HTTP 411: see Issue #1486 (http://git.jingrow.com:3000/moby/moby/issues/1486)
chunked_transfer_encoding on;
# Allow jcloud signup pages to check browser-proxy latency
{% if jcloud_url -%}
more_set_headers "Access-Control-Allow-Origin: {{ jcloud_url }}";
{%- endif %}
location /agent/ {
proxy_http_version 1.1;
proxy_cache_bypass $http_upgrade;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Forwarded-Port $server_port;
location /agent/benches/metrics {
return 301 /metrics/rq;
}
proxy_pass http://127.0.0.1:{{ web_port }}/;
}
{% if nginx_vts_module_enabled %}
location /status {
auth_basic "NGINX VTS";
auth_basic_user_file {{ nginx_directory }}/monitoring.htpasswd;
vhost_traffic_status_display;
vhost_traffic_status_display_format html;
}
{% endif %}
location /metrics {
auth_basic "Prometheus";
auth_basic_user_file {{ nginx_directory }}/monitoring.htpasswd;
location /metrics/node {
proxy_pass http://127.0.0.1:9100/metrics;
}
location /metrics/docker {
proxy_pass http://127.0.0.1:9323/metrics;
}
location /metrics/cadvisor {
proxy_pass http://127.0.0.1:9338/metrics;
}
{% if nginx_vts_module_enabled %}
location /metrics/nginx {
vhost_traffic_status_display;
vhost_traffic_status_display_format prometheus;
}
{% endif %}
location /metrics/mariadb {
proxy_pass http://127.0.0.1:9104/metrics;
}
location /metrics/mariadb_proxy {
proxy_pass http://127.0.0.1:9104/metrics;
}
location /metrics/gunicorn {
proxy_pass http://127.0.0.1:9102/metrics;
}
location /metrics/registry {
proxy_pass http://127.0.0.1:5001/metrics;
}
location /metrics/prometheus {
proxy_pass http://127.0.0.1:9090/prometheus/metrics;
}
location /metrics/alertmanager {
proxy_pass http://127.0.0.1:9093/alertmanager/metrics;
}
location /metrics/blackbox {
proxy_pass http://127.0.0.1:9115/blackbox/metrics;
}
location /metrics/grafana {
proxy_pass http://127.0.0.1:3000/grafana/metrics;
}
location /metrics/proxysql {
proxy_pass http://127.0.0.1:6070/metrics;
}
location /metrics/elasticsearch {
proxy_pass http://127.0.0.1:9114/metrics;
}
location /metrics/rq {
proxy_pass http://127.0.0.1:{{ web_port }}/benches/metrics;
}
}
{% if registry %}
location /v2/ {
# Do not allow connections from docker 1.5 and earlier
# docker pre-1.6.0 did not properly set the user agent on ping, catch "Go *" user agents
if ($http_user_agent ~ "^(docker\/1\.(3|4|5(?!\.[0-9]-dev))|Go ).*$" ) {
return 404;
}
# To add basic authentication to v2 use auth_basic setting.
auth_basic "Registry realm";
auth_basic_user_file /home/jingrow/registry/registry.htpasswd;
## If $docker_distribution_api_version is empty, the header is not added.
## See the map directive above where this variable is defined.
add_header 'Docker-Distribution-Api-Version' $docker_distribution_api_version always;
add_header Access-Control-Allow-Origin '*';
add_header Access-Control-Allow-Credentials 'true';
add_header Access-Control-Allow-Headers 'Authorization, Accept';
add_header Access-Control-Allow-Methods 'HEAD, GET, OPTIONS, DELETE';
add_header Access-Control-Expose-Headers 'Docker-Content-Digest';
proxy_pass http://127.0.0.1:5000;
proxy_set_header Host $http_host; # required for docker client's sake
proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_read_timeout 900;
}
location / {
# To add basic authentication to v2 use auth_basic setting.
auth_basic "Registry realm";
auth_basic_user_file /home/jingrow/registry/registry.htpasswd;
proxy_pass http://127.0.0.1:6000;
proxy_set_header Host $http_host; # required for docker client's sake
proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_read_timeout 900;
}
{% elif monitor %}
location /prometheus {
auth_basic "Monitoring";
auth_basic_user_file /home/jingrow/agent/nginx/grafana.htpasswd;
proxy_pass http://127.0.0.1:9090/prometheus;
proxy_read_timeout 1500;
}
location /alertmanager {
auth_basic "Monitoring";
auth_basic_user_file /home/jingrow/agent/nginx/grafana.htpasswd;
proxy_pass http://127.0.0.1:9093/alertmanager;
}
location /blackbox {
auth_basic "Monitoring";
auth_basic_user_file /home/jingrow/agent/nginx/grafana.htpasswd;
proxy_pass http://127.0.0.1:9115/blackbox;
}
location /grafana {
auth_basic "Grafana UI";
auth_basic_user_file /home/jingrow/agent/nginx/grafana-ui.htpasswd;
proxy_pass http://127.0.0.1:3000/grafana;
location /grafana/metrics {
return 307 https://$host/metrics/grafana;
}
# Proxy Grafana Live WebSocket connections.
location /grafana/api/live/ {
rewrite ^/grafana/(.*) /$1 break;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
proxy_set_header Host $http_host;
proxy_pass http://127.0.0.1:3000/grafana;
}
}
location / {
return 307 https://$host/grafana;
}
{% elif log %}
location /kibana/ {
auth_basic "Kibana";
auth_basic_user_file /home/jingrow/agent/nginx/kibana.htpasswd;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Forwarded-Port $server_port;
proxy_pass http://127.0.0.1:5601/;
}
location /elasticsearch/ {
auth_basic "Elasticsearch";
auth_basic_user_file /home/jingrow/agent/nginx/kibana.htpasswd;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Forwarded-Port $server_port;
proxy_pass http://127.0.0.1:9200/;
}
location / {
return 307 https://$host/kibana;
}
{% elif analytics %}
location / {
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_pass http://127.0.0.1:8000/;
}
{% elif trace %}
location / {
proxy_buffer_size 32k;
proxy_buffers 8 16k;
proxy_set_header X-Forwarded-For $remote_addr;
proxy_pass http://127.0.0.1:9000/;
}
{% else %}
location / {
root {{ pages_directory }};
try_files /home.html /dev/null;
}
{% endif %}
}
## Set a variable to help us decide if we need to add the
## 'Docker-Distribution-Api-Version' header.
## The registry always sets this header.
## In the case of nginx performing auth, the header is unset
## since nginx is auth-ing before proxying.
map $upstream_http_docker_distribution_api_version $docker_distribution_api_version {
'' 'registry/2.0';
}
## this is required to proxy Grafana Live WebSocket connections
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
server {
listen 443 ssl http2;
server_name {{ name }};
ssl_certificate {{ tls_directory }}/fullchain.pem;
ssl_certificate_key {{ tls_directory }}/privkey.pem;
ssl_trusted_certificate {{ tls_directory }}/chain.pem;
ssl_session_timeout 1d;
ssl_session_cache shared:MozSSL:10m; # about 40000 sessions
ssl_session_tickets off;
ssl_protocols {{ tls_protocols or 'TLSv1.3' }};
ssl_prefer_server_ciphers off;
ssl_stapling on;
ssl_stapling_verify on;
resolver 1.1.1.1 1.0.0.1 8.8.8.8 8.8.4.4 208.67.222.222 208.67.220.220 valid=60s;
resolver_timeout 2s;
# disable any limits to avoid HTTP 413 for large image uploads
client_max_body_size 0;
# required to avoid HTTP 411: see Issue #1486 (http://git.jingrow.com/moby/moby/issues/1486)
chunked_transfer_encoding on;
# Allow jcloud signup pages to check browser-proxy latency
{% if jcloud_url -%}
more_set_headers "Access-Control-Allow-Origin: {{ jcloud_url }}";
{%- endif %}
location /agent/ {
proxy_http_version 1.1;
proxy_cache_bypass $http_upgrade;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Forwarded-Port $server_port;
location /agent/benches/metrics {
return 301 /metrics/rq;
}
proxy_pass http://127.0.0.1:{{ web_port }}/;
}
{% if nginx_vts_module_enabled %}
location /status {
auth_basic "NGINX VTS";
auth_basic_user_file {{ nginx_directory }}/monitoring.htpasswd;
vhost_traffic_status_display;
vhost_traffic_status_display_format html;
}
{% endif %}
location /metrics {
auth_basic "Prometheus";
auth_basic_user_file {{ nginx_directory }}/monitoring.htpasswd;
location /metrics/node {
proxy_pass http://127.0.0.1:9100/metrics;
}
location /metrics/docker {
proxy_pass http://127.0.0.1:9323/metrics;
}
location /metrics/cadvisor {
proxy_pass http://127.0.0.1:9338/metrics;
}
{% if nginx_vts_module_enabled %}
location /metrics/nginx {
vhost_traffic_status_display;
vhost_traffic_status_display_format prometheus;
}
{% endif %}
location /metrics/mariadb {
proxy_pass http://127.0.0.1:9104/metrics;
}
location /metrics/mariadb_proxy {
proxy_pass http://127.0.0.1:9104/metrics;
}
location /metrics/gunicorn {
proxy_pass http://127.0.0.1:9102/metrics;
}
location /metrics/registry {
proxy_pass http://127.0.0.1:5001/metrics;
}
location /metrics/prometheus {
proxy_pass http://127.0.0.1:9090/prometheus/metrics;
}
location /metrics/alertmanager {
proxy_pass http://127.0.0.1:9093/alertmanager/metrics;
}
location /metrics/blackbox {
proxy_pass http://127.0.0.1:9115/blackbox/metrics;
}
location /metrics/grafana {
proxy_pass http://127.0.0.1:3000/grafana/metrics;
}
location /metrics/proxysql {
proxy_pass http://127.0.0.1:6070/metrics;
}
location /metrics/elasticsearch {
proxy_pass http://127.0.0.1:9114/metrics;
}
location /metrics/rq {
proxy_pass http://127.0.0.1:{{ web_port }}/benches/metrics;
}
}
{% if registry %}
location /v2/ {
# Do not allow connections from docker 1.5 and earlier
# docker pre-1.6.0 did not properly set the user agent on ping, catch "Go *" user agents
if ($http_user_agent ~ "^(docker\/1\.(3|4|5(?!\.[0-9]-dev))|Go ).*$" ) {
return 404;
}
# To add basic authentication to v2 use auth_basic setting.
auth_basic "Registry realm";
auth_basic_user_file /home/jingrow/registry/registry.htpasswd;
## If $docker_distribution_api_version is empty, the header is not added.
## See the map directive above where this variable is defined.
add_header 'Docker-Distribution-Api-Version' $docker_distribution_api_version always;
add_header Access-Control-Allow-Origin '*';
add_header Access-Control-Allow-Credentials 'true';
add_header Access-Control-Allow-Headers 'Authorization, Accept';
add_header Access-Control-Allow-Methods 'HEAD, GET, OPTIONS, DELETE';
add_header Access-Control-Expose-Headers 'Docker-Content-Digest';
proxy_pass http://127.0.0.1:5000;
proxy_set_header Host $http_host; # required for docker client's sake
proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_read_timeout 900;
}
location / {
# To add basic authentication to v2 use auth_basic setting.
auth_basic "Registry realm";
auth_basic_user_file /home/jingrow/registry/registry.htpasswd;
proxy_pass http://127.0.0.1:6000;
proxy_set_header Host $http_host; # required for docker client's sake
proxy_set_header X-Real-IP $remote_addr; # pass on real client's IP
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_read_timeout 900;
}
{% elif monitor %}
location /prometheus {
auth_basic "Monitoring";
auth_basic_user_file /home/jingrow/agent/nginx/grafana.htpasswd;
proxy_pass http://127.0.0.1:9090/prometheus;
proxy_read_timeout 1500;
}
location /alertmanager {
auth_basic "Monitoring";
auth_basic_user_file /home/jingrow/agent/nginx/grafana.htpasswd;
proxy_pass http://127.0.0.1:9093/alertmanager;
}
location /blackbox {
auth_basic "Monitoring";
auth_basic_user_file /home/jingrow/agent/nginx/grafana.htpasswd;
proxy_pass http://127.0.0.1:9115/blackbox;
}
location /grafana {
auth_basic "Grafana UI";
auth_basic_user_file /home/jingrow/agent/nginx/grafana-ui.htpasswd;
proxy_pass http://127.0.0.1:3000/grafana;
location /grafana/metrics {
return 307 https://$host/metrics/grafana;
}
# Proxy Grafana Live WebSocket connections.
location /grafana/api/live/ {
rewrite ^/grafana/(.*) /$1 break;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
proxy_set_header Host $http_host;
proxy_pass http://127.0.0.1:3000/grafana;
}
}
location / {
return 307 https://$host/grafana;
}
{% elif log %}
location /kibana/ {
auth_basic "Kibana";
auth_basic_user_file /home/jingrow/agent/nginx/kibana.htpasswd;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Forwarded-Port $server_port;
proxy_pass http://127.0.0.1:5601/;
}
location /elasticsearch/ {
auth_basic "Elasticsearch";
auth_basic_user_file /home/jingrow/agent/nginx/kibana.htpasswd;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Forwarded-Port $server_port;
proxy_pass http://127.0.0.1:9200/;
}
location / {
return 307 https://$host/kibana;
}
{% elif analytics %}
location / {
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_pass http://127.0.0.1:8000/;
}
{% elif trace %}
location / {
proxy_buffer_size 32k;
proxy_buffers 8 16k;
proxy_set_header X-Forwarded-For $remote_addr;
proxy_pass http://127.0.0.1:9000/;
}
{% else %}
location / {
root {{ pages_directory }};
try_files /home.html /dev/null;
}
{% endif %}
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,22 +1,22 @@
from setuptools import find_packages, setup
with open("requirements.txt") as f:
install_requires = f.read().strip().split("\n")
setup(
name="agent",
version="0.0.0",
description="Jingrow Jcloud Agent",
url="http://git.jingrow.com:3000/jingrow/agent",
author="Jingrow Technologies",
author_email="developers@framework.jingrow.com",
packages=find_packages(),
zip_safe=False,
install_requires=install_requires,
entry_points={
"console_scripts": [
"agent = agent.cli:cli",
],
},
)
# Packaging script for the Jingrow Jcloud Agent.
from setuptools import find_packages, setup

# Runtime dependencies are maintained in requirements.txt, one per line.
with open("requirements.txt") as f:
    install_requires = f.read().strip().split("\n")

setup(
    name="agent",
    version="0.0.0",
    description="Jingrow Jcloud Agent",
    url="http://git.jingrow.com/jingrow/agent",
    author="Jingrow Technologies",
    author_email="developers@framework.jingrow.com",
    packages=find_packages(),
    zip_safe=False,
    install_requires=install_requires,
    entry_points={
        # Installs the `agent` console command, dispatching to agent.cli:cli.
        "console_scripts": [
            "agent = agent.cli:cli",
        ],
    },
)