Compare commits

No commits in common. "main" and "erc721-stack-syntax" have entirely different histories.

256 changed files with 493 additions and 7698 deletions

@@ -1,27 +0,0 @@
name: Fixturenet-Eth-Test
on:
push:
branches: 'ci-test'
jobs:
test:
name: "Run an Ethereum fixturenet test"
runs-on: ubuntu-latest
steps:
- name: "Clone project repository"
uses: actions/checkout@v3
- name: "Install Python"
uses: cerc-io/setup-python@v4
with:
python-version: '3.8'
- name: "Print Python version"
run: python3 --version
- name: "Install shiv"
run: pip install shiv
- name: "Generate build version file"
run: ./scripts/create_build_tag_file.sh
- name: "Build local shiv package"
run: ./scripts/build_shiv_package.sh
- name: "Run fixturenet-eth tests"
run: ./tests/fixturenet-eth/run-test.sh
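
For reference, the same steps can be reproduced outside CI; a minimal sketch, assuming a checkout of the repo with `python3` (>= 3.8) and `pip` on the `PATH`:

```bash
# Mirror the CI steps locally: install shiv, stamp a build tag,
# build the self-contained laconic-so package, then run the test.
pip install shiv
./scripts/create_build_tag_file.sh
./scripts/build_shiv_package.sh
./tests/fixturenet-eth/run-test.sh
```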

@@ -1,46 +0,0 @@
name: Publish
on:
push:
branches:
- main
- publish-test
jobs:
publish:
name: "Build and publish"
runs-on: ubuntu-latest
steps:
- name: "Clone project repository"
uses: actions/checkout@v3
- name: "Get build info"
id: build-info
run: |
build_tag=$(./scripts/create_build_tag_file.sh)
echo "build-tag=v${build_tag}" >> $GITHUB_OUTPUT
- name: "Install Python"
uses: cerc-io/setup-python@v4
with:
python-version: '3.8'
- name: "Print Python version"
run: python3 --version
- name: "Install shiv"
run: pip install shiv
- name: "Build local shiv package"
id: build
run: |
./scripts/build_shiv_package.sh
result_code=$?
echo "package-file=$(ls ./package/*)" >> $GITHUB_OUTPUT
exit $result_code
- name: "Stage artifact file"
run: |
cp ${{ steps.build.outputs.package-file }} ./laconic-so
- name: "Create release"
uses: cerc-io/action-gh-release@gitea-v1
with:
tag_name: ${{ steps.build-info.outputs.build-tag }}
# On the publish test branch, mark our release as a draft
# Hack using endsWith to workaround Gitea sometimes sending "publish-test" vs "refs/heads/publish-test"
draft: ${{ endsWith('publish-test', github.ref ) }}
files: ./laconic-so
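
The `build-info` and `build` steps above pass values to later steps by appending `key=value` lines to the file named by `$GITHUB_OUTPUT`, which the Actions runner exposes as `steps.<step-id>.outputs.<key>`; a minimal sketch of the write side:

```bash
# Inside a step's run: block.
build_tag=$(./scripts/create_build_tag_file.sh)
echo "build-tag=v${build_tag}" >> "$GITHUB_OUTPUT"
echo "package-file=$(ls ./package/*)" >> "$GITHUB_OUTPUT"
```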

@@ -1,39 +0,0 @@
name: Deploy Test
on:
pull_request:
branches: '*'
push:
branches:
- main
- ci-test
# Needed until we can incorporate docker startup into the executor container
env:
DOCKER_HOST: unix:///var/run/dind.sock
jobs:
test:
name: "Run deploy test suite"
runs-on: ubuntu-latest
steps:
- name: "Clone project repository"
uses: actions/checkout@v3
- name: "Install Python"
uses: cerc-io/setup-python@v4
with:
python-version: '3.8'
- name: "Print Python version"
run: python3 --version
- name: "Install shiv"
run: pip install shiv
- name: "Generate build version file"
run: ./scripts/create_build_tag_file.sh
- name: "Build local shiv package"
run: ./scripts/build_shiv_package.sh
- name: Start dockerd # Also needed until we can incorporate into the executor
run: |
dockerd -H $DOCKER_HOST --userland-proxy=false &
sleep 5
- name: "Run deploy tests"
run: ./tests/deploy/run-deploy-test.sh
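
Since the executor container does not yet start Docker itself, the workflow launches `dockerd` against a side socket and then sleeps. A sketch of a slightly more robust variant that polls for daemon readiness instead of sleeping a fixed 5 seconds (same socket path as `DOCKER_HOST` above):

```bash
export DOCKER_HOST=unix:///var/run/dind.sock
dockerd -H "$DOCKER_HOST" --userland-proxy=false &
# Wait until the daemon actually answers API calls.
until docker info >/dev/null 2>&1; do
  sleep 1
done
./tests/deploy/run-deploy-test.sh
```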

@@ -1,39 +0,0 @@
name: Smoke Test
on:
pull_request:
branches: '*'
push:
branches:
- main
- ci-test
# Needed until we can incorporate docker startup into the executor container
env:
DOCKER_HOST: unix:///var/run/dind.sock
jobs:
test:
name: "Run basic test suite"
runs-on: ubuntu-latest
steps:
- name: "Clone project repository"
uses: actions/checkout@v3
- name: "Install Python"
uses: cerc-io/setup-python@v4
with:
python-version: '3.8'
- name: "Print Python version"
run: python3 --version
- name: "Install shiv"
run: pip install shiv
- name: "Generate build version file"
run: ./scripts/create_build_tag_file.sh
- name: "Build local shiv package"
run: ./scripts/build_shiv_package.sh
- name: Start dockerd # Also needed until we can incorporate into the executor
run: |
dockerd -H $DOCKER_HOST --userland-proxy=false &
sleep 5
- name: "Run smoke tests"
run: ./tests/smoke-test/run-smoke-test.sh

@@ -1,46 +0,0 @@
name: Publish
on:
push:
branches:
- main
- publish-test
jobs:
publish:
name: "Build and publish"
runs-on: ubuntu-latest
steps:
- name: "Clone project repository"
uses: actions/checkout@v3
- name: "Get build info"
id: build-info
run: |
build_tag=$(./scripts/create_build_tag_file.sh)
echo "build-tag=v${build_tag}" >> $GITHUB_OUTPUT
- name: "Install Python"
uses: actions/setup-python@v4
with:
python-version: '3.8'
- name: "Print Python version"
run: python3 --version
- name: "Install shiv"
run: pip install shiv
- name: "Build local shiv package"
id: build
run: |
./scripts/build_shiv_package.sh
result_code=$?
echo "package-file=$(ls ./package/*)" >> $GITHUB_OUTPUT
exit $result_code
- name: "Stage artifact file"
run: |
cp ${{ steps.build.outputs.package-file }} ./laconic-so
- name: "Create release"
uses: softprops/action-gh-release@v1
with:
tag_name: ${{ steps.build-info.outputs.build-tag }}
# On the publish test branch, mark our release as a draft
# Hack using endsWith to workaround Gitea sometimes sending "publish-test" vs "refs/heads/publish-test"
draft: ${{ endsWith('publish-test', github.ref ) }}
files: ./laconic-so

@@ -1,29 +0,0 @@
name: Deploy Test
on:
pull_request:
branches: '*'
push:
branches: '*'
jobs:
test:
name: "Run deploy test suite"
runs-on: ubuntu-latest
steps:
- name: "Clone project repository"
uses: actions/checkout@v3
- name: "Install Python"
uses: actions/setup-python@v4
with:
python-version: '3.8'
- name: "Print Python version"
run: python3 --version
- name: "Install shiv"
run: pip install shiv
- name: "Generate build version file"
run: ./scripts/create_build_tag_file.sh
- name: "Build local shiv package"
run: ./scripts/build_shiv_package.sh
- name: "Run deploy tests"
run: ./tests/deploy/run-deploy-test.sh

@@ -1,29 +0,0 @@
name: Smoke Test
on:
pull_request:
branches: '*'
push:
branches: '*'
jobs:
test:
name: "Run basic test suite"
runs-on: ubuntu-latest
steps:
- name: "Clone project repository"
uses: actions/checkout@v3
- name: "Install Python"
uses: actions/setup-python@v4
with:
python-version: '3.8'
- name: "Print Python version"
run: python3 --version
- name: "Install shiv"
run: pip install shiv
- name: "Generate build version file"
run: ./scripts/create_build_tag_file.sh
- name: "Build local shiv package"
run: ./scripts/build_shiv_package.sh
- name: "Run smoke tests"
run: ./tests/smoke-test/run-smoke-test.sh

.gitignore
@@ -5,6 +5,4 @@ laconic-so
laconic_stack_orchestrator.egg-info
__pycache__
*~
package
app/data/build_tag.txt
build

@@ -1,25 +1,18 @@
# Stack Orchestrator
Stack Orchestrator allows building and deployment of a Laconic Stack on a single machine with minimal prerequisites. It is a Python3 CLI tool that runs on any OS with Python3 and Docker. The following diagram summarizes the relevant repositories in the Laconic Stack and their relationship to Stack Orchestrator.
![The Stack](/docs/images/laconic-stack.png)
## Install
**To get started quickly** on a fresh Ubuntu instance (e.g., Digital Ocean), [try this script](./scripts/quick-install-ubuntu.sh). **WARNING:** always review scripts prior to running them so that you know what is happening on your machine.
For any other installation, follow along below and **adapt these instructions based on the specifics of your system.**
Ensure that the following are already installed:
- [Python3](https://wiki.python.org/moin/BeginnersGuide/Download): `python3 --version` >= `3.8.10` (the Python3 shipped in Ubuntu 20+ is good to go)
- [Python3](https://wiki.python.org/moin/BeginnersGuide/Download): `python3 --version` >= `3.10.8`
- [Docker](https://docs.docker.com/get-docker/): `docker --version` >= `20.10.21`
- [jq](https://stedolan.github.io/jq/download/): `jq --version` >= `1.5`
- [Docker Compose](https://docs.docker.com/compose/install/): `docker-compose --version` >= `2.13.0`
Note: if installing docker-compose via package manager on Linux (as opposed to Docker Desktop), you must [install the plugin](https://docs.docker.com/compose/install/linux/#install-the-plugin-manually), e.g. :
Note: if installing docker-compose via package manager (as opposed to Docker Desktop), you must [install the plugin](https://docs.docker.com/compose/install/linux/#install-the-plugin-manually), e.g., on Linux:
```bash
mkdir -p ~/.docker/cli-plugins
@@ -27,38 +20,80 @@ curl -SL https://github.com/docker/compose/releases/download/v2.11.2/docker-comp
chmod +x ~/.docker/cli-plugins/docker-compose
```
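
To confirm the plugin landed where the Docker CLI can find it, a quick check (plugin installs are invoked through `docker` itself, with no hyphen):

```bash
docker compose version
```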
Next decide on a directory where you would like to put the stack-orchestrator program. Typically this would be
a "user" binary directory such as `~/bin` or perhaps `/usr/local/laconic` or possibly just the current working directory.
Now, having selected that directory, download the latest release from [this page](https://github.com/cerc-io/stack-orchestrator/tags) into it (we're using `~/bin` below for concreteness but edit to suit if you selected a different directory). Also be sure that the destination directory exists and is writable:
Next, download the latest release from [this page](https://github.com/cerc-io/stack-orchestrator/tags), into a suitable directory (e.g. `~/bin`):
```bash
curl -L -o ~/bin/laconic-so https://github.com/cerc-io/stack-orchestrator/releases/latest/download/laconic-so
```
Give it execute permissions:
Give it permissions:
```bash
chmod +x ~/bin/laconic-so
```
Ensure `laconic-so` is on the [`PATH`](https://unix.stackexchange.com/a/26059).
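For example, if `~/bin` is not already on the `PATH` (bash assumed; add the line to `~/.bashrc` or similar to persist it):

```bash
export PATH="$HOME/bin:$PATH"
```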
Verify operation (your version will probably be different, just check here that you see some version output and not an error):
Verify operation:
```
laconic-so version
Version: 1.1.0-7a607c2-202304260513
laconic-so --help
Usage: python -m laconic-so [OPTIONS] COMMAND [ARGS]...
Laconic Stack Orchestrator
Options:
--quiet
--verbose
--dry-run
--local-stack
-h, --help Show this message and exit.
Commands:
build-containers build the set of containers required for a complete...
build-npms build the set of npm packages required for a...
deploy-system deploy a stack
setup-repositories git clone the set of repositories required to build...
```
## Usage
The various [stacks](/app/data/stacks) each contain instructions for running different stacks based on your use case. For example:
Three sub-commands, `setup-repositories`, `build-containers` and `deploy-system`, are generally run in order. The following is a slim example for standing up the `erc20-watcher`; go further with the [erc20 watcher demo](/app/data/stacks/erc20) and other pieces of the stack within the [`stacks` directory](/app/data/stacks).
- [self-hosted Gitea](/app/data/stacks/build-support)
- [an Optimism Fixturenet](/app/data/stacks/fixturenet-optimism)
- [laconicd with console and CLI](app/data/stacks/fixturenet-laconic-loaded)
- [kubo (IPFS)](app/data/stacks/kubo)
### Setup Repositories
Clone the set of git repositories necessary to build a system:
```bash
laconic-so --verbose setup-repositories --include cerc-io/go-ethereum,cerc-io/ipld-eth-db,cerc-io/ipld-eth-server,cerc-io/watcher-ts
```
This will default to `~/cerc`, or to the directory given in the `CERC_REPO_BASE_DIR` environment variable if set.
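For example, to clone into a custom location instead of `~/cerc` (the path below is illustrative):

```bash
CERC_REPO_BASE_DIR=/srv/cerc laconic-so --verbose setup-repositories --include cerc-io/go-ethereum
```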
### Build Containers
Build the set of docker container images required to run a system. It takes around 10 minutes to build all the containers from scratch.
```bash
laconic-so --verbose build-containers --include cerc/go-ethereum,cerc/go-ethereum-foundry,cerc/ipld-eth-db,cerc/ipld-eth-server,cerc/watcher-erc20
```
### Deploy System
Uses `docker-compose` to deploy a system (with most recently built container images).
```bash
laconic-so --verbose deploy-system --include ipld-eth-db,go-ethereum-foundry,ipld-eth-server,watcher-erc20 up
```
Check out the GraphQL playground here: [http://localhost:3002/graphql](http://localhost:3002/graphql)
See the [erc20 watcher demo](/app/data/stacks/erc20) to continue further.
### Cleanup
```bash
laconic-so --verbose deploy-system --include ipld-eth-db,go-ethereum-foundry,ipld-eth-server,watcher-erc20 down
```
## Contributing
@@ -68,4 +103,3 @@ See the [CONTRIBUTING.md](/docs/CONTRIBUTING.md) for developer mode install.
Native aarch64 is _not_ currently supported. x64 emulation on ARM64 macOS should work (not yet tested).

@@ -69,3 +69,9 @@ class package_registry_stack(base_stack):
def get_url(self):
return self.url
# Temporary helper functions while we figure out a good interface to the stack deploy code
def _get_stack_mapped_port(stack, service, exposed_port):
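    # NOTE: stub implementation -- ignores its arguments and always returns
    # 3000 (the Gitea default port) until the real stack deploy interface exists.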
return 3000

@@ -36,16 +36,13 @@ from .util import include_exclude_check, get_parsed_stack_config
@click.command()
@click.option('--include', help="only build these containers")
@click.option('--exclude', help="don\'t build these containers")
@click.option("--force-rebuild", is_flag=True, default=False, help="Override dependency checking -- always rebuild")
@click.option("--extra-build-args", help="Supply extra arguments to build")
@click.pass_context
def command(ctx, include, exclude, force_rebuild, extra_build_args):
def command(ctx, include, exclude):
'''build the set of containers required for a complete stack'''
quiet = ctx.obj.quiet
verbose = ctx.obj.verbose
dry_run = ctx.obj.dry_run
debug = ctx.obj.debug
local_stack = ctx.obj.local_stack
stack = ctx.obj.stack
continue_on_error = ctx.obj.continue_on_error
@@ -84,20 +81,10 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args):
# TODO: make this configurable
container_build_env = {
"CERC_NPM_REGISTRY_URL": config("CERC_NPM_REGISTRY_URL", default="http://gitea.local:3000/api/packages/cerc-io/npm/"),
"CERC_NPM_AUTH_TOKEN": config("CERC_NPM_AUTH_TOKEN", default=""),
"CERC_REPO_BASE_DIR": dev_root_path,
"CERC_CONTAINER_BASE_DIR": container_build_dir,
"CERC_HOST_UID": f"{os.getuid()}",
"CERC_HOST_GID": f"{os.getgid()}",
"DOCKER_BUILDKIT": config("DOCKER_BUILDKIT", default="0")
"CERC_NPM_URL": "http://gitea.local:3000/api/packages/cerc-io/npm/",
"CERC_NPM_AUTH_TOKEN": config("CERC_NPM_AUTH_TOKEN", default="<token-not-supplied>"),
"CERC_REPO_BASE_DIR": dev_root_path
}
container_build_env.update({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
container_build_env.update({"CERC_FORCE_REBUILD": "true"} if force_rebuild else {})
container_build_env.update({"CERC_CONTAINER_EXTRA_BUILD_ARGS": extra_build_args} if extra_build_args else {})
docker_host_env = os.getenv("DOCKER_HOST")
if docker_host_env:
container_build_env.update({"DOCKER_HOST": docker_host_env})
def process_container(container):
if not quiet:
@@ -115,11 +102,11 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args):
# TODO: make this less of a hack -- should be specified in some metadata somewhere
# Check if we have a repo for this container. If not, set the context dir to the container-build subdir
repo_full_path = os.path.join(dev_root_path, repo_dir)
repo_dir_or_build_dir = repo_full_path if os.path.exists(repo_full_path) else build_dir
repo_dir_or_build_dir = repo_dir if os.path.exists(repo_full_path) else build_dir
build_command = os.path.join(container_build_dir, "default-build.sh") + f" {container}:local {repo_dir_or_build_dir}"
if not dry_run:
if verbose:
print(f"Executing: {build_command} with environment: {container_build_env}")
print(f"Executing: {build_command}")
build_result = subprocess.run(build_command, shell=True, env=container_build_env)
if verbose:
print(f"Return code is: {build_result.returncode}")

@@ -33,10 +33,8 @@ builder_js_image_name = "cerc/builder-js:local"
@click.command()
@click.option('--include', help="only build these packages")
@click.option('--exclude', help="don\'t build these packages")
@click.option("--force-rebuild", is_flag=True, default=False, help="Override existing target package version check -- force rebuild")
@click.option("--extra-build-args", help="Supply extra arguments to build")
@click.pass_context
def command(ctx, include, exclude, force_rebuild, extra_build_args):
def command(ctx, include, exclude):
'''build the set of npm packages required for a complete stack'''
quiet = ctx.obj.quiet
@@ -118,15 +116,7 @@ def command(ctx, include, exclude, force_rebuild, extra_build_args):
if not dry_run:
if verbose:
print(f"Executing: {build_command}")
# Originally we used the PEP 584 merge operator:
# envs = {"CERC_NPM_AUTH_TOKEN": npm_registry_url_token} | ({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
# but that isn't available in Python 3.8 (default in Ubuntu 20) so for now we use dict.update:
envs = {"CERC_NPM_AUTH_TOKEN": npm_registry_url_token,
"LACONIC_HOSTED_CONFIG_FILE": "config-hosted.yml" # Convention used by our web app packages
}
envs.update({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
envs.update({"CERC_FORCE_REBUILD": "true"} if force_rebuild else {})
envs.update({"CERC_CONTAINER_EXTRA_BUILD_ARGS": extra_build_args} if extra_build_args else {})
envs = {"CERC_NPM_AUTH_TOKEN": npm_registry_url_token} | ({"CERC_SCRIPT_DEBUG": "true"} if debug else {})
try:
docker.run(builder_js_image_name,
remove=True,

@@ -2,34 +2,28 @@ version: '3.7'
services:
fixturenet-eth-bootnode-geth:
restart: always
hostname: fixturenet-eth-bootnode-geth
env_file:
- ../config/fixturenet-eth/fixturenet-eth.env
environment:
RUN_BOOTNODE: "true"
image: cerc/fixturenet-eth-geth:local
volumes:
- fixturenet_eth_bootnode_geth_data:/root/ethdata
ports:
- "9898"
- "30303"
fixturenet-eth-geth-1:
restart: always
hostname: fixturenet-eth-geth-1
cap_add:
- SYS_PTRACE
environment:
CERC_REMOTE_DEBUG: "true"
CERC_RUN_STATEDIFF: ${CERC_RUN_STATEDIFF:-detect}
CERC_RUN_STATEDIFF: "detect"
CERC_STATEDIFF_DB_NODE_ID: 1
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
env_file:
- ../config/fixturenet-eth/fixturenet-eth.env
image: cerc/fixturenet-eth-geth:local
volumes:
- fixturenet_eth_geth_1_data:/root/ethdata
healthcheck:
test: ["CMD", "nc", "-v", "localhost", "8545"]
interval: 30s
@@ -44,7 +38,6 @@ services:
- "6060"
fixturenet-eth-geth-2:
restart: always
hostname: fixturenet-eth-geth-2
healthcheck:
test: ["CMD", "nc", "-v", "localhost", "8545"]
@@ -52,25 +45,19 @@
timeout: 10s
retries: 10
start_period: 3s
environment:
CERC_KEEP_RUNNING_AFTER_GETH_EXIT: "true"
env_file:
- ../config/fixturenet-eth/fixturenet-eth.env
image: cerc/fixturenet-eth-geth:local
depends_on:
- fixturenet-eth-bootnode-geth
volumes:
- fixturenet_eth_geth_2_data:/root/ethdata
fixturenet-eth-bootnode-lighthouse:
restart: always
hostname: fixturenet-eth-bootnode-lighthouse
environment:
RUN_BOOTNODE: "true"
image: cerc/fixturenet-eth-lighthouse:local
fixturenet-eth-lighthouse-1:
restart: always
hostname: fixturenet-eth-lighthouse-1
healthcheck:
test: ["CMD", "wget", "--tries=1", "--connect-timeout=1", "--quiet", "-O", "-", "http://localhost:8001/eth/v2/beacon/blocks/head"]
@@ -85,8 +72,6 @@ services:
ETH1_ENDPOINT: "http://fixturenet-eth-geth-1:8545"
EXECUTION_ENDPOINT: "http://fixturenet-eth-geth-1:8551"
image: cerc/fixturenet-eth-lighthouse:local
volumes:
- fixturenet_eth_lighthouse_1_data:/opt/testnet/build/cl
depends_on:
fixturenet-eth-bootnode-lighthouse:
condition: service_started
@@ -96,7 +81,6 @@ services:
- "8001"
fixturenet-eth-lighthouse-2:
restart: always
hostname: fixturenet-eth-lighthouse-2
healthcheck:
test: ["CMD", "wget", "--tries=1", "--connect-timeout=1", "--quiet", "-O", "-", "http://localhost:8001/eth/v2/beacon/blocks/head"]
@@ -112,17 +96,8 @@ services:
EXECUTION_ENDPOINT: "http://fixturenet-eth-geth-2:8551"
LIGHTHOUSE_GENESIS_STATE_URL: "http://fixturenet-eth-lighthouse-1:8001/eth/v2/debug/beacon/states/0"
image: cerc/fixturenet-eth-lighthouse:local
volumes:
- fixturenet_eth_lighthouse_2_data:/opt/testnet/build/cl
depends_on:
fixturenet-eth-bootnode-lighthouse:
condition: service_started
fixturenet-eth-geth-2:
condition: service_healthy
volumes:
fixturenet_eth_bootnode_geth_data:
fixturenet_eth_geth_1_data:
fixturenet_eth_geth_2_data:
fixturenet_eth_lighthouse_1_data:
fixturenet_eth_lighthouse_2_data:

@@ -1,8 +0,0 @@
services:
laconic-console:
restart: unless-stopped
image: cerc/laconic-console-host:local
environment:
- LACONIC_HOSTED_ENDPOINT=${LACONIC_HOSTED_ENDPOINT:-http://localhost}
ports:
- "80"

@@ -1,27 +1,21 @@
version: "3.2"
services:
laconicd:
restart: unless-stopped
image: cerc/laconicd:local
command: ["sh", "/docker-entrypoint-scripts.d/create-fixturenet.sh"]
volumes:
# The cosmos-sdk node's database directory:
- laconicd-data:/root/.laconicd/data
# TODO: look at folding these scripts into the container
# TODO: look at folding this script into the container
- ../config/fixturenet-laconicd/create-fixturenet.sh:/docker-entrypoint-scripts.d/create-fixturenet.sh
- ../config/fixturenet-laconicd/export-mykey.sh:/docker-entrypoint-scripts.d/export-mykey.sh
- ../config/fixturenet-laconicd/export-myaddress.sh:/docker-entrypoint-scripts.d/export-myaddress.sh
# TODO: determine which of the ports below is really needed
ports:
- "6060"
- "26657"
- "26656"
- "9473:9473"
- "9473"
- "8545"
- "8546"
- "9090"
- "9091"
- "1317"
cli:
image: cerc/laconic-registry-cli:local
volumes:
- ../config/fixturenet-laconicd/registry-cli-config-template.yml:/registry-cli-config-template.yml
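
Ports listed without a host side (as for `laconicd` above, e.g. `- "9473"`) are published on ephemeral host ports; the actual mapping can be looked up once the stack is running:

```bash
# Prints the host address:port bound to container port 9473 of the laconicd service.
docker compose port laconicd 9473
```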

@@ -1,68 +0,0 @@
version: "3.8"
services:
lotus-miner:
hostname: lotus-miner
env_file:
- ../config/fixturenet-lotus/lotus-env.env
image: cerc/lotus:local
volumes:
- ../config/fixturenet-lotus/setup-miner.sh:/docker-entrypoint-scripts.d/setup-miner.sh
- ../config/fixturenet-lotus/genesis/devgen.car:/devgen.car
- $HOME/stack-orchestrator/app/data/config/fixturenet-lotus/genesis/.genesis-sectors:/root/.genesis-sectors
- lotus-shared:/root/.lotus-shared
healthcheck:
# test: ["CMD-SHELL", "grep 'started ChainNotify channel' /var/log/lotus.log"]
# test: ["CMD-SHELL", "[ -f /root/.lotus-shared/miner.addr ]"]
test: ["CMD-SHELL", "[ -d /root/.lotus-miner-local-net ]"]
interval: 10s
timeout: 10s
retries: 10
start_period: 60s
entrypoint: ["sh", "/docker-entrypoint-scripts.d/setup-miner.sh"]
ports:
- "1234"
- "2345"
- "3456"
- "1777"
lotus-node-1:
hostname: lotus-node-1
env_file:
- ../config/fixturenet-lotus/lotus-env.env
image: cerc/lotus:local
volumes:
- ../config/fixturenet-lotus/setup-node.sh:/docker-entrypoint-scripts.d/setup-node.sh
- ../config/fixturenet-lotus/genesis/devgen.car:/devgen.car
- lotus-shared:/root/.lotus-shared
depends_on:
lotus-miner:
condition: service_healthy
entrypoint: ["sh", "/docker-entrypoint-scripts.d/setup-node.sh"]
ports:
- "1234"
- "2345"
- "3456"
- "1777"
lotus-node-2:
hostname: lotus-node-2
env_file:
- ../config/fixturenet-lotus/lotus-env.env
image: cerc/lotus:local
volumes:
- ../config/fixturenet-lotus/setup-node.sh:/docker-entrypoint-scripts.d/setup-node.sh
- ../config/fixturenet-lotus/genesis/devgen.car:/devgen.car
- lotus-shared:/root/.lotus-shared
depends_on:
lotus-miner:
condition: service_healthy
entrypoint: ["sh", "/docker-entrypoint-scripts.d/setup-node.sh"]
ports:
- "1234"
- "2345"
- "3456"
- "1777"
volumes:
lotus-shared:

@@ -1,165 +0,0 @@
version: '3.7'
services:
# Generates and funds the accounts required when setting up the L2 chain (outputs to volume l2_accounts)
# Creates / updates the configuration for L1 contracts deployment
# Deploys the L1 smart contracts (outputs to volume l1_deployment)
fixturenet-optimism-contracts:
restart: on-failure
hostname: fixturenet-optimism-contracts
image: cerc/optimism-contracts:local
env_file:
- ../config/fixturenet-optimism/l1-params.env
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
CERC_L1_CHAIN_ID: ${CERC_L1_CHAIN_ID}
CERC_L1_RPC: ${CERC_L1_RPC}
CERC_L1_ACCOUNTS_CSV_URL: ${CERC_L1_ACCOUNTS_CSV_URL}
CERC_L1_ADDRESS: ${CERC_L1_ADDRESS}
CERC_L1_PRIV_KEY: ${CERC_L1_PRIV_KEY}
CERC_L1_ADDRESS_2: ${CERC_L1_ADDRESS_2}
CERC_L1_PRIV_KEY_2: ${CERC_L1_PRIV_KEY_2}
# Waits for L1 endpoint to be up before running the script
command: |
"./wait-for-it.sh -h ${CERC_L1_HOST:-$${DEFAULT_CERC_L1_HOST}} -p ${CERC_L1_PORT:-$${DEFAULT_CERC_L1_PORT}} -s -t 60 -- ./run.sh"
volumes:
- ../config/wait-for-it.sh:/app/packages/contracts-bedrock/wait-for-it.sh
- ../container-build/cerc-optimism-contracts/hardhat-tasks/verify-contract-deployment.ts:/app/packages/contracts-bedrock/tasks/verify-contract-deployment.ts
- ../container-build/cerc-optimism-contracts/hardhat-tasks/rekey-json.ts:/app/packages/contracts-bedrock/tasks/rekey-json.ts
- ../container-build/cerc-optimism-contracts/hardhat-tasks/send-balance.ts:/app/packages/contracts-bedrock/tasks/send-balance.ts
- ../config/fixturenet-optimism/optimism-contracts/update-config.js:/app/packages/contracts-bedrock/update-config.js
- ../config/fixturenet-optimism/optimism-contracts/run.sh:/app/packages/contracts-bedrock/run.sh
- l2_accounts:/l2-accounts
- l1_deployment:/app/packages/contracts-bedrock
extra_hosts:
- "host.docker.internal:host-gateway"
# Generates the config files required for L2 (outputs to volume l2_config)
op-node-l2-config-gen:
restart: on-failure
image: cerc/optimism-op-node:local
depends_on:
fixturenet-optimism-contracts:
condition: service_completed_successfully
env_file:
- ../config/fixturenet-optimism/l1-params.env
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
CERC_L1_RPC: ${CERC_L1_RPC}
volumes:
- ../config/fixturenet-optimism/generate-l2-config.sh:/app/generate-l2-config.sh
- l1_deployment:/contracts-bedrock:ro
- l2_config:/app
command: ["sh", "/app/generate-l2-config.sh"]
extra_hosts:
- "host.docker.internal:host-gateway"
# Initializes and runs the L2 execution client (outputs to volume l2_geth_data)
op-geth:
restart: always
image: cerc/optimism-l2geth:local
depends_on:
op-node-l2-config-gen:
condition: service_started
volumes:
- ../config/fixturenet-optimism/run-op-geth.sh:/run-op-geth.sh
- l2_config:/op-node:ro
- l2_accounts:/l2-accounts:ro
- l2_geth_data:/datadir
entrypoint: "sh"
command: "/run-op-geth.sh"
ports:
- "0.0.0.0:8545:8545"
healthcheck:
test: ["CMD", "nc", "-vz", "localhost:8545"]
interval: 30s
timeout: 10s
retries: 10
start_period: 10s
# Runs the L2 consensus client (Sequencer node)
op-node:
restart: always
image: cerc/optimism-op-node:local
depends_on:
op-geth:
condition: service_healthy
env_file:
- ../config/fixturenet-optimism/l1-params.env
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
CERC_L1_RPC: ${CERC_L1_RPC}
volumes:
- ../config/fixturenet-optimism/run-op-node.sh:/app/run-op-node.sh
- l2_config:/op-node-data:ro
- l2_accounts:/l2-accounts:ro
command: ["sh", "/app/run-op-node.sh"]
ports:
- "0.0.0.0:8547:8547"
healthcheck:
test: ["CMD", "nc", "-vz", "localhost:8547"]
interval: 30s
timeout: 10s
retries: 10
start_period: 10s
extra_hosts:
- "host.docker.internal:host-gateway"
# Runs the batcher (takes transactions from the Sequencer and publishes them to L1)
op-batcher:
restart: always
image: cerc/optimism-op-batcher:local
depends_on:
op-node:
condition: service_healthy
op-geth:
condition: service_healthy
env_file:
- ../config/fixturenet-optimism/l1-params.env
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
CERC_L1_RPC: ${CERC_L1_RPC}
volumes:
- ../config/wait-for-it.sh:/wait-for-it.sh
- ../config/fixturenet-optimism/run-op-batcher.sh:/run-op-batcher.sh
- l2_accounts:/l2-accounts:ro
entrypoint: ["sh", "-c"]
# Waits for L1 endpoint to be up before running the batcher
command: |
"/wait-for-it.sh -h ${CERC_L1_HOST:-$${DEFAULT_CERC_L1_HOST}} -p ${CERC_L1_PORT:-$${DEFAULT_CERC_L1_PORT}} -s -t 60 -- /run-op-batcher.sh"
ports:
- "127.0.0.1:8548:8548"
extra_hosts:
- "host.docker.internal:host-gateway"
# Runs the proposer (periodically submits new state roots to L1)
op-proposer:
restart: always
image: cerc/optimism-op-proposer:local
depends_on:
op-node:
condition: service_healthy
env_file:
- ../config/fixturenet-optimism/l1-params.env
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
CERC_L1_RPC: ${CERC_L1_RPC}
volumes:
- ../config/wait-for-it.sh:/wait-for-it.sh
- ../config/fixturenet-optimism/run-op-proposer.sh:/run-op-proposer.sh
- l1_deployment:/contracts-bedrock:ro
- l2_accounts:/l2-accounts:ro
entrypoint: ["sh", "-c"]
# Waits for L1 endpoint to be up before running the proposer
command: |
"/wait-for-it.sh -h ${CERC_L1_HOST:-$${DEFAULT_CERC_L1_HOST}} -p ${CERC_L1_PORT:-$${DEFAULT_CERC_L1_PORT}} -s -t 60 -- /run-op-proposer.sh"
ports:
- "127.0.0.1:8560:8560"
extra_hosts:
- "host.docker.internal:host-gateway"
volumes:
l1_deployment:
l2_accounts:
l2_config:
l2_geth_data:

@@ -1,129 +0,0 @@
services:
fixturenet-eth-bootnode-geth:
restart: always
hostname: fixturenet-eth-bootnode-geth
env_file:
- ../config/fixturenet-eth/fixturenet-eth.env
environment:
RUN_BOOTNODE: "true"
image: cerc/fixturenet-plugeth-plugeth:local
volumes:
- fixturenet_plugeth_bootnode_geth_data:/root/ethdata
- ../config/fixturenet-plugeth/plugins:/root/ethdata/plugins
ports:
- "9898"
- "30303"
fixturenet-eth-geth-1:
restart: always
hostname: fixturenet-eth-geth-1
cap_add:
- SYS_PTRACE
environment:
CERC_REMOTE_DEBUG: "true"
CERC_RUN_STATEDIFF: "detect"
CERC_STATEDIFF_DB_NODE_ID: 1
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
env_file:
- ../config/fixturenet-eth/fixturenet-eth.env
image: cerc/fixturenet-plugeth-plugeth:local
volumes:
- fixturenet_plugeth_geth_1_data:/root/ethdata
- ../config/fixturenet-plugeth/plugins:/root/ethdata/plugins
healthcheck:
test: ["CMD", "wget", "--tries=1", "--connect-timeout=1", "--quiet", "-O", "-", "http://localhost:8545/"]
interval: 30s
timeout: 10s
retries: 10
start_period: 3s
depends_on:
- fixturenet-eth-bootnode-geth
ports:
- "8545"
- "40000"
- "6060"
fixturenet-eth-geth-2:
restart: always
hostname: fixturenet-eth-geth-2
healthcheck:
test: ["CMD", "wget", "--tries=1", "--connect-timeout=1", "--quiet", "-O", "-", "http://localhost:8545/"]
interval: 30s
timeout: 10s
retries: 10
start_period: 3s
environment:
CERC_KEEP_RUNNING_AFTER_GETH_EXIT: "true"
env_file:
- ../config/fixturenet-eth/fixturenet-eth.env
image: cerc/fixturenet-plugeth-plugeth:local
depends_on:
- fixturenet-eth-bootnode-geth
volumes:
- fixturenet_plugeth_geth_2_data:/root/ethdata
- ../config/fixturenet-plugeth/plugins:/root/ethdata/plugins
fixturenet-eth-bootnode-lighthouse:
restart: always
hostname: fixturenet-eth-bootnode-lighthouse
environment:
RUN_BOOTNODE: "true"
image: cerc/fixturenet-plugeth-lighthouse:local
fixturenet-eth-lighthouse-1:
restart: always
hostname: fixturenet-eth-lighthouse-1
healthcheck:
test: ["CMD", "wget", "--tries=1", "--connect-timeout=1", "--quiet", "-O", "-", "http://localhost:8001/eth/v2/beacon/blocks/head"]
interval: 30s
timeout: 10s
retries: 10
start_period: 30s
env_file:
- ../config/fixturenet-eth/fixturenet-eth.env
environment:
NODE_NUMBER: "1"
ETH1_ENDPOINT: "http://fixturenet-eth-geth-1:8545"
EXECUTION_ENDPOINT: "http://fixturenet-eth-geth-1:8551"
image: cerc/fixturenet-plugeth-lighthouse:local
volumes:
- fixturenet_plugeth_lighthouse_1_data:/opt/testnet/build/cl
depends_on:
fixturenet-eth-bootnode-lighthouse:
condition: service_started
fixturenet-eth-geth-1:
condition: service_healthy
ports:
- "8001"
fixturenet-eth-lighthouse-2:
restart: always
hostname: fixturenet-eth-lighthouse-2
healthcheck:
test: ["CMD", "wget", "--tries=1", "--connect-timeout=1", "--quiet", "-O", "-", "http://localhost:8001/eth/v2/beacon/blocks/head"]
interval: 30s
timeout: 10s
retries: 10
start_period: 30s
env_file:
- ../config/fixturenet-eth/fixturenet-eth.env
environment:
NODE_NUMBER: "2"
ETH1_ENDPOINT: "http://fixturenet-eth-geth-2:8545"
EXECUTION_ENDPOINT: "http://fixturenet-eth-geth-2:8551"
LIGHTHOUSE_GENESIS_STATE_URL: "http://fixturenet-eth-lighthouse-1:8001/eth/v2/debug/beacon/states/0"
image: cerc/fixturenet-plugeth-lighthouse:local
volumes:
- fixturenet_plugeth_lighthouse_2_data:/opt/testnet/build/cl
depends_on:
fixturenet-eth-bootnode-lighthouse:
condition: service_started
fixturenet-eth-geth-2:
condition: service_healthy
volumes:
fixturenet_plugeth_bootnode_geth_data:
fixturenet_plugeth_geth_1_data:
fixturenet_plugeth_geth_2_data:
fixturenet_plugeth_lighthouse_1_data:
fixturenet_plugeth_lighthouse_2_data:

@@ -1,18 +0,0 @@
version: "3.2"
services:
pocket:
restart: unless-stopped
image: cerc/pocket:local
# command: ["sh", "/docker-entrypoint-scripts.d/create-fixturenet.sh"]
entrypoint: ["sh", "/docker-entrypoint-scripts.d/create-fixturenet.sh"]
volumes:
# TODO: look at folding these scripts into the container
- ../config/fixturenet-pocket/create-fixturenet.sh:/docker-entrypoint-scripts.d/create-fixturenet.sh
- ../config/fixturenet-pocket/chains.json:/home/app/pocket-configs/chains.json
- ../config/fixturenet-pocket/genesis.json:/home/app/pocket-configs/genesis.json
ports:
- "8081:8081" # pocket relay rpc
networks:
net1:
name: fixturenet-eth_default
external: true

@@ -1,9 +0,0 @@
# Add-on pod to include foundry tooling within a fixturenet
services:
foundry:
restart: always
image: cerc/foundry:local
command: ["while :; do sleep 600; done"]
volumes:
- ../config/foundry/foundry.toml:/foundry.toml
- ./foundry/workspace:/workspace

@@ -8,7 +8,7 @@ services:
condition: service_healthy
image: cerc/go-ethereum-foundry:local
healthcheck:
test: ["CMD", "nc", "-vz", "localhost", "8545"]
test: ["CMD", "nc", "-v", "localhost", "8545"]
interval: 30s
timeout: 3s
retries: 10

@@ -7,9 +7,11 @@ services:
condition: service_healthy
image: cerc/ipld-eth-server:local
environment:
SERVER_HTTP_PATH: 0.0.0.0:8081
SERVER_GRAPHQL: "true"
SERVER_GRAPHQLPATH: 0.0.0.0:8082
IPLD_SERVER_GRAPHQL: "true"
IPLD_POSTGRAPHILEPATH: http://graphql:5000
ETH_SERVER_HTTPPATH: 0.0.0.0:8081
ETH_SERVER_GRAPHQL: "true"
ETH_SERVER_GRAPHQLPATH: 0.0.0.0:8082
VDB_COMMAND: "serve"
ETH_CHAIN_CONFIG: "/tmp/chain.json"
DATABASE_NAME: cerc_testing

@@ -8,6 +8,6 @@ services:
- ./ipfs/import:/import
- ./ipfs/data:/data/ipfs
ports:
- "0.0.0.0:8080:8080"
- "0.0.0.0:4001:4001"
- "0.0.0.0:5001:5001"
- "8080"
- "4001"
- "5001"

@@ -1,70 +0,0 @@
version: '3.2'
services:
# Builds and serves the MobyMask react-app
mobymask-app:
restart: unless-stopped
image: cerc/mobymask-ui:local
env_file:
- ../config/watcher-mobymask-v2/mobymask-params.env
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
CERC_CHAIN_ID: ${CERC_CHAIN_ID}
CERC_DEPLOYED_CONTRACT: ${CERC_DEPLOYED_CONTRACT}
CERC_APP_WATCHER_URL: ${CERC_APP_WATCHER_URL}
CERC_RELAY_NODES: ${CERC_RELAY_NODES}
CERC_DENY_MULTIADDRS: ${CERC_DENY_MULTIADDRS}
CERC_BUILD_DIR: "@cerc-io/mobymask-ui/build"
working_dir: /scripts
command: ["sh", "mobymask-app-start.sh"]
volumes:
- ../config/wait-for-it.sh:/scripts/wait-for-it.sh
- ../config/watcher-mobymask-v2/mobymask-app-start.sh:/scripts/mobymask-app-start.sh
- peers_ids:/peers
- mobymask_deployment:/server
ports:
- "0.0.0.0:3002:80"
healthcheck:
test: ["CMD", "nc", "-vz", "localhost", "80"]
interval: 20s
timeout: 5s
retries: 15
start_period: 10s
extra_hosts:
- "host.docker.internal:host-gateway"
# Builds and serves the LXDAO version of MobyMask react-app
lxdao-mobymask-app:
restart: unless-stopped
image: cerc/mobymask-ui:local
env_file:
- ../config/watcher-mobymask-v2/mobymask-params.env
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
CERC_CHAIN_ID: ${CERC_CHAIN_ID}
CERC_DEPLOYED_CONTRACT: ${CERC_DEPLOYED_CONTRACT}
CERC_APP_WATCHER_URL: ${CERC_APP_WATCHER_URL}
CERC_RELAY_NODES: ${CERC_RELAY_NODES}
CERC_DENY_MULTIADDRS: ${CERC_DENY_MULTIADDRS}
CERC_BUILD_DIR: "@cerc-io/mobymask-ui-lxdao/build"
working_dir: /scripts
command: ["sh", "mobymask-app-start.sh"]
volumes:
- ../config/wait-for-it.sh:/scripts/wait-for-it.sh
- ../config/watcher-mobymask-v2/mobymask-app-start.sh:/scripts/mobymask-app-start.sh
- peers_ids:/peers
- mobymask_deployment:/server
ports:
- "0.0.0.0:3004:80"
healthcheck:
test: ["CMD", "nc", "-vz", "localhost", "80"]
interval: 20s
timeout: 5s
retries: 15
start_period: 10s
extra_hosts:
- "host.docker.internal:host-gateway"
volumes:
mobymask_deployment:
peers_ids:

@@ -1,32 +0,0 @@
version: '3.2'
services:
# Builds and serves the peer-test react-app
peer-test-app:
restart: unless-stopped
image: cerc/react-peer:local
working_dir: /scripts
env_file:
- ../config/watcher-mobymask-v2/mobymask-params.env
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
CERC_RELAY_NODES: ${CERC_RELAY_NODES}
CERC_DENY_MULTIADDRS: ${CERC_DENY_MULTIADDRS}
command: ["sh", "test-app-start.sh"]
volumes:
- ../config/wait-for-it.sh:/scripts/wait-for-it.sh
- ../config/watcher-mobymask-v2/test-app-start.sh:/scripts/test-app-start.sh
- peers_ids:/peers
ports:
- "0.0.0.0:3003:80"
healthcheck:
test: ["CMD", "nc", "-v", "localhost", "80"]
interval: 20s
timeout: 5s
retries: 15
start_period: 10s
extra_hosts:
- "host.docker.internal:host-gateway"
volumes:
peers_ids:

@@ -1,13 +1,7 @@
version: "3.2"
services:
test:
image: cerc/test-container:local
restart: always
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
volumes:
- test-data:/var
ports:
- "80"
volumes:
test-data:

@@ -1,304 +0,0 @@
version: '3.2'
services:
# Starts the PostgreSQL database for watchers
watcher-db:
restart: unless-stopped
image: postgres:14-alpine
environment:
- POSTGRES_USER=vdbm
- POSTGRES_MULTIPLE_DATABASES=azimuth-watcher,azimuth-watcher-job-queue,censures-watcher,censures-watcher-job-queue,claims-watcher,claims-watcher-job-queue,conditional-star-release-watcher,conditional-star-release-watcher-job-queue,delegated-sending-watcher,delegated-sending-watcher-job-queue,ecliptic-watcher,ecliptic-watcher-job-queue,linear-star-release-watcher,linear-star-release-watcher-job-queue,polls-watcher,polls-watcher-job-queue
- POSTGRES_EXTENSION=azimuth-watcher-job-queue:pgcrypto,censures-watcher-job-queue:pgcrypto,claims-watcher-job-queue:pgcrypto,conditional-star-release-watcher-job-queue:pgcrypto,delegated-sending-watcher-job-queue:pgcrypto,ecliptic-watcher-job-queue:pgcrypto,linear-star-release-watcher-job-queue:pgcrypto,polls-watcher-job-queue:pgcrypto,
- POSTGRES_PASSWORD=password
volumes:
- ../config/postgresql/multiple-postgressql-databases.sh:/docker-entrypoint-initdb.d/multiple-postgressql-databases.sh
- watcher_db_data:/var/lib/postgresql/data
ports:
- "0.0.0.0:15432:5432"
healthcheck:
test: ["CMD", "nc", "-v", "localhost", "5432"]
interval: 20s
timeout: 5s
retries: 15
start_period: 10s
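
The `POSTGRES_MULTIPLE_DATABASES` and `POSTGRES_EXTENSION` variables are consumed by the init script mounted above, which creates one database (and pgcrypto extension) per entry. Once the healthcheck passes, the result can be inspected through the published port, e.g.:

```bash
# Lists the databases created by the init script (credentials from the compose file).
PGPASSWORD=password psql -h localhost -p 15432 -U vdbm -l
```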
# Starts the azimuth-watcher server
azimuth-watcher-server:
image: cerc/watcher-azimuth:local
restart: unless-stopped
depends_on:
watcher-db:
condition: service_healthy
env_file:
- ../config/watcher-azimuth/watcher-params.env
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
working_dir: /app/packages/azimuth-watcher
command: "./start-server.sh"
volumes:
- ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/azimuth-watcher/environments/watcher-config-template.toml
- ../config/watcher-azimuth/merge-toml.js:/app/packages/azimuth-watcher/merge-toml.js
- ../config/watcher-azimuth/start-server.sh:/app/packages/azimuth-watcher/start-server.sh
ports:
- "3001"
healthcheck:
test: ["CMD", "nc", "-vz", "localhost", "3001"]
interval: 20s
timeout: 5s
retries: 15
start_period: 5s
extra_hosts:
- "host.docker.internal:host-gateway"
# Starts the censures-watcher server
censures-watcher-server:
image: cerc/watcher-azimuth:local
restart: unless-stopped
depends_on:
watcher-db:
condition: service_healthy
env_file:
- ../config/watcher-azimuth/watcher-params.env
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
working_dir: /app/packages/censures-watcher
command: "./start-server.sh"
volumes:
- ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/censures-watcher/environments/watcher-config-template.toml
- ../config/watcher-azimuth/merge-toml.js:/app/packages/censures-watcher/merge-toml.js
- ../config/watcher-azimuth/start-server.sh:/app/packages/censures-watcher/start-server.sh
ports:
- "3002"
healthcheck:
test: ["CMD", "nc", "-vz", "localhost", "3002"]
interval: 20s
timeout: 5s
retries: 15
start_period: 5s
extra_hosts:
- "host.docker.internal:host-gateway"
# Starts the claims-watcher server
claims-watcher-server:
image: cerc/watcher-azimuth:local
restart: unless-stopped
depends_on:
watcher-db:
condition: service_healthy
env_file:
- ../config/watcher-azimuth/watcher-params.env
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
working_dir: /app/packages/claims-watcher
command: "./start-server.sh"
volumes:
- ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/claims-watcher/environments/watcher-config-template.toml
- ../config/watcher-azimuth/merge-toml.js:/app/packages/claims-watcher/merge-toml.js
- ../config/watcher-azimuth/start-server.sh:/app/packages/claims-watcher/start-server.sh
ports:
- "3003"
healthcheck:
test: ["CMD", "nc", "-vz", "localhost", "3003"]
interval: 20s
timeout: 5s
retries: 15
start_period: 5s
extra_hosts:
- "host.docker.internal:host-gateway"
# Starts the conditional-star-release-watcher server
conditional-star-release-watcher-server:
image: cerc/watcher-azimuth:local
restart: unless-stopped
depends_on:
watcher-db:
condition: service_healthy
env_file:
- ../config/watcher-azimuth/watcher-params.env
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
working_dir: /app/packages/conditional-star-release-watcher
command: "./start-server.sh"
volumes:
- ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/conditional-star-release-watcher/environments/watcher-config-template.toml
- ../config/watcher-azimuth/merge-toml.js:/app/packages/conditional-star-release-watcher/merge-toml.js
- ../config/watcher-azimuth/start-server.sh:/app/packages/conditional-star-release-watcher/start-server.sh
ports:
- "3004"
healthcheck:
test: ["CMD", "nc", "-vz", "localhost", "3004"]
interval: 20s
timeout: 5s
retries: 15
start_period: 5s
extra_hosts:
- "host.docker.internal:host-gateway"
# Starts the delegated-sending-watcher server
delegated-sending-watcher-server:
image: cerc/watcher-azimuth:local
restart: unless-stopped
depends_on:
watcher-db:
condition: service_healthy
env_file:
- ../config/watcher-azimuth/watcher-params.env
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
working_dir: /app/packages/delegated-sending-watcher
command: "./start-server.sh"
volumes:
- ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/delegated-sending-watcher/environments/watcher-config-template.toml
- ../config/watcher-azimuth/merge-toml.js:/app/packages/delegated-sending-watcher/merge-toml.js
- ../config/watcher-azimuth/start-server.sh:/app/packages/delegated-sending-watcher/start-server.sh
ports:
- "3005"
healthcheck:
test: ["CMD", "nc", "-vz", "localhost", "3005"]
interval: 20s
timeout: 5s
retries: 15
start_period: 5s
extra_hosts:
- "host.docker.internal:host-gateway"
# Starts the ecliptic-watcher server
ecliptic-watcher-server:
image: cerc/watcher-azimuth:local
restart: unless-stopped
depends_on:
watcher-db:
condition: service_healthy
env_file:
- ../config/watcher-azimuth/watcher-params.env
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
working_dir: /app/packages/ecliptic-watcher
command: "./start-server.sh"
volumes:
- ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/ecliptic-watcher/environments/watcher-config-template.toml
- ../config/watcher-azimuth/merge-toml.js:/app/packages/ecliptic-watcher/merge-toml.js
- ../config/watcher-azimuth/start-server.sh:/app/packages/ecliptic-watcher/start-server.sh
ports:
- "3006"
healthcheck:
test: ["CMD", "nc", "-vz", "localhost", "3006"]
interval: 20s
timeout: 5s
retries: 15
start_period: 5s
extra_hosts:
- "host.docker.internal:host-gateway"
# Starts the linear-star-release-watcher server
linear-star-release-watcher-server:
image: cerc/watcher-azimuth:local
restart: unless-stopped
depends_on:
watcher-db:
condition: service_healthy
env_file:
- ../config/watcher-azimuth/watcher-params.env
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
working_dir: /app/packages/linear-star-release-watcher
command: "./start-server.sh"
volumes:
- ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/linear-star-release-watcher/environments/watcher-config-template.toml
- ../config/watcher-azimuth/merge-toml.js:/app/packages/linear-star-release-watcher/merge-toml.js
- ../config/watcher-azimuth/start-server.sh:/app/packages/linear-star-release-watcher/start-server.sh
ports:
- "3007"
healthcheck:
test: ["CMD", "nc", "-vz", "localhost", "3007"]
interval: 20s
timeout: 5s
retries: 15
start_period: 5s
extra_hosts:
- "host.docker.internal:host-gateway"
# Starts the polls-watcher server
polls-watcher-server:
image: cerc/watcher-azimuth:local
restart: unless-stopped
depends_on:
watcher-db:
condition: service_healthy
env_file:
- ../config/watcher-azimuth/watcher-params.env
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
working_dir: /app/packages/polls-watcher
command: "./start-server.sh"
volumes:
- ../config/watcher-azimuth/watcher-config-template.toml:/app/packages/polls-watcher/environments/watcher-config-template.toml
- ../config/watcher-azimuth/merge-toml.js:/app/packages/polls-watcher/merge-toml.js
- ../config/watcher-azimuth/start-server.sh:/app/packages/polls-watcher/start-server.sh
ports:
- "3008"
healthcheck:
test: ["CMD", "nc", "-vz", "localhost", "3008"]
interval: 20s
timeout: 5s
retries: 15
start_period: 5s
extra_hosts:
- "host.docker.internal:host-gateway"
# Starts the gateway-server for proxying queries
gateway-server:
image: cerc/watcher-azimuth:local
restart: unless-stopped
depends_on:
azimuth-watcher-server:
condition: service_healthy
censures-watcher-server:
condition: service_healthy
claims-watcher-server:
condition: service_healthy
conditional-star-release-watcher-server:
condition: service_healthy
delegated-sending-watcher-server:
condition: service_healthy
ecliptic-watcher-server:
condition: service_healthy
linear-star-release-watcher-server:
condition: service_healthy
polls-watcher-server:
condition: service_healthy
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
working_dir: /app/packages/gateway-server
command: "yarn server"
volumes:
- ../config/watcher-azimuth/gateway-watchers.json:/app/packages/gateway-server/dist/watchers.json
ports:
- "0.0.0.0:4000:4000"
healthcheck:
test: ["CMD", "nc", "-vz", "localhost", "4000"]
interval: 20s
timeout: 5s
retries: 15
start_period: 5s
extra_hosts:
- "host.docker.internal:host-gateway"
volumes:
watcher_db_data:

@@ -39,7 +39,7 @@ services:
- "0.0.0.0:3002:3001"
- "0.0.0.0:9002:9001"
healthcheck:
test: ["CMD", "nc", "-vz", "localhost", "3001"]
test: ["CMD", "nc", "-v", "localhost", "3002"]
interval: 20s
timeout: 5s
retries: 15

@@ -1,91 +0,0 @@
version: '3.2'
services:
# Starts the PostgreSQL database for watcher
gelato-watcher-db:
restart: unless-stopped
image: postgres:14-alpine
environment:
- POSTGRES_USER=vdbm
- POSTGRES_MULTIPLE_DATABASES=gelato-watcher,gelato-watcher-job-queue
- POSTGRES_EXTENSION=gelato-watcher-job-queue:pgcrypto
- POSTGRES_PASSWORD=password
volumes:
- ../config/postgresql/multiple-postgressql-databases.sh:/docker-entrypoint-initdb.d/multiple-postgressql-databases.sh
- gelato_watcher_db_data:/var/lib/postgresql/data
ports:
- "0.0.0.0:15432:5432"
healthcheck:
test: ["CMD", "nc", "-v", "localhost", "5432"]
interval: 10s
timeout: 5s
retries: 15
start_period: 10s
# Starts the gelato-watcher job runner
gelato-watcher-job-runner:
image: cerc/watcher-gelato:local
restart: unless-stopped
depends_on:
gelato-watcher-db:
condition: service_healthy
env_file:
- ../config/watcher-gelato/watcher-params.env
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
command: ["./start-job-runner.sh"]
volumes:
- ../config/watcher-gelato/watcher-config-template.toml:/app/environments/watcher-config-template.toml
- ../config/watcher-gelato/start-job-runner.sh:/app/start-job-runner.sh
ports:
- "0.0.0.0:9000:9000"
healthcheck:
test: ["CMD", "nc", "-v", "localhost", "9000"]
interval: 10s
timeout: 5s
retries: 15
start_period: 10s
extra_hosts:
- "host.docker.internal:host-gateway"
# Starts the gelato-watcher server
gelato-watcher-server:
image: cerc/watcher-gelato:local
restart: unless-stopped
depends_on:
gelato-watcher-db:
condition: service_healthy
gelato-watcher-job-runner:
condition: service_healthy
env_file:
- ../config/watcher-gelato/watcher-params.env
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
CERC_IPLD_ETH_RPC: ${CERC_IPLD_ETH_RPC}
CERC_IPLD_ETH_GQL: ${CERC_IPLD_ETH_GQL}
CERC_USE_STATE_SNAPSHOT: ${CERC_USE_STATE_SNAPSHOT}
CERC_SNAPSHOT_GQL_ENDPOINT: ${CERC_SNAPSHOT_GQL_ENDPOINT}
CERC_SNAPSHOT_BLOCKHASH: ${CERC_SNAPSHOT_BLOCKHASH}
command: ["./start-server.sh"]
volumes:
- ../config/watcher-gelato/watcher-config-template.toml:/app/environments/watcher-config-template.toml
- ../config/watcher-gelato/start-server.sh:/app/start-server.sh
- ../config/watcher-gelato/create-and-import-checkpoint.sh:/app/create-and-import-checkpoint.sh
- gelato_watcher_state_gql:/app/state_checkpoint
ports:
- "0.0.0.0:3008:3008"
- "0.0.0.0:9001:9001"
healthcheck:
test: ["CMD", "nc", "-v", "localhost", "3008"]
interval: 20s
timeout: 5s
retries: 15
start_period: 5s
extra_hosts:
- "host.docker.internal:host-gateway"
volumes:
gelato_watcher_db_data:
gelato_watcher_state_gql:

@@ -1,135 +0,0 @@
version: '3.2'
services:
# Starts the PostgreSQL database for watcher
mobymask-watcher-db:
restart: unless-stopped
image: postgres:14-alpine
environment:
- POSTGRES_USER=vdbm
- POSTGRES_MULTIPLE_DATABASES=mobymask-watcher,mobymask-watcher-job-queue
- POSTGRES_EXTENSION=mobymask-watcher-job-queue:pgcrypto
- POSTGRES_PASSWORD=password
volumes:
- ../config/postgresql/multiple-postgressql-databases.sh:/docker-entrypoint-initdb.d/multiple-postgressql-databases.sh
- mobymask_watcher_db_data:/var/lib/postgresql/data
ports:
- "0.0.0.0:15432:5432"
healthcheck:
test: ["CMD", "nc", "-v", "localhost", "5432"]
interval: 20s
timeout: 5s
retries: 15
start_period: 10s
# Deploys the MobyMask contract and generates an invite link
# Deployment is skipped if CERC_DEPLOYED_CONTRACT env is set
mobymask:
image: cerc/mobymask:local
working_dir: /app/packages/server
env_file:
- ../config/watcher-mobymask-v2/optimism-params.env
- ../config/watcher-mobymask-v2/mobymask-params.env
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
ENV: "PROD"
CERC_L2_GETH_RPC: ${CERC_L2_GETH_RPC}
CERC_L1_ACCOUNTS_CSV_URL: ${CERC_L1_ACCOUNTS_CSV_URL}
CERC_PRIVATE_KEY_DEPLOYER: ${CERC_PRIVATE_KEY_DEPLOYER}
CERC_MOBYMASK_APP_BASE_URI: ${CERC_MOBYMASK_APP_BASE_URI}
CERC_DEPLOYED_CONTRACT: ${CERC_DEPLOYED_CONTRACT}
CERC_L2_GETH_HOST: ${CERC_L2_GETH_HOST}
CERC_L2_GETH_PORT: ${CERC_L2_GETH_PORT}
CERC_L2_NODE_HOST: ${CERC_L2_NODE_HOST}
CERC_L2_NODE_PORT: ${CERC_L2_NODE_PORT}
command: ["sh", "deploy-and-generate-invite.sh"]
volumes:
- ../config/wait-for-it.sh:/app/packages/server/wait-for-it.sh
- ../config/watcher-mobymask-v2/secrets-template.json:/app/packages/server/secrets-template.json
- ../config/watcher-mobymask-v2/deploy-and-generate-invite.sh:/app/packages/server/deploy-and-generate-invite.sh
- mobymask_deployment:/app/packages/server
extra_hosts:
- "host.docker.internal:host-gateway"
# Creates peer-id files if they don't exist
peer-ids-gen:
image: cerc/watcher-ts:local
restart: on-failure
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
working_dir: /app/packages/peer
command: ["sh", "generate-peer-ids.sh"]
volumes:
- ../config/watcher-mobymask-v2/generate-peer-ids.sh:/app/packages/peer/generate-peer-ids.sh
- peers_ids:/peer-ids
# Starts the mobymask-v2-watcher server
mobymask-watcher-server:
image: cerc/watcher-mobymask-v2:local
restart: unless-stopped
depends_on:
mobymask-watcher-db:
condition: service_healthy
peer-ids-gen:
condition: service_completed_successfully
mobymask:
condition: service_completed_successfully
env_file:
- ../config/watcher-mobymask-v2/optimism-params.env
- ../config/watcher-mobymask-v2/mobymask-params.env
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
CERC_L2_GETH_RPC: ${CERC_L2_GETH_RPC}
CERC_L1_ACCOUNTS_CSV_URL: ${CERC_L1_ACCOUNTS_CSV_URL}
CERC_PRIVATE_KEY_PEER: ${CERC_PRIVATE_KEY_PEER}
CERC_RELAY_PEERS: ${CERC_RELAY_PEERS}
CERC_DENY_MULTIADDRS: ${CERC_DENY_MULTIADDRS}
CERC_RELAY_ANNOUNCE_DOMAIN: ${CERC_RELAY_ANNOUNCE_DOMAIN}
CERC_ENABLE_PEER_L2_TXS: ${CERC_ENABLE_PEER_L2_TXS}
CERC_DEPLOYED_CONTRACT: ${CERC_DEPLOYED_CONTRACT}
command: ["sh", "start-server.sh"]
volumes:
- ../config/watcher-mobymask-v2/watcher-config-template.toml:/app/environments/watcher-config-template.toml
- ../config/watcher-mobymask-v2/start-server.sh:/app/start-server.sh
- peers_ids:/app/peers
- mobymask_deployment:/server
# Expose GQL, metrics and relay node ports
ports:
- "0.0.0.0:3001:3001"
- "0.0.0.0:9001:9001"
- "0.0.0.0:9090:9090"
healthcheck:
test: ["CMD", "busybox", "nc", "localhost", "9090"]
interval: 20s
timeout: 5s
retries: 15
start_period: 5s
extra_hosts:
- "host.docker.internal:host-gateway"
# Container to run peer tests
peer-tests:
image: cerc/watcher-ts:local
restart: on-failure
depends_on:
mobymask-watcher-server:
condition: service_healthy
peer-ids-gen:
condition: service_completed_successfully
environment:
CERC_SCRIPT_DEBUG: ${CERC_SCRIPT_DEBUG}
working_dir: /app/packages/peer
command:
- sh
- -c
- |
./set-tests-env.sh && \
tail -f /dev/null
volumes:
- ../config/watcher-mobymask-v2/set-tests-env.sh:/app/packages/peer/set-tests-env.sh
- peers_ids:/peer-ids
volumes:
mobymask_watcher_db_data:
peers_ids:
mobymask_deployment:

@@ -17,8 +17,7 @@ CERC_STATEDIFF_DB_PORT=5432
CERC_STATEDIFF_DB_NAME="cerc_testing"
CERC_STATEDIFF_DB_USER="vdbm"
CERC_STATEDIFF_DB_PASSWORD="password"
CERC_STATEDIFF_DB_GOOSE_MIN_VER=${CERC_STATEDIFF_DB_GOOSE_MIN_VER:-18}
CERC_STATEDIFF_DB_GOOSE_MIN_VER=23
CERC_STATEDIFF_DB_LOG_STATEMENTS="false"
CERC_STATEDIFF_WORKERS=2
CERC_GETH_VMODULE="statediff/*=5,rpc/*=5"

@@ -1,8 +1,8 @@
#!/bin/bash
#!/bin/sh
# Originally from: https://github.com/cerc-io/laconicd/blob/main/init.sh
# TODO: fold this back into the laconicd repo
# TODO: this file is now an unmodified copy of cerc-io/laconicd/init.sh
# so we should have a mechanism to bundle it inside the container rather than link from here
# at deploy time.
KEY="mykey"
CHAINID="laconic_9000-1"
@@ -10,7 +10,7 @@ MONIKER="localtestnet"
KEYRING="test"
KEYALGO="eth_secp256k1"
LOGLEVEL="info"
# trace evm
# to trace evm
TRACE="--trace"
# TRACE=""
@@ -28,7 +28,7 @@ laconicd config chain-id $CHAINID
# if $KEY exists it should be deleted
laconicd keys add $KEY --keyring-backend $KEYRING --algo $KEYALGO
# Set moniker and chain-id for Ethermint (Moniker can be anything, chain-id must be an integer)
# Set moniker and chain-id for laconic (Moniker can be anything, chain-id must be an integer)
laconicd init $MONIKER --chain-id $CHAINID
# Change parameter token denominations to aphoton
@@ -37,28 +37,28 @@ cat $HOME/.laconicd/config/genesis.json | jq '.app_state["crisis"]["constant_fee
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["gov"]["deposit_params"]["min_deposit"][0]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["mint"]["params"]["mint_denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
# Custom modules
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["record_rent"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_rent"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_commit_fee"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_reveal_fee"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_minimum_bid"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["nameservice"]["params"]["record_rent"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["nameservice"]["params"]["authority_rent"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["nameservice"]["params"]["authority_auction_commit_fee"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["nameservice"]["params"]["authority_auction_reveal_fee"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["nameservice"]["params"]["authority_auction_minimum_bid"]["denom"]="aphoton"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
if [[ "$TEST_REGISTRY_EXPIRY" == "true" ]]; then
if [[ "$TEST_NAMESERVICE_EXPIRY" == "true" ]]; then
echo "Setting timers for expiry tests."
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["record_rent_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_grace_period"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_rent_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["nameservice"]["params"]["record_rent_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["nameservice"]["params"]["authority_grace_period"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["nameservice"]["params"]["authority_rent_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
fi
if [[ "$TEST_AUCTION_ENABLED" == "true" ]]; then
echo "Enabling auction and setting timers."
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_enabled"]=true' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_rent_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_grace_period"]="300s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_commits_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["registry"]["params"]["authority_auction_reveals_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["nameservice"]["params"]["authority_auction_enabled"]=true' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["nameservice"]["params"]["authority_rent_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["nameservice"]["params"]["authority_grace_period"]="300s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["nameservice"]["params"]["authority_auction_commits_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
cat $HOME/.laconicd/config/genesis.json | jq '.app_state["nameservice"]["params"]["authority_auction_reveals_duration"]="60s"' > $HOME/.laconicd/config/tmp_genesis.json && mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
fi
# increase block time (?)
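The repeated cat | jq | mv dance above can be factored into a small helper. A minimal sketch, assuming the same genesis path; set_genesis_param is a hypothetical name, not part of the stack:

set_genesis_param() {
  # apply a jq filter to genesis.json in place via a temp file
  jq "$1" $HOME/.laconicd/config/genesis.json > $HOME/.laconicd/config/tmp_genesis.json && \
    mv $HOME/.laconicd/config/tmp_genesis.json $HOME/.laconicd/config/genesis.json
}
# same effect as one of the lines above:
set_genesis_param '.app_state["registry"]["params"]["record_rent"]["denom"]="aphoton"'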

View File

@ -1,2 +0,0 @@
#!/bin/sh
laconicd keys show mykey | grep address | cut -d ' ' -f 3

View File

@ -1,2 +0,0 @@
#!/bin/sh
echo y | laconicd keys export mykey --unarmored-hex --unsafe

View File

@ -1,9 +0,0 @@
services:
cns:
restEndpoint: 'http://laconicd:1317'
gqlEndpoint: 'http://laconicd:9473/api'
userKey: REPLACE_WITH_MYKEY
bondId:
chainId: laconic_9000-1
gas: 250000
fees: 200000aphoton

View File

@ -1 +0,0 @@
(binary file, contents not shown)

View File

@ -1 +0,0 @@
(binary file, contents not shown)

View File

@ -1,71 +0,0 @@
{
"t01000": {
"ID": "t01000",
"Owner": "t3spusn5ia57qezc3fwpe3n2lhb4y4xt67xoflqbqy2muliparw2uktevletuv7gl4qakjpafgcl7jk2s2er3q",
"Worker": "t3spusn5ia57qezc3fwpe3n2lhb4y4xt67xoflqbqy2muliparw2uktevletuv7gl4qakjpafgcl7jk2s2er3q",
"PeerId": "12D3KooWG5q6pWJVdPBhDBv9AjWVbUh4xxTAZ7xvgZSjczWuD2Z9",
"MarketBalance": "0",
"PowerBalance": "0",
"SectorSize": 2048,
"Sectors": [
{
"CommR": {
"/": "bagboea4b5abcboxypcewlkmrat2myu4vthk3ii2pcomak7nhqmdbb6sxlolp2wdf"
},
"CommD": {
"/": "baga6ea4seaqn3jfixthmdgksv4vhfeuyvr6upw6tvaqbmzmsyxnzosm4pwgnmlq"
},
"SectorID": 0,
"Deal": {
"PieceCID": {
"/": "baga6ea4seaqn3jfixthmdgksv4vhfeuyvr6upw6tvaqbmzmsyxnzosm4pwgnmlq"
},
"PieceSize": 2048,
"VerifiedDeal": false,
"Client": "t3spusn5ia57qezc3fwpe3n2lhb4y4xt67xoflqbqy2muliparw2uktevletuv7gl4qakjpafgcl7jk2s2er3q",
"Provider": "t01000",
"Label": "0",
"StartEpoch": 0,
"EndEpoch": 9001,
"StoragePricePerEpoch": "0",
"ProviderCollateral": "0",
"ClientCollateral": "0"
},
"DealClientKey": {
"Type": "bls",
"PrivateKey": "tFvSRiSg2G3Ssgg0PSYy23XyjaIMXpsmdyG2B7UFLT4="
},
"ProofType": 5
},
{
"CommR": {
"/": "bagboea4b5abcb6krzypqcczhcnbeyjcqkeo6omfergm336o3kitugh3jgjog2yqq"
},
"CommD": {
"/": "baga6ea4seaqhondpb2373hjasjplxvbjzi5n5mm4fbbhjxp5ptnbq4cibapkeii"
},
"SectorID": 1,
"Deal": {
"PieceCID": {
"/": "baga6ea4seaqhondpb2373hjasjplxvbjzi5n5mm4fbbhjxp5ptnbq4cibapkeii"
},
"PieceSize": 2048,
"VerifiedDeal": false,
"Client": "t3spusn5ia57qezc3fwpe3n2lhb4y4xt67xoflqbqy2muliparw2uktevletuv7gl4qakjpafgcl7jk2s2er3q",
"Provider": "t01000",
"Label": "1",
"StartEpoch": 0,
"EndEpoch": 9001,
"StoragePricePerEpoch": "0",
"ProviderCollateral": "0",
"ClientCollateral": "0"
},
"DealClientKey": {
"Type": "bls",
"PrivateKey": "tFvSRiSg2G3Ssgg0PSYy23XyjaIMXpsmdyG2B7UFLT4="
},
"ProofType": 5
}
]
}
}

View File

@ -1 +0,0 @@
7b2254797065223a22626c73222c22507269766174654b6579223a227446765352695367324733537367673050535979323358796a61494d5870736d64794732423755464c54343d227d

View File

@ -1,11 +0,0 @@
{
"ID": "f355523e-69d0-4984-bd0e-9588487c6231",
"Weight": 0,
"CanSeal": false,
"CanStore": false,
"MaxStorage": 0,
"Groups": null,
"AllowTo": null,
"AllowTypes": null,
"DenyTypes": null
}

View File

@ -1,108 +0,0 @@
{
"NetworkVersion": 18,
"Accounts": [
{
"Type": "account",
"Balance": "50000000000000000000000000",
"Meta": {
"Owner": "t3spusn5ia57qezc3fwpe3n2lhb4y4xt67xoflqbqy2muliparw2uktevletuv7gl4qakjpafgcl7jk2s2er3q"
}
}
],
"Miners": [
{
"ID": "t01000",
"Owner": "t3spusn5ia57qezc3fwpe3n2lhb4y4xt67xoflqbqy2muliparw2uktevletuv7gl4qakjpafgcl7jk2s2er3q",
"Worker": "t3spusn5ia57qezc3fwpe3n2lhb4y4xt67xoflqbqy2muliparw2uktevletuv7gl4qakjpafgcl7jk2s2er3q",
"PeerId": "12D3KooWG5q6pWJVdPBhDBv9AjWVbUh4xxTAZ7xvgZSjczWuD2Z9",
"MarketBalance": "0",
"PowerBalance": "0",
"SectorSize": 2048,
"Sectors": [
{
"CommR": {
"/": "bagboea4b5abcboxypcewlkmrat2myu4vthk3ii2pcomak7nhqmdbb6sxlolp2wdf"
},
"CommD": {
"/": "baga6ea4seaqn3jfixthmdgksv4vhfeuyvr6upw6tvaqbmzmsyxnzosm4pwgnmlq"
},
"SectorID": 0,
"Deal": {
"PieceCID": {
"/": "baga6ea4seaqn3jfixthmdgksv4vhfeuyvr6upw6tvaqbmzmsyxnzosm4pwgnmlq"
},
"PieceSize": 2048,
"VerifiedDeal": false,
"Client": "t3spusn5ia57qezc3fwpe3n2lhb4y4xt67xoflqbqy2muliparw2uktevletuv7gl4qakjpafgcl7jk2s2er3q",
"Provider": "t01000",
"Label": "0",
"StartEpoch": 0,
"EndEpoch": 9001,
"StoragePricePerEpoch": "0",
"ProviderCollateral": "0",
"ClientCollateral": "0"
},
"DealClientKey": {
"Type": "bls",
"PrivateKey": "tFvSRiSg2G3Ssgg0PSYy23XyjaIMXpsmdyG2B7UFLT4="
},
"ProofType": 5
},
{
"CommR": {
"/": "bagboea4b5abcb6krzypqcczhcnbeyjcqkeo6omfergm336o3kitugh3jgjog2yqq"
},
"CommD": {
"/": "baga6ea4seaqhondpb2373hjasjplxvbjzi5n5mm4fbbhjxp5ptnbq4cibapkeii"
},
"SectorID": 1,
"Deal": {
"PieceCID": {
"/": "baga6ea4seaqhondpb2373hjasjplxvbjzi5n5mm4fbbhjxp5ptnbq4cibapkeii"
},
"PieceSize": 2048,
"VerifiedDeal": false,
"Client": "t3spusn5ia57qezc3fwpe3n2lhb4y4xt67xoflqbqy2muliparw2uktevletuv7gl4qakjpafgcl7jk2s2er3q",
"Provider": "t01000",
"Label": "1",
"StartEpoch": 0,
"EndEpoch": 9001,
"StoragePricePerEpoch": "0",
"ProviderCollateral": "0",
"ClientCollateral": "0"
},
"DealClientKey": {
"Type": "bls",
"PrivateKey": "tFvSRiSg2G3Ssgg0PSYy23XyjaIMXpsmdyG2B7UFLT4="
},
"ProofType": 5
}
]
}
],
"NetworkName": "localnet-6d52dae5-ff29-4bac-a45d-f84e6c07564c",
"VerifregRootKey": {
"Type": "multisig",
"Balance": "0",
"Meta": {
"Signers": [
"t1ceb34gnsc6qk5dt6n7xg6ycwzasjhbxm3iylkiy"
],
"Threshold": 1,
"VestingDuration": 0,
"VestingStart": 0
}
},
"RemainderAccount": {
"Type": "multisig",
"Balance": "0",
"Meta": {
"Signers": [
"t1ceb34gnsc6qk5dt6n7xg6ycwzasjhbxm3iylkiy"
],
"Threshold": 1,
"VestingDuration": 0,
"VestingStart": 0
}
}
}

View File

@ -1,5 +0,0 @@
LOTUS_PATH=~/.lotus-local-net
LOTUS_MINER_PATH=~/.lotus-miner-local-net
LOTUS_SKIP_GENESIS_CHECK=_yes_
CGO_CFLAGS_ALLOW="-D__BLST_PORTABLE__"
CGO_CFLAGS="-D__BLST_PORTABLE__"

View File

@ -1,39 +0,0 @@
#!/bin/bash
lotus --version
# # remove old bootnode peer info if present
# [ -f /root/.lotus-shared/miner.addr ] && rm /root/.lotus-shared/miner.addr
##TODO: generate genesis files inside container instead of bundling in config dir
##something like commands below should work, other scripts/compose will have to be updated to corresponding directories
# lotus fetch-params 2048
# lotus-seed pre-seal --sector-size 2KiB --num-sectors 2
# lotus-seed genesis new localnet.json
# lotus-seed genesis add-miner localnet.json ~/.genesis-sectors/pre-seal-t01000.json
# start daemon
nohup lotus daemon --genesis=/devgen.car --profile=bootstrapper --bootstrap=false > /var/log/lotus.log 2>&1 &
# Loop until the daemon is started
echo "Waiting for daemon to start..."
while ! grep -q "started ChainNotify channel" /var/log/lotus.log ; do
sleep 5
done
echo "Daemon started."
# publish bootnode peer info to shared volume
lotus net listen | awk 'NR==1{print}' > /root/.lotus-shared/miner.addr
# if miner not already initialized
if [ ! -d /root/.lotus-miner-local-net ]; then
# initialize miner
lotus wallet import --as-default ~/.genesis-sectors/pre-seal-t01000.key
lotus-miner init --genesis-miner --actor=t01000 --sector-size=2KiB --pre-sealed-sectors=~/.genesis-sectors --pre-sealed-metadata=~/.genesis-sectors/pre-seal-t01000.json --nosync
fi
# start miner
nohup lotus-miner run --nosync &
tail -f /dev/null

View File

@ -1,24 +0,0 @@
#!/bin/bash
lotus --version
##TODO: paths can use values from lotus-env.env file
# if not already initialized
if [ ! -f /root/.lotus-local-net/config.toml ]; then
# init node config
mkdir $HOME/.lotus-local-net
lotus config default > $HOME/.lotus-local-net/config.toml
# add bootstrap peer info if available
if [ -f /root/.lotus-shared/miner.addr ]; then
MINER_ADDR=\"$(cat /root/.lotus-shared/miner.addr)\"
# add bootstrap peer id to config file
sed -i "/^\[Libp2p\]/a \ \ BootstrapPeers = [$MINER_ADDR]" $HOME/.lotus-local-net/config.toml
else
echo "Bootstrap peer info not found, unable to configure. Manual peering will be required."
fi
fi
# start node
lotus daemon --genesis=/devgen.car

View File

@ -1,37 +0,0 @@
#!/bin/sh
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi
CERC_L1_RPC="${CERC_L1_RPC:-${DEFAULT_CERC_L1_RPC}}"
# Check existing config if it exists
if [ -f /app/jwt.txt ] && [ -f /app/rollup.json ]; then
echo "Found existing L2 config, cross-checking with L1 deployment config"
SOURCE_L1_CONF=$(cat /contracts-bedrock/deploy-config/getting-started.json)
EXP_L1_BLOCKHASH=$(echo "$SOURCE_L1_CONF" | jq -r '.l1StartingBlockTag')
EXP_BATCHER=$(echo "$SOURCE_L1_CONF" | jq -r '.batchSenderAddress')
GEN_L2_CONF=$(cat /app/rollup.json)
GEN_L1_BLOCKHASH=$(echo "$GEN_L2_CONF" | jq -r '.genesis.l1.hash')
GEN_BATCHER=$(echo "$GEN_L2_CONF" | jq -r '.genesis.system_config.batcherAddr')
if [ "$EXP_L1_BLOCKHASH" = "$GEN_L1_BLOCKHASH" ] && [ "$EXP_BATCHER" = "$GEN_BATCHER" ]; then
echo "Config cross-checked, exiting"
exit 0
fi
echo "Existing L2 config doesn't match the L1 deployment config, please clear L2 config volume before starting"
exit 1
fi
op-node genesis l2 \
--deploy-config /contracts-bedrock/deploy-config/getting-started.json \
--deployment-dir /contracts-bedrock/deployments/getting-started/ \
--outfile.l2 /app/genesis.json \
--outfile.rollup /app/rollup.json \
--l1-rpc $CERC_L1_RPC
openssl rand -hex 32 > /app/jwt.txt
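For a quick manual look at the same cross-check, the two configs compared above can be inspected directly; a sketch assuming the paths used in this script:

# expected values from the L1 deploy config
jq -r '.l1StartingBlockTag, .batchSenderAddress' /contracts-bedrock/deploy-config/getting-started.json
# generated values from the L2 rollup config; each pair must match
jq -r '.genesis.l1.hash, .genesis.system_config.batcherAddr' /app/rollup.json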

View File

@ -1,12 +0,0 @@
# Defaults
# L1 endpoint
DEFAULT_CERC_L1_CHAIN_ID=1212
DEFAULT_CERC_L1_RPC="http://fixturenet-eth-geth-1:8545"
DEFAULT_CERC_L1_HOST="fixturenet-eth-geth-1"
DEFAULT_CERC_L1_PORT=8545
# URL to get CSV with credentials for accounts on L1
# that are used to send balance to Optimism Proxy contract
# (enables them to do transactions on L2)
DEFAULT_CERC_L1_ACCOUNTS_CSV_URL="http://fixturenet-eth-bootnode-geth:9898/accounts.csv"

View File

@ -1,131 +0,0 @@
#!/bin/bash
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi
CERC_L1_CHAIN_ID="${CERC_L1_CHAIN_ID:-${DEFAULT_CERC_L1_CHAIN_ID}}"
CERC_L1_RPC="${CERC_L1_RPC:-${DEFAULT_CERC_L1_RPC}}"
CERC_L1_ACCOUNTS_CSV_URL="${CERC_L1_ACCOUNTS_CSV_URL:-${DEFAULT_CERC_L1_ACCOUNTS_CSV_URL}}"
echo "Using L1 RPC endpoint ${CERC_L1_RPC}"
IMPORT_1="import './verify-contract-deployment'"
IMPORT_2="import './rekey-json'"
IMPORT_3="import './send-balance'"
# Append mounted tasks to tasks/index.ts file if not present
if ! grep -Fxq "$IMPORT_1" tasks/index.ts; then
echo "$IMPORT_1" >> tasks/index.ts
echo "$IMPORT_2" >> tasks/index.ts
echo "$IMPORT_3" >> tasks/index.ts
fi
# Update the chainId in the hardhat config
sed -i "/getting-started/ {n; s/.*chainId.*/ chainId: $CERC_L1_CHAIN_ID,/}" hardhat.config.ts
# Exit if a deployment already exists (on restarts)
# Note: fixturenet-eth-geth currently starts fresh on a restart
if [ -d "deployments/getting-started" ]; then
echo "Deployment directory deployments/getting-started found, checking SystemDictator deployment"
# Read JSON file into variable
SYSTEM_DICTATOR_DETAILS=$(cat deployments/getting-started/SystemDictator.json)
# Parse JSON into variables
SYSTEM_DICTATOR_ADDRESS=$(echo "$SYSTEM_DICTATOR_DETAILS" | jq -r '.address')
SYSTEM_DICTATOR_TXHASH=$(echo "$SYSTEM_DICTATOR_DETAILS" | jq -r '.transactionHash')
if yarn hardhat verify-contract-deployment --contract "${SYSTEM_DICTATOR_ADDRESS}" --transaction-hash "${SYSTEM_DICTATOR_TXHASH}"; then
echo "Deployment verfication successful, exiting"
exit 0
else
echo "Deployment verfication failed, please clear L1 deployment volume before starting"
exit 1
fi
fi
# Generate the L2 account addresses
yarn hardhat rekey-json --output /l2-accounts/keys.json
# Read JSON file into variable
KEYS_JSON=$(cat /l2-accounts/keys.json)
# Parse JSON into variables
ADMIN_ADDRESS=$(echo "$KEYS_JSON" | jq -r '.Admin.address')
ADMIN_PRIV_KEY=$(echo "$KEYS_JSON" | jq -r '.Admin.privateKey')
PROPOSER_ADDRESS=$(echo "$KEYS_JSON" | jq -r '.Proposer.address')
BATCHER_ADDRESS=$(echo "$KEYS_JSON" | jq -r '.Batcher.address')
SEQUENCER_ADDRESS=$(echo "$KEYS_JSON" | jq -r '.Sequencer.address')
# Get the private keys of L1 accounts
if [ -n "$CERC_L1_ACCOUNTS_CSV_URL" ] && \
l1_accounts_response=$(curl -L --write-out '%{http_code}' --silent --output /dev/null "$CERC_L1_ACCOUNTS_CSV_URL") && \
[ "$l1_accounts_response" -eq 200 ];
then
echo "Fetching L1 account credentials using provided URL"
mkdir -p /geth-accounts
wget -O /geth-accounts/accounts.csv "$CERC_L1_ACCOUNTS_CSV_URL"
CERC_L1_ADDRESS=$(head -n 1 /geth-accounts/accounts.csv | cut -d ',' -f 2)
CERC_L1_PRIV_KEY=$(head -n 1 /geth-accounts/accounts.csv | cut -d ',' -f 3)
CERC_L1_ADDRESS_2=$(awk -F, 'NR==2{print $(NF-1)}' /geth-accounts/accounts.csv)
CERC_L1_PRIV_KEY_2=$(awk -F, 'NR==2{print $NF}' /geth-accounts/accounts.csv)
else
echo "Couldn't fetch L1 account credentials, using them from env"
fi
# Send balances to the above L2 addresses
yarn hardhat send-balance --to "${ADMIN_ADDRESS}" --amount 2 --private-key "${CERC_L1_PRIV_KEY}" --network getting-started
yarn hardhat send-balance --to "${PROPOSER_ADDRESS}" --amount 5 --private-key "${CERC_L1_PRIV_KEY}" --network getting-started
yarn hardhat send-balance --to "${BATCHER_ADDRESS}" --amount 1000 --private-key "${CERC_L1_PRIV_KEY}" --network getting-started
echo "Balances sent to L2 accounts"
# Select a finalized L1 block as the starting point for rollups
until FINALIZED_BLOCK=$(cast block finalized --rpc-url "$CERC_L1_RPC"); do
echo "Waiting for a finalized L1 block to exist, retrying after 10s"
sleep 10
done
L1_BLOCKNUMBER=$(echo "$FINALIZED_BLOCK" | awk '/number/{print $2}')
L1_BLOCKHASH=$(echo "$FINALIZED_BLOCK" | awk '/hash/{print $2}')
L1_BLOCKTIMESTAMP=$(echo "$FINALIZED_BLOCK" | awk '/timestamp/{print $2}')
echo "Selected L1 block ${L1_BLOCKNUMBER} as the starting block for roll ups"
# Update the deployment config
sed -i 's/"l2OutputOracleStartingTimestamp": TIMESTAMP/"l2OutputOracleStartingTimestamp": '"$L1_BLOCKTIMESTAMP"'/g' deploy-config/getting-started.json
jq --arg chainid "$CERC_L1_CHAIN_ID" '.l1ChainID = ($chainid | tonumber)' deploy-config/getting-started.json > tmp.json && mv tmp.json deploy-config/getting-started.json
node update-config.js deploy-config/getting-started.json "$ADMIN_ADDRESS" "$PROPOSER_ADDRESS" "$BATCHER_ADDRESS" "$SEQUENCER_ADDRESS" "$L1_BLOCKHASH"
echo "Updated the deployment config"
# Create a .env file
echo "L1_RPC=$CERC_L1_RPC" > .env
echo "PRIVATE_KEY_DEPLOYER=$ADMIN_PRIV_KEY" >> .env
echo "Deploying the L1 smart contracts, this will take a while..."
# Deploy the L1 smart contracts
yarn hardhat deploy --network getting-started --tags l1
echo "Deployed the L1 smart contracts"
# Read Proxy contract's JSON and get the address
PROXY_JSON=$(cat deployments/getting-started/Proxy__OVM_L1StandardBridge.json)
PROXY_ADDRESS=$(echo "$PROXY_JSON" | jq -r '.address')
# Send balance to the above Proxy contract in L1 for reflecting balance in L2
# First account
yarn hardhat send-balance --to "${PROXY_ADDRESS}" --amount 1 --private-key "${CERC_L1_PRIV_KEY}" --network getting-started
# Second account
yarn hardhat send-balance --to "${PROXY_ADDRESS}" --amount 1 --private-key "${CERC_L1_PRIV_KEY_2}" --network getting-started
echo "Balance sent to Proxy L2 contract"
echo "Use following accounts for transactions in L2:"
echo "${CERC_L1_ADDRESS}"
echo "${CERC_L1_ADDRESS_2}"
echo "Done"

View File

@ -1,36 +0,0 @@
const fs = require('fs')
// Get the command-line argument
const configFile = process.argv[2]
const adminAddress = process.argv[3]
const proposerAddress = process.argv[4]
const batcherAddress = process.argv[5]
const sequencerAddress = process.argv[6]
const blockHash = process.argv[7]
// Read the JSON file
const configData = fs.readFileSync(configFile)
const configObj = JSON.parse(configData)
// Update the finalSystemOwner property with the ADMIN_ADDRESS value
configObj.finalSystemOwner =
configObj.portalGuardian =
configObj.controller =
configObj.l2OutputOracleChallenger =
configObj.proxyAdminOwner =
configObj.baseFeeVaultRecipient =
configObj.l1FeeVaultRecipient =
configObj.sequencerFeeVaultRecipient =
configObj.governanceTokenOwner =
adminAddress
configObj.l2OutputOracleProposer = proposerAddress
configObj.batchSenderAddress = batcherAddress
configObj.p2pSequencerAddress = sequencerAddress
configObj.l1StartingBlockTag = blockHash
// Write the updated JSON object back to the file
fs.writeFileSync(configFile, JSON.stringify(configObj, null, 2))

View File

@ -1,39 +0,0 @@
#!/bin/sh
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi
CERC_L1_RPC="${CERC_L1_RPC:-${DEFAULT_CERC_L1_RPC}}"
# Get Batcher key from keys.json
BATCHER_KEY=$(jq -r '.Batcher.privateKey' /l2-accounts/keys.json | tr -d '"')
cleanup() {
echo "Signal received, cleaning up..."
kill ${batcher_pid}
wait
echo "Done"
}
trap 'cleanup' INT TERM
# Run op-batcher
op-batcher \
--l2-eth-rpc=http://op-geth:8545 \
--rollup-rpc=http://op-node:8547 \
--poll-interval=1s \
--sub-safety-margin=6 \
--num-confirmations=1 \
--safe-abort-nonce-too-low-count=3 \
--resubmission-timeout=30s \
--rpc.addr=0.0.0.0 \
--rpc.port=8548 \
--rpc.enable-admin \
--max-channel-duration=1 \
--l1-eth-rpc=$CERC_L1_RPC \
--private-key=$BATCHER_KEY \
&
batcher_pid=$!
wait $batcher_pid

View File

@ -1,90 +0,0 @@
#!/bin/sh
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi
# TODO: Add in container build or use other tool
echo "Installing jq"
apk update && apk add jq
# Get Sequencer key from keys.json
SEQUENCER_KEY=$(jq -r '.Sequencer.privateKey' /l2-accounts/keys.json | tr -d '"')
# Initialize op-geth if datadir/geth not found
if [ -f /op-node/jwt.txt ] && [ -d datadir/geth ]; then
echo "Found existing datadir, checking block signer key"
BLOCK_SIGNER_KEY=$(cat datadir/block-signer-key)
if [ "$SEQUENCER_KEY" = "$BLOCK_SIGNER_KEY" ]; then
echo "Sequencer and block signer keys match, skipping initialization"
else
echo "Sequencer and block signer keys don't match, please clear L2 geth data volume before starting"
exit 1
fi
else
echo "Initializing op-geth"
mkdir -p datadir
echo "pwd" > datadir/password
echo $SEQUENCER_KEY > datadir/block-signer-key
geth account import --datadir=datadir --password=datadir/password datadir/block-signer-key
while [ ! -f "/op-node/jwt.txt" ]
do
echo "Config files not created. Checking after 5 seconds."
sleep 5
done
echo "Config files created by op-node, proceeding with the initialization..."
geth init --datadir=datadir /op-node/genesis.json
echo "Node Initialized"
fi
SEQUENCER_ADDRESS=$(jq -r '.Sequencer.address' /l2-accounts/keys.json | tr -d '"')
echo "SEQUENCER_ADDRESS: ${SEQUENCER_ADDRESS}"
cleanup() {
echo "Signal received, cleaning up..."
kill ${geth_pid}
wait
echo "Done"
}
trap 'cleanup' INT TERM
# Run op-geth
geth \
--datadir ./datadir \
--http \
--http.corsdomain="*" \
--http.vhosts="*" \
--http.addr=0.0.0.0 \
--http.api=web3,debug,eth,txpool,net,engine \
--ws \
--ws.addr=0.0.0.0 \
--ws.port=8546 \
--ws.origins="*" \
--ws.api=debug,eth,txpool,net,engine \
--syncmode=full \
--gcmode=archive \
--nodiscover \
--maxpeers=0 \
--networkid=42069 \
--authrpc.vhosts="*" \
--authrpc.addr=0.0.0.0 \
--authrpc.port=8551 \
--authrpc.jwtsecret=/op-node/jwt.txt \
--rollup.disabletxpoolgossip=true \
--password=./datadir/password \
--allow-insecure-unlock \
--mine \
--miner.etherbase=$SEQUENCER_ADDRESS \
--unlock=$SEQUENCER_ADDRESS \
&
geth_pid=$!
wait $geth_pid

View File

@ -1,26 +0,0 @@
#!/bin/sh
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi
CERC_L1_RPC="${CERC_L1_RPC:-${DEFAULT_CERC_L1_RPC}}"
# Get Sequencer key from keys.json
SEQUENCER_KEY=$(jq -r '.Sequencer.privateKey' /l2-accounts/keys.json | tr -d '"')
# Run op-node
op-node \
--l2=http://op-geth:8551 \
--l2.jwt-secret=/op-node-data/jwt.txt \
--sequencer.enabled \
--sequencer.l1-confs=3 \
--verifier.l1-confs=3 \
--rollup.config=/op-node-data/rollup.json \
--rpc.addr=0.0.0.0 \
--rpc.port=8547 \
--p2p.disable \
--rpc.enable-admin \
--p2p.sequencer.key=$SEQUENCER_KEY \
--l1=$CERC_L1_RPC \
--l1.rpckind=any

View File

@ -1,36 +0,0 @@
#!/bin/sh
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi
CERC_L1_RPC="${CERC_L1_RPC:-${DEFAULT_CERC_L1_RPC}}"
# Read the L2OutputOracle contract address from the deployment
L2OO_DEPLOYMENT=$(cat /contracts-bedrock/deployments/getting-started/L2OutputOracle.json)
L2OO_ADDR=$(echo "$L2OO_DEPLOYMENT" | jq -r '.address')
# Get Proposer key from keys.json
PROPOSER_KEY=$(jq -r '.Proposer.privateKey' /l2-accounts/keys.json | tr -d '"')
cleanup() {
echo "Signal received, cleaning up..."
kill ${proposer_pid}
wait
echo "Done"
}
trap 'cleanup' INT TERM
# Run op-proposer
op-proposer \
--poll-interval 12s \
--rpc.port 8560 \
--rollup-rpc http://op-node:8547 \
--l2oo-address $L2OO_ADDR \
--private-key $PROPOSER_KEY \
--l1-eth-rpc $CERC_L1_RPC \
&
proposer_pid=$!
wait $proposer_pid

View File

@ -1 +0,0 @@
See: https://docs.plugeth.org/

View File

@ -1,18 +0,0 @@
[
{
"id": "0001",
"url": "http://127.0.0.1:8081/",
"basic_auth": {
"username": "",
"password": ""
}
},
{
"id": "0021",
"url": "http://fixturenet-eth-geth-1:8545/",
"basic_auth": {
"username": "",
"password": ""
}
}
]

View File

@ -1,65 +0,0 @@
#!/bin/bash
# TODO: we should have a mechanism to bundle it inside the container rather than link from here
# at deploy time.
CHAINID="pocketlocal-1"
MONIKER="localtestnet"
SERVICE_URL="http://127.0.0.1:8081"
PASSWORD="mypassword" # wallet password, required by cli
# check if jq is installed; install if necessary
# command -v jq > /dev/null 2>&1 || { echo >&2 "jq not installed. More info: https://stedolan.github.io/jq/download/"; exit 1; }
if ! command -v jq > /dev/null 2>&1; then
echo "jq not installed, downloading..."
mkdir -p /home/app/bin
wget -O /home/app/bin/jq https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64
chmod +x /home/app/bin/jq
export PATH=$PATH:/home/app/bin
fi
# remove existing daemon and client
rm -rf ~/.pocket*
# create a wallet with password "mypassword" and save the address for later
address=$(pocket accounts create --pwd $PASSWORD | awk '/Address:/ {print $2}')
# set this address as the validator address for the node
pocket accounts set-validator $address --pwd $PASSWORD
# save the public key for later
pubkey=$(pocket accounts show $address | awk '/Public Key:/ {print $3}')
# set node's moniker
echo $(pocket util print-configs) | jq '.tendermint_config.Moniker = "'"$MONIKER"'"' | jq . > $HOME/.pocket/config/config.json
# pocket mainnet has block time of 15 minutes, set closer to 1 minute instead
cat $HOME/.pocket/config/config.json | jq '.tendermint_config.Consensus.TimeoutPropose = 8000000000' | jq . > $HOME/.pocket/config/tmp_config.json && mv $HOME/.pocket/config/tmp_config.json $HOME/.pocket/config/config.json
cat $HOME/.pocket/config/config.json | jq '.tendermint_config.Consensus.TimeoutProposeDelta = 600000000' | jq . > $HOME/.pocket/config/tmp_config.json && mv $HOME/.pocket/config/tmp_config.json $HOME/.pocket/config/config.json
cat $HOME/.pocket/config/config.json | jq '.tendermint_config.Consensus.TimeoutPrevote = 4000000000' | jq . > $HOME/.pocket/config/tmp_config.json && mv $HOME/.pocket/config/tmp_config.json $HOME/.pocket/config/config.json
cat $HOME/.pocket/config/config.json | jq '.tendermint_config.Consensus.TimeoutPrevoteDelta = 600000000' | jq . > $HOME/.pocket/config/tmp_config.json && mv $HOME/.pocket/config/tmp_config.json $HOME/.pocket/config/config.json
cat $HOME/.pocket/config/config.json | jq '.tendermint_config.Consensus.TimeoutPrecommit = 4000000000' | jq . > $HOME/.pocket/config/tmp_config.json && mv $HOME/.pocket/config/tmp_config.json $HOME/.pocket/config/config.json
cat $HOME/.pocket/config/config.json | jq '.tendermint_config.Consensus.TimeoutPrecommitDelta = 600000000' | jq . > $HOME/.pocket/config/tmp_config.json && mv $HOME/.pocket/config/tmp_config.json $HOME/.pocket/config/config.json
cat $HOME/.pocket/config/config.json | jq '.tendermint_config.Consensus.TimeoutCommit = 52000000000' | jq . > $HOME/.pocket/config/tmp_config.json && mv $HOME/.pocket/config/tmp_config.json $HOME/.pocket/config/config.json
cat $HOME/.pocket/config/config.json | jq '.tendermint_config.Consensus.CreateEmptyBlocksInterval = 60000000000' | jq . > $HOME/.pocket/config/tmp_config.json && mv $HOME/.pocket/config/tmp_config.json $HOME/.pocket/config/config.json
cat $HOME/.pocket/config/config.json | jq '.tendermint_config.Consensus.PeerGossipSleepDuration = 2000000000' | jq . > $HOME/.pocket/config/tmp_config.json && mv $HOME/.pocket/config/tmp_config.json $HOME/.pocket/config/config.json
cat $HOME/.pocket/config/config.json | jq '.tendermint_config.Consensus.PeerQueryMaj23SleepDuration = 1200000000' | jq . > $HOME/.pocket/config/tmp_config.json && mv $HOME/.pocket/config/tmp_config.json $HOME/.pocket/config/config.json
# include genesis.json and chains.json
cp $HOME/pocket-configs/genesis.json $HOME/.pocket/config/genesis.json
cp $HOME/pocket-configs/chains.json $HOME/.pocket/config/chains.json
# set chain-id and add node to genesis.json as a validator
cat $HOME/.pocket/config/genesis.json | jq '.chain_id="'"$CHAINID"'"' > $HOME/.pocket/config/tmp_genesis.json && mv $HOME/.pocket/config/tmp_genesis.json $HOME/.pocket/config/genesis.json
cat $HOME/.pocket/config/genesis.json | jq '.app_state.auth.accounts[0].value.address="'"$address"'"' > $HOME/.pocket/config/tmp_genesis.json && mv $HOME/.pocket/config/tmp_genesis.json $HOME/.pocket/config/genesis.json
cat $HOME/.pocket/config/genesis.json | jq '.app_state.auth.accounts[0].value.public_key.value="'"$pubkey"'"' > $HOME/.pocket/config/tmp_genesis.json && mv $HOME/.pocket/config/tmp_genesis.json $HOME/.pocket/config/genesis.json
cat $HOME/.pocket/config/genesis.json | jq '.app_state.pos.validators[0].address="'"$address"'"' > $HOME/.pocket/config/tmp_genesis.json && mv $HOME/.pocket/config/tmp_genesis.json $HOME/.pocket/config/genesis.json
cat $HOME/.pocket/config/genesis.json | jq '.app_state.pos.validators[0].public_key="'"$pubkey"'"' > $HOME/.pocket/config/tmp_genesis.json && mv $HOME/.pocket/config/tmp_genesis.json $HOME/.pocket/config/genesis.json
cat $HOME/.pocket/config/genesis.json | jq '.app_state.pos.validators[0].service_url="'"$SERVICE_URL"'"' > $HOME/.pocket/config/tmp_genesis.json && mv $HOME/.pocket/config/tmp_genesis.json $HOME/.pocket/config/genesis.json
# if [[ $1 == "pending" ]]; then
# echo "pending mode is on, please wait for the first block committed."
# fi
# Start the node
pocket start --simulateRelay
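The consensus-timeout edits above rewrite config.json once per field; since jq filters can be chained with |, they could be collapsed into a single pass. A sketch using two of the values set above:

jq '.tendermint_config.Consensus.TimeoutPropose = 8000000000
    | .tendermint_config.Consensus.TimeoutCommit = 52000000000' \
  $HOME/.pocket/config/config.json > $HOME/.pocket/config/tmp_config.json && \
  mv $HOME/.pocket/config/tmp_config.json $HOME/.pocket/config/config.json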

View File

@ -1,272 +0,0 @@
{
"genesis_time": "2020-07-28T15:00:00.000000Z",
"chain_id": "testnet",
"consensus_params": {
"block": {
"max_bytes": "4000000",
"max_gas": "-1",
"time_iota_ms": "1"
},
"evidence": {
"max_age": "120000000000"
},
"validator": {
"pub_key_types": [
"ed25519"
]
}
},
"app_hash": "",
"app_state": {
"application": {
"params": {
"unstaking_time": "1814000000000000",
"max_applications": "9223372036854775807",
"app_stake_minimum": "1000000",
"base_relays_per_pokt": "167",
"stability_adjustment": "0",
"participation_rate_on": false,
"maximum_chains": "15"
},
"applications": [],
"exported": false
},
"auth": {
"params": {
"max_memo_characters": "75",
"tx_sig_limit": "8",
"fee_multipliers": {
"fee_multiplier": [],
"default": "1"
}
},
"accounts": [
{
"type": "posmint/Account",
"value": {
"address": "!validator-address",
"coins": [
{
"amount": "0",
"denom": "upokt"
}
],
"public_key": {
"type": "crypto/ed25519_public_key",
"value": "!validator-pubkey"
}
}
}
],
"supply": []
},
"gov": {
"params": {
"acl": [
{
"acl_key": "application/ApplicationStakeMinimum",
"address": "a83172b67b5ffbfcb8acb95acc0fd0466a9d4bc4"
},
{
"acl_key": "application/AppUnstakingTime",
"address": "a83172b67b5ffbfcb8acb95acc0fd0466a9d4bc4"
},
{
"acl_key": "application/BaseRelaysPerPOKT",
"address": "a83172b67b5ffbfcb8acb95acc0fd0466a9d4bc4"
},
{
"acl_key": "application/MaxApplications",
"address": "a83172b67b5ffbfcb8acb95acc0fd0466a9d4bc4"
},
{
"acl_key": "application/MaximumChains",
"address": "a83172b67b5ffbfcb8acb95acc0fd0466a9d4bc4"
},
{
"acl_key": "application/ParticipationRateOn",
"address": "a83172b67b5ffbfcb8acb95acc0fd0466a9d4bc4"
},
{
"acl_key": "application/StabilityAdjustment",
"address": "a83172b67b5ffbfcb8acb95acc0fd0466a9d4bc4"
},
{
"acl_key": "auth/MaxMemoCharacters",
"address": "a83172b67b5ffbfcb8acb95acc0fd0466a9d4bc4"
},
{
"acl_key": "auth/TxSigLimit",
"address": "a83172b67b5ffbfcb8acb95acc0fd0466a9d4bc4"
},
{
"acl_key": "gov/acl",
"address": "a83172b67b5ffbfcb8acb95acc0fd0466a9d4bc4"
},
{
"acl_key": "gov/daoOwner",
"address": "a83172b67b5ffbfcb8acb95acc0fd0466a9d4bc4"
},
{
"acl_key": "gov/upgrade",
"address": "a83172b67b5ffbfcb8acb95acc0fd0466a9d4bc4"
},
{
"acl_key": "pocketcore/ClaimExpiration",
"address": "a83172b67b5ffbfcb8acb95acc0fd0466a9d4bc4"
},
{
"acl_key": "auth/FeeMultipliers",
"address": "a83172b67b5ffbfcb8acb95acc0fd0466a9d4bc4"
},
{
"acl_key": "pocketcore/ReplayAttackBurnMultiplier",
"address": "a83172b67b5ffbfcb8acb95acc0fd0466a9d4bc4"
},
{
"acl_key": "pos/ProposerPercentage",
"address": "a83172b67b5ffbfcb8acb95acc0fd0466a9d4bc4"
},
{
"acl_key": "pocketcore/ClaimSubmissionWindow",
"address": "a83172b67b5ffbfcb8acb95acc0fd0466a9d4bc4"
},
{
"acl_key": "pocketcore/MinimumNumberOfProofs",
"address": "a83172b67b5ffbfcb8acb95acc0fd0466a9d4bc4"
},
{
"acl_key": "pocketcore/SessionNodeCount",
"address": "a83172b67b5ffbfcb8acb95acc0fd0466a9d4bc4"
},
{
"acl_key": "pocketcore/SupportedBlockchains",
"address": "a83172b67b5ffbfcb8acb95acc0fd0466a9d4bc4"
},
{
"acl_key": "pos/BlocksPerSession",
"address": "a83172b67b5ffbfcb8acb95acc0fd0466a9d4bc4"
},
{
"acl_key": "pos/DAOAllocation",
"address": "a83172b67b5ffbfcb8acb95acc0fd0466a9d4bc4"
},
{
"acl_key": "pos/DowntimeJailDuration",
"address": "a83172b67b5ffbfcb8acb95acc0fd0466a9d4bc4"
},
{
"acl_key": "pos/MaxEvidenceAge",
"address": "a83172b67b5ffbfcb8acb95acc0fd0466a9d4bc4"
},
{
"acl_key": "pos/MaximumChains",
"address": "a83172b67b5ffbfcb8acb95acc0fd0466a9d4bc4"
},
{
"acl_key": "pos/MaxJailedBlocks",
"address": "a83172b67b5ffbfcb8acb95acc0fd0466a9d4bc4"
},
{
"acl_key": "pos/MaxValidators",
"address": "a83172b67b5ffbfcb8acb95acc0fd0466a9d4bc4"
},
{
"acl_key": "pos/MinSignedPerWindow",
"address": "a83172b67b5ffbfcb8acb95acc0fd0466a9d4bc4"
},
{
"acl_key": "pos/RelaysToTokensMultiplier",
"address": "a83172b67b5ffbfcb8acb95acc0fd0466a9d4bc4"
},
{
"acl_key": "pos/SignedBlocksWindow",
"address": "a83172b67b5ffbfcb8acb95acc0fd0466a9d4bc4"
},
{
"acl_key": "pos/SlashFractionDoubleSign",
"address": "a83172b67b5ffbfcb8acb95acc0fd0466a9d4bc4"
},
{
"acl_key": "pos/SlashFractionDowntime",
"address": "a83172b67b5ffbfcb8acb95acc0fd0466a9d4bc4"
},
{
"acl_key": "pos/StakeDenom",
"address": "a83172b67b5ffbfcb8acb95acc0fd0466a9d4bc4"
},
{
"acl_key": "pos/StakeMinimum",
"address": "a83172b67b5ffbfcb8acb95acc0fd0466a9d4bc4"
},
{
"acl_key": "pos/UnstakingTime",
"address": "a83172b67b5ffbfcb8acb95acc0fd0466a9d4bc4"
}
],
"dao_owner": "a83172b67b5ffbfcb8acb95acc0fd0466a9d4bc4",
"upgrade": {
"Height": "0",
"Version": "0"
}
},
"DAO_Tokens": "50000000000000"
},
"pos": {
"params": {
"relays_to_tokens_multiplier": "10000",
"unstaking_time": "1814000000000000",
"max_validators": "5000",
"stake_denom": "upokt",
"stake_minimum": "15000000000",
"session_block_frequency": "4",
"dao_allocation": "10",
"proposer_allocation": "1",
"maximum_chains": "15",
"max_jailed_blocks": "37960",
"max_evidence_age": "120000000000",
"signed_blocks_window": "10",
"min_signed_per_window": "0.60",
"downtime_jail_duration": "3600000000000",
"slash_fraction_double_sign": "0.05",
"slash_fraction_downtime": "0.000001"
},
"prevState_total_power": "0",
"prevState_validator_powers": null,
"validators": [
{
"address": "!validator-address",
"public_key": "!validator-pubkey",
"jailed": false,
"status": 2,
"tokens": "5000000000000",
"service_url": "!validator-url",
"chains": [
"0001",
"0021"
],
"unstaking_time": "2021-05-15T00:00:00Z"
}
],
"exported": false,
"signing_infos": {},
"missed_blocks": {},
"previous_proposer": ""
},
"pocketcore": {
"params": {
"session_node_count": "5",
"proof_waiting_period": "3",
"supported_blockchains": [
"0001",
"0021"
],
"claim_expiration": "120",
"replay_attack_burn_multiplier": "3",
"minimum_number_of_proofs": "10"
},
"receipts": null,
"claims": null
}
}
}

View File

@ -1,2 +0,0 @@
[profile.default]
eth-rpc-url = "http://fixturenet-eth-geth-1:8545"

View File

@ -1,182 +0,0 @@
#!/usr/bin/env bash
# Use this script to test if a given TCP host/port are available
WAITFORIT_cmdname=${0##*/}
echoerr() { if [[ $WAITFORIT_QUIET -ne 1 ]]; then echo "$@" 1>&2; fi }
usage()
{
cat << USAGE >&2
Usage:
$WAITFORIT_cmdname host:port [-s] [-t timeout] [-- command args]
-h HOST | --host=HOST Host or IP under test
-p PORT | --port=PORT TCP port under test
Alternatively, you specify the host and port as host:port
-s | --strict Only execute subcommand if the test succeeds
-q | --quiet Don't output any status messages
-t TIMEOUT | --timeout=TIMEOUT
Timeout in seconds, zero for no timeout
-- COMMAND ARGS Execute command with args after the test finishes
USAGE
exit 1
}
wait_for()
{
if [[ $WAITFORIT_TIMEOUT -gt 0 ]]; then
echoerr "$WAITFORIT_cmdname: waiting $WAITFORIT_TIMEOUT seconds for $WAITFORIT_HOST:$WAITFORIT_PORT"
else
echoerr "$WAITFORIT_cmdname: waiting for $WAITFORIT_HOST:$WAITFORIT_PORT without a timeout"
fi
WAITFORIT_start_ts=$(date +%s)
while :
do
if [[ $WAITFORIT_ISBUSY -eq 1 ]]; then
nc -z $WAITFORIT_HOST $WAITFORIT_PORT
WAITFORIT_result=$?
else
(echo -n > /dev/tcp/$WAITFORIT_HOST/$WAITFORIT_PORT) >/dev/null 2>&1
WAITFORIT_result=$?
fi
if [[ $WAITFORIT_result -eq 0 ]]; then
WAITFORIT_end_ts=$(date +%s)
echoerr "$WAITFORIT_cmdname: $WAITFORIT_HOST:$WAITFORIT_PORT is available after $((WAITFORIT_end_ts - WAITFORIT_start_ts)) seconds"
break
fi
sleep 1
done
return $WAITFORIT_result
}
wait_for_wrapper()
{
# In order to support SIGINT during timeout: http://unix.stackexchange.com/a/57692
if [[ $WAITFORIT_QUIET -eq 1 ]]; then
timeout $WAITFORIT_BUSYTIMEFLAG $WAITFORIT_TIMEOUT $0 --quiet --child --host=$WAITFORIT_HOST --port=$WAITFORIT_PORT --timeout=$WAITFORIT_TIMEOUT &
else
timeout $WAITFORIT_BUSYTIMEFLAG $WAITFORIT_TIMEOUT $0 --child --host=$WAITFORIT_HOST --port=$WAITFORIT_PORT --timeout=$WAITFORIT_TIMEOUT &
fi
WAITFORIT_PID=$!
trap "kill -INT -$WAITFORIT_PID" INT
wait $WAITFORIT_PID
WAITFORIT_RESULT=$?
if [[ $WAITFORIT_RESULT -ne 0 ]]; then
echoerr "$WAITFORIT_cmdname: timeout occurred after waiting $WAITFORIT_TIMEOUT seconds for $WAITFORIT_HOST:$WAITFORIT_PORT"
fi
return $WAITFORIT_RESULT
}
# process arguments
while [[ $# -gt 0 ]]
do
case "$1" in
*:* )
WAITFORIT_hostport=(${1//:/ })
WAITFORIT_HOST=${WAITFORIT_hostport[0]}
WAITFORIT_PORT=${WAITFORIT_hostport[1]}
shift 1
;;
--child)
WAITFORIT_CHILD=1
shift 1
;;
-q | --quiet)
WAITFORIT_QUIET=1
shift 1
;;
-s | --strict)
WAITFORIT_STRICT=1
shift 1
;;
-h)
WAITFORIT_HOST="$2"
if [[ $WAITFORIT_HOST == "" ]]; then break; fi
shift 2
;;
--host=*)
WAITFORIT_HOST="${1#*=}"
shift 1
;;
-p)
WAITFORIT_PORT="$2"
if [[ $WAITFORIT_PORT == "" ]]; then break; fi
shift 2
;;
--port=*)
WAITFORIT_PORT="${1#*=}"
shift 1
;;
-t)
WAITFORIT_TIMEOUT="$2"
if [[ $WAITFORIT_TIMEOUT == "" ]]; then break; fi
shift 2
;;
--timeout=*)
WAITFORIT_TIMEOUT="${1#*=}"
shift 1
;;
--)
shift
WAITFORIT_CLI=("$@")
break
;;
--help)
usage
;;
*)
echoerr "Unknown argument: $1"
usage
;;
esac
done
if [[ "$WAITFORIT_HOST" == "" || "$WAITFORIT_PORT" == "" ]]; then
echoerr "Error: you need to provide a host and port to test."
usage
fi
WAITFORIT_TIMEOUT=${WAITFORIT_TIMEOUT:-15}
WAITFORIT_STRICT=${WAITFORIT_STRICT:-0}
WAITFORIT_CHILD=${WAITFORIT_CHILD:-0}
WAITFORIT_QUIET=${WAITFORIT_QUIET:-0}
# Check to see if timeout is from busybox?
WAITFORIT_TIMEOUT_PATH=$(type -p timeout)
WAITFORIT_TIMEOUT_PATH=$(realpath $WAITFORIT_TIMEOUT_PATH 2>/dev/null || readlink -f $WAITFORIT_TIMEOUT_PATH)
WAITFORIT_BUSYTIMEFLAG=""
if [[ $WAITFORIT_TIMEOUT_PATH =~ "busybox" ]]; then
WAITFORIT_ISBUSY=1
# Check if busybox timeout uses -t flag
# (recent Alpine versions don't support -t anymore)
if timeout &>/dev/stdout | grep -q -e '-t '; then
WAITFORIT_BUSYTIMEFLAG="-t"
fi
else
WAITFORIT_ISBUSY=0
fi
if [[ $WAITFORIT_CHILD -gt 0 ]]; then
wait_for
WAITFORIT_RESULT=$?
exit $WAITFORIT_RESULT
else
if [[ $WAITFORIT_TIMEOUT -gt 0 ]]; then
wait_for_wrapper
WAITFORIT_RESULT=$?
else
wait_for
WAITFORIT_RESULT=$?
fi
fi
if [[ $WAITFORIT_CLI != "" ]]; then
if [[ $WAITFORIT_RESULT -ne 0 && $WAITFORIT_STRICT -eq 1 ]]; then
echoerr "$WAITFORIT_cmdname: strict mode, refusing to execute subprocess"
exit $WAITFORIT_RESULT
fi
exec "${WAITFORIT_CLI[@]}"
else
exit $WAITFORIT_RESULT
fi
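For reference, typical invocations of the script above (host, port, and command are illustrative):

# block until geth's RPC port answers, with no timeout, then run the command
./wait-for-it.sh fixturenet-eth-geth-1:8545 -s -t 0 -- echo "geth RPC is up"
# equivalent long-option form
./wait-for-it.sh --host=fixturenet-eth-geth-1 --port=8545 --strict --timeout=0 -- echo "geth RPC is up"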

View File

@ -1,34 +0,0 @@
[
{
"endpoint": "http://azimuth-watcher-server:3001/graphql",
"prefix": "azimuth"
},
{
"endpoint": "http://censures-watcher-server:3002/graphql",
"prefix": "censures"
},
{
"endpoint": "http://claims-watcher-server:3003/graphql",
"prefix": "claims"
},
{
"endpoint": "http://conditional-star-release-watcher-server:3004/graphql",
"prefix": "conditionalStarRelease"
},
{
"endpoint": "http://delegated-sending-watcher-server:3005/graphql",
"prefix": "delegatedSending"
},
{
"endpoint": "http://ecliptic-watcher-server:3006/graphql",
"prefix": "ecliptic"
},
{
"endpoint": "http://linear-star-release-watcher-server:3007/graphql",
"prefix": "linearStarRelease"
},
{
"endpoint": "http://polls-watcher-server:3008/graphql",
"prefix": "polls"
}
]

View File

@ -1,31 +0,0 @@
const fs = require('fs');
const tomlJS = require('toml-js');
const toml = require('toml');
const { merge } = require('lodash')
const main = () => {
const overrideConfigString = fs.readFileSync('environments/watcher-config.toml', 'utf-8');
const configString = fs.readFileSync('environments/local.toml', 'utf-8');
const overrideConfig = toml.parse(overrideConfigString)
const config = toml.parse(configString)
// Merge configs
const updatedConfig = merge(config, overrideConfig);
// Form dbConnectionString for jobQueue DB
const parts = config.jobQueue.dbConnectionString.split("://");
const credsAndDB = parts[1].split("@");
const creds = credsAndDB[0].split(":");
creds[0] = overrideConfig.database.username;
creds[1] = overrideConfig.database.password;
credsAndDB[0] = creds.join(":");
const dbName = credsAndDB[1].split("/")[1]
credsAndDB[1] = [overrideConfig.database.host, dbName].join("/");
parts[1] = credsAndDB.join("@");
updatedConfig.jobQueue.dbConnectionString = parts.join("://");
fs.writeFileSync('environments/local.toml', tomlJS.dump(updatedConfig), 'utf-8');
}
main();
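As a hypothetical example of the rewrite above: if local.toml carries jobQueue.dbConnectionString = "postgres://vdbm:password@old-host/watcher-job-queue" and watcher-config.toml overrides database.username, database.password, and database.host with "user", "pass", and "new-host", the merged config ends up with "postgres://user:pass@new-host/watcher-job-queue": the credentials and host are replaced while the database name is preserved.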

View File

@ -1,27 +0,0 @@
#!/bin/sh
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi
CERC_IPLD_ETH_RPC="${CERC_IPLD_ETH_RPC:-${DEFAULT_CERC_IPLD_ETH_RPC}}"
CERC_IPLD_ETH_GQL="${CERC_IPLD_ETH_GQL:-${DEFAULT_CERC_IPLD_ETH_GQL}}"
echo "Using IPLD ETH RPC endpoint ${CERC_IPLD_ETH_RPC}"
echo "Using IPLD GQL endpoint ${CERC_IPLD_ETH_GQL}"
# Replace env variables in template TOML file
# Read in the config template TOML file and modify it
WATCHER_CONFIG_TEMPLATE=$(cat environments/watcher-config-template.toml)
WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \
sed -E "s|REPLACE_WITH_CERC_IPLD_ETH_RPC|${CERC_IPLD_ETH_RPC}|g; \
s|REPLACE_WITH_CERC_IPLD_ETH_GQL|${CERC_IPLD_ETH_GQL}| ")
# Write the modified content to a new file
echo "$WATCHER_CONFIG" > environments/watcher-config.toml
# Merge SO watcher config with existing config file
node merge-toml.js
echo 'yarn server'
yarn server
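The sed-over-template step above is a plain placeholder substitution. A runnable sketch with a hypothetical endpoint value:

printf 'rpcProviderEndpoint = "REPLACE_WITH_CERC_IPLD_ETH_RPC"\n' | \
  sed -E "s|REPLACE_WITH_CERC_IPLD_ETH_RPC|http://ipld-eth-server:8081|"
# prints: rpcProviderEndpoint = "http://ipld-eth-server:8081"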

View File

@ -1,14 +0,0 @@
[server]
host = "0.0.0.0"
maxSimultaneousRequests = -1
[database]
host = "watcher-db"
port = 5432
username = "vdbm"
password = "password"
[upstream]
[upstream.ethServer]
gqlApiEndpoint = "REPLACE_WITH_CERC_IPLD_ETH_GQL"
rpcProviderEndpoint = "REPLACE_WITH_CERC_IPLD_ETH_RPC"

View File

@ -1,5 +0,0 @@
# Defaults
# ipld-eth-server endpoints
DEFAULT_CERC_IPLD_ETH_RPC=
DEFAULT_CERC_IPLD_ETH_GQL=

View File

@ -1,28 +0,0 @@
#!/bin/bash
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi
CERC_SNAPSHOT_GQL_ENDPOINT="${CERC_SNAPSHOT_GQL_ENDPOINT:-${DEFAULT_CERC_SNAPSHOT_GQL_ENDPOINT}}"
CERC_SNAPSHOT_BLOCKHASH="${CERC_SNAPSHOT_BLOCKHASH:-${DEFAULT_CERC_SNAPSHOT_BLOCKHASH}}"
CHECKPOINT_FILE_PATH="./state_checkpoint/state-gql-${CERC_SNAPSHOT_BLOCKHASH}"
if [ -f "${CHECKPOINT_FILE_PATH}" ]; then
# Skip checkpoint creation if the file already exists
echo "File at ${CHECKPOINT_FILE_PATH} already exists, skipping checkpoint creation..."
else
# Create a checkpoint using GQL endpoint
echo "Creating a state checkpoint using GQL endpoint..."
yarn create-state-gql \
--snapshot-block-hash "${CERC_SNAPSHOT_BLOCKHASH}" \
--gql-endpoint "${CERC_SNAPSHOT_GQL_ENDPOINT}" \
--output "${CHECKPOINT_FILE_PATH}"
fi
echo "Initializing watcher using a state snapshot..."
# Import the state checkpoint
# (skips if snapshot block is already indexed)
yarn import-state --import-file "${CHECKPOINT_FILE_PATH}"

View File

@ -1,23 +0,0 @@
#!/bin/bash
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi
CERC_IPLD_ETH_RPC="${CERC_IPLD_ETH_RPC:-${DEFAULT_CERC_IPLD_ETH_RPC}}"
CERC_IPLD_ETH_GQL="${CERC_IPLD_ETH_GQL:-${DEFAULT_CERC_IPLD_ETH_GQL}}"
echo "Using ETH server RPC endpoint ${CERC_IPLD_ETH_RPC}"
echo "Using ETH server GQL endpoint ${CERC_IPLD_ETH_GQL}"
# Read in the config template TOML file and modify it
WATCHER_CONFIG_TEMPLATE=$(cat environments/watcher-config-template.toml)
WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \
sed -E "s|REPLACE_WITH_CERC_IPLD_ETH_GQL|${CERC_IPLD_ETH_GQL}|g; \
s|REPLACE_WITH_CERC_IPLD_ETH_RPC|${CERC_IPLD_ETH_RPC}| ")
# Write the modified content to a new file
echo "$WATCHER_CONFIG" > environments/local.toml
echo "Running job-runner"
DEBUG=vulcanize:* exec node --enable-source-maps dist/job-runner.js

View File

@ -1,32 +0,0 @@
#!/bin/bash
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi
CERC_IPLD_ETH_RPC="${CERC_IPLD_ETH_RPC:-${DEFAULT_CERC_IPLD_ETH_RPC}}"
CERC_IPLD_ETH_GQL="${CERC_IPLD_ETH_GQL:-${DEFAULT_CERC_IPLD_ETH_GQL}}"
CERC_USE_STATE_SNAPSHOT="${CERC_USE_STATE_SNAPSHOT:-${DEFAULT_CERC_USE_STATE_SNAPSHOT}}"
echo "Using ETH server RPC endpoint ${CERC_IPLD_ETH_RPC}"
echo "Using ETH server GQL endpoint ${CERC_IPLD_ETH_GQL}"
# Read in the config template TOML file and modify it
WATCHER_CONFIG_TEMPLATE=$(cat environments/watcher-config-template.toml)
WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \
sed -E "s|REPLACE_WITH_CERC_IPLD_ETH_GQL|${CERC_IPLD_ETH_GQL}|g; \
s|REPLACE_WITH_CERC_IPLD_ETH_RPC|${CERC_IPLD_ETH_RPC}| ")
# Write the modified content to a new file
echo "$WATCHER_CONFIG" > environments/local.toml
if [ "$CERC_USE_STATE_SNAPSHOT" = true ] ; then
./create-and-import-checkpoint.sh
else
echo "Initializing watcher using fill..."
yarn fill --start-block $DEFAULT_CERC_GELATO_START_BLOCK --end-block $DEFAULT_CERC_GELATO_START_BLOCK
fi
echo "Running active server"
DEBUG=vulcanize:* exec node --enable-source-maps dist/server.js

View File

@ -1,75 +0,0 @@
[server]
host = "0.0.0.0"
port = 3008
kind = "active"
# Checkpointing state.
checkpointing = true
# Checkpoint interval in number of blocks.
checkpointInterval = 2000
# Enable state creation
# CAUTION: Disable only if state creation is not desired or can be filled subsequently
enableState = true
subgraphPath = "./subgraph"
# Interval to restart wasm instance periodically
wasmRestartBlocksInterval = 20
# Interval in number of blocks at which to clear entities cache.
clearEntitiesCacheInterval = 1000
# Boolean to filter logs by contract.
filterLogs = true
# Max block range for which to return events in eventsInRange GQL query.
# Use -1 for skipping check on block range.
maxEventsBlockRange = 1000
# GQL cache settings
[server.gqlCache]
enabled = true
# Max in-memory cache size (in bytes) (default 8 MB)
# maxCacheSize
# GQL cache-control max-age settings (in seconds)
maxAge = 15
timeTravelMaxAge = 86400 # 1 day
[metrics]
host = "0.0.0.0"
port = 9000
[metrics.gql]
port = 9001
[database]
type = "postgres"
host = "gelato-watcher-db"
port = 5432
database = "gelato-watcher"
username = "vdbm"
password = "password"
synchronize = true
logging = false
[upstream]
[upstream.ethServer]
gqlApiEndpoint = "REPLACE_WITH_CERC_IPLD_ETH_GQL"
rpcProviderEndpoint = "REPLACE_WITH_CERC_IPLD_ETH_RPC"
[upstream.cache]
name = "requests"
enabled = false
deleteOnStart = false
[jobQueue]
dbConnectionString = "postgres://vdbm:password@gelato-watcher-db/gelato-watcher-job-queue"
maxCompletionLagInSecs = 300
jobDelayInMilliSecs = 100
eventsInBatch = 50
blockDelayInMilliSecs = 2000
prefetchBlocksInMem = true
prefetchBlockCount = 10

View File

@ -1,13 +0,0 @@
# ipld-eth-server endpoints
DEFAULT_CERC_IPLD_ETH_RPC="http://ipld-eth-server:8082"
DEFAULT_CERC_IPLD_ETH_GQL="http://ipld-eth-server:8083/graphql"
# Gelato start block
DEFAULT_CERC_GELATO_START_BLOCK=11361987
# Whether to use a state snapshot to initialize the watcher
DEFAULT_CERC_USE_STATE_SNAPSHOT=false
# State snapshot params
DEFAULT_CERC_SNAPSHOT_GQL_ENDPOINT=
DEFAULT_CERC_SNAPSHOT_BLOCKHASH=

View File

@ -1,89 +0,0 @@
#!/bin/sh
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi
CERC_L2_GETH_RPC="${CERC_L2_GETH_RPC:-${DEFAULT_CERC_L2_GETH_RPC}}"
CERC_L1_ACCOUNTS_CSV_URL="${CERC_L1_ACCOUNTS_CSV_URL:-${DEFAULT_CERC_L1_ACCOUNTS_CSV_URL}}"
CERC_MOBYMASK_APP_BASE_URI="${CERC_MOBYMASK_APP_BASE_URI:-${DEFAULT_CERC_MOBYMASK_APP_BASE_URI}}"
CERC_DEPLOYED_CONTRACT="${CERC_DEPLOYED_CONTRACT:-${DEFAULT_CERC_DEPLOYED_CONTRACT}}"
# Check if CERC_DEPLOYED_CONTRACT environment variable set to skip contract deployment
if [ -n "$CERC_DEPLOYED_CONTRACT" ]; then
echo "CERC_DEPLOYED_CONTRACT is set to '$CERC_DEPLOYED_CONTRACT'"
echo "Skipping contract deployment"
exit 0
fi
echo "Using L2 RPC endpoint ${CERC_L2_GETH_RPC}"
if [ -n "$CERC_L1_ACCOUNTS_CSV_URL" ] && \
l1_accounts_response=$(curl -L --write-out '%{http_code}' --silent --output /dev/null "$CERC_L1_ACCOUNTS_CSV_URL") && \
[ "$l1_accounts_response" -eq 200 ];
then
echo "Fetching L1 account credentials using provided URL"
mkdir -p /geth-accounts
wget -O /geth-accounts/accounts.csv "$CERC_L1_ACCOUNTS_CSV_URL"
# Read the private key of an L1 account to deploy contract
CERC_PRIVATE_KEY_DEPLOYER=$(head -n 1 /geth-accounts/accounts.csv | cut -d ',' -f 3)
else
echo "Couldn't fetch L1 account credentials, using CERC_PRIVATE_KEY_DEPLOYER from env"
fi
# Set the private key
jq --arg privateKey "$CERC_PRIVATE_KEY_DEPLOYER" '.privateKey = $privateKey' secrets-template.json > secrets.json
# Set the RPC URL
jq --arg rpcUrl "$CERC_L2_GETH_RPC" '.rpcUrl = $rpcUrl' secrets.json > secrets_updated.json && mv secrets_updated.json secrets.json
# Set the MobyMask app base URI
jq --arg baseURI "$CERC_MOBYMASK_APP_BASE_URI" '.baseURI = $baseURI' secrets.json > secrets_updated.json && mv secrets_updated.json secrets.json
# Wait for L2 Optimism Geth and Node servers to be up before deploying contract
CERC_L2_GETH_HOST="${CERC_L2_GETH_HOST:-${DEFAULT_CERC_L2_GETH_HOST}}"
CERC_L2_GETH_PORT="${CERC_L2_GETH_PORT:-${DEFAULT_CERC_L2_GETH_PORT}}"
CERC_L2_NODE_HOST="${CERC_L2_NODE_HOST:-${DEFAULT_CERC_L2_NODE_HOST}}"
CERC_L2_NODE_PORT="${CERC_L2_NODE_PORT:-${DEFAULT_CERC_L2_NODE_PORT}}"
./wait-for-it.sh -h "${CERC_L2_GETH_HOST}" -p "${CERC_L2_GETH_PORT}" -s -t 0
./wait-for-it.sh -h "${CERC_L2_NODE_HOST}" -p "${CERC_L2_NODE_PORT}" -s -t 0
export RPC_URL="${CERC_L2_GETH_RPC}"
# Check and exit if a deployment already exists (on restarts)
if [ -f ./config.json ]; then
echo "config.json already exists, checking the contract deployment"
# Read JSON file
DEPLOYMENT_DETAILS=$(cat config.json)
CONTRACT_ADDRESS=$(echo "$DEPLOYMENT_DETAILS" | jq -r '.address')
cd ../hardhat
if yarn verifyDeployment --network optimism --contract "${CONTRACT_ADDRESS}"; then
echo "Deployment verfication successful"
cd ../server
else
echo "Deployment verfication failed, please clear MobyMask deployment volume before starting"
exit 1
fi
fi
# Wait until balance for deployer account is updated
cd ../hardhat
while true; do
ACCOUNT_BALANCE=$(yarn balance --network optimism "$CERC_PRIVATE_KEY_DEPLOYER" | grep ETH)
if [ "$ACCOUNT_BALANCE" != "0.0 ETH" ]; then
echo "Account balance updated: $ACCOUNT_BALANCE"
break # exit the loop
fi
echo "Account balance not updated: $ACCOUNT_BALANCE"
echo "Checking after 2 seconds"
sleep 2
done
cd ../server
npm run deployAndGenerateInvite

View File

@ -1,20 +0,0 @@
#!/bin/sh
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi
# Check for peer ids in ./peers folder, create if not present
if [ -f /peer-ids/relay-id.json ]; then
echo "Using peer id for relay node from the mounted volume"
else
echo "Creating a new peer id for relay node"
yarn create-peer -f /peer-ids/relay-id.json
fi
if [ -f /peer-ids/peer-id.json ]; then
echo "Using peer id for peer node from the mounted volume"
else
echo "Creating a new peer id for peer node"
yarn create-peer -f /peer-ids/peer-id.json
fi

View File

@ -1,43 +0,0 @@
#!/usr/bin/env bash
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi
CERC_CHAIN_ID="${CERC_CHAIN_ID:-${DEFAULT_CERC_CHAIN_ID}}"
CERC_DEPLOYED_CONTRACT="${CERC_DEPLOYED_CONTRACT:-${DEFAULT_CERC_DEPLOYED_CONTRACT}}"
CERC_RELAY_NODES="${CERC_RELAY_NODES:-${DEFAULT_CERC_RELAY_NODES}}"
CERC_DENY_MULTIADDRS="${CERC_DENY_MULTIADDRS:-${DEFAULT_CERC_DENY_MULTIADDRS}}"
CERC_APP_WATCHER_URL="${CERC_APP_WATCHER_URL:-${DEFAULT_CERC_APP_WATCHER_URL}}"
# If not set (or []), check the mounted volume for relay peer id
if [ -z "$CERC_RELAY_NODES" ] || [ "$CERC_RELAY_NODES" = "[]" ]; then
echo "CERC_RELAY_NODES not provided, taking from the mounted volume"
CERC_RELAY_NODES="[\"/ip4/127.0.0.1/tcp/9090/ws/p2p/$(jq -r '.id' /peers/relay-id.json)\"]"
fi
echo "Using CERC_RELAY_NODES $CERC_RELAY_NODES"
if [ -z "$CERC_DEPLOYED_CONTRACT" ]; then
# Use config from mounted volume (when running web-app along with watcher stack)
echo "Taking config for deployed contract from mounted volume"
while [ ! -f /server/config.json ]; do
echo "Config not found, retrying after 5 seconds"
sleep 5
done
# Get deployed contract address and chain id
CERC_DEPLOYED_CONTRACT=$(jq -r '.address' /server/config.json | tr -d '"')
CERC_CHAIN_ID=$(jq -r '.chainId' /server/config.json)
else
echo "Taking deployed contract details from env"
fi
# Use yq to create config.yml with environment variables
yq -n ".address = env(CERC_DEPLOYED_CONTRACT)" > /config/config.yml
yq ".watcherUrl = env(CERC_APP_WATCHER_URL)" -i /config/config.yml
yq ".chainId = env(CERC_CHAIN_ID)" -i /config/config.yml
yq ".relayNodes = strenv(CERC_RELAY_NODES)" -i /config/config.yml
yq ".denyMultiaddrs = strenv(CERC_DENY_MULTIADDRS)" -i /config/config.yml
/scripts/start-serving-app.sh
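With hypothetical inputs, the yq lines above generate a /config/config.yml along these lines (the contract address and peer id are made up):

# CERC_DEPLOYED_CONTRACT=0x1234... and CERC_CHAIN_ID=42069 would yield roughly:
#   address: 0x1234...
#   watcherUrl: http://localhost:3001
#   chainId: 42069
#   relayNodes: '["/ip4/127.0.0.1/tcp/9090/ws/p2p/<relay-peer-id>"]'
#   denyMultiaddrs: '[]'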

View File

@ -1,29 +0,0 @@
# Defaults
# Watcher endpoint
DEFAULT_CERC_APP_WATCHER_URL="http://localhost:3001"
# Set of relay peers to connect to from the relay node
DEFAULT_CERC_RELAY_PEERS=[]
# Domain to be used in the relay node's announce address
DEFAULT_CERC_RELAY_ANNOUNCE_DOMAIN=
# Base URI for mobymask-app (used for generating invite)
DEFAULT_CERC_MOBYMASK_APP_BASE_URI="http://127.0.0.1:3002/#"
# Set to false to disable the watcher peer sending txs to L2
DEFAULT_CERC_ENABLE_PEER_L2_TXS=true
# Set deployed MobyMask contract address to avoid deploying contract in stack
# mobymask-app will use this contract address in config if run separately
DEFAULT_CERC_DEPLOYED_CONTRACT=
# Chain ID is used by mobymask web-app for txs
DEFAULT_CERC_CHAIN_ID=42069
# Set of relay nodes to be used by web-apps
DEFAULT_CERC_RELAY_NODES=[]
# Set of multiaddrs to be avoided while dialling
DEFAULT_CERC_DENY_MULTIADDRS=[]

View File

@ -1,14 +0,0 @@
# Defaults
# L2 endpoints
DEFAULT_CERC_L2_GETH_RPC="http://op-geth:8545"
# Endpoints waited on before contract deployment
DEFAULT_CERC_L2_GETH_HOST="op-geth"
DEFAULT_CERC_L2_GETH_PORT=8545
DEFAULT_CERC_L2_NODE_HOST="op-node"
DEFAULT_CERC_L2_NODE_PORT=8547
# URL to get CSV with credentials for accounts on L1 to perform txs on L2
DEFAULT_CERC_L1_ACCOUNTS_CSV_URL="http://fixturenet-eth-bootnode-geth:9898/accounts.csv"

View File

@ -1,5 +0,0 @@
{
"rpcUrl": "",
"privateKey": "",
"baseURI": ""
}

View File

@ -1,10 +0,0 @@
#!/bin/sh
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi
CERC_RELAY_MULTIADDR="/dns4/mobymask-watcher-server/tcp/9090/ws/p2p/$(jq -r '.id' /peer-ids/relay-id.json)"
# Write the relay node's multiaddr to /app/packages/peer/.env for running tests
echo "RELAY=\"$CERC_RELAY_MULTIADDR\"" > ./.env

View File

@ -1,64 +0,0 @@
#!/bin/sh
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi
CERC_L2_GETH_RPC="${CERC_L2_GETH_RPC:-${DEFAULT_CERC_L2_GETH_RPC}}"
CERC_L1_ACCOUNTS_CSV_URL="${CERC_L1_ACCOUNTS_CSV_URL:-${DEFAULT_CERC_L1_ACCOUNTS_CSV_URL}}"
CERC_RELAY_PEERS="${CERC_RELAY_PEERS:-${DEFAULT_CERC_RELAY_PEERS}}"
CERC_DENY_MULTIADDRS="${CERC_DENY_MULTIADDRS:-${DEFAULT_CERC_DENY_MULTIADDRS}}"
CERC_RELAY_ANNOUNCE_DOMAIN="${CERC_RELAY_ANNOUNCE_DOMAIN:-${DEFAULT_CERC_RELAY_ANNOUNCE_DOMAIN}}"
CERC_ENABLE_PEER_L2_TXS="${CERC_ENABLE_PEER_L2_TXS:-${DEFAULT_CERC_ENABLE_PEER_L2_TXS}}"
CERC_DEPLOYED_CONTRACT="${CERC_DEPLOYED_CONTRACT:-${DEFAULT_CERC_DEPLOYED_CONTRACT}}"
echo "Using L2 RPC endpoint ${CERC_L2_GETH_RPC}"
# Use public domain for relay multiaddr in peer config if specified
# Otherwise, use the docker service name of the watcher server
if [ -n "$CERC_RELAY_ANNOUNCE_DOMAIN" ]; then
CERC_RELAY_MULTIADDR="/dns4/${CERC_RELAY_ANNOUNCE_DOMAIN}/tcp/443/wss/p2p/$(jq -r '.id' /app/peers/relay-id.json)"
else
CERC_RELAY_MULTIADDR="/dns4/mobymask-watcher-server/tcp/9090/ws/p2p/$(jq -r '.id' /app/peers/relay-id.json)"
fi
# Use contract address from environment variable or set from config.json in mounted volume
if [ -n "$CERC_DEPLOYED_CONTRACT" ]; then
CONTRACT_ADDRESS="${CERC_DEPLOYED_CONTRACT}"
else
# Assign deployed contract address from server config (created by mobymask container after deploying contract)
CONTRACT_ADDRESS=$(jq -r '.address' /server/config.json | tr -d '"')
fi
if [ -n "$CERC_L1_ACCOUNTS_CSV_URL" ] && \
l1_accounts_response=$(curl -L --write-out '%{http_code}' --silent --output /dev/null "$CERC_L1_ACCOUNTS_CSV_URL") && \
[ "$l1_accounts_response" -eq 200 ];
then
echo "Fetching L1 account credentials using provided URL"
mkdir -p /geth-accounts
wget -O /geth-accounts/accounts.csv "$CERC_L1_ACCOUNTS_CSV_URL"
# Read the private key of an L1 account for sending txs from peer
CERC_PRIVATE_KEY_PEER=$(awk -F, 'NR==2{print $NF}' /geth-accounts/accounts.csv)
else
echo "Couldn't fetch L1 account credentials, using CERC_PRIVATE_KEY_PEER from env"
fi
# Read in the config template TOML file and modify it
WATCHER_CONFIG_TEMPLATE=$(cat environments/watcher-config-template.toml)
WATCHER_CONFIG=$(echo "$WATCHER_CONFIG_TEMPLATE" | \
sed -E "s|REPLACE_WITH_CERC_RELAY_PEERS|${CERC_RELAY_PEERS}|g; \
s|REPLACE_WITH_CERC_DENY_MULTIADDRS|${CERC_DENY_MULTIADDRS}|g; \
s/REPLACE_WITH_CERC_RELAY_ANNOUNCE_DOMAIN/${CERC_RELAY_ANNOUNCE_DOMAIN}/g; \
s|REPLACE_WITH_CERC_RELAY_MULTIADDR|${CERC_RELAY_MULTIADDR}|g; \
s/REPLACE_WITH_CERC_ENABLE_PEER_L2_TXS/${CERC_ENABLE_PEER_L2_TXS}/g; \
s/REPLACE_WITH_CERC_PRIVATE_KEY_PEER/${CERC_PRIVATE_KEY_PEER}/g; \
s/REPLACE_WITH_CONTRACT_ADDRESS/${CONTRACT_ADDRESS}/g; \
s|REPLACE_WITH_CERC_L2_GETH_RPC_ENDPOINT|${CERC_L2_GETH_RPC}| ")
# Write the modified content to a new file
echo "$WATCHER_CONFIG" > environments/local.toml
echo 'yarn server'
yarn server
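Note the mixed delimiters in the sed invocation above: replacement values that can themselves contain '/' (multiaddrs, URLs, JSON-style lists) use '|' as the delimiter so their slashes need no escaping. A tiny standalone sketch of the same trick, with a made-up multiaddr:

# Sketch only: demonstrate the '|' delimiter with a slash-heavy value.
CERC_RELAY_MULTIADDR="/dns4/relay.example.com/tcp/443/wss/p2p/QmPeerIdPlaceholder"  # illustrative
echo "relayMultiaddr = 'REPLACE_WITH_CERC_RELAY_MULTIADDR'" | \
  sed -E "s|REPLACE_WITH_CERC_RELAY_MULTIADDR|${CERC_RELAY_MULTIADDR}|g"
# prints: relayMultiaddr = '/dns4/relay.example.com/tcp/443/wss/p2p/QmPeerIdPlaceholder'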

View File

@ -1,7 +0,0 @@
{
"relayNodes": [],
"peer": {
"denyMultiaddrs": [],
"enableDebugInfo": true
}
}

View File

@ -1,22 +0,0 @@
#!/bin/sh
set -e
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi
CERC_RELAY_NODES="${CERC_RELAY_NODES:-${DEFAULT_CERC_RELAY_NODES}}"
CERC_DENY_MULTIADDRS="${CERC_DENY_MULTIADDRS:-${DEFAULT_CERC_DENY_MULTIADDRS}}"
# If not set (or []), check the mounted volume for relay peer id
if [ -z "$CERC_RELAY_NODES" ] || [ "$CERC_RELAY_NODES" = "[]" ]; then
echo "CERC_RELAY_NODES not provided, taking from the mounted volume"
CERC_RELAY_NODES="[\"/ip4/127.0.0.1/tcp/9090/ws/p2p/$(jq -r '.id' /peers/relay-id.json)\"]"
fi
echo "Using CERC_RELAY_NODES $CERC_RELAY_NODES"
# Use yq to create config.yml with environment variables
yq -n ".relayNodes = strenv(CERC_RELAY_NODES)" > /config/config.yml
yq ".denyMultiaddrs = strenv(CERC_DENY_MULTIADDRS)" -i /config/config.yml
/scripts/start-serving-app.sh

View File

@ -1,78 +0,0 @@
[server]
host = "0.0.0.0"
port = 3001
kind = "lazy"
# Checkpointing state.
checkpointing = true
# Checkpoint interval in number of blocks.
checkpointInterval = 2000
# Enable state creation
enableState = true
# Boolean to filter logs by contract.
filterLogs = true
# Max block range for which to return events in eventsInRange GQL query.
# Use -1 to skip the block range check.
maxEventsBlockRange = -1
[server.p2p]
enableRelay = true
enablePeer = true
[server.p2p.relay]
host = "0.0.0.0"
port = 9090
relayPeers = REPLACE_WITH_CERC_RELAY_PEERS
denyMultiaddrs = REPLACE_WITH_CERC_DENY_MULTIADDRS
peerIdFile = './peers/relay-id.json'
announce = 'REPLACE_WITH_CERC_RELAY_ANNOUNCE_DOMAIN'
enableDebugInfo = true
[server.p2p.peer]
relayMultiaddr = 'REPLACE_WITH_CERC_RELAY_MULTIADDR'
pubSubTopic = 'mobymask'
denyMultiaddrs = REPLACE_WITH_CERC_DENY_MULTIADDRS
peerIdFile = './peers/peer-id.json'
enableDebugInfo = true
enableL2Txs = REPLACE_WITH_CERC_ENABLE_PEER_L2_TXS
[server.p2p.peer.l2TxsConfig]
privateKey = 'REPLACE_WITH_CERC_PRIVATE_KEY_PEER'
contractAddress = 'REPLACE_WITH_CONTRACT_ADDRESS'
[metrics]
host = "0.0.0.0"
port = 9000
[metrics.gql]
port = 9001
[database]
type = "postgres"
host = "mobymask-watcher-db"
port = 5432
database = "mobymask-watcher"
username = "vdbm"
password = "password"
synchronize = true
logging = false
[upstream]
[upstream.ethServer]
gqlApiEndpoint = "http://ipld-eth-server:8083/graphql"
rpcProviderEndpoint = "REPLACE_WITH_CERC_L2_GETH_RPC_ENDPOINT"
blockDelayInMilliSecs = 60000
[upstream.cache]
name = "requests"
enabled = false
deleteOnStart = false
[jobQueue]
dbConnectionString = "postgres://vdbm:password@mobymask-watcher-db/mobymask-watcher-job-queue"
maxCompletionLagInSecs = 300
jobDelayInMilliSecs = 100
eventsInBatch = 50

View File

@ -1,13 +0,0 @@
# source'ed into container build scripts to do generic command setup
if [[ -n "$CERC_SCRIPT_DEBUG" ]]; then
set -x
echo "Build environment variables:"
env
fi
build_command_args=""
if [[ ${CERC_FORCE_REBUILD} == "true" ]]; then
build_command_args="${build_command_args} --no-cache"
fi
if [[ -n "$CERC_CONTAINER_EXTRA_BUILD_ARGS" ]]; then
build_command_args="${build_command_args} ${CERC_CONTAINER_EXTRA_BUILD_ARGS}"
fi
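The container build scripts below source this file and splice build_command_args into their docker build commands; a hedged usage sketch (the image tag is illustrative):

# Sketch only: with both variables set, build_command_args
# expands to " --no-cache --pull".
export CERC_FORCE_REBUILD=true
export CERC_CONTAINER_EXTRA_BUILD_ARGS="--pull"
source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
docker build -t example/image:local ${build_command_args} .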

View File

@ -1,5 +0,0 @@
#!/usr/bin/env bash
# Build a local version of the task executor for act-runner
source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
docker build -t cerc/act-runner-task-executor:local -f ${CERC_REPO_BASE_DIR}/hosting/gitea/Dockerfile.task-executor ${build_command_args} ${SCRIPT_DIR}

View File

@ -1,5 +0,0 @@
#!/usr/bin/env bash
source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
# Build a local version of the act-runner image
# TODO: enhance the default build code path to cope with this container (repo has an _ which needs to be converted to - in the image tag)
docker build -t cerc/act-runner:local -f ${CERC_REPO_BASE_DIR}/act_runner/Dockerfile ${build_command_args} ${CERC_REPO_BASE_DIR}/act_runner

View File

@ -11,6 +11,6 @@ DEPS=(github.com/fare/gerbil-utils
) ;
for i in ${DEPS[@]} ; do
echo "Installing gerbil package: $i"
gxpkg install $i
gxpkg install $i &&
gxpkg build $i
done

View File

@ -1,37 +1,14 @@
# Originally from: https://github.com/devcontainers/images/blob/main/src/javascript-node/.devcontainer/Dockerfile
# Which depends on: https://github.com/nodejs/docker-node/blob/main/Dockerfile-debian.template
# [Choice] Node.js version (use -bullseye variants on local arm64/Apple Silicon): 18, 16, 14, 18-bullseye, 16-bullseye, 14-bullseye, 18-buster, 16-buster, 14-buster
ARG VARIANT=18-bullseye
ARG VARIANT=16-bullseye
FROM node:${VARIANT}
# Set these args to change the uid/gid for the base container's "node" user to match that of the host user (so bind mounts work as expected).
ARG CERC_HOST_UID=1000
ARG CERC_HOST_GID=1000
# Make these values available at runtime to allow a consistency check.
ENV HOST_UID=${CERC_HOST_UID}
ENV HOST_GID=${CERC_HOST_GID}
ARG USERNAME=node
ARG NPM_GLOBAL=/usr/local/share/npm-global
# Add NPM global to PATH.
ENV PATH=${NPM_GLOBAL}/bin:${PATH}
SHELL ["/bin/bash", "-c"]
RUN \
# Don't switch container uid/gid if the host uid/gid is 1000 (which means it's already correct),
# or root (which won't work anyway) or <= 100 (which also won't work).
if [[ ${CERC_HOST_GID} -ne 1000 && ${CERC_HOST_GID} -ne 0 && ${CERC_HOST_GID} -gt 100 ]]; then \
groupmod -g ${CERC_HOST_GID} ${USERNAME}; \
fi \
&& if [[ ${CERC_HOST_UID} -ne 1000 && ${CERC_HOST_UID} -ne 0 && ${CERC_HOST_UID} -gt 100 ]]; then \
usermod -u ${CERC_HOST_UID} -g ${CERC_HOST_GID} ${USERNAME} && chown ${CERC_HOST_UID}:${CERC_HOST_GID} /home/${USERNAME}; \
fi
# Prevents npm from printing version warnings
ENV NPM_CONFIG_UPDATE_NOTIFIER=false
RUN \
# Configure global npm install location, use group to adapt to UID/GID changes
if ! cat /etc/group | grep -e "^npm:" > /dev/null 2>&1; then groupadd -r npm; fi \
@ -62,7 +39,6 @@ RUN mkdir /scripts
COPY build-npm-package.sh /scripts
COPY yarn-local-registry-fixup.sh /scripts
COPY build-npm-package-local-dependencies.sh /scripts
COPY check-uid.sh /scripts
ENV PATH="${PATH}:/scripts"
COPY entrypoint.sh .
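To make bind mounts writable when the host user is not uid/gid 1000, the image can be built with the host's ids passed through the args above; a hedged sketch (the tag name is illustrative):

# Sketch only: remap the container's "node" user to the host user at build time.
docker build \
  --build-arg CERC_HOST_UID=$(id -u) \
  --build-arg CERC_HOST_GID=$(id -g) \
  -t cerc/example-js-base:local .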

View File

@ -1,7 +1,7 @@
#!/bin/bash
# Usage: build-npm-package-local-dependencies.sh <registry-url> <publish-with-this-version>
# Runs build-npm-package.sh after first fixing up yarn.lock to use a local
# npm registry for all packages in a specific scope (currently @cerc-io, @lirewine and @muknsys)
# npm registry for all packages in a specific scope (currently @cerc-io)
if [ -n "$CERC_SCRIPT_DEBUG" ]; then
set -x
fi
@ -17,21 +17,18 @@ fi
set -e
local_npm_registry_url=$1
package_publish_version=$2
# If we need to handle an additional scope, add it to the list below:
npm_scopes_to_handle=("@cerc-io" "@lirewine" "@muknsys")
for npm_scope_for_local in ${npm_scopes_to_handle[@]}
# TODO: make this a parameter and allow a list of scopes
npm_scope_for_local="@cerc-io"
# We need to configure the local registry
npm config set ${npm_scope_for_local}:registry ${local_npm_registry_url}
npm config set -- ${local_npm_registry_url}:_authToken ${CERC_NPM_AUTH_TOKEN}
# Find the set of dependencies from the specified scope
mapfile -t dependencies_from_scope < <(cat package.json | jq -r '.dependencies | with_entries(if (.key|test("^'${npm_scope_for_local}'/.*$")) then ( {key: .key, value: .value } ) else empty end ) | keys[]')
echo "Fixing up dependencies"
for package in "${dependencies_from_scope[@]}"
do
# We need to configure the local registry
npm config set ${npm_scope_for_local}:registry ${local_npm_registry_url}
npm config set -- ${local_npm_registry_url}:_authToken ${CERC_NPM_AUTH_TOKEN}
# Find the set of dependencies from the specified scope
mapfile -t dependencies_from_scope < <(cat package.json | jq -r '.dependencies | with_entries(if (.key|test("^'${npm_scope_for_local}'/.*$")) then ( {key: .key, value: .value } ) else empty end ) | keys[]')
echo "Fixing up dependencies in scope ${npm_scope_for_local}"
for package in "${dependencies_from_scope[@]}"
do
echo "Fixing up package ${package}"
yarn-local-registry-fixup.sh $package ${local_npm_registry_url}
done
echo "Fixing up package ${package}"
yarn-local-registry-fixup.sh $package ${local_npm_registry_url}
done
echo "Running build"
build-npm-package.sh ${local_npm_registry_url} ${package_publish_version}
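The jq filter used to collect scoped dependencies is dense; here is a standalone sketch of what it selects, against a made-up package.json (names and versions are illustrative):

# Sketch only: list the @cerc-io-scoped dependencies from a sample manifest.
cat > /tmp/package.json <<'EOF'
{
  "dependencies": {
    "@cerc-io/peer": "^0.2.0",
    "express": "^4.18.0"
  }
}
EOF
jq -r '.dependencies | with_entries(if (.key|test("^@cerc-io/.*$")) then ( {key: .key, value: .value } ) else empty end ) | keys[]' /tmp/package.json
# prints: @cerc-io/peer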

View File

@ -22,24 +22,14 @@ set -e
# Get the name of this package from package.json since we weren't passed that
package_name=$( cat package.json | jq -r .name )
local_npm_registry_url=$1
npm config set @cerc-io:registry ${local_npm_registry_url}
npm config set @lirewine:registry ${local_npm_registry_url}
npm config set @muknsys:registry ${local_npm_registry_url}
# Workaround bug in npm unpublish where it needs the url to be of the form //<foo> and not http://<foo>
local_npm_registry_url_fixed=$( echo ${local_npm_registry_url} | sed -e 's/^http[s]\{0,1\}://')
npm config set -- ${local_npm_registry_url_fixed}:_authToken ${CERC_NPM_AUTH_TOKEN}
npm config set @cerc-io:registry ${local_npm_registry_url}
npm config set -- ${local_npm_registry_url}:_authToken ${CERC_NPM_AUTH_TOKEN}
# First check if the version of this package we're trying to build already exists in the registry
package_exists=$( yarn info --json ${package_name}@${package_publish_version} 2>/dev/null | jq -r .data.dist.tarball )
if [[ ! -z "$package_exists" && "$package_exists" != "null" ]]; then
echo "${package_publish_version} of ${package_name} already exists in the registry"
if [[ ${CERC_FORCE_REBUILD} == "true" ]]; then
# Attempt to unpublish the existing package
echo "NOTE: unpublishing existing package version since force rebuild is enabled"
npm unpublish --force ${package_name}@${package_publish_version}
else
echo "skipping build since target version already exists"
exit 0
fi
echo "${package_publish_version} of ${package_name} already exists in the registry, skipping build"
exit 0
fi
echo "Build and publish ${package_name} version ${package_publish_version}"
yarn install

View File

@ -1,21 +0,0 @@
#!/bin/bash
# Make the container usable for uid/gid != 1000
if [[ -n "$CERC_SCRIPT_DEBUG" ]]; then
set -x
fi
current_uid=$(id -u)
current_gid=$(id -g)
# Don't check if running as root
if [[ ${current_uid} == 0 ]]; then
exit 0
fi
# Check the current uid/gid vs the uid/gid used to build the container.
# We do this because both bind mounts and npm tooling require the uid/gid to match.
if [[ ${current_gid} != ${HOST_GID} ]]; then
echo "Warning: running with gid: ${current_gid} which is not the gid for which this container was built (${HOST_GID})"
exit 0
fi
if [[ ${current_uid} != ${HOST_UID} ]]; then
echo "Warning: running with gid: ${current_uid} which is not the uid for which this container was built (${HOST_UID})"
exit 0
fi
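For illustration, running the script in a container whose ids differ from the build-time defaults (HOST_UID=1000, HOST_GID=1000) warns but does not fail; the ids below are made up:

# Sketch only: invoked as uid/gid 1001 in a container built with the defaults.
/scripts/check-uid.sh
# prints: Warning: running with gid: 1001 which is not the gid for which this container was built (1000)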

View File

@ -1,3 +1,2 @@
#!/bin/sh
/scripts/check-uid.sh
exec "$@"

View File

@ -18,17 +18,16 @@ fi
set -e
target_package=$1
local_npm_registry_url=$2
# Extract the actual version pinned in yarn.lock
# See: https://stackoverflow.com/questions/60454251/how-to-know-the-version-of-currently-installed-package-from-yarn-lock
versioned_target_package=$(yarn list --pattern ${target_package} --depth=0 --json --non-interactive --no-progress | jq -r '.data.trees[].name')
# TODO: use jq rather than sed here:
versioned_target_package=$(grep ${target_package} package.json | sed -e 's#[[:space:]]\{1,\}\"\('${target_package}'\)\":[[:space:]]\{1,\}\"\(.*\)\",#\1@\2#' )
# Use yarn info to get URL checksums etc from the new registry
yarn_info_output=$(yarn info --json $versioned_target_package 2>/dev/null)
# First check if the target version actually exists.
# If it doesn't exist there will be no .data.dist.tarball element,
# and jq will output the string "null"
package_tarball=$(echo $yarn_info_output | jq -r .data.dist.tarball)
if [[ "$yarn_info_output" == "" || $package_tarball == "null" ]]; then
echo "FATAL: Target package version ($versioned_target_package) not found (or bad npm auth token)" >&2
if [[ $package_tarball == "null" ]]; then
echo "FATAL: Target package version ($versioned_target_package) not found" >&2
exit 1
fi
# Code below parses out the values we need

View File

@ -1,4 +1,3 @@
#!/usr/bin/env bash
# Build cerc/eth-probe
source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
docker build -t cerc/eth-probe:local ${build_command_args} ${CERC_REPO_BASE_DIR}/eth-probe
docker build -t cerc/eth-probe:local ${CERC_REPO_BASE_DIR}/eth-probe

View File

@ -1,4 +1,3 @@
#!/usr/bin/env bash
# Build cerc/eth-statediff-fill-service
source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
docker build -t cerc/eth-statediff-fill-service:local ${build_command_args} ${CERC_REPO_BASE_DIR}/eth-statediff-fill-service
docker build -t cerc/eth-statediff-fill-service:local ${CERC_REPO_BASE_DIR}/eth-statediff-fill-service

View File

@ -1,4 +1,3 @@
#!/usr/bin/env bash
# Build cerc/eth-statediff-service
source ${CERC_CONTAINER_BASE_DIR}/build-base.sh
docker build -t cerc/eth-statediff-service:local ${build_command_args} ${CERC_REPO_BASE_DIR}/eth-statediff-service
docker build -t cerc/eth-statediff-service:local ${CERC_REPO_BASE_DIR}/eth-statediff-service

View File

@ -6,7 +6,7 @@ RUN go install github.com/go-delve/delve/cmd/dlv@latest
FROM cerc/go-ethereum:local as geth
FROM alpine:3.17
FROM alpine:latest
RUN apk add --no-cache python3 python3-dev py3-pip curl wget jq build-base gettext libintl openssl bash bind-tools postgresql-client
COPY --from=delve /go/bin/dlv /usr/local/bin/
@ -22,18 +22,6 @@ COPY run-el.sh /opt/testnet/run.sh
RUN cd /opt/testnet && make genesis-el
COPY --from=geth /usr/local/bin/geth /usr/local/bin/
# Snag the genesis block info.
RUN geth --datadir ~/ethdata init /opt/testnet/build/el/geth.json && rm -f ~/ethdata/geth/nodekey
RUN cp -rp ~/ethdata ~/tmpeth && \
geth --datadir ~/tmpeth init /opt/testnet/build/el/geth.json && \
geth --datadir ~/tmpeth --http & \
sleep 5 && \
curl -q --location 'localhost:8545' \
--header 'Content-Type: application/json' \
--data '{ "jsonrpc": "2.0", "id": 14, "method": "eth_getBlockByNumber", "params": ["0x0", false] }' \
-o /opt/testnet/build/el/genesis_block.json && \
killall -9 geth && \
rm -rf ~/tmpeth
RUN geth init /opt/testnet/build/el/geth.json && rm -f ~/.ethereum/geth/nodekey
ENTRYPOINT ["/opt/testnet/run.sh"]

Some files were not shown because too many files have changed in this diff.