Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
21 commits
Select commit Hold shift + click to select a range
793e0bc
feat(gov-proposals): hashing, schema, repos, dispatcher, routes (PR 8…
Apr 21, 2026
b089dc3
feat(gov-proposals): mailer templates + server wiring (PR 8 step 2/3)
Apr 21, 2026
ded0044
fix(gov-proposals): Codex round 1 (args to gObjectCheck, duplicate su…
Apr 21, 2026
548d509
fix(gov-proposals): Codex round 2 (payload-keyed idempotency, duplica…
Apr 21, 2026
2e479f9
PR8 round 3: race-safe /prepare + attach-collateral, per Codex review
Apr 21, 2026
74f15e9
ci: run Jest on every push / PR (Node 20 + 22)
Apr 22, 2026
40dcb4e
fix(proposals): compare-and-swap status guard in attachCollateral (Co…
Apr 22, 2026
fcd965e
fix(proposals): guard null rpc + re-preflight on idempotent replay (C…
Apr 22, 2026
960418c
fix(proposals): markSubmitted CAS + dispatcher race logs + paymentAmo…
Apr 22, 2026
481d758
fix(proposals): call gobject_check with 1 arg + parse { "Object statu…
Apr 22, 2026
6e5a0d9
fix(proposals): CAS-guard markFailed + verify delete actually removed…
Apr 22, 2026
4d65885
PR8 round 8: wrap prepare DB lookups + stop rearms + tests
Apr 22, 2026
5919561
PR8 round 9: serialize dispatcher in server.js + resolve draft inside…
Apr 22, 2026
601eeed
PR8 round 10: terminal duplicate-hash + tighter terminal error patter…
Apr 22, 2026
56c4491
PR8 Codex round 11 P1+P2: narrow terminal matcher, normalize UNIQUE-h…
Apr 22, 2026
386108c
PR8 Codex round 12 P1+P2: narrow rate-limit matcher + guard winner re…
Apr 22, 2026
d3050d2
Address Codex PR8 round 13 review
Apr 22, 2026
26be11d
fix(proposals): reject over-precision numeric payment amounts
Apr 22, 2026
748bf67
fix(proposals): enforce int64 cap on payment_amount_sats
Apr 22, 2026
30b5866
fix(proposals): send numeric revision/time to gobject_submit; int64 c…
Apr 22, 2026
5dd2098
fix(proposals): reject unsafe JSON-number paymentAmountSats before ha…
Apr 22, 2026
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
64 changes: 64 additions & 0 deletions .github/workflows/ci.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,64 @@
name: CI

on:
  push:
    branches: ['**']
  pull_request:

# One active run per ref: a newer push cancels the in-flight run so we
# don't burn runner minutes on stale commits.
concurrency:
  group: ci-${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  test:
    name: Jest (Node ${{ matrix.node }})
    runs-on: ubuntu-latest
    strategy:
      # Let both Node versions finish even if one fails, so a
      # version-specific breakage is visible as exactly that.
      fail-fast: false
      matrix:
        node: ['20', '22']

    env:
      CI: 'true'
      # Keep tests hermetic: anything that auto-opts-in based on
      # NODE_ENV stays in test mode, not dev/prod.
      NODE_ENV: test

    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Set up Node ${{ matrix.node }}
        uses: actions/setup-node@v4
        with:
          node-version: ${{ matrix.node }}
          cache: npm

      - name: Install
        # `npm ci` is strict about package-lock.json; if the lock
        # drifts we want the CI to fail loudly rather than silently
        # resolve a new tree.
        run: npm ci

      - name: Run tests
        run: npm test -- --runInBand --ci --colors

  lint-sql:
    name: Schema sanity
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Verify migrations directory has a deterministic ordering
        # Guard against accidentally shipping two migrations with
        # colliding numeric prefixes — the runner sorts lexicographically
        # and duplicate prefixes would make apply order undefined.
        run: |
          set -euo pipefail
          cd db/migrations
          dup=$(ls | awk -F'_' '{print $1}' | sort | uniq -d || true)
          if [ -n "$dup" ]; then
            echo "Duplicate migration prefix(es): $dup"
            exit 1
          fi
140 changes: 140 additions & 0 deletions db/migrations/001_init.sql
Original file line number Diff line number Diff line change
Expand Up @@ -67,6 +67,41 @@
-- short-lived under Core's time window, and keeping them server-side
-- expands attack surface with zero replay value; a retry regenerates
-- a fresh sig client-side from the vault.
--
-- proposal_drafts
-- User's in-progress proposal text. Server-side so the same draft
-- is available across devices once the user is logged in (Twitter-
-- compose-style: log out / close / switch devices and the drafts
-- follow the account). Drafts are plaintext because a governance
-- proposal's content is, by definition, about to go public on
-- chain — encrypting it would add friction for zero security
-- benefit. The payment_amount is stored in satoshis as INTEGER
-- (fits in int64 for every imaginable proposal size) to avoid the
-- float-precision traps of storing SYS decimals.
--
-- proposal_submissions
-- One row per proposal the user has actually committed to publishing
-- (i.e. they've advanced past the draft step). The row is created at
-- "prepare" time with a frozen canonical snapshot (parent_hash +
-- revision + time_unix + data_hex + proposal_hash) — those fields
-- are the hash preimage and must not change after this point, else
-- the 150 SYS collateral OP_RETURN would stop matching. The row
-- moves through a small state machine advanced partly by the user
-- (reporting a collateral txid) and partly by the reminder-style
-- dispatcher (watching confirmations, calling gobject_submit once
-- mature). Statuses:
-- prepared hash + dataHex computed, shown to user,
-- no collateral yet.
-- awaiting_collateral user has supplied a collateral txid;
-- dispatcher is polling confirmations.
-- submitted gobject_submit succeeded; governance_hash
-- is set. Terminal (happy path).
-- failed something fatal happened; fail_reason
-- is a stable machine code, fail_detail is
-- raw context. Terminal.
-- There is no 'abandoned' status — users who back out before paying
-- just DELETE their row. The status column has no CHECK constraint
-- so the repo layer owns validation (mirroring vote_receipts.status).

CREATE TABLE users (
id INTEGER PRIMARY KEY AUTOINCREMENT,
Expand Down Expand Up @@ -191,3 +226,108 @@ CREATE INDEX idx_receipts_user_proposal
ON vote_receipts(user_id, proposal_hash);
CREATE INDEX idx_receipts_user_recent
ON vote_receipts(user_id, submitted_at DESC);

-- proposal_drafts: user's in-progress proposal content. No canonical
-- snapshot or hash here — drafts haven't committed to an on-chain
-- identity yet. `payment_amount_sats` is an integer number of
-- satoshis (int64 range easily accommodates any realistic amount);
-- storing SYS as a decimal REAL would drift under float arithmetic.
-- `start_epoch` / `end_epoch` are nullable because a user may save
-- before choosing a superblock.
CREATE TABLE proposal_drafts (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    -- Drafts are account-scoped; deleting the account deletes them.
    user_id INTEGER NOT NULL REFERENCES users(id) ON DELETE CASCADE,

    -- All text fields default to '' so a partially filled compose form
    -- can be saved at any point without NULL-vs-empty ambiguity.
    title TEXT NOT NULL DEFAULT '',
    name TEXT NOT NULL DEFAULT '',
    url TEXT NOT NULL DEFAULT '',
    description TEXT NOT NULL DEFAULT '',
    payment_address TEXT NOT NULL DEFAULT '',
    -- Whole satoshis (fits int64); never a decimal SYS amount — see the
    -- float-precision note in the schema header above.
    payment_amount_sats INTEGER NOT NULL DEFAULT 0,
    payment_count INTEGER NOT NULL DEFAULT 1,
    -- Nullable on purpose: a user may save before choosing a superblock.
    start_epoch INTEGER,
    end_epoch INTEGER,
    -- Unix-epoch seconds; maintained by the repo layer, not triggers.
    created_at INTEGER NOT NULL,
    updated_at INTEGER NOT NULL
);

-- Per-user recency index: backs the "your drafts" list, newest-edited
-- first (same shape as idx_receipts_user_recent on vote_receipts).
CREATE INDEX idx_proposal_drafts_user_recent
ON proposal_drafts(user_id, updated_at DESC);

-- proposal_submissions: once the user commits to publishing, we
-- snapshot the canonical (parent_hash, revision, time_unix, data_hex,
-- proposal_hash) tuple. Anything derived from data_hex (name, url,
-- payment_*) is duplicated in typed columns for indexing and display,
-- but the source of truth for what the chain sees is data_hex — the
-- repo layer guarantees the denormalized columns stay in sync with
-- it. draft_id is intentionally ON DELETE SET NULL so a user can
-- clean up their drafts list without destroying the historical
-- record of what they submitted.
CREATE TABLE proposal_submissions (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    user_id INTEGER NOT NULL REFERENCES users(id) ON DELETE CASCADE,
    -- SET NULL on purpose: deleting a draft must not destroy the
    -- historical record of what was submitted (see comment above).
    draft_id INTEGER REFERENCES proposal_drafts(id) ON DELETE SET NULL,

    -- Canonical snapshot: these five fields are the hash preimage,
    -- frozen at /prepare time and never mutated afterwards.
    parent_hash TEXT NOT NULL DEFAULT '0',
    revision INTEGER NOT NULL DEFAULT 1,
    time_unix INTEGER NOT NULL,
    data_hex TEXT NOT NULL,
    proposal_hash TEXT NOT NULL,

    -- Denormalized from data_hex for indexing/display only; data_hex is
    -- the source of truth and the repo layer keeps these in sync.
    title TEXT NOT NULL DEFAULT '',
    name TEXT NOT NULL,
    url TEXT NOT NULL,
    payment_address TEXT NOT NULL,
    payment_amount_sats INTEGER NOT NULL,
    payment_count INTEGER NOT NULL DEFAULT 1,
    start_epoch INTEGER NOT NULL,
    end_epoch INTEGER NOT NULL,

    -- State machine: prepared -> awaiting_collateral -> submitted|failed.
    -- Deliberately no CHECK constraint — the repo layer owns validation
    -- (see the schema-design header; mirrors vote_receipts.status).
    status TEXT NOT NULL,
    collateral_txid TEXT,
    collateral_confs INTEGER NOT NULL DEFAULT 0,
    -- Set only on successful gobject_submit; the on-chain identity.
    governance_hash TEXT,
    -- fail_reason is a stable machine code; fail_detail is raw context.
    fail_reason TEXT,
    fail_detail TEXT,

    -- Unix-epoch seconds; maintained by the repo layer.
    created_at INTEGER NOT NULL,
    updated_at INTEGER NOT NULL
);

-- NOTE: the WHERE-qualified (partial) unique indexes below require
-- SQLite 3.8.0+ — TODO confirm that matches the deployment floor.

-- Per-user recency index (for the "your submissions" page).
CREATE INDEX idx_proposal_submissions_user_recent
ON proposal_submissions(user_id, updated_at DESC);

-- Dispatcher-facing index: the watcher tick scans rows by status to
-- advance them, so keep that lookup fast regardless of table size.
CREATE INDEX idx_proposal_submissions_status
ON proposal_submissions(status, updated_at);

-- Partial uniqueness on collateral_txid: a given collateral tx can
-- only back a single proposal submission. Two rows claiming the same
-- txid is a bug (probably a duplicate "I paid, here's the txid" call
-- from the user). NULL txids are exempt, which is the correct
-- treatment for rows still in `prepared` state.
CREATE UNIQUE INDEX idx_proposal_submissions_collateral_txid
ON proposal_submissions(collateral_txid)
WHERE collateral_txid IS NOT NULL;

-- Codex PR8 round 3 P2: enforce /prepare idempotency at the DB layer.
-- The route reads by (user_id, data_hex, status='prepared') and then
-- inserts; without this partial unique index, two concurrent requests
-- with identical payload can both miss the read and both insert,
-- producing duplicate `prepared` rows for the same logical proposal.
-- Once the row moves past `prepared` (the user attaches collateral,
-- or it ends up `submitted`/`failed`), the partial predicate no
-- longer matches and a subsequent retry with the same dataHex is
-- free to create a fresh `prepared` row — which is the correct UX:
-- the old submission is locked to a specific collateral txid, and a
-- re-prepare is the user explicitly asking for a clean second take.
CREATE UNIQUE INDEX idx_proposal_submissions_user_payload_prepared
ON proposal_submissions(user_id, data_hex)
WHERE status = 'prepared';

-- Governance hash is likewise unique once set — it IS the proposal's
-- on-chain identity. A NULL is expected for rows not yet submitted.
CREATE UNIQUE INDEX idx_proposal_submissions_governance_hash
ON proposal_submissions(governance_hash)
WHERE governance_hash IS NOT NULL;
Loading
Loading